language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | run-llama__llama_index | llama-index-core/llama_index/core/service_context_elements/llm_predictor.py | {
"start": 562,
"end": 2100
} | class ____(BaseComponent, DispatcherSpanMixin, ABC):
"""Base LLM Predictor."""
def model_dump(self, **kwargs: Any) -> Dict[str, Any]:
print("here", flush=True)
data = super().model_dump(**kwargs)
data["llm"] = self.llm.to_dict()
return data
def dict(self, **kwargs: Any) -> Dict[str, Any]:
"""Keep for backwards compatibility."""
return self.model_dump(**kwargs)
def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
data = super().to_dict(**kwargs)
data["llm"] = self.llm.to_dict()
return data
@property
@abstractmethod
def llm(self) -> LLM:
"""Get LLM."""
@property
@abstractmethod
def callback_manager(self) -> CallbackManager:
"""Get callback manager."""
@property
@abstractmethod
def metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
@abstractmethod
def predict(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
"""Predict the answer to a query."""
@abstractmethod
def stream(self, prompt: BasePromptTemplate, **prompt_args: Any) -> TokenGen:
"""Stream the answer to a query."""
@abstractmethod
async def apredict(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
"""Async predict the answer to a query."""
@abstractmethod
async def astream(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> TokenAsyncGen:
"""Async predict the answer to a query."""
| BaseLLMPredictor |
python | dask__distributed | distributed/pytest_resourceleaks.py | {
"start": 10414,
"end": 16507
} | class ____:
checkers: list[ResourceChecker]
grace_delay: float
mark_failed: bool
# {nodeid: {checkers}}
skip_checkers: dict[str, set[ResourceChecker]]
# {nodeid: {checker: [(before, after)]}}
counters: dict[str, dict[ResourceChecker, list[tuple[Any, Any]]]]
# {nodeid: [(checker, before, after)]}
leaks: dict[str, list[tuple[ResourceChecker, Any, Any]]]
# {nodeid: {outcomes}}
outcomes: defaultdict[str, set[str]]
def __init__(
self,
checkers: list[ResourceChecker],
grace_delay: float,
mark_failed: bool,
):
self.checkers = checkers
self.grace_delay = grace_delay
self.mark_failed = mark_failed
self.skip_checkers = {}
self.counters = {}
self.leaks = {}
self.outcomes = defaultdict(set)
def cleanup(self) -> None:
gc.collect()
def checks_for_item(self, nodeid: str) -> list[ResourceChecker]:
return [c for c in self.checkers if c not in self.skip_checkers.get(nodeid, ())]
def measure(self, nodeid: str) -> list[tuple[ResourceChecker, Any]]:
# Return items in order
return [(c, c.measure()) for c in self.checks_for_item(nodeid)]
def measure_before_test(self, nodeid: str) -> None:
for checker in self.checks_for_item(nodeid):
checker.on_start_test()
for checker, before in self.measure(nodeid):
assert before is not None
self.counters[nodeid][checker].append((before, None))
def measure_after_test(self, nodeid: str) -> None:
outcomes = self.outcomes[nodeid]
# pytest_rerunfailures (@pytest.mark.flaky) breaks this plugin and causes
# outcomes to be empty.
if "passed" not in outcomes:
# Test failed or skipped
return
def run_measurements() -> list[tuple[ResourceChecker, Any, Any]]:
leaks = []
for checker, after in self.measure(nodeid):
c = self.counters[nodeid][checker]
before, _ = c[-1]
c[-1] = (before, after)
if checker.has_leak(before, after):
leaks.append((checker, before, after))
return leaks
t1 = time()
deadline = t1 + self.grace_delay
leaks = run_measurements()
if leaks:
self.cleanup()
for c, _, _ in leaks:
c.on_retry()
leaks = run_measurements()
while leaks and time() < deadline:
sleep(0.1)
self.cleanup()
for c, _, _ in leaks:
c.on_retry()
leaks = run_measurements()
if leaks:
self.leaks[nodeid] = leaks
else:
self.leaks.pop(nodeid, None)
for checker in self.checks_for_item(nodeid):
checker.on_stop_test()
# Note on hook execution order:
# pytest_runtest_protocol
# pytest_runtest_setup
# pytest_report_teststatus
# pytest_runtest_call
# pytest_report_teststatus
# pytest_runtest_teardown
# pytest_report_teststatus
# See also https://github.com/abalkin/pytest-leaks/blob/master/pytest_leaks.py
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(self, item, nextitem):
if not self.checkers:
return
nodeid = item.nodeid
assert nodeid not in self.counters
self.counters[nodeid] = {c: [] for c in self.checkers}
leaking_mark = item.get_closest_marker("leaking")
if leaking_mark:
unknown = sorted(set(leaking_mark.args) - set(all_checkers))
if unknown:
raise ValueError(
f"pytest.mark.leaking: unknown resources {unknown}; "
f"must be one of {list(all_checkers)}"
)
classes = tuple(all_checkers[a] for a in leaking_mark.args)
self.skip_checkers[nodeid] = {
c for c in self.checkers if isinstance(c, classes)
}
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
self.measure_before_test(item.nodeid)
yield
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_runtest_teardown(self, item):
yield
self.measure_after_test(item.nodeid)
leaks = self.leaks.get(item.nodeid)
if leaks and self.mark_failed:
# Trigger fail here to allow stopping with `-x`
pytest.fail()
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_report_teststatus(self, report):
nodeid = report.nodeid
self.outcomes[nodeid].add(report.outcome)
outcome = yield
if report.when == "teardown":
leaks = self.leaks.get(report.nodeid)
if leaks:
if self.mark_failed:
outcome.force_result(("failed", "L", "LEAKED"))
report.outcome = "failed"
report.longrepr = "\n".join(
[
f"{nodeid} leaking {checker.name}: "
f"{checker.format(before, after)}"
for checker, before, after in leaks
]
)
else:
outcome.force_result(("leaked", "L", "LEAKED"))
@pytest.hookimpl
def pytest_terminal_summary(self, terminalreporter, exitstatus):
tr = terminalreporter
leaked = tr.getreports("leaked")
if leaked:
# If mark_failed is False, leaks are output as a separate
# results section
tr.write_sep("=", "RESOURCE LEAKS")
for rep in leaked:
nodeid = rep.nodeid
for checker, before, after in self.leaks[nodeid]:
tr.line(
f"{rep.nodeid} leaking {checker.name}: "
f"{checker.format(before, after)}"
)
| LeakChecker |
python | faif__python-patterns | patterns/behavioral/observer.py | {
"start": 624,
"end": 1609
} | class ____:
_observers: List[Observer]
def __init__(self) -> None:
"""
Initialize the subject with an empty observer list.
"""
self._observers = []
def attach(self, observer: Observer) -> None:
"""
Attach an observer to the subject.
Args:
observer (Observer): The observer instance to attach.
"""
if observer not in self._observers:
self._observers.append(observer)
def detach(self, observer: Observer) -> None:
"""
Detach an observer from the subject.
Args:
observer (Observer): The observer instance to detach.
"""
try:
self._observers.remove(observer)
except ValueError:
pass
def notify(self) -> None:
"""
Notify all attached observers by calling their update method.
"""
for observer in self._observers:
observer.update(self)
| Subject |
python | celery__celery | t/unit/app/test_beat.py | {
"start": 3813,
"end": 4243
} | class ____(beat.Scheduler):
def __init__(self, *args, **kwargs):
self.sent = []
super().__init__(*args, **kwargs)
def send_task(self, name=None, args=None, kwargs=None, **options):
self.sent.append({'name': name,
'args': args,
'kwargs': kwargs,
'options': options})
return self.app.AsyncResult(uuid())
| mScheduler |
python | huggingface__transformers | src/transformers/models/udop/processing_udop.py | {
"start": 1171,
"end": 1658
} | class ____(ProcessingKwargs, total=False):
text_kwargs: UdopTextKwargs
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"truncation": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_length": False,
"verbose": True,
},
}
| UdopProcessorKwargs |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_health/asset_freshness_health.py | {
"start": 809,
"end": 2698
} | class ____(LoadableBy[AssetKey]):
"""Maintains the latest freshness state for the asset."""
freshness_state: FreshnessState
updated_timestamp: Optional[float] = None
@property
def health_status(self) -> AssetHealthStatus:
if self.freshness_state == FreshnessState.PASS:
return AssetHealthStatus.HEALTHY
elif self.freshness_state == FreshnessState.WARN:
return AssetHealthStatus.WARNING
elif self.freshness_state == FreshnessState.FAIL:
return AssetHealthStatus.DEGRADED
elif self.freshness_state == FreshnessState.NOT_APPLICABLE:
return AssetHealthStatus.NOT_APPLICABLE
else:
return AssetHealthStatus.UNKNOWN
@classmethod
async def compute_for_asset(
cls, asset_key: AssetKey, loading_context: LoadingContext
) -> "AssetFreshnessHealthState":
"""Gets the freshness state for the asset from the DB."""
freshness_state_record = await FreshnessStateRecord.gen(loading_context, asset_key)
if freshness_state_record is None:
# freshness policy has no evaluations yet
return cls(
freshness_state=FreshnessState.UNKNOWN,
updated_timestamp=None,
)
return cls(
freshness_state=freshness_state_record.freshness_state,
updated_timestamp=freshness_state_record.updated_at.timestamp(),
)
@classmethod
def _blocking_batch_load(
cls, keys: Iterable[AssetKey], context: LoadingContext
) -> Iterable[Optional["AssetFreshnessHealthState"]]:
asset_freshness_health_states = (
context.instance.get_asset_freshness_health_state_for_assets(list(keys))
)
return [asset_freshness_health_states.get(key) for key in keys]
@whitelist_for_serdes
@record.record
| AssetFreshnessHealthState |
python | getsentry__sentry | src/sentry/snuba/types.py | {
"start": 290,
"end": 1514
} | class ____(Protocol):
def __call__(
self,
selected_columns: list[str],
query: str,
snuba_params: SnubaParams,
equations: list[str] | None = None,
orderby: list[str] | None = None,
offset: int | None = None,
limit: int = 50,
auto_fields: bool = False,
auto_aggregations: bool = False,
include_equation_fields: bool = False,
allow_metric_aggregates: bool = False,
use_aggregate_conditions: bool = False,
conditions: list[Condition] | None = None,
functions_acl: list[str] | None = None,
transform_alias_to_input_format: bool = False,
sample: float | None = None,
has_metrics: bool = False,
use_metrics_layer: bool = False,
skip_tag_resolution: bool = False,
extra_columns: list[Column] | None = None,
on_demand_metrics_enabled: bool = False,
on_demand_metrics_type: MetricSpecType | None = None,
dataset: Dataset = Dataset.Discover,
fallback_to_transactions: bool = False,
query_source: QuerySource | None = None,
debug: bool = False,
*,
referrer: str,
) -> EventsResponse: ...
| DatasetQuery |
python | cherrypy__cherrypy | cherrypy/test/test_iterator.py | {
"start": 51,
"end": 262
} | class ____(object):
created = 0
datachunk = 'butternut squash' * 256
@classmethod
def incr(cls):
cls.created += 1
@classmethod
def decr(cls):
cls.created -= 1
| IteratorBase |
python | django__django | tests/dates/models.py | {
"start": 65,
"end": 315
} | class ____(models.Model):
title = models.CharField(max_length=100)
pub_date = models.DateField()
pub_datetime = models.DateTimeField(default=timezone.now)
categories = models.ManyToManyField("Category", related_name="articles")
| Article |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 3373,
"end": 3467
} | class ____(ASTNode):
target: ASTNode
op: ASTNode
value: ASTNode
@dataclass
| AugAssign |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass4.py | {
"start": 1712,
"end": 1803
} | class ____:
a: str = field(init=False, default="s")
b: bool = field()
@dataclass
| DC10 |
python | realpython__materials | directory-tree-generator-python/source_code_final/rptree/rptree.py | {
"start": 826,
"end": 2561
} | class ____:
def __init__(self, root_dir, dir_only=False):
self._root_dir = pathlib.Path(root_dir)
self._dir_only = dir_only
self._tree = []
def build_tree(self):
self._tree_head()
self._tree_body(self._root_dir)
return self._tree
def _tree_head(self):
self._tree.append(f"{self._root_dir}{os.sep}")
self._tree.append(PIPE)
def _tree_body(self, directory, prefix=""):
entries = self._prepare_entries(directory)
entries_count = len(entries)
for index, entry in enumerate(entries):
connector = ELBOW if index == entries_count - 1 else TEE
if entry.is_dir():
self._add_directory(
entry, index, entries_count, prefix, connector
)
else:
self._add_file(entry, prefix, connector)
def _prepare_entries(self, directory):
entries = directory.iterdir()
if self._dir_only:
entries = [entry for entry in entries if entry.is_dir()]
return entries
entries = sorted(entries, key=lambda entry: entry.is_file())
return entries
def _add_directory(
self, directory, index, entries_count, prefix, connector
):
self._tree.append(f"{prefix}{connector} {directory.name}{os.sep}")
if index != entries_count - 1:
prefix += PIPE_PREFIX
else:
prefix += SPACE_PREFIX
self._tree_body(
directory=directory,
prefix=prefix,
)
self._tree.append(prefix.rstrip())
def _add_file(self, file, prefix, connector):
self._tree.append(f"{prefix}{connector} {file.name}")
| _TreeGenerator |
python | huggingface__transformers | src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py | {
"start": 15101,
"end": 17477
} | class ____(VitPoseBackbonePreTrainedModel, BackboneMixin):
def __init__(self, config: VitPoseBackboneConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
self.embeddings = VitPoseBackboneEmbeddings(config)
self.encoder = VitPoseBackboneEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
dataset_index: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
**kwargs,
):
r"""
dataset_index (`torch.Tensor` of shape `(batch_size,)`):
Index to use in the Mixture-of-Experts (MoE) blocks of the backbone.
This corresponds to the dataset index used during training, e.g. index 0 refers to COCO.
Examples:
```python
>>> from transformers import VitPoseBackboneConfig, VitPoseBackbone
>>> import torch
>>> config = VitPoseBackboneConfig(out_indices=[-1])
>>> model = VitPoseBackbone(config)
>>> pixel_values = torch.randn(1, 3, 256, 192)
>>> dataset_index = torch.tensor([1])
>>> outputs = model(pixel_values, dataset_index)
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
embedding_output = self.embeddings(pixel_values)
outputs: BaseModelOutput = self.encoder(
embedding_output, dataset_index=dataset_index, output_hidden_states=True
)
hidden_states = outputs.hidden_states
feature_maps = []
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
hidden_state = self.layernorm(hidden_state)
feature_maps.append(hidden_state)
return BackboneOutput(
feature_maps=tuple(feature_maps),
hidden_states=outputs.hidden_states if output_hidden_states else None,
)
__all__ = ["VitPoseBackbonePreTrainedModel", "VitPoseBackbone"]
| VitPoseBackbone |
python | pennersr__django-allauth | allauth/mfa/webauthn/forms.py | {
"start": 2393,
"end": 3561
} | class ____(forms.Form):
credential = forms.JSONField(required=True, widget=forms.HiddenInput)
reauthenticated = False
passwordless = False
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
def clean_credential(self):
credential = self.cleaned_data["credential"]
# Explicitly parse JSON payload -- otherwise, authenticate_complete()
# crashes with some random TypeError and we don't want to do
# Pokemon-style exception handling.
auth.parse_authentication_response(credential)
user = self.user
if user is None:
user = auth.extract_user_from_response(credential)
clear_rl = check_rate_limit(user)
authenticator = auth.complete_authentication(user, credential)
clear_rl()
return authenticator
def save(self):
authenticator = self.cleaned_data["credential"]
post_authentication(
context.request,
authenticator,
reauthenticated=self.reauthenticated,
passwordless=self.passwordless,
)
| AuthenticateWebAuthnForm |
python | doocs__leetcode | solution/0800-0899/0807.Max Increase to Keep City Skyline/Solution.py | {
"start": 0,
"end": 338
} | class ____:
def maxIncreaseKeepingSkyline(self, grid: List[List[int]]) -> int:
row_max = [max(row) for row in grid]
col_max = [max(col) for col in zip(*grid)]
return sum(
min(row_max[i], col_max[j]) - x
for i, row in enumerate(grid)
for j, x in enumerate(row)
)
| Solution |
python | getsentry__sentry | src/sentry/seer/anomaly_detection/types.py | {
"start": 1099,
"end": 1271
} | class ____(TypedDict):
organization_id: int
project_id: int
config: AnomalyDetectionConfig
context: AlertInSeer | list[TimeSeriesPoint]
| DetectAnomaliesRequest |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 35193,
"end": 35651
} | class ____(Interface):
virtual_path = Attribute(
'The virtual url path of the resource as a string.'
)
physical_path = Attribute(
'The physical url path of the resource as a string.'
)
virtual_path_tuple = Attribute(
'The virtual url path of the resource as a tuple. (New in 1.5)'
)
physical_path_tuple = Attribute(
'The physical url path of the resource as a tuple. (New in 1.5)'
)
| IResourceURL |
python | pypa__pip | src/pip/_internal/operations/install/wheel.py | {
"start": 14279,
"end": 14858
} | class ____(InstallationError):
def __init__(self, entry_point: str) -> None:
super().__init__(
f"Invalid script entry point: {entry_point} - A callable "
"suffix is required. See https://packaging.python.org/"
"specifications/entry-points/#use-for-scripts for more "
"information."
)
def _raise_for_invalid_entrypoint(specification: str) -> None:
entry = get_export_entry(specification)
if entry is not None and entry.suffix is None:
raise MissingCallableSuffix(str(entry))
| MissingCallableSuffix |
python | pydantic__pydantic | pydantic-core/tests/benchmarks/test_micro_benchmarks.py | {
"start": 44910,
"end": 47362
} | class ____:
@pytest.fixture(scope='class')
def validator(self):
return SchemaValidator(core_schema.decimal_schema())
@pytest.fixture(scope='class')
def pydantic_validator(self):
Decimal = decimal.Decimal
def to_decimal(v: str) -> decimal.Decimal:
try:
return Decimal(v)
except decimal.DecimalException as e:
raise PydanticCustomError('decimal_parsing', 'Input should be a valid decimal') from e
primitive_schema = core_schema.union_schema(
[
# if it's an int keep it like that and pass it straight to Decimal
# but if it's not make it a string
# we don't use JSON -> float because parsing to any float will cause
# loss of precision
core_schema.int_schema(strict=True),
core_schema.str_schema(strict=True, strip_whitespace=True),
core_schema.no_info_plain_validator_function(str),
]
)
json_schema = core_schema.no_info_after_validator_function(to_decimal, primitive_schema)
schema = core_schema.json_or_python_schema(
json_schema=json_schema,
python_schema=core_schema.lax_or_strict_schema(
lax_schema=core_schema.union_schema([core_schema.is_instance_schema(decimal.Decimal), json_schema]),
strict_schema=core_schema.is_instance_schema(decimal.Decimal),
),
serialization=core_schema.to_string_ser_schema(when_used='json'),
)
def check_finite(value: decimal.Decimal) -> decimal.Decimal:
if not value.is_finite():
raise PydanticKnownError('finite_number')
return value
schema = core_schema.no_info_after_validator_function(check_finite, schema)
return SchemaValidator(schema)
@pytest.mark.benchmark(group='decimal from str')
def test_decimal_from_string_core(self, benchmark, validator):
benchmark(validator.validate_python, '123.456789')
@pytest.mark.benchmark(group='decimal from str')
def test_decimal_from_string_pyd(self, benchmark, pydantic_validator):
benchmark(pydantic_validator.validate_python, '123.456789')
@pytest.mark.benchmark(group='decimal from str')
def test_decimal_from_string_limit(self, benchmark):
benchmark(decimal.Decimal, '123.456789')
| TestBenchmarkDecimal |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/_meta.py | {
"start": 114,
"end": 758
} | class ____(Protocol):
def __len__(self) -> int:
... # pragma: no cover
def __contains__(self, item: str) -> bool:
... # pragma: no cover
def __getitem__(self, key: str) -> str:
... # pragma: no cover
def __iter__(self) -> Iterator[str]:
... # pragma: no cover
def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
"""
Return all values associated with a possibly multi-valued key.
"""
@property
def json(self) -> Dict[str, Union[str, List[str]]]:
"""
A JSON-compatible form of the metadata.
"""
| PackageMetadata |
python | matplotlib__matplotlib | lib/matplotlib/_type1font.py | {
"start": 960,
"end": 2095
} | class ____:
"""
A token in a PostScript stream.
Attributes
----------
pos : int
Position, i.e. offset from the beginning of the data.
raw : str
Raw text of the token.
kind : str
Description of the token (for debugging or testing).
"""
__slots__ = ('pos', 'raw')
kind = '?'
def __init__(self, pos, raw):
_log.debug('type1font._Token %s at %d: %r', self.kind, pos, raw)
self.pos = pos
self.raw = raw
def __str__(self):
return f"<{self.kind} {self.raw} @{self.pos}>"
def endpos(self):
"""Position one past the end of the token"""
return self.pos + len(self.raw)
def is_keyword(self, *names):
"""Is this a name token with one of the names?"""
return False
def is_slash_name(self):
"""Is this a name token that starts with a slash?"""
return False
def is_delim(self):
"""Is this a delimiter token?"""
return False
def is_number(self):
"""Is this a number token?"""
return False
def value(self):
return self.raw
| _Token |
python | great-expectations__great_expectations | tests/expectations/fixtures/expect_column_values_to_equal_three.py | {
"start": 3412,
"end": 6715
} | class ____(
ExpectColumnValuesToEqualThree__SecondIteration
):
@classmethod
@renderer(renderer_type="renderer.question")
def _question_renderer(cls, configuration, result=None, runtime_configuration=None):
column = configuration.kwargs.get("column")
mostly = configuration.kwargs.get("mostly")
if mostly:
return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
else:
return f'Do all the values in column "{column}" equal 3?'
@classmethod
@renderer(renderer_type="renderer.answer")
def _answer_renderer(cls, configuration=None, result=None, runtime_configuration=None):
column = result.expectation_config.kwargs.get("column")
mostly = result.expectation_config.kwargs.get("mostly")
if mostly:
if result.success:
return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
else:
return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
else: # noqa: PLR5501 # FIXME CoP
if result.success:
return f'All of the values in column "{column}" equal 3.'
else:
return f'Not all of the values in column "{column}" equal 3.'
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "regex", "mostly", "row_condition", "condition_parser"],
)
template_str = "values must be equal to 3"
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| ExpectColumnValuesToEqualThree__ThirdIteration |
python | doocs__leetcode | solution/2700-2799/2763.Sum of Imbalance Numbers of All Subarrays/Solution.py | {
"start": 0,
"end": 631
} | class ____:
def sumImbalanceNumbers(self, nums: List[int]) -> int:
n = len(nums)
ans = 0
for i in range(n):
sl = SortedList()
cnt = 0
for j in range(i, n):
k = sl.bisect_left(nums[j])
h = k - 1
if h >= 0 and nums[j] - sl[h] > 1:
cnt += 1
if k < len(sl) and sl[k] - nums[j] > 1:
cnt += 1
if h >= 0 and k < len(sl) and sl[k] - sl[h] > 1:
cnt -= 1
sl.add(nums[j])
ans += cnt
return ans
| Solution |
python | pypa__pip | tests/unit/test_resolution_legacy_resolver.py | {
"start": 4732,
"end": 8804
} | class ____:
"""
Test _check_dist_requires_python().
"""
def test_compatible(self, caplog: pytest.LogCaptureFixture) -> None:
"""
Test a Python version compatible with the dist's Requires-Python.
"""
caplog.set_level(logging.DEBUG)
dist = make_fake_dist(requires_python="== 3.6.5")
_check_dist_requires_python(
dist,
version_info=(3, 6, 5),
ignore_requires_python=False,
)
assert not len(caplog.records)
def test_incompatible(self) -> None:
"""
Test a Python version incompatible with the dist's Requires-Python.
"""
dist = make_fake_dist(requires_python="== 3.6.4")
with pytest.raises(UnsupportedPythonVersion) as exc:
_check_dist_requires_python(
dist,
version_info=(3, 6, 5),
ignore_requires_python=False,
)
assert str(exc.value) == (
"Package 'my-project' requires a different Python: "
"3.6.5 not in '==3.6.4'"
)
def test_incompatible_with_ignore_requires(
self, caplog: pytest.LogCaptureFixture
) -> None:
"""
Test a Python version incompatible with the dist's Requires-Python
while passing ignore_requires_python=True.
"""
caplog.set_level(logging.DEBUG)
dist = make_fake_dist(requires_python="== 3.6.4")
_check_dist_requires_python(
dist,
version_info=(3, 6, 5),
ignore_requires_python=True,
)
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == "DEBUG"
assert record.message == (
"Ignoring failed Requires-Python check for package 'my-project': "
"3.6.5 not in '==3.6.4'"
)
def test_none_requires_python(self, caplog: pytest.LogCaptureFixture) -> None:
"""
Test a dist with Requires-Python None.
"""
caplog.set_level(logging.DEBUG)
dist = make_fake_dist()
# Make sure our test setup is correct.
assert dist.requires_python == SpecifierSet()
assert len(caplog.records) == 0
# Then there is no exception and no log message.
_check_dist_requires_python(
dist,
version_info=(3, 6, 5),
ignore_requires_python=False,
)
assert len(caplog.records) == 0
def test_invalid_requires_python(self, caplog: pytest.LogCaptureFixture) -> None:
"""
Test a dist with an invalid Requires-Python.
"""
caplog.set_level(logging.DEBUG)
dist = make_fake_dist(requires_python="invalid")
_check_dist_requires_python(
dist,
version_info=(3, 6, 5),
ignore_requires_python=False,
)
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == "WARNING"
assert record.message == (
"Package 'my-project' has an invalid Requires-Python: "
"Invalid specifier: 'invalid'"
)
@pytest.mark.parametrize(
"metadata_name",
[
"METADATA",
"PKG-INFO",
],
)
def test_empty_metadata_error(self, metadata_name: str) -> None:
"""Test dist.metadata raises FileNotFoundError."""
class NotWorkingFakeDist(FakeDist):
@property
def metadata(self) -> email.message.Message:
raise FileNotFoundError(metadata_name)
dist = make_fake_dist(klass=NotWorkingFakeDist) # type: ignore
with pytest.raises(NoneMetadataError) as exc:
_check_dist_requires_python(
dist,
version_info=(3, 6, 5),
ignore_requires_python=False,
)
assert str(exc.value) == (
f"None {metadata_name} metadata found for distribution: "
"<distribution 'my-project'>"
)
| TestCheckDistRequiresPython |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 35744,
"end": 36024
} | class ____(ReferenceInlineProcessor):
"""Short form of reference: `[google]`. """
def evalId(self, data: str, index: int, text: str) -> tuple[str, int, bool]:
"""Evaluate the id of `[ref]`. """
return text.lower(), index, True
| ShortReferenceInlineProcessor |
python | realpython__materials | python-unittest/test_calculations.py | {
"start": 689,
"end": 2975
} | class ____(unittest.TestCase):
def test_mean(self):
self.assertEqual(mean([1, 2, 3, 4, 5, 6]), 3.5)
def test_median_odd(self):
self.assertEqual(median([1, 3, 3, 6, 7, 8, 9]), 6)
def test_median_even(self):
self.assertEqual(median([1, 2, 3, 4, 5, 6, 8, 9]), 4.5)
def test_median_unsorted(self):
self.assertEqual(median([7, 1, 3, 3, 2, 6]), 3)
def test_mode_single(self):
self.assertEqual(mode([1, 2, 2, 3, 4, 4, 4, 5]), [4])
def test_mode_multiple(self):
self.assertEqual(set(mode([1, 1, 2, 3, 4, 4, 5, 5])), {1, 4, 5})
# def make_suite():
# arithmetic_tests = [
# TestArithmeticOperations("test_add"),
# TestArithmeticOperations("test_subtract"),
# TestArithmeticOperations("test_multiply"),
# TestArithmeticOperations("test_divide"),
# ]
# return unittest.TestSuite(tests=arithmetic_tests)
# def make_suite():
# arithmetic_suite = unittest.TestSuite()
# arithmetic_suite.addTest(TestArithmeticOperations("test_add"))
# arithmetic_suite.addTest(TestArithmeticOperations("test_subtract"))
# arithmetic_suite.addTest(TestArithmeticOperations("test_multiply"))
# arithmetic_suite.addTest(TestArithmeticOperations("test_divide"))
# return arithmetic_suite
# def make_suite():
# statistical_tests = [
# TestStatisticalOperations("test_mean"),
# TestStatisticalOperations("test_median_odd"),
# TestStatisticalOperations("test_median_even"),
# TestStatisticalOperations("test_median_unsorted"),
# TestStatisticalOperations("test_mode_single"),
# TestStatisticalOperations("test_mode_multiple"),
# ]
# statistical_suite = unittest.TestSuite()
# statistical_suite.addTests(statistical_tests)
# return statistical_suite
# if __name__ == "__main__":
# suite = make_suite()
# runner = unittest.TextTestRunner(verbosity=2)
# runner.run(suite)
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(TestArithmeticOperations))
suite.addTests(loader.loadTestsFromTestCase(TestStatisticalOperations))
return suite
if __name__ == "__main__":
unittest.main()
| TestStatisticalOperations |
python | django__django | tests/properties/models.py | {
"start": 131,
"end": 564
} | class ____(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
def _get_full_name(self):
return "%s %s" % (self.first_name, self.last_name)
def _set_full_name(self, combined_name):
self.first_name, self.last_name = combined_name.split(" ", 1)
full_name = property(_get_full_name)
full_name_2 = property(_get_full_name, _set_full_name)
| Person |
python | pytorch__pytorch | torchgen/model.py | {
"start": 80738,
"end": 84158
} | class ____:
# NB: I didn't put kwarg_only as a boolean field here, unlike
# c10::Argument, so that printing works correctly
name: str
type: Type
default: str | None
# The semantics of the annotation field are a little strange.
#
# Alias annotations parametrize Tensors (since Tensors are the only things
# that can alias.) This motivates why I write Tensor(a!)? (and not, for
# example, Tensor?(a!)), because the (a!) describes aliasing on the tensor,
# which may be optional (i.e., the alias annotation should bind first to
# Tensor, before the optional postfix annotation).
#
# However, despite being a property of Tensor, we (and c10::Argument)
# store the annotation at the top level of the Argument, rather than
# inside the embedded Tensor type. In the C++ version of this
# class, we then go through great lengths to mimic the type
# structure in the annotation structure so we can correlate
# annotations with types.
#
# Now, it turns out, in all applications in code generation, the
# structure of annotated types is very simple. So we just hard
# code it here. But if we ever do get anything more complex, this
# model will have to change!
annotation: Annotation | None
@property
def alias_info(self) -> Annotation | None:
return self.annotation
@staticmethod
def parse(arg: str) -> Argument:
name: str
default: str | None
assert " " in arg, f"illegal argument '{arg}'"
if "=" in arg:
assert arg.count("=") == 1, f"illegal argument with default value: '{arg}'"
type_and_annot_and_name, default = arg.split("=")
type_and_annot, name = type_and_annot_and_name.rsplit(" ", 1)
name_and_default = f"{name}={default}"
else:
type_and_annot, name_and_default = arg.rsplit(" ", 1)
name = name_and_default
default = None
# TODO: deduplicate annotation matching with Return
match = re.match(r"Tensor\((.+)\)(.*)", type_and_annot)
annotation: Annotation | None
if match:
# If you update this, make sure the __str__ still works too
assert match.group(2) in [
"",
"?",
"[]",
], "unrecognized alias analysis form with Tensor"
type_s = "Tensor" + match.group(2)
annotation = Annotation.parse(match.group(1))
else:
type_s = type_and_annot
annotation = None
type = Type.parse(type_s)
r = Argument(
name=name,
type=type,
default=default,
annotation=annotation,
)
assert str(r) == arg, f"{str(r)} != {arg}"
return r
@property
def is_write(self) -> bool:
return self.annotation is not None and self.annotation.is_write
def __str__(self) -> str:
type = f"{self.type}"
if self.annotation:
assert type in ["Tensor", "Tensor?", "Tensor[]"]
type = type.replace("Tensor", f"Tensor({self.annotation})")
if self.name is None:
return type
else:
mb_default = ""
if self.default:
mb_default = f"={self.default}"
return f"{type} {self.name}{mb_default}"
@dataclass(frozen=True)
| Argument |
python | keon__algorithms | algorithms/tree/bst/bst.py | {
"start": 159,
"end": 286
} | class ____(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
| Node |
python | networkx__networkx | networkx/algorithms/community/tests/test_centrality.py | {
"start": 441,
"end": 2932
} | class ____:
"""Unit tests for the
:func:`networkx.algorithms.community.centrality.girvan_newman`
function.
"""
def test_no_edges(self):
G = nx.empty_graph(3)
communities = list(nx.community.girvan_newman(G))
assert len(communities) == 1
validate_communities(communities[0], [{0}, {1}, {2}])
def test_undirected(self):
# Start with the graph .-.-.-.
G = nx.path_graph(4)
communities = list(nx.community.girvan_newman(G))
assert len(communities) == 3
# After one removal, we get the graph .-. .-.
validate_communities(communities[0], [{0, 1}, {2, 3}])
# After the next, we get the graph .-. . ., but there are two
# symmetric possible versions.
validate_possible_communities(
communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}]
)
# After the last removal, we always get the empty graph.
validate_communities(communities[2], [{0}, {1}, {2}, {3}])
def test_directed(self):
G = nx.DiGraph(nx.path_graph(4))
communities = list(nx.community.girvan_newman(G))
assert len(communities) == 3
validate_communities(communities[0], [{0, 1}, {2, 3}])
validate_possible_communities(
communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}]
)
validate_communities(communities[2], [{0}, {1}, {2}, {3}])
def test_selfloops(self):
G = nx.path_graph(4)
G.add_edge(0, 0)
G.add_edge(2, 2)
communities = list(nx.community.girvan_newman(G))
assert len(communities) == 3
validate_communities(communities[0], [{0, 1}, {2, 3}])
validate_possible_communities(
communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}]
)
validate_communities(communities[2], [{0}, {1}, {2}, {3}])
def test_most_valuable_edge(self):
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 3), (1, 2, 2), (2, 3, 1)])
# Let the most valuable edge be the one with the highest weight.
def heaviest(G):
return max(G.edges(data="weight"), key=itemgetter(2))[:2]
communities = list(nx.community.girvan_newman(G, heaviest))
assert len(communities) == 3
validate_communities(communities[0], [{0}, {1, 2, 3}])
validate_communities(communities[1], [{0}, {1}, {2, 3}])
validate_communities(communities[2], [{0}, {1}, {2}, {3}])
| TestGirvanNewman |
python | PyCQA__pylint | tests/functional/u/unnecessary/unnecessary_dunder_call.py | {
"start": 1282,
"end": 1345
} | class ____:
def __new__(cls):
object.__new__(cls)
| Bar1 |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_arithmatex.py | {
"start": 3762,
"end": 4989
} | class ____(util.MdCase):
"""Test hang cases."""
def test_hang_dollar(self):
"""
We are just making sure this works.
Previously this pattern would hang. It isn't supposed to match due to the space before the last dollar,
but it definitely shouldn't hang the process.
"""
self.check_markdown(
r'''
$z^{[1]} = \begin{bmatrix}w^{[1]T}_1 \\ w^{[1]T}_2 \\ w^{[1]T}_3 \\ w^{[1]T}_4 \end{bmatrix} \begin{bmatrix}x_1 \\ x_2 \\ x_3 \end{bmatrix} + \begin{bmatrix}b^{[1]}_1 \\ b^{[1]}_2 \\ b^{[1]}_3 \\ b^{[1]}_4 \end{bmatrix}= \begin{bmatrix}w^{[1]T}_1 x + b^{[1]}_1 \\ w^{[1]T}_2 x + b^{[1]}_2\\ w^{[1]T}_3 x + b^{[1]}_3 \\ w^{[1]T}_4 x + b^{[1]}_4 \end{bmatrix} $
''', # noqa: E501
r'''
<p>$z^{[1]} = \begin{bmatrix}w^{[1]T}_1 \ w^{[1]T}_2 \ w^{[1]T}_3 \ w^{[1]T}_4 \end{bmatrix} \begin{bmatrix}x_1 \ x_2 \ x_3 \end{bmatrix} + \begin{bmatrix}b^{[1]}_1 \ b^{[1]}_2 \ b^{[1]}_3 \ b^{[1]}_4 \end{bmatrix}= \begin{bmatrix}w^{[1]T}_1 x + b^{[1]}_1 \ w^{[1]T}_2 x + b^{[1]}_2\ w^{[1]T}_3 x + b^{[1]}_3 \ w^{[1]T}_4 x + b^{[1]}_4 \end{bmatrix} $</p>
''', # noqa: E501
True
)
| TestArithmatexHang |
python | pdm-project__pdm | src/pdm/resolver/providers.py | {
"start": 1796,
"end": 16559
} | class ____(AbstractProvider[Requirement, Candidate, str]):
def __init__(
self,
repository: BaseRepository,
allow_prereleases: bool | None = None,
overrides: dict[str, str] | None = None,
direct_minimal_versions: bool = False,
*,
locked_repository: LockedRepository | None = None,
) -> None:
if overrides is not None: # pragma: no cover
deprecation_warning(
"The `overrides` argument is deprecated and will be removed in the future.", stacklevel=2
)
if allow_prereleases is not None: # pragma: no cover
deprecation_warning(
"The `allow_prereleases` argument is deprecated and will be removed in the future.", stacklevel=2
)
project = repository.environment.project
self.repository = repository
self.allow_prereleases = project.pyproject.allow_prereleases # Root allow_prereleases value
self.fetched_dependencies: dict[tuple[str, str | None], list[Requirement]] = {}
self.excludes = {normalize_name(k) for k in project.pyproject.resolution.get("excludes", [])}
self.direct_minimal_versions = direct_minimal_versions
self.locked_repository = locked_repository
def requirement_preference(self, requirement: Requirement) -> Comparable:
"""Return the preference of a requirement to find candidates.
- Editable requirements are preferred.
- File links are preferred.
- The one with narrower specifierset is preferred.
"""
editable = requirement.editable
is_named = requirement.is_named
is_pinned = requirement.is_pinned
is_prerelease = bool(requirement.prerelease) or bool(requirement.specifier.prereleases)
specifier_parts = len(requirement.specifier)
return (not editable, is_named, not is_pinned, not is_prerelease, -specifier_parts)
def identify(self, requirement_or_candidate: Requirement | Candidate) -> str:
return requirement_or_candidate.identify()
def get_preference(
self,
identifier: str,
resolutions: dict[str, Candidate],
candidates: dict[str, Iterator[Candidate]],
information: dict[str, Iterator[RequirementInformation]],
backtrack_causes: Sequence[RequirementInformation],
) -> tuple[Comparable, ...]:
is_top = any(parent is None for _, parent in information[identifier])
backtrack_identifiers = {req.identify() for req, _ in backtrack_causes} | {
parent.identify() for _, parent in backtrack_causes if parent is not None
}
# Use the REAL identifier as it may be updated after candidate preparation.
deps: list[Requirement] = []
for candidate in candidates[identifier]:
try:
deps = self.get_dependencies(candidate)
except RequirementsConflicted:
continue
break
is_backtrack_cause = any(dep.identify() in backtrack_identifiers for dep in deps)
is_file_or_url = any(not requirement.is_named for requirement, _ in information[identifier])
operators = [spec.operator for req, _ in information[identifier] for spec in req.specifier]
is_python = identifier == "python"
is_pinned = any(op[:2] == "==" for op in operators)
constraints = len(operators)
return (
not is_python,
not is_top,
not is_file_or_url,
not is_pinned,
not is_backtrack_cause,
-constraints,
identifier,
)
@cached_property
def locked_candidates(self) -> dict[str, list[Candidate]]:
return self.locked_repository.all_candidates if self.locked_repository else {}
@cached_property
def overrides(self) -> dict[str, Requirement]:
"""A mapping of package name to the requirement for overriding."""
from pdm.formats.requirements import RequirementParser
project_overrides: dict[str, str] = {
normalize_name(k): v
for k, v in self.repository.environment.project.pyproject.resolution.get("overrides", {}).items()
}
requirements: dict[str, Requirement] = {}
for name, value in project_overrides.items():
req = get_requirement_from_override(name, value)
r = parse_requirement(req)
requirements[r.identify()] = r
# Read from --override files
parser = RequirementParser(self.repository.environment.session)
for override_file in self.repository.environment.project.core.state.overrides:
parser.parse_file(override_file)
for r in parser.requirements:
# There might be duplicates, we only keep the last one
requirements[r.identify()] = r
return requirements
def _is_direct_requirement(self, requirement: Requirement) -> bool:
from itertools import chain
project = self.repository.environment.project
all_dependencies = chain.from_iterable(project.all_dependencies.values())
return any(r.is_named and requirement.identify() == r.identify() for r in all_dependencies)
def _find_candidates(self, requirement: Requirement) -> Iterable[Candidate]:
if not requirement.is_named and not isinstance(self.repository, LockedRepository):
can = Candidate(requirement)
if not can.name:
can.prepare(self.repository.environment).metadata
yield can
else:
prerelease = requirement.prerelease
if prerelease is None and requirement.is_pinned and requirement.specifier.prereleases:
prerelease = True
if prerelease is None and (key := requirement.identify()) in self.locked_candidates:
# keep the prerelease if it is locked
candidates = self.locked_candidates[key]
for candidate in candidates:
if candidate.version is not None:
try:
parsed_version = parse_version(candidate.version)
except InvalidVersion: # pragma: no cover
pass
else:
if parsed_version.is_prerelease:
prerelease = True
break
found = self.repository.find_candidates(
requirement,
self.allow_prereleases if prerelease is None else prerelease,
minimal_version=self.direct_minimal_versions and self._is_direct_requirement(requirement),
)
current_version: str | None = None
collected_wheels: list[Candidate] = []
collected_others: list[Candidate] = []
for candidate in found:
assert candidate.version is not None
if current_version is None:
current_version = candidate.version
if candidate.version != current_version:
# If there are wheels for the given version, we should only return wheels
# to avoid build steps.
if collected_wheels:
yield collected_wheels[0]
elif collected_others:
yield collected_others[0]
current_version = candidate.version
collected_wheels.clear()
collected_others.clear()
if candidate.link and candidate.link.is_wheel:
collected_wheels.append(candidate)
else:
collected_others.append(candidate)
if collected_wheels:
yield collected_wheels[0]
elif collected_others:
yield collected_others[0]
def find_matches(
self,
identifier: str,
requirements: Mapping[str, Iterator[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
) -> Callable[[], Iterator[Candidate]]:
def matches_gen() -> Iterator[Candidate]:
incompat = list(incompatibilities[identifier])
if identifier == "python":
candidates = find_python_matches(identifier, requirements)
return (c for c in candidates if c not in incompat)
elif identifier in self.overrides:
return iter(self._find_candidates(self.overrides[identifier]))
else:
name, extras = strip_extras(identifier)
if name in self.overrides:
req = dataclasses.replace(self.overrides[name], extras=extras)
return iter(self._find_candidates(req))
reqs = list(requirements[identifier])
if not reqs:
return iter(())
original_req = min(reqs, key=self.requirement_preference)
bare_name, extras = strip_extras(identifier)
if extras and bare_name in requirements:
# We should consider the requirements for both foo and foo[extra]
reqs.extend(requirements[bare_name])
reqs.sort(key=self.requirement_preference)
candidates = self._find_candidates(reqs[0])
return (
# In some cases we will use candidates from the bare requirement,
# this will miss the extra dependencies if any. So we associate the original
# requirement back with the candidate since it is used by `get_dependencies()`.
can.copy_with(original_req) if extras else can
for can in candidates
if can not in incompat and all(self.is_satisfied_by(r, can) for r in reqs)
)
return matches_gen
def _compare_file_reqs(self, req1: FileRequirement, req2: FileRequirement) -> bool:
backend = self.repository.environment.project.backend
if req1.path and req2.path:
return os.path.normpath(req1.path.absolute()) == os.path.normpath(req2.path.absolute())
left = backend.expand_line(url_without_fragments(req1.get_full_url()))
right = backend.expand_line(url_without_fragments(req2.get_full_url()))
return left == right
def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
if isinstance(requirement, PythonRequirement):
return is_python_satisfied_by(requirement, candidate)
elif (name := candidate.identify()) in self.overrides or strip_extras(name)[0] in self.overrides:
return True
if not requirement.is_named:
if candidate.req.is_named:
return False
can_req = candidate.req
if requirement.is_vcs and can_req.is_vcs:
return can_req.vcs == requirement.vcs and can_req.repo == requirement.repo # type: ignore[attr-defined]
return self._compare_file_reqs(requirement, can_req) # type: ignore[arg-type]
version = candidate.version
this_name = self.repository.environment.project.name
if version is None or candidate.name == this_name:
# This should be a URL candidate or self package, consider it to be matching
return True
# Allow prereleases if: 1) it is not specified in the tool settings or
# 2) the candidate doesn't come from PyPI index or 3) the requirement is pinned
allow_prereleases = (
self.allow_prereleases in (True, None) or not candidate.req.is_named or requirement.is_pinned
)
return requirement.specifier.contains(version, allow_prereleases)
def _get_dependencies_from_repository(self, candidate: Candidate) -> tuple[list[Requirement], PySpecSet, str]:
return self.repository.get_dependencies(candidate)
def get_dependencies(self, candidate: Candidate) -> list[Requirement]:
if isinstance(candidate, PythonCandidate):
return []
try:
deps, requires_python, _ = self._get_dependencies_from_repository(candidate)
except (RequirementError, InvalidPyVersion, InvalidSpecifier) as e:
# When the metadata is invalid, skip this candidate by marking it as conflicting.
# Here we pass an empty criterion so it doesn't provide any info to the resolution.
logger.error("Invalid metadata in %s: %s", candidate, e)
raise RequirementsConflicted(Criterion([], [], [])) from None
if candidate.req.extras:
# XXX: If the requirement has extras, add the original candidate
# (without extras) as its dependency. This ensures the same package with
# different extras resolve to the same version.
self_req = dataclasses.replace(
candidate.req.as_pinned_version(candidate.version),
extras=None,
marker=None,
)
if self_req not in deps:
deps.insert(0, self_req)
self.fetched_dependencies[candidate.dep_key] = deps[:]
# Filter out incompatible dependencies(e.g. functools32) early so that
# we don't get errors when building wheels.
valid_deps: list[Requirement] = []
for dep in deps:
if (
dep.requires_python
& requires_python
& candidate.req.requires_python
& PySpecSet(self.repository.env_spec.requires_python)
).is_empty():
continue
if dep.marker and not dep.marker.matches(self.repository.env_spec):
continue
if dep.identify() in self.excludes:
continue
dep.requires_python &= candidate.req.requires_python
valid_deps.append(dep)
# A candidate contributes to the Python requirements only when:
# It isn't an optional dependency, or the requires-python doesn't cover
# the req's requires-python.
# For example, A v1 requires python>=3.6, it not eligible on a project with
# requires-python=">=2.7". But it is eligible if A has environment marker
# A1; python_version>='3.8'
new_requires_python = candidate.req.requires_python & self.repository.environment.python_requires
if not (
candidate.identify() in self.overrides
or new_requires_python.is_empty()
or requires_python.is_superset(new_requires_python)
):
valid_deps.append(PythonRequirement.from_pyspec_set(requires_python))
return valid_deps
@register_provider("reuse")
| BaseProvider |
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 49465,
"end": 49678
} | class ____(BatchPointwiseOpsPostGradFusion):
def __init__(self, **kwargs) -> None:
super().__init__(aten.relu.default, **kwargs)
@register_fusion("batch_aten_add", pre_grad=False)
| BatchReLuPostGradFusion |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 32196,
"end": 32986
} | class ____(unittest.TestCase):
def _callFUT(self, s):
from pyramid.traversal import quote_path_segment
return quote_path_segment(s)
def test_unicode(self):
la = text_(b'/La Pe\xc3\xb1a', 'utf-8')
result = self._callFUT(la)
self.assertEqual(result, '%2FLa%20Pe%C3%B1a')
def test_string(self):
s = '/ hello!'
result = self._callFUT(s)
self.assertEqual(result, '%2F%20hello!')
def test_int(self):
s = 12345
result = self._callFUT(s)
self.assertEqual(result, '12345')
def test_other(self):
class Foo:
def __str__(self):
return 'abc'
s = Foo()
result = self._callFUT(s)
self.assertEqual(result, 'abc')
| QuotePathSegmentTests |
python | google__jax | tests/layout_test.py | {
"start": 1032,
"end": 24889
} | class ____(jtu.JaxTestCase):
def test_auto_layout(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape1 = (128, 128)
shape2 = (128, 128)
s1 = NamedSharding(mesh, P('x', 'y'))
s2 = NamedSharding(mesh, P('x'))
def apply(x, y):
return x.T, y.T
def init(x, y):
return x * 2, y * 2
np_inp1 = np.arange(math.prod(shape1)).reshape(shape1)
np_inp2 = np.arange(math.prod(shape2)).reshape(shape2)
sds1 = jax.ShapeDtypeStruct(np_inp1.shape, np_inp1.dtype, sharding=s1)
sds2 = jax.ShapeDtypeStruct(np_inp2.shape, np_inp2.dtype, sharding=s2)
lowered_apply = jax.jit(apply, in_shardings=Format(Layout.AUTO),
out_shardings=Format(Layout.AUTO)).lower(sds1, sds2)
compiled_apply = lowered_apply.compile()
arg_formats, kw_layouts = compiled_apply.input_formats
self.assertEmpty(kw_layouts)
for i, o in zip(arg_formats, compiled_apply.output_formats):
self.assertEqual(i.layout.major_to_minor,
o.layout.major_to_minor[::-1])
init_compiled = jax.jit(
init, out_shardings=arg_formats).lower(sds1, sds2).compile()
for i, o in zip(init_compiled.input_formats[0],
init_compiled.output_formats):
self.assertEqual(i, o)
arr1 = jax.device_put(np_inp1, s1)
arr2 = jax.device_put(np_inp2, s2)
with jtu.count_aot_jit_cpp_cache_miss() as init_count:
init_out = init_compiled(arr1, arr2)
init_compiled(arr1, arr2)
self.assertEqual(init_count(), 1)
self.assertEqual(init_out[0].format, init_compiled.output_formats[0])
self.assertEqual(init_out[1].format, init_compiled.output_formats[1])
with jtu.count_aot_jit_cpp_cache_miss() as apply_count:
apply_out = compiled_apply(*init_out)
compiled_apply(*init_out)
self.assertEqual(apply_count(), 1)
self.assertEqual(apply_out[0].format, compiled_apply.output_formats[0])
self.assertEqual(apply_out[1].format, compiled_apply.output_formats[1])
self.assertTupleEqual(apply_out[0].format.layout.major_to_minor,
init_out[0].format.layout.major_to_minor[::-1])
self.assertTupleEqual(apply_out[1].format.layout.major_to_minor,
init_out[1].format.layout.major_to_minor[::-1])
self.assertArraysEqual(init_out[0], np_inp1 * 2)
self.assertArraysEqual(init_out[1], np_inp2 * 2)
self.assertArraysEqual(apply_out[0], (np_inp1 * 2).T)
self.assertArraysEqual(apply_out[1], (np_inp2 * 2).T)
def test_default_layout(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (4, 4, 2)
np_inp = np.arange(math.prod(shape)).reshape(shape)
s = NamedSharding(mesh, P('x', 'y'))
sds = jax.ShapeDtypeStruct(np_inp.shape, np_inp.dtype, sharding=s)
arr = jax.device_put(np_inp, s)
def f(x):
return x.T
lowered = jax.jit(f, in_shardings=None, out_shardings=None).lower(sds)
compiled = lowered.compile()
out = compiled(arr)
self.assertTupleEqual(
compiled.input_formats[0][0].layout.major_to_minor[::-1],
(2, 1, 0))
self.assertTupleEqual(
compiled.output_formats.layout.major_to_minor[::-1],
(2, 1, 0))
self.assertArraysEqual(out, np_inp.T)
self.assertEqual(out.sharding, NamedSharding(mesh, P(None, 'y', 'x')))
compiled_auto = jax.jit(f, in_shardings=Format(Layout.AUTO),
out_shardings=Format(Layout.AUTO)).lower(sds).compile()
self.assertTupleEqual(
compiled_auto.input_formats[0][0].layout.major_to_minor[::-1],
(2, 1, 0))
self.assertTupleEqual(
compiled_auto.output_formats.layout.major_to_minor[::-1],
(0, 1, 2))
with self.assertRaisesRegex(
ValueError, "jax.jit` does not accept device-local layouts directly"):
jax.jit(f, in_shardings=Layout.AUTO,
out_shardings=Layout.AUTO).lower(sds).compile()
def test_in_layouts_out_layouts(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (8, 8)
np_inp = np.arange(math.prod(shape)).reshape(shape)
s = NamedSharding(mesh, P('x', 'y'))
arr = jax.device_put(np_inp, s)
def f(x):
return x.T
compiled = jax.jit(f, in_shardings=Format(),
out_shardings=Format(Layout.AUTO)).lower(arr).compile()
self.assertTupleEqual(
compiled.input_formats[0][0].layout.major_to_minor[::-1],
(1, 0))
self.assertTupleEqual(
compiled.output_formats.layout.major_to_minor[::-1],
(0, 1))
out = compiled(arr)
self.assertArraysEqual(out, np_inp.T)
self.assertEqual(out.format, compiled.output_formats)
self.assertEqual(out.sharding, NamedSharding(mesh, P('y', 'x')))
def test_sharding_and_layouts(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (4, 8)
np_inp = np.arange(math.prod(shape)).reshape(shape)
s = NamedSharding(mesh, P('x', 'y'))
compiled = jax.jit(lambda x: x.T, in_shardings=Format(Layout.AUTO, s),
out_shardings=Format(Layout.AUTO, s)).lower(np_inp).compile()
out = compiled(np_inp)
self.assertTupleEqual(
compiled.input_formats[0][0].layout.major_to_minor[::-1],
(1, 0))
if not jtu.test_device_matches(['cpu']):
self.assertTupleEqual(
compiled.output_formats.layout.major_to_minor[::-1],
(0, 1))
self.assertArraysEqual(out, np_inp.T)
self.assertEqual(out.sharding, s)
def test_dce_in_layouts(self):
def f(x, y, z, a, b, c):
return z * 2, b.T
shape = (8, 2)
inps = [np.arange(math.prod(shape)).reshape(shape)] * 6
compiled = jax.jit(f, in_shardings=Format(Layout.AUTO),
out_shardings=Format(Layout.AUTO)).lower(*inps).compile()
arg_formats, _ = compiled.input_formats
out1, out2 = compiled(*inps)
compiled2 = jax.jit(f, in_shardings=arg_formats).lower(*inps).compile()
out3, out4 = compiled2(*inps)
for l1, l2 in safe_zip(arg_formats, compiled2.input_formats[0]):
self.assertEqual(l1, l2)
self.assertArraysEqual(out1, out3)
self.assertArraysEqual(out2, out4)
arrs = [jax.device_put(i, l) for i, l in zip(inps, arg_formats)]
out5, out6 = jax.jit(f)(*arrs)
self.assertArraysEqual(out1, out5)
self.assertArraysEqual(out2, out6)
def test_no_error_dced_args(self):
mesh = jtu.create_mesh((2, 1), ('x', 'y'))
shape = (8, 2)
s = NamedSharding(mesh, P('x', 'y'))
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr1 = jax.device_put(np_inp, s)
arr2 = jax.device_put(np_inp, s)
arrs = [arr1, arr2]
def f(x, y):
return x * 2
jf = jax.jit(f, in_shardings=Format(Layout.AUTO, s),
out_shardings=Format(Layout.AUTO, s))
compiled = jf.lower(np_inp, np_inp).compile()
arg_formats, _ = compiled.input_formats
arrs = [jax.device_put(i, l) for i, l in zip(arrs, arg_formats)]
compiled(*arrs)
def test_aot_layout_mismatch(self):
if jtu.test_device_matches(['cpu', 'gpu']):
# The test fails on GPU because the compilation with both input and
# output set to auto layout is underspecified. The GPU compiler chooses
# the default layout as the input layout and that choice does not
# raise an exception.
self.skipTest('This test does not work on CPU or GPU backends.')
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (256, 4, 2)
np_inp = np.arange(math.prod(shape)).reshape(shape)
s = NamedSharding(mesh, P('x'))
sds = jax.ShapeDtypeStruct(np_inp.shape, np_inp.dtype, sharding=s)
arr = jax.device_put(np_inp, s)
def f(x):
return (x * 2).T
with self.assertRaisesRegex(
ValueError,
'Layout passed to jit does not match the layout on the respective arg'):
jax.jit(f, in_shardings=Format(Layout.AUTO)).lower(arr)
compiled = jax.jit(f, in_shardings=Format(Layout.AUTO),
out_shardings=Format(Layout.AUTO)).lower(sds).compile()
with self.assertRaisesRegex(
ValueError,
r'Computation was compiled for input layouts that disagree with the '
r'layouts of arguments passed to it.'):
compiled(arr)
@jtu.ignore_warning(category=DeprecationWarning,
message="backend and device argument")
def test_cpu_default_backend_layout(self):
inp = jax.device_put(np.ones((8, 8)), device=jax.devices('cpu')[0])
out_cpu = jax.jit(jnp.dot)(inp, inp)
jax.jit(jnp.dot, backend=jax.default_backend()).lower(
out_cpu, out_cpu).compile() # doesn't crash
def test_device_put_concrete_layout(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (8, 128)
np_inp = np.arange(math.prod(shape)).reshape(shape)
s = NamedSharding(mesh, P('x', 'y'))
arr = jax.device_put(np_inp, s)
compiled = jax.jit(
lambda x: x * 2, out_shardings=Format(Layout.AUTO)).lower(arr).compile()
col = compiled.output_formats
out = jax.device_put(np_inp, col)
self.assertEqual(out.format, col)
self.assertArraysEqual(out, np_inp)
for s in out.addressable_shards:
self.assertEqual(out.format.layout,
s.data.format.layout)
def test_device_put_non_concrete_layout_error(self):
np_inp = np.arange(16).reshape(8, 2)
l1 = Format(Layout.AUTO, SingleDeviceSharding(jax.devices()[0]))
with self.assertRaisesRegex(
ValueError, 'sharding and layout.*should be concrete'):
jax.device_put(np_inp, l1)
l2 = Format(Layout.AUTO)
with self.assertRaisesRegex(
ValueError, 'sharding and layout.*should be concrete'):
jax.device_put(np_inp, l2)
l3 = Format(None, SingleDeviceSharding(jax.devices()[0]))
out = jax.device_put(np_inp, l3)
self.assertArraysEqual(out, np_inp)
self.assertTrue(out._committed)
def invalid_layout_spec(self):
x = np.arange(8)
compiled = jax.jit(lambda x: x).lower(x).compile()
with self.assertRaisesRegex(
ValueError, 'Sharding has to be concrete when layout.*'):
Format(compiled.output_formats[0], None)
def test_layout_on_sds(self):
mesh = jtu.create_mesh((2, 1), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
np_inp = np.arange(16).reshape(8, 2)
arr = jax.device_put(np_inp, s)
out_format = jax.jit(jnp.sin, out_shardings=Format(Layout.AUTO)).lower(
arr).compile().output_formats
sds = jax.ShapeDtypeStruct(arr.shape, arr.dtype, sharding=out_format)
arg_format, _ = jax.jit(lambda x: x * 2).lower(sds).compile().input_formats
self.assertEqual(arg_format[0], out_format)
with self.assertRaisesRegex(
TypeError,
'Layout.AUTO` cannot be used in place of a device-local'
' layout in a `ShapeDtypeStruct`'):
jax.ShapeDtypeStruct(arr.shape, arr.dtype, sharding=Format(Layout.AUTO))
def test_make_array_from_callback(self):
mesh = jtu.create_mesh((2, 1), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
np_inp = np.arange(16).reshape(8, 2)
sds = jax.ShapeDtypeStruct(np_inp.shape, np_inp.dtype, sharding=s)
format = jax.jit(lambda x: x * 2).lower(sds).compile().output_formats
out = jax.make_array_from_callback(np_inp.shape, format,
lambda idx: np_inp[idx])
self.assertArraysEqual(out, np_inp)
self.assertEqual(out.format, format)
with self.assertRaisesRegex(
TypeError,
'`Layout.AUTO` cannot be used in place of a device-local'
' layout'):
jax.make_array_from_callback(np_inp.shape, Format(Layout.AUTO, s),
lambda idx: np_inp[idx])
with self.assertRaisesRegex(
TypeError, 'sharding should be an instance of `jax.sharding`'):
jax.make_array_from_callback(
np_inp.shape, Format(None, None), lambda idx: np_inp[idx])
def test_wsc_concrete_layout(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (16, 128)
s = NamedSharding(mesh, P('x'))
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
# Create a custom layout instead of using `arr.layout` to test the API.
custom_dll = Layout(major_to_minor=(0, 1))
@jax.jit
def f(x):
y = x.T
# Constrain `y` to the original layout of `arr` because without it,
# the layout of `y` would be the transpose of `arr`.
return jax.lax.with_sharding_constraint(y, Format(custom_dll, s))
out = f(arr)
self.assertEqual(out.format.layout.major_to_minor,
custom_dll.major_to_minor)
self.assertEqual(out.format, arr.format)
self.assertArraysEqual(out, np_inp.T)
def test_wsc_bfloat16_concrete_layout(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (64, 128)
s = NamedSharding(mesh, P('x'))
inp = jnp.arange(math.prod(shape), dtype=jnp.bfloat16).reshape(shape)
arr = jax.device_put(inp, s)
# Create a custom layout instead of using `arr.layout` to test the API.
custom_dll = Layout(major_to_minor=(0, 1))
@jax.jit
def f(x):
y = x.T
# Constrain `y` to the original layout of `arr` because without it,
# the layout of `y` would be the transpose of `arr`.
return jax.lax.with_sharding_constraint(y, Format(custom_dll, s))
out = f(arr)
self.assertEqual(out.format.layout.major_to_minor,
custom_dll.major_to_minor)
self.assertEqual(out.format, arr.format)
self.assertArraysEqual(out, inp.T)
def test_device_put_user_concrete_layout(self):
shape = (8, 128)
np_inp = np.arange(math.prod(shape)).reshape(shape)
dll = Layout(major_to_minor=(1, 0))
s = SingleDeviceSharding(jax.devices()[0])
out = jax.device_put(np_inp, Format(dll, s))
self.assertEqual(out.format.layout.major_to_minor,
dll.major_to_minor)
self.assertArraysEqual(out, np_inp)
def test_device_put_user_concrete_layout_multi_device(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (16, 128)
s = NamedSharding(mesh, P('x'))
np_inp = np.arange(math.prod(shape)).reshape(shape)
jnp_inp = jnp.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
custom_format = Format(Layout(major_to_minor=(0, 1)), s)
out1 = jax.device_put(arr, custom_format)
with jax.set_mesh(mesh):
out2 = jax.device_put(arr, custom_format)
out3 = jax.device_put(jnp_inp, custom_format)
out4 = jax.device_put(np_inp, custom_format)
for o in [out1, out2, out3, out4]:
self.assertArraysEqual(o, np_inp)
self.assertEqual(o.format.layout.major_to_minor,
custom_format.layout.major_to_minor)
def test_concrete_layout_jit(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (16, 128)
s = NamedSharding(mesh, P('x'))
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
def f(x):
return x.T
custom_dll = Layout(major_to_minor=(0, 1))
f = jax.jit(f, out_shardings=Format(custom_dll, s))
out = f(arr)
self.assertArraysEqual(out, np_inp.T)
self.assertEqual(out.format.layout.major_to_minor,
custom_dll.major_to_minor)
def test_compatible_aval_error(self):
custom_dll = Layout(major_to_minor=(0, 1, 2))
l = Format(custom_dll, SingleDeviceSharding(jax.devices()[0]))
inp = np.arange(8)
@jax.jit(in_shardings=l)
def f(x):
return x * 2
with self.assertRaisesRegex(
ValueError,
'.*Length of major_to_minor and the rank of the value should match.*'):
f(inp)
def test_incompatible_aval_error_device_put(self):
custom_dll = Layout(major_to_minor=(0, 1, 2))
l = Format(custom_dll, SingleDeviceSharding(jax.devices()[0]))
inp = np.arange(8)
with self.assertRaisesRegex(
ValueError,
'.*Length of major_to_minor and the rank of the value should match.*'):
jax.device_put(inp, l)
def test_concrete_layout_in_shardings(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (16, 128)
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
custom_dll = Layout(major_to_minor=(0, 1))
@partial(jax.jit,
in_shardings=Format(custom_dll, s),
out_shardings=Format(Layout.AUTO))
def f(x):
return x.T
out = f(arr)
self.assertArraysEqual(out, np_inp.T)
self.assertEqual(out.format.layout.major_to_minor,
custom_dll.major_to_minor[::-1])
custom_dll2 = Layout(major_to_minor=(1, 0))
@jax.jit(in_shardings=Format(custom_dll2, s))
def g(x):
return x.T
with self.assertRaisesRegex(
ValueError,
'Layout passed to jit does not match the layout on the respective arg'):
g(arr)
def test_in_layouts_jit_jnp_input(self):
major_last_layout = Layout(major_to_minor=(1, 0))
sharding = jax.sharding.SingleDeviceSharding(jax.devices()[0])
f = jax.jit(lambda x: x + 1,
in_shardings=Format(major_last_layout, sharding))
arr = jnp.arange(8 * 128).reshape(8, 128)
out = f(arr)
self.assertArraysEqual(out, arr + 1)
# cpp dispatch should call into shard_args from cpp.
out2 = f(arr)
self.assertArraysEqual(out2, arr + 1)
np_inp = np.arange(8 * 128).reshape(8, 128)
out3 = f(np_inp)
self.assertArraysEqual(out3, np_inp + 1)
# cpp dispatch should call into shard_args from cpp.
out4 = f(np_inp)
self.assertArraysEqual(out4, np_inp + 1)
def test_layout_donation(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (16, 128)
np_inp = np.arange(math.prod(shape)).reshape(shape)
custom_dll = Layout(major_to_minor=(0, 1))
arr = jax.device_put(np_inp, Format(custom_dll, s))
@jax.jit(in_shardings=Format(custom_dll, s), donate_argnums=0)
def f(x):
return x
f(arr)
self.assertTrue(arr.is_deleted())
def test_layout_donation_auto(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (128, 16)
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
@jax.jit(out_shardings=Format(Layout.AUTO), donate_argnums=0)
def f(x):
return x * x
f(arr)
self.assertTrue(arr.is_deleted())
def test_layout_donation_matching_in_and_out(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (128, 16)
np_inp = np.arange(math.prod(shape)).reshape(shape)
custom_dll = Layout(major_to_minor=(0, 1))
l = Format(custom_dll, s)
arr = jax.device_put(np_inp, l)
@jax.jit(in_shardings=l, out_shardings=l, donate_argnums=0)
def f(x):
return x * x
f(arr)
self.assertTrue(arr.is_deleted())
@jtu.skip_on_devices('cpu', 'gpu')
def test_layout_donation_mismatching_in_and_out_fails(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (16*2, 32016*2)
np_inp = np.arange(math.prod(shape), dtype=jnp.bfloat16).reshape(shape)
tiling = (((16, 128), (2, 1)) if jtu.get_tpu_version() == 7
else ((8, 128), (2, 1)))
custom_dll1 = Layout(major_to_minor=(1, 0), tiling=tiling)
l1 = Format(custom_dll1, s)
arr = jax.device_put(np_inp, s)
@jax.jit(out_shardings=l1, donate_argnums=0)
def f(x):
return x * x
sds = jax.ShapeDtypeStruct(np_inp.shape, np_inp.dtype, sharding=s)
f.lower(sds).compile()(arr)
self.assertFalse(arr.is_deleted())
def test_donation_error_on_auto(self):
@jax.jit(donate_argnums=0, in_shardings=Format(Layout.AUTO))
def f(x):
return x * 2
with self.assertRaisesRegex(
ValueError, ".*Did you mean to set the.*output layout.*AUTO.*"):
f(jnp.arange(8))
@jax.jit(donate_argnums=0, out_shardings=Format(Layout.AUTO))
def g(x):
return x * 2
with self.assertRaisesRegex(
ValueError, ".*Did you mean to set the.*input layout.*AUTO.*"):
g(jnp.arange(8))
def test_cpp_layout_cache_miss(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (16, 16)
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
arr_m2m = arr.format.layout.major_to_minor
custom_format = Format(Layout(major_to_minor=arr_m2m[::-1]), s)
arr2 = jax.device_put(np_inp, custom_format)
@jax.jit
def f(x):
return x @ x.T
with jtu.count_pjit_cpp_cache_miss() as count:
out = f(arr)
out2 = f(arr2)
self.assertEqual(count(), 2)
self.assertArraysEqual(out, np_inp @ np_inp.T)
self.assertArraysEqual(out2, np_inp @ np_inp.T)
def test_layout_donation_with_default_layout(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (16, 16)
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
out_format = Format(arr.format.layout, s)
@jax.jit(out_shardings=out_format, donate_argnums=0)
def f(x):
return x * 2
lowered_text = f.lower(arr).as_text()
self.assertIn('tf.aliasing_output = 0', lowered_text)
self.assertNotIn('jax.buffer_donor', lowered_text)
out = f(arr)
self.assertArraysEqual(out, np_inp * 2)
self.assertEqual(out.format, out_format)
def test_with_layout_constraint(self):
if not jtu.test_device_matches(['tpu']):
self.skipTest('Only works for TPU')
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (16, 128)
s = NamedSharding(mesh, P('x'))
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
# Create a custom layout instead of using `arr.layout` to test the API.
custom_dll = Layout(major_to_minor=arr.format.layout.major_to_minor[::-1])
def f(x):
y = x.T
# Constrain `y` to the original layout of `arr` because without it,
# the layout of `y` would be the transpose of `arr`.
y = with_layout_constraint(y, custom_dll)
return y * 2
f(arr) # doesn't crash
f = jax.jit(f)
out = f(arr)
self.assertEqual(out.format.layout.major_to_minor,
custom_dll.major_to_minor)
self.assertArraysEqual(out, np_inp.T * 2)
lowered_text = f.lower(arr).as_text()
self.assertIn('LayoutConstraint', lowered_text)
def test_with_layout_constraint_vmap(self):
if not jtu.test_device_matches(['tpu']):
self.skipTest('Only works for TPU')
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
shape = (16, 128)
s = NamedSharding(mesh, P('x'))
np_inp = np.arange(math.prod(shape)).reshape(shape)
arr = jax.device_put(np_inp, s)
def f(x):
y = x.T
# Constrain `y` to the original layout of `arr` because without it,
# the layout of `y` would be the transpose of `arr`.
y = with_layout_constraint(y, Layout(major_to_minor=(0,)))
return y * 2
out = jax.jit(jax.vmap(f))(arr)
self.assertEqual(out.format.layout.major_to_minor, (0, 1))
def test_eval_shape_format(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
shape = (128, 16)
np_inp = np.arange(math.prod(shape)).reshape(shape)
custom_dll = Layout(major_to_minor=(0, 1))
l = Format(custom_dll, s)
arr = jax.device_put(np_inp, l)
@jax.jit(in_shardings=l, out_shardings=l)
def f(x):
return x * x
out = jax.eval_shape(f, arr)
self.assertEqual(out.format, l)
self.assertEqual(out.sharding, s)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| LayoutTest |
python | huggingface__transformers | src/transformers/models/blenderbot_small/modeling_blenderbot_small.py | {
"start": 50095,
"end": 50719
} | class ____(BlenderbotSmallPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = BlenderbotSmallDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall, facebook/bart-base->facebook/blenderbot_small-90M
| BlenderbotSmallDecoderWrapper |
python | django__django | django/db/backends/sqlite3/_functions.py | {
"start": 14299,
"end": 14351
} | class ____(list):
step = list.append
| ListAggregate |
python | explosion__spaCy | spacy/schemas.py | {
"start": 17287,
"end": 18311
} | class ____(BaseModel):
# fmt: off
max_epochs: StrictInt = Field(..., title="Maximum number of epochs to train for")
dropout: StrictFloat = Field(..., title="Dropout rate")
n_save_every: Optional[StrictInt] = Field(..., title="Saving additional temporary model after n batches within an epoch")
n_save_epoch: Optional[StrictInt] = Field(..., title="Saving model after every n epoch")
optimizer: Optimizer = Field(..., title="The optimizer to use")
corpus: StrictStr = Field(..., title="Path in the config to the training data")
batcher: Batcher = Field(..., title="Batcher for the training data")
component: str = Field(..., title="Component to find the layer to pretrain")
layer: str = Field(..., title="Layer to pretrain. Whole model if empty.")
objective: Callable[["Vocab", Model], Model] = Field(..., title="A function that creates the pretraining objective.")
# fmt: on
class Config:
extra = "forbid"
arbitrary_types_allowed = True
| ConfigSchemaPretrain |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/scatter_nd_ops_test.py | {
"start": 34889,
"end": 35044
} | class ____(ScatterNdDeterminismTest,
ScatterNdNonAliasingAddTest):
pass
| ScatterNdNonAliasingAddDeterminismTest |
python | openai__openai-python | src/openai/types/webhooks/batch_expired_webhook_event.py | {
"start": 324,
"end": 756
} | class ____(BaseModel):
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the batch API request expired."""
data: Data
"""Event data payload."""
type: Literal["batch.expired"]
"""The type of the event. Always `batch.expired`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| BatchExpiredWebhookEvent |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/translator.py | {
"start": 1961,
"end": 2204
} | class ____:
"""A record representing a piece of content in Tableau.
Includes the content's type and data as returned from the API.
"""
content_type: TableauContentType
properties: Mapping[str, Any]
@record
| TableauContentData |
python | sanic-org__sanic | sanic/constants.py | {
"start": 63,
"end": 275
} | class ____(UpperStrEnum):
"""HTTP methods that are commonly used."""
GET = auto()
POST = auto()
PUT = auto()
HEAD = auto()
OPTIONS = auto()
PATCH = auto()
DELETE = auto()
| HTTPMethod |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 98318,
"end": 98553
} | class ____(Structure):
_fields_ = [("device", c_nvmlDevice_t),
("id", c_uint),
("profileId", c_uint),
("placement", c_nvmlGpuInstancePlacement_t)
]
| c_nvmlGpuInstanceInfo_t |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 91938,
"end": 95760
} | class ____(Request):
"""
Get user and system tags used for the tasks under the specified projects
:param include_system: If set to 'true' then the list of the system tags is
also returned. The default value is 'false'
:type include_system: bool
:param projects: The list of projects under which the tags are searched. If not
passed or empty then all the projects are searched
:type projects: Sequence[str]
:param filter: Filter on entities to collect tags from
:type filter: dict
"""
_service = "projects"
_action = "get_task_tags"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"filter": {
"description": "Filter on entities to collect tags from",
"properties": {
"system_tags": {
"description": "The list of system tag values to filter by. Use 'null' value to specify empty system tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "The list of tag values to filter by. Use 'null' value to specify empty tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
},
"type": ["object", "null"],
},
"include_system": {
"default": False,
"description": "If set to 'true' then the list of the system tags is also returned. The default value is 'false'",
"type": ["boolean", "null"],
},
"projects": {
"description": "The list of projects under which the tags are searched. If not passed or empty then all the projects are searched",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
include_system: Optional[bool] = False,
projects: Optional[List[str]] = None,
filter: Optional[dict] = None,
**kwargs: Any
) -> None:
super(GetTaskTagsRequest, self).__init__(**kwargs)
self.include_system = include_system
self.projects = projects
self.filter = filter
@schema_property("include_system")
def include_system(self) -> Optional[bool]:
return self._property_include_system
@include_system.setter
def include_system(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_system = None
return
self.assert_isinstance(value, "include_system", (bool,))
self._property_include_system = value
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("filter")
def filter(self) -> Optional[dict]:
return self._property_filter
@filter.setter
def filter(self, value: Optional[dict]) -> None:
if value is None:
self._property_filter = None
return
self.assert_isinstance(value, "filter", (dict,))
self._property_filter = value
| GetTaskTagsRequest |
python | scikit-learn__scikit-learn | sklearn/semi_supervised/_label_propagation.py | {
"start": 17049,
"end": 21709
} | class ____(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning.
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'} or callable, default='rbf'
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape (n_samples, n_features),
and return a (n_samples, n_samples) shaped weight matrix.
gamma : float, default=20
Parameter for rbf kernel.
n_neighbors : int, default=7
Parameter for knn kernel which is a strictly positive integer.
alpha : float, default=0.2
Clamping factor. A value in (0, 1) that specifies the relative amount
that an instance should adopt the information from its neighbors as
opposed to its initial label.
alpha=0 means keeping the initial label information; alpha=1 means
replacing all initial information.
max_iter : int, default=30
Maximum number of iterations allowed.
tol : float, default=1e-3
Convergence tolerance: threshold to consider the system at steady
state.
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
X_ : ndarray of shape (n_samples, n_features)
Input array.
classes_ : ndarray of shape (n_classes,)
The distinct labels used in classifying instances.
label_distributions_ : ndarray of shape (n_samples, n_classes)
Categorical distribution for each item.
transduction_ : ndarray of shape (n_samples,)
Label assigned to each item during :term:`fit`.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run.
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning.
References
----------
`Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
<https://citeseerx.ist.psu.edu/doc_view/pid/d74c37aabf2d5cae663007cbd8718175466aea8c>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
LabelSpreading(...)
"""
_variant = "spreading"
_parameter_constraints: dict = {**BaseLabelPropagation._parameter_constraints}
_parameter_constraints["alpha"] = [Interval(Real, 0, 1, closed="neither")]
def __init__(
self,
kernel="rbf",
*,
gamma=20,
n_neighbors=7,
alpha=0.2,
max_iter=30,
tol=1e-3,
n_jobs=None,
):
# this one has different base parameters
super().__init__(
kernel=kernel,
gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha,
max_iter=max_iter,
tol=tol,
n_jobs=n_jobs,
)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == "knn":
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = csgraph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.issparse(laplacian):
diag_mask = laplacian.row == laplacian.col
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[:: n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| LabelSpreading |
python | gevent__gevent | src/gevent/tests/lock_tests.py | {
"start": 6490,
"end": 8499
} | class ____(BaseTestCase):
"""
Tests for Event objects.
"""
def eventtype(self):
raise NotImplementedError()
def test_is_set(self):
evt = self.eventtype()
self.assertFalse(evt.is_set())
evt.set()
self.assertTrue(evt.is_set())
evt.set()
self.assertTrue(evt.is_set())
evt.clear()
self.assertFalse(evt.is_set())
evt.clear()
self.assertFalse(evt.is_set())
def _check_notify(self, evt):
# All threads get notified
N = 5
results1 = []
results2 = []
def f():
evt.wait()
results1.append(evt.is_set())
evt.wait()
results2.append(evt.is_set())
b = Bunch(f, N)
b.wait_for_started()
_wait()
self.assertEqual(len(results1), 0)
evt.set()
b.wait_for_finished()
self.assertEqual(results1, [True] * N)
self.assertEqual(results2, [True] * N)
def test_notify(self):
evt = self.eventtype()
self._check_notify(evt)
# Another time, after an explicit clear()
evt.set()
evt.clear()
self._check_notify(evt)
def test_timeout(self):
evt = self.eventtype()
results1 = []
results2 = []
N = 5
def f():
evt.wait(0.0)
results1.append(evt.is_set())
t1 = time.time()
evt.wait(0.2)
r = evt.is_set()
t2 = time.time()
results2.append((r, t2 - t1))
Bunch(f, N).wait_for_finished()
self.assertEqual(results1, [False] * N)
for r, dt in results2:
self.assertFalse(r)
self.assertTimeWithinRange(dt, 0.18, 10)
# The event is set
results1 = []
results2 = []
evt.set()
Bunch(f, N).wait_for_finished()
self.assertEqual(results1, [True] * N)
for r, dt in results2:
self.assertTrue(r)
| EventTests |
python | pyca__cryptography | src/cryptography/hazmat/primitives/ciphers/modes.py | {
"start": 2715,
"end": 3144
} | class ____(ModeWithNonce):
name = "CTR"
def __init__(self, nonce: utils.Buffer):
utils._check_byteslike("nonce", nonce)
self._nonce = nonce
@property
def nonce(self) -> utils.Buffer:
return self._nonce
def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None:
_check_aes_key_length(self, algorithm)
_check_nonce_length(self.nonce, self.name, algorithm)
| CTR |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 46241,
"end": 46341
} | class ____(generics.RetrieveAPIView):
queryset = BasicModel.objects.all()
| BasicNamingCollisionView |
python | huggingface__transformers | src/transformers/modeling_utils.py | {
"start": 41299,
"end": 44908
} | class ____:
"""
Base utilities to regroup getters and setters for embeddings.
Introduces the `input_layer_embed` attribute, which indicates
where the input embeddings come from and where they
should be set.
"""
_input_embed_layer = "embed_tokens" # default layer that holds input embeddings.
def get_input_embeddings(self) -> nn.Module:
"""
Returns the model's input embeddings.
Returns:
`nn.Module`: A torch module mapping vocabulary to hidden states.
"""
# 1) Check if the model has an attribute named 'embed_tokens' (the standard input embedding layer
# for most NLP models), and if so, return it.
name = getattr(self, "_input_embed_layer", "embed_tokens")
if (default_embedding := getattr(self, name, None)) is not None:
return default_embedding
# 2) encoder/decoder and VLMs like `Gemma3nForConditionalGeneration`
if hasattr(self, "model") and hasattr(self.model, "embed_tokens"):
return self.model.embed_tokens
# 3) vanilla decoder‑only architectures
elif hasattr(self, "embed_tokens"):
return self.embed_tokens
else:
base_model = getattr(self, "base_model_prefix", None)
if base_model is not None:
base_model = getattr(self, base_model, None)
if base_model is not None and base_model is not self:
return base_model.get_input_embeddings()
raise NotImplementedError(
f"`get_input_embeddings` not auto‑handled for {self.__class__.__name__}; "
"please override in the subclass."
)
def set_input_embeddings(self, value: nn.Module):
"""Fallback setter that handles **~70%** of models in the code-base.
Order of attempts:
1. `self.model.embed_tokens`
2. `self.embed_tokens`
3. delegate to the *base model* if one exists
4. otherwise raise `NotImplementedError` so subclasses still can (and
should) override for exotic layouts.
"""
# 1) encoder/decoder and VLMs like `Gemma3nForConditionalGeneration`
name = getattr(self, "_input_embed_layer", "embed_tokens")
if hasattr(self, "model") and hasattr(self.model, name):
setattr(self.model, name, value)
# 2) as well as vanilla decoder‑only architectures
elif hasattr(self, name):
setattr(self, name, value)
# 3) recurse once into the registered *base* model (e.g. for encoder/decoder)
elif getattr(self, self.base_model_prefix, self) is not self:
base_model = getattr(self, self.base_model_prefix, self)
base_model.set_input_embeddings(value)
else:
raise NotImplementedError(
f"`set_input_embeddings` not auto‑handled for {self.__class__.__name__}; please override in the subclass."
)
def get_output_embeddings(self):
if not hasattr(self, "lm_head"):
return None
try:
# Speech / vision backbones raise here, so we return None.
# Legit use of get_input_embs?
self.get_input_embeddings()
except NotImplementedError:
return None
return self.lm_head
def set_output_embeddings(self, new_embeddings):
"""
Sets the model's output embedding, defaulting to setting new_embeddings to lm_head.
"""
if getattr(self, "lm_head"):
self.lm_head = new_embeddings
| EmbeddingAccessMixin |
python | sympy__sympy | sympy/core/facts.py | {
"start": 10887,
"end": 17328
} | class ____:
"""Rules that describe how to deduce facts in logic space
When defined, these rules allow implications to quickly be determined
for a set of facts. For this precomputed deduction tables are used.
see `deduce_all_facts` (forward-chaining)
Also it is possible to gather prerequisites for a fact, which is tried
to be proven. (backward-chaining)
Definition Syntax
-----------------
a -> b -- a=T -> b=T (and automatically b=F -> a=F)
a -> !b -- a=T -> b=F
a == b -- a -> b & b -> a
a -> b & c -- a=T -> b=T & c=T
# TODO b | c
Internals
---------
.full_implications[k, v]: all the implications of fact k=v
.beta_triggers[k, v]: beta rules that might be triggered when k=v
.prereq -- {} k <- [] of k's prerequisites
.defined_facts -- set of defined fact names
"""
def __init__(self, rules):
"""Compile rules into internal lookup tables"""
if isinstance(rules, str):
rules = rules.splitlines()
# --- parse and process rules ---
P = Prover()
for rule in rules:
# XXX `a` is hardcoded to be always atom
a, op, b = rule.split(None, 2)
a = Logic.fromstring(a)
b = Logic.fromstring(b)
if op == '->':
P.process_rule(a, b)
elif op == '==':
P.process_rule(a, b)
P.process_rule(b, a)
else:
raise ValueError('unknown op %r' % op)
# --- build deduction networks ---
self.beta_rules = []
for bcond, bimpl in P.rules_beta:
self.beta_rules.append(
({_as_pair(a) for a in bcond.args}, _as_pair(bimpl)))
# deduce alpha implications
impl_a = deduce_alpha_implications(P.rules_alpha)
# now:
# - apply beta rules to alpha chains (static extension), and
# - further associate beta rules to alpha chain (for inference
# at runtime)
impl_ab = apply_beta_to_alpha_route(impl_a, P.rules_beta)
# extract defined fact names
self.defined_facts = {_base_fact(k) for k in impl_ab.keys()}
# build rels (forward chains)
full_implications = defaultdict(set)
beta_triggers = defaultdict(set)
for k, (impl, betaidxs) in impl_ab.items():
full_implications[_as_pair(k)] = {_as_pair(i) for i in impl}
beta_triggers[_as_pair(k)] = betaidxs
self.full_implications = full_implications
self.beta_triggers = beta_triggers
# build prereq (backward chains)
prereq = defaultdict(set)
rel_prereq = rules_2prereq(full_implications)
for k, pitems in rel_prereq.items():
prereq[k] |= pitems
self.prereq = prereq
def _to_python(self) -> str:
""" Generate a string with plain python representation of the instance """
return '\n'.join(self.print_rules())
@classmethod
def _from_python(cls, data : dict):
""" Generate an instance from the plain python representation """
self = cls('')
for key in ['full_implications', 'beta_triggers', 'prereq']:
d=defaultdict(set)
d.update(data[key])
setattr(self, key, d)
self.beta_rules = data['beta_rules']
self.defined_facts = set(data['defined_facts'])
return self
def _defined_facts_lines(self):
yield 'defined_facts = ['
for fact in sorted(self.defined_facts):
yield f' {fact!r},'
yield '] # defined_facts'
def _full_implications_lines(self):
yield 'full_implications = dict( ['
for fact in sorted(self.defined_facts):
for value in (True, False):
yield f' # Implications of {fact} = {value}:'
yield f' (({fact!r}, {value!r}), set( ('
implications = self.full_implications[(fact, value)]
for implied in sorted(implications):
yield f' {implied!r},'
yield ' ) ),'
yield ' ),'
yield ' ] ) # full_implications'
def _prereq_lines(self):
yield 'prereq = {'
yield ''
for fact in sorted(self.prereq):
yield f' # facts that could determine the value of {fact}'
yield f' {fact!r}: {{'
for pfact in sorted(self.prereq[fact]):
yield f' {pfact!r},'
yield ' },'
yield ''
yield '} # prereq'
def _beta_rules_lines(self):
reverse_implications = defaultdict(list)
for n, (pre, implied) in enumerate(self.beta_rules):
reverse_implications[implied].append((pre, n))
yield '# Note: the order of the beta rules is used in the beta_triggers'
yield 'beta_rules = ['
yield ''
m = 0
indices = {}
for implied in sorted(reverse_implications):
fact, value = implied
yield f' # Rules implying {fact} = {value}'
for pre, n in reverse_implications[implied]:
indices[n] = m
m += 1
setstr = ", ".join(map(str, sorted(pre)))
yield f' ({{{setstr}}},'
yield f' {implied!r}),'
yield ''
yield '] # beta_rules'
yield 'beta_triggers = {'
for query in sorted(self.beta_triggers):
fact, value = query
triggers = [indices[n] for n in self.beta_triggers[query]]
yield f' {query!r}: {triggers!r},'
yield '} # beta_triggers'
def print_rules(self) -> Iterator[str]:
""" Returns a generator with lines to represent the facts and rules """
yield from self._defined_facts_lines()
yield ''
yield ''
yield from self._full_implications_lines()
yield ''
yield ''
yield from self._prereq_lines()
yield ''
yield ''
yield from self._beta_rules_lines()
yield ''
yield ''
yield "generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,"
yield " 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}"
| FactRules |
python | automl__auto-sklearn | autosklearn/ensemble_building/run.py | {
"start": 193,
"end": 5336
} | class ____:
"""Class for storing information about a run used during ensemble building.
Note
----
This is for internal use by the EnsembleBuilder and not for general usage.
"""
# For matching prediction files
RE_MODEL_PREDICTION_FILE = (
r"^predictions_ensemble_([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*).npy$"
)
# For matching run directories
RE_MODEL_DIR = r"^([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)$"
def __init__(self, path: Path) -> None:
"""Creates a Run from a path pointing to the directory of a run
Parameters
----------
path: Path
Expects something like /path/to/{seed}_{numrun}_{budget}
"""
name = path.name
seed, num_run, budget = name.split("_")
self.dir = path
self.seed = int(seed)
self.num_run = int(num_run)
self.budget = float(budget)
# These are ordered based on preference
self.losses: dict[str, float] = {}
self._mem_usage: float | None = None
# Items that will be delete when the run is saved back to file
self._cache: dict[str, np.ndarray] = {}
# The recorded time of ensemble/test predictions modified
self.recorded_mtimes: dict[str, float] = {}
self.record_modified_times()
@property
def mem_usage(self) -> float:
"""The memory usage of this run based on it's directory"""
if self._mem_usage is None:
self._mem_usage = round(sizeof(self.dir, unit="MB"), 2)
return self._mem_usage
def is_dummy(self) -> bool:
"""Whether this run is a dummy run or not"""
return self.num_run == 1
def was_modified(self) -> bool:
"""Query for when the ens file was last modified"""
recorded = self.recorded_mtimes.get("ensemble")
last = self.pred_path().stat().st_mtime
return recorded != last
def pred_path(self, kind: str = "ensemble") -> Path:
"""Get the path to certain predictions"""
fname = f"predictions_{kind}_{self.seed}_{self.num_run}_{self.budget}.npy"
return self.dir / fname
def record_modified_times(self) -> None:
"""Records the last time each prediction file type was modified, if it exists"""
self.recorded_mtimes = {}
for kind in ["ensemble", "test"]:
path = self.pred_path(kind) # type: ignore
if path.exists():
self.recorded_mtimes[kind] = path.stat().st_mtime
def has_predictions(self, kind: str = "ensemble") -> bool:
"""
Parameters
----------
kind: "ensemble" | "test" = "ensemble"
The kind of predictions to query for
Returns
-------
bool
Whether this run has the kind of predictions queried for
"""
return self.pred_path(kind).exists()
def predictions(
self,
kind: str = "ensemble",
precision: int | None = None,
) -> np.ndarray:
"""Load the predictions for this run
Parameters
----------
kind : "ensemble" | "test"
The kind of predictions to load
precisions : type | None = None
What kind of precision reduction to apply
Returns
-------
np.ndarray
The loaded predictions
"""
key = f"predictions_{kind}"
if key in self._cache:
return self._cache[key]
path = self.pred_path(kind)
with path.open("rb") as f:
# TODO: We should probably remove this requirement. I'm not sure why model
# predictions are being saved as pickled
predictions = np.load(f, allow_pickle=True)
if precision:
dtypes: dict[int, type] = {16: np.float16, 32: np.float32, 64: np.float64}
dtype = dtypes.get(precision, None)
if dtype is not None:
predictions = predictions.astype(dtype=dtype, copy=False)
self._cache[key] = predictions
return predictions
def __getstate__(self) -> dict:
"""Remove the cache when pickling."""
state = self.__dict__.copy()
del state["_cache"]
return state
def __setstate__(self, state: dict) -> None:
"""Reset state and instansiate blank cache."""
self.__dict__.update(state)
self._cache = {}
@property
def id(self) -> RunID:
"""Get the three components of it's id"""
return self.seed, self.num_run, self.budget
def __hash__(self) -> int:
return hash(self.id)
def __repr__(self) -> str:
return f"Run(id={self.id}, losses={self.losses})"
def __eq__(self, other: object) -> bool:
return isinstance(other, Run) and other.id == self.id
@staticmethod
def valid(path: Path) -> bool:
"""
Parameters
----------
path: Path
The path to check
Returns
-------
bool
Whether the path is a valid run dir
"""
return re.match(Run.RE_MODEL_DIR, path.name) is not None
| Run |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 4229,
"end": 4396
} | class ____(GreatExpectationsError):
def __init__(self, message: str):
super().__init__(f"Bad input to build_batch_request: {message}")
| BuildBatchRequestError |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 2863,
"end": 3105
} | class ____(TimeoutError, RequestError):
"""Raised when a socket timeout occurs while receiving data from a server"""
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
| ReadTimeoutError |
python | giampaolo__psutil | tests/test_windows.py | {
"start": 13955,
"end": 21806
} | class ____(WindowsTestCase):
@classmethod
def setUpClass(cls):
cls.pid = spawn_subproc().pid
@classmethod
def tearDownClass(cls):
terminate(cls.pid)
def test_issue_24(self):
p = psutil.Process(0)
with pytest.raises(psutil.AccessDenied):
p.kill()
def test_special_pid(self):
p = psutil.Process(4)
assert p.name() == 'System'
# use __str__ to access all common Process properties to check
# that nothing strange happens
str(p)
p.username()
assert p.create_time() >= 0.0
try:
rss, _vms = p.memory_info()[:2]
except psutil.AccessDenied:
# expected on Windows Vista and Windows 7
if platform.uname()[1] not in {'vista', 'win-7', 'win7'}:
raise
else:
assert rss > 0
def test_send_signal(self):
p = psutil.Process(self.pid)
with pytest.raises(ValueError):
p.send_signal(signal.SIGINT)
def test_num_handles_increment(self):
p = psutil.Process(os.getpid())
before = p.num_handles()
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, win32con.FALSE, os.getpid()
)
after = p.num_handles()
assert after == before + 1
win32api.CloseHandle(handle)
assert p.num_handles() == before
def test_ctrl_signals(self):
p = psutil.Process(self.spawn_subproc().pid)
p.send_signal(signal.CTRL_C_EVENT)
p.send_signal(signal.CTRL_BREAK_EVENT)
p.kill()
p.wait()
with pytest.raises(psutil.NoSuchProcess):
p.send_signal(signal.CTRL_C_EVENT)
with pytest.raises(psutil.NoSuchProcess):
p.send_signal(signal.CTRL_BREAK_EVENT)
def test_username(self):
name = win32api.GetUserNameEx(win32con.NameSamCompatible)
if name.endswith('$'):
# When running as a service account (most likely to be
# NetworkService), these user name calculations don't produce the
# same result, causing the test to fail.
return pytest.skip('running as service account')
assert psutil.Process().username() == name
def test_cmdline(self):
sys_value = re.sub(r"[ ]+", " ", win32api.GetCommandLine()).strip()
psutil_value = ' '.join(psutil.Process().cmdline())
# The PyWin32 command line may retain quotes around argv[0] if they
# were used unnecessarily, while psutil will omit them. So remove
# the first 2 quotes from sys_value if not in psutil_value.
# A path to an executable will not contain quotes, so this is safe.
sys_value = sys_value.replace('"', "")
psutil_value = psutil_value.replace('"', "")
assert sys_value == psutil_value
# XXX - occasional failures
# def test_cpu_times(self):
# handle = win32api.OpenProcess(
# win32con.PROCESS_QUERY_INFORMATION, win32con.FALSE, os.getpid()
# )
# self.addCleanup(win32api.CloseHandle, handle)
# a = psutil.Process().cpu_times()
# b = win32process.GetProcessTimes(handle)
# assert abs(a.user - b['UserTime'] / 10000000.0) < 0.2
# assert abs(a.user - b['KernelTime'] / 10000000.0) < 0.2
def test_nice(self):
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, win32con.FALSE, os.getpid()
)
self.addCleanup(win32api.CloseHandle, handle)
sys_value = win32process.GetPriorityClass(handle)
psutil_value = psutil.Process().nice()
assert psutil_value == sys_value
def test_memory_info(self):
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, win32con.FALSE, self.pid
)
self.addCleanup(win32api.CloseHandle, handle)
sys_value = win32process.GetProcessMemoryInfo(handle)
psutil_value = psutil.Process(self.pid).memory_info()
assert sys_value['PeakWorkingSetSize'] == psutil_value.peak_wset
assert sys_value['WorkingSetSize'] == psutil_value.wset
assert (
sys_value['QuotaPeakPagedPoolUsage']
== psutil_value.peak_paged_pool
)
assert sys_value['QuotaPagedPoolUsage'] == psutil_value.paged_pool
assert (
sys_value['QuotaPeakNonPagedPoolUsage']
== psutil_value.peak_nonpaged_pool
)
assert (
sys_value['QuotaNonPagedPoolUsage'] == psutil_value.nonpaged_pool
)
assert sys_value['PagefileUsage'] == psutil_value.pagefile
assert sys_value['PeakPagefileUsage'] == psutil_value.peak_pagefile
assert psutil_value.rss == psutil_value.wset
assert psutil_value.vms == psutil_value.pagefile
def test_wait(self):
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, win32con.FALSE, self.pid
)
self.addCleanup(win32api.CloseHandle, handle)
p = psutil.Process(self.pid)
p.terminate()
psutil_value = p.wait()
sys_value = win32process.GetExitCodeProcess(handle)
assert psutil_value == sys_value
def test_cpu_affinity(self):
def from_bitmask(x):
return [i for i in range(64) if (1 << i) & x]
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, win32con.FALSE, self.pid
)
self.addCleanup(win32api.CloseHandle, handle)
sys_value = from_bitmask(
win32process.GetProcessAffinityMask(handle)[0]
)
psutil_value = psutil.Process(self.pid).cpu_affinity()
assert psutil_value == sys_value
def test_io_counters(self):
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, win32con.FALSE, os.getpid()
)
self.addCleanup(win32api.CloseHandle, handle)
sys_value = win32process.GetProcessIoCounters(handle)
psutil_value = psutil.Process().io_counters()
assert psutil_value.read_count == sys_value['ReadOperationCount']
assert psutil_value.write_count == sys_value['WriteOperationCount']
assert psutil_value.read_bytes == sys_value['ReadTransferCount']
assert psutil_value.write_bytes == sys_value['WriteTransferCount']
assert psutil_value.other_count == sys_value['OtherOperationCount']
assert psutil_value.other_bytes == sys_value['OtherTransferCount']
def test_num_handles(self):
import ctypes
import ctypes.wintypes
PROCESS_QUERY_INFORMATION = 0x400
handle = ctypes.windll.kernel32.OpenProcess(
PROCESS_QUERY_INFORMATION, 0, self.pid
)
self.addCleanup(ctypes.windll.kernel32.CloseHandle, handle)
hndcnt = ctypes.wintypes.DWORD()
ctypes.windll.kernel32.GetProcessHandleCount(
handle, ctypes.byref(hndcnt)
)
sys_value = hndcnt.value
psutil_value = psutil.Process(self.pid).num_handles()
assert psutil_value == sys_value
def test_error_partial_copy(self):
# https://github.com/giampaolo/psutil/issues/875
exc = OSError()
exc.winerror = 299
with mock.patch("psutil._psplatform.cext.proc_cwd", side_effect=exc):
with mock.patch("time.sleep") as m:
p = psutil.Process()
with pytest.raises(psutil.AccessDenied):
p.cwd()
assert m.call_count >= 5
def test_exe(self):
# NtQuerySystemInformation succeeds if process is gone. Make sure
# it raises NSP for a non existent pid.
pid = psutil.pids()[-1] + 99999
proc = psutil._psplatform.Process(pid)
with pytest.raises(psutil.NoSuchProcess):
proc.exe()
| TestProcess |
python | walkccc__LeetCode | solutions/1982. Find Array Given Subset Sums/1982.py | {
"start": 0,
"end": 1164
} | class ____:
def recoverArray(self, n: int, sums: list[int]) -> list[int]:
def recover(sums: list[int]) -> list[int]:
if len(sums) == 1:
return []
count = collections.Counter(sums)
# Either num or -num must be in the final array.
# num + sumsExcludingNum = sumsIncludingNum
# -num + sumsIncludingNum = sumsExcludingNum
num = sums[1] - sums[0]
sumsExcludingNum = []
sumsIncludingNum = []
chooseSumsExcludingNum = True
for summ in sums:
if count[summ] == 0:
continue
count[summ] -= 1
count[summ + num] -= 1
sumsExcludingNum.append(summ)
sumsIncludingNum.append(summ + num)
if summ + num == 0:
chooseSumsExcludingNum = False
# Choose `sumsExludingNum` by default since we want to gradually strip
# `num` from each sum in `sums` to have the final array. However, we should
# always choose the group of sums with 0 since it's a must-have.
return ([num] + recover(sumsExcludingNum) if chooseSumsExcludingNum else
[-num] + recover(sumsIncludingNum))
return recover(sorted(sums))
| Solution |
python | davidhalter__jedi | jedi/inference/value/instance.py | {
"start": 22210,
"end": 22511
} | class ____(TreeArgumentsWrapper):
def __init__(self, instance, arguments):
super().__init__(arguments)
self.instance = instance
def unpack(self, func=None):
yield None, LazyKnownValue(self.instance)
yield from self._wrapped_arguments.unpack(func)
| InstanceArguments |
python | run-llama__llama_index | llama-index-core/llama_index/core/selectors/llm_selectors.py | {
"start": 1744,
"end": 4505
} | class ____(BaseSelector):
"""
LLM single selector.
LLM-based selector that chooses one out of many options.
Args:
LLM (LLM): An LLM.
prompt (SingleSelectPrompt): A LLM prompt for selecting one out of many options.
"""
def __init__(
self,
llm: LLM,
prompt: SingleSelectPrompt,
) -> None:
self._llm = llm
self._prompt: BasePromptTemplate = prompt
if self._prompt.output_parser is None:
raise ValueError("Prompt should have output parser.")
@classmethod
def from_defaults(
cls,
llm: Optional[LLM] = None,
prompt_template_str: Optional[str] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> "LLMSingleSelector":
# optionally initialize defaults
llm = llm or Settings.llm
prompt_template_str = prompt_template_str or DEFAULT_SINGLE_SELECT_PROMPT_TMPL
output_parser = output_parser or SelectionOutputParser()
# construct prompt
prompt = SingleSelectPrompt(
template=prompt_template_str,
output_parser=output_parser,
prompt_type=PromptType.SINGLE_SELECT,
)
return cls(llm, prompt)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {"prompt": self._prompt}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "prompt" in prompts:
self._prompt = prompts["prompt"]
def _select(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
# prepare input
choices_text = _build_choices_text(choices)
# predict
prediction = self._llm.predict(
prompt=self._prompt,
num_choices=len(choices),
context_list=choices_text,
query_str=query.query_str,
)
# parse output
assert self._prompt.output_parser is not None
parse = self._prompt.output_parser.parse(prediction)
return _structured_output_to_selector_result(parse)
async def _aselect(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
# prepare input
choices_text = _build_choices_text(choices)
# predict
prediction = await self._llm.apredict(
prompt=self._prompt,
num_choices=len(choices),
context_list=choices_text,
query_str=query.query_str,
)
# parse output
assert self._prompt.output_parser is not None
parse = self._prompt.output_parser.parse(prediction)
return _structured_output_to_selector_result(parse)
| LLMSingleSelector |
python | google__pytype | pytype/tests/test_fiddle_overlay.py | {
"start": 15941,
"end": 17973
} | class ____(test_base.BaseTest):
"""Tests for Config wrapping a function."""
def test_basic(self):
# Config values wrapping non-dataclasses are currently treated as Any
with self.DepTree([("fiddle.pyi", _FIDDLE_PYI)]):
self.Check("""
import fiddle
def Simple(x: int, y: str):
pass
a = fiddle.Config(Simple)
a.x = 1
a.y = 2
""")
def test_init_args(self):
with self.DepTree([("fiddle.pyi", _FIDDLE_PYI)]):
self.Check("""
import fiddle
def Simple(x: int, y: str):
pass
a = fiddle.Config(Simple, 1)
b = fiddle.Config(Simple, 1, 2) # no type checking yet
b = fiddle.Config(Simple, 1, 2, 3) # no arg checking yet
""")
def test_method(self):
# Treat methods the same way as functions.
with self.DepTree([("fiddle.pyi", _FIDDLE_PYI)]):
self.Check("""
import dataclasses
import fiddle
@dataclasses.dataclass
class DataClass:
x: int
@classmethod
def make(cls, x: int):
return cls(x)
a = fiddle.Config(DataClass.make, 1)
b = fiddle.Config(DataClass.make, "1") # no type checking yet
b = fiddle.Config(DataClass.make, 1, 2, 3) # no arg checking yet
""")
def test_matching(self):
# We should still recognise the Config class even if we currently treat it
# as Config[Any]
with self.DepTree([("fiddle.pyi", _FIDDLE_PYI)]):
self.Check("""
import fiddle
def Simple(x: int, y: str):
pass
def f() -> fiddle.Config[Simple]:
return fiddle.Config(Simple, 1)
""")
def test_no_crash_on_empty_args(self):
# pass
with self.DepTree([("fiddle.pyi", _FIDDLE_PYI)]):
self.Check("""
from typing import cast, Any
import fiddle
def foo(*args):
return
fiddle.Config(foo, *[1, 2], 3)
""")
if __name__ == "__main__":
test_base.main()
| TestFunctionConfig |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_notes.py | {
"start": 653,
"end": 3855
} | class ____(APITestCase):
def test_simple(self) -> None:
group = self.group
activity = Activity.objects.create(
group=group,
project=group.project,
type=ActivityType.NOTE.value,
user_id=self.user.id,
data={"text": "hello world"},
)
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/comments/"
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["id"] == str(activity.id)
def test_note_merge(self) -> None:
"""Test that when 2 (or more) issues with comments are merged, the chronological order of the comments are preserved."""
now = datetime.datetime.now(datetime.UTC)
project1 = self.create_project()
event1 = self.store_event(data={}, project_id=project1.id)
assert event1.group is not None
group1 = event1.group
note1 = Activity.objects.create(
group=group1,
project=project1,
type=ActivityType.NOTE.value,
user_id=self.user.id,
data={"text": "This looks bad :)"},
datetime=now - datetime.timedelta(days=70),
)
note2 = Activity.objects.create(
group=group1,
project=project1,
type=ActivityType.NOTE.value,
user_id=self.user.id,
data={"text": "Yeah we should probably look into this"},
datetime=now - datetime.timedelta(days=66),
)
project2 = self.create_project()
group2 = self.create_group(project2)
note3 = Activity.objects.create(
group=group2,
project=project2,
type=ActivityType.NOTE.value,
user_id=self.user.id,
data={"text": "I have been a good Sentry :)"},
datetime=now - datetime.timedelta(days=90),
)
note4 = Activity.objects.create(
group=group2,
project=project2,
type=ActivityType.NOTE.value,
user_id=self.user.id,
data={"text": "You have been a bad user :)"},
datetime=now - datetime.timedelta(days=88),
)
with self.tasks():
merge_groups([group1.id], group2.id)
assert not Group.objects.filter(id=group1.id).exists()
self.login_as(user=self.user)
url = f"/api/0/issues/{group2.id}/comments/"
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 4
assert response.data[0]["id"] == str(note2.id)
assert response.data[0]["data"]["text"] == note2.data["text"]
assert response.data[1]["id"] == str(note1.id)
assert response.data[1]["data"]["text"] == note1.data["text"]
assert response.data[2]["id"] == str(note4.id)
assert response.data[2]["data"]["text"] == note4.data["text"]
assert response.data[3]["id"] == str(note3.id)
assert response.data[3]["data"]["text"] == note3.data["text"]
| GroupNoteTest |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 91882,
"end": 91940
} | class ____(UserDefinedObjectVariable):
pass
| RandomVariable |
python | walkccc__LeetCode | solutions/1964. Find the Longest Valid Obstacle Course at Each Position/1964.py | {
"start": 0,
"end": 568
} | class ____:
# Similar to 300. Longest Increasing Subsequence
def longestObstacleCourseAtEachPosition(
self, obstacles: list[int],
) -> list[int]:
ans = []
# tails[i] := the minimum tail of all the increasing subsequences having
# length i + 1
tails = []
for obstacle in obstacles:
if not tails or obstacle >= tails[-1]:
tails.append(obstacle)
ans.append(len(tails))
else:
index = bisect.bisect_right(tails, obstacle)
tails[index] = obstacle
ans.append(index + 1)
return ans
| Solution |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_jira.py | {
"start": 1025,
"end": 8406
} | class ____(TestCase):
factory = RequestFactory()
path_base = f"{IntegrationClassification.integration_prefix}jira"
def get_response(self, req: HttpRequest) -> HttpResponse:
return HttpResponse(status=200, content="passthrough")
def get_integration(self) -> Integration:
self.organization = self.create_organization(owner=self.user, region="us")
return self.create_integration(
organization=self.organization, external_id="jira:1", provider="jira"
)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_get_integration_from_request(self) -> None:
request = self.factory.post(path=f"{self.path_base}/issue-updated/")
parser = JiraRequestParser(request, self.get_response)
assert parser.get_integration_from_request() is None
integration = self.get_integration()
with patch(
"sentry.middleware.integrations.parsers.jira.parse_integration_from_request"
) as mock_parse:
mock_parse.return_value = integration
assert parser.get_integration_from_request() == integration
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_get_response_routing_to_control(self) -> None:
paths = [
"/ui-hook/",
"/descriptor/",
"/installed/",
"/uninstalled/",
"/search/org/123/",
"/configure/",
]
for path in paths:
request = self.factory.post(path=f"{self.path_base}{path}")
parser = JiraRequestParser(request, self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_200_OK
assert response.content == b"passthrough"
assert_no_webhook_payloads()
@responses.activate
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_get_response_routing_to_region_sync(self) -> None:
responses.add(
responses.POST,
region.to_url("/extensions/jira/issue/LR-123/"),
body="region response",
status=200,
)
request = self.factory.post(path=f"{self.path_base}/issue/LR-123/")
parser = JiraRequestParser(request, self.get_response)
with patch.object(parser, "get_integration_from_request") as method:
method.return_value = self.get_integration()
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_200_OK
assert response.content == b"region response"
assert_no_webhook_payloads()
@responses.activate
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_get_response_routing_to_region_sync_retry_errors(self) -> None:
responses.add(
responses.POST,
region.to_url("/extensions/jira/issue/LR-123/"),
body="region response",
status=503,
)
request = self.factory.post(path=f"{self.path_base}/issue/LR-123/")
parser = JiraRequestParser(request, self.get_response)
with patch.object(parser, "get_integration_from_request") as method:
method.return_value = self.get_integration()
response = parser.get_response()
# There are 5 retries.
assert len(responses.calls) == 6
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_200_OK
assert response.content == b"passthrough"
assert_no_webhook_payloads()
@responses.activate
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_get_response_routing_to_region_async(self) -> None:
request = self.factory.post(path=f"{self.path_base}/issue-updated/")
parser = JiraRequestParser(request, self.get_response)
integration = self.get_integration()
assert_no_webhook_payloads()
with patch.object(parser, "get_integration_from_request") as method:
method.return_value = integration
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_202_ACCEPTED
assert response.content == b""
assert len(responses.calls) == 0
assert_webhook_payloads_for_mailbox(
mailbox_name=f"jira:{integration.id}", region_names=[region.name], request=request
)
@responses.activate
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_get_response_missing_org_integration(self) -> None:
request = self.factory.post(path=f"{self.path_base}/issue-updated/")
parser = JiraRequestParser(request, self.get_response)
integration = self.create_provider_integration(
provider="jira",
external_id="blag",
)
assert_no_webhook_payloads()
with patch.object(parser, "get_integration_from_request") as method:
method.return_value = integration
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.content == b""
assert len(responses.calls) == 0
assert_no_webhook_payloads()
@override_regions(region_config)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@responses.activate
def test_get_response_invalid_path(self) -> None:
# Invalid path
request = self.factory.post(path="/new-route/for/no/reason/")
parser = JiraRequestParser(request, self.get_response)
with patch.object(parser, "get_integration_from_request") as method:
method.return_value = self.get_integration()
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_200_OK
assert response.content == b"passthrough"
assert len(responses.calls) == 0
assert_no_webhook_payloads()
@override_regions(region_config)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@responses.activate
def test_get_response_multiple_regions(self) -> None:
responses.add(
responses.POST,
eu_region.to_url("/extensions/jira/issue/LR-123/"),
body="region response",
status=200,
)
request = self.factory.post(path=f"{self.path_base}/issue/LR-123/")
parser = JiraRequestParser(request, self.get_response)
# Add a second organization. Jira only supports single regions.
other_org = self.create_organization(owner=self.user, region="eu")
integration = self.get_integration()
integration.add_organization(other_org.id)
with patch.object(parser, "get_integration_from_request") as method:
method.return_value = integration
# assert ValueError is raised if the integration is not valid
with pytest.raises(ValueError):
parser.get_response()
assert_no_webhook_payloads()
| JiraRequestParserTest |
python | django__django | tests/foreign_object/models/person.py | {
"start": 48,
"end": 197
} | class ____(models.Model):
# Table Column Fields
name = models.CharField(max_length=50)
def __str__(self):
return self.name
| Country |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE794.py | {
"start": 278,
"end": 407
} | class ____(BaseModel):
bar: str = StringField()
foo: bool = BooleanField()
# ...
bar = StringField() # PIE794
| User |
python | wandb__wandb | wandb/vendor/pygments/lexers/parsers.py | {
"start": 25866,
"end": 26222
} | class ____(DelegatingLexer):
"""
A lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
.. versionadded:: 1.6
"""
name = 'Treetop'
aliases = ['treetop']
filenames = ['*.treetop', '*.tt']
def __init__(self, **options):
super(TreetopLexer, self).__init__(RubyLexer, TreetopBaseLexer, **options)
| TreetopLexer |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 6579,
"end": 7843
} | class ____(HostConfig, metaclass=abc.ABCMeta):
"""Base class for remote host configuration."""
name: t.Optional[str] = None
provider: t.Optional[str] = None
arch: t.Optional[str] = None
@property
def platform(self) -> str:
"""The name of the platform."""
return self.name.partition('/')[0]
@property
def version(self) -> str:
"""The version of the platform."""
return self.name.partition('/')[2]
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, RemoteCompletionConfig)
super().apply_defaults(context, defaults)
if self.provider == 'default':
self.provider = None
self.provider = self.provider or defaults.provider or 'aws'
self.arch = self.arch or defaults.arch or Architecture.X86_64
@property
def is_managed(self) -> bool:
"""
True if this host is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
"""
return True
@dataclasses.dataclass
| RemoteConfig |
python | huggingface__transformers | src/transformers/models/marian/modeling_marian.py | {
"start": 56225,
"end": 56810
} | class ____(MarianPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = MarianDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Marian, facebook/bart-base->Helsinki-NLP/opus-mt-fr-en
| MarianDecoderWrapper |
python | pypa__hatch | tests/backend/builders/test_binary.py | {
"start": 8395,
"end": 25679
} | class ____:
def test_default(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "binary" / ("my-app-0.1.0.exe" if sys.platform == "win32" else "my-app-0.1.0")).is_file()
def test_default_build_target(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd({"CARGO_BUILD_TARGET": "target"}):
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (
build_path / "binary" / ("my-app-0.1.0-target.exe" if sys.platform == "win32" else "my-app-0.1.0-target")
).is_file()
def test_scripts(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0", "scripts": {"foo": "bar.baz:cli"}},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({
"PYAPP_PROJECT_NAME": "my-app",
"PYAPP_PROJECT_VERSION": "0.1.0",
"PYAPP_EXEC_SPEC": "bar.baz:cli",
}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "binary" / ("foo-0.1.0.exe" if sys.platform == "win32" else "foo-0.1.0")).is_file()
def test_scripts_build_target(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0", "scripts": {"foo": "bar.baz:cli"}},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd({"CARGO_BUILD_TARGET": "target"}):
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({
"PYAPP_PROJECT_NAME": "my-app",
"PYAPP_PROJECT_VERSION": "0.1.0",
"PYAPP_EXEC_SPEC": "bar.baz:cli",
}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (
build_path / "binary" / ("foo-0.1.0-target.exe" if sys.platform == "win32" else "foo-0.1.0-target")
).is_file()
def test_custom_cargo(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd({"CARGO": "cross"}):
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cross", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "binary" / ("my-app-0.1.0.exe" if sys.platform == "win32" else "my-app-0.1.0")).is_file()
def test_no_cargo(self, hatch, temp_dir, mocker):
mocker.patch("shutil.which", return_value=None)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
with pytest.raises(OSError, match="Executable `cargo` could not be found on PATH"), project_path.as_cwd():
next(builder.build())
def test_python_version(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"], "python-version": "4.0"}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({
"PYAPP_PROJECT_NAME": "my-app",
"PYAPP_PROJECT_VERSION": "0.1.0",
"PYAPP_PYTHON_VERSION": "4.0",
}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "binary" / ("my-app-0.1.0.exe" if sys.platform == "win32" else "my-app-0.1.0")).is_file()
def test_pyapp_version(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"], "pyapp-version": "9000"}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY, "--version", "9000"],
cwd=mocker.ANY,
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "binary" / ("my-app-0.1.0.exe" if sys.platform == "win32" else "my-app-0.1.0")).is_file()
def test_verbosity(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd({"HATCH_QUIET": "1"}):
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "binary" / ("my-app-0.1.0.exe" if sys.platform == "win32" else "my-app-0.1.0")).is_file()
def test_local_build_with_build_target(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd({"PYAPP_REPO": "test-path", "CARGO_BUILD_TARGET": "target"}):
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "build", "--release", "--target-dir", mocker.ANY],
cwd="test-path",
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (
build_path / "binary" / ("my-app-0.1.0-target.exe" if sys.platform == "win32" else "my-app-0.1.0-target")
).is_file()
def test_local_build_no_build_target(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"binary": {"versions": ["bootstrap"]}}},
},
},
}
builder = BinaryBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd({"PYAPP_REPO": "test-path"}):
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "build", "--release", "--target-dir", mocker.ANY],
cwd="test-path",
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "binary" / ("my-app-0.1.0.exe" if sys.platform == "win32" else "my-app-0.1.0")).is_file()
def test_legacy(self, hatch, temp_dir, mocker):
subprocess_run = mocker.patch("subprocess.run", side_effect=cargo_install)
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "version": "0.1.0"},
"tool": {
"hatch": {
"build": {"targets": {"app": {"versions": ["bootstrap"]}}},
},
},
}
builder = AppBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
subprocess_run.assert_called_once_with(
["cargo", "install", "pyapp", "--force", "--root", mocker.ANY],
cwd=mocker.ANY,
env=ExpectedEnvVars({"PYAPP_PROJECT_NAME": "my-app", "PYAPP_PROJECT_VERSION": "0.1.0"}),
)
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert (build_path / "app" / ("my-app-0.1.0.exe" if sys.platform == "win32" else "my-app-0.1.0")).is_file()
| TestBuildBootstrap |
python | PrefectHQ__prefect | src/integrations/prefect-azure/tests/experimental/bundles/test_upload.py | {
"start": 1028,
"end": 5696
} | class ____:
"""Tests for the upload_bundle_to_azure_blob_storage function."""
async def test_upload_bundle_with_credentials_block(
self, tmp_bundle_file: Path, mock_blob_storage_credentials: MagicMock
) -> None:
"""Test uploading a bundle using a credentials block."""
container = "test-container"
key = "test-key"
credentials_block_name = "test-credentials"
# Mock the container client's upload_blob method
mock_container_client = (
mock_blob_storage_credentials.return_value.get_container_client.return_value
)
mock_container_client.upload_blob = AsyncMock()
# Call the function
result = await upload_bundle_to_azure_blob_storage(
local_filepath=tmp_bundle_file,
container=container,
key=key,
azure_blob_storage_credentials_block_name=credentials_block_name,
)
# Verify the result
assert result == {"container": container, "key": key}
# Verify the credentials were loaded from the block
mock_blob_storage_credentials.load.assert_called_once_with(
credentials_block_name,
_sync=False,
)
# Verify the container client was created with the correct container
mock_blob_storage_credentials.return_value.get_container_client.assert_called_once_with(
container=container
)
# Verify the blob was uploaded
mock_container_client.upload_blob.assert_called_once()
async def test_upload_bundle_with_nonexistent_file(self, tmp_path: Path) -> None:
"""Test uploading a bundle with a nonexistent file."""
nonexistent_file = tmp_path / "nonexistent.zip"
container = "test-container"
key = "test-key"
credentials_block_name = "test-credentials"
# Call the function and expect a ValueError
with pytest.raises(
ValueError, match=f"Bundle file not found: {nonexistent_file}"
):
await upload_bundle_to_azure_blob_storage(
local_filepath=nonexistent_file,
container=container,
key=key,
azure_blob_storage_credentials_block_name=credentials_block_name,
)
async def test_upload_bundle_with_upload_error(
self, tmp_bundle_file: Path, mock_blob_storage_credentials: MagicMock
) -> None:
"""Test uploading a bundle when the upload fails."""
container = "test-container"
key = "test-key"
credentials_block_name = "test-credentials"
# Mock the container client's upload_blob method to raise an exception
mock_container_client = (
mock_blob_storage_credentials.return_value.get_container_client.return_value
)
mock_container_client.upload_blob.side_effect = ResourceExistsError(
"Blob already exists"
)
# Call the function and expect a RuntimeError
with pytest.raises(
RuntimeError, match="Failed to upload bundle to Azure Blob Storage"
):
await upload_bundle_to_azure_blob_storage(
local_filepath=tmp_bundle_file,
container=container,
key=key,
azure_blob_storage_credentials_block_name=credentials_block_name,
)
async def test_upload_bundle_with_empty_key(
self, tmp_bundle_file: Path, mock_blob_storage_credentials: MagicMock
) -> None:
"""Test uploading a bundle with an empty key (should use the filename)."""
container = "test-container"
key = "" # Empty key
credentials_block_name = "test-credentials"
# Mock the container client's upload_blob method
mock_container_client = (
mock_blob_storage_credentials.return_value.get_container_client.return_value
)
mock_container_client.upload_blob = AsyncMock()
# Call the function
result = await upload_bundle_to_azure_blob_storage(
local_filepath=tmp_bundle_file,
container=container,
key=key,
azure_blob_storage_credentials_block_name=credentials_block_name,
)
# Verify the result uses the filename as the key
assert result == {"container": container, "key": tmp_bundle_file.name}
# Verify the blob was uploaded with the filename as the key
mock_container_client.upload_blob.assert_called_once()
call_args = mock_container_client.upload_blob.call_args[0]
assert call_args[0] == tmp_bundle_file.name
| TestUploadBundleToAzureBlobStorage |
python | pytorch__pytorch | test/nn/test_dropout.py | {
"start": 627,
"end": 3491
} | class ____(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
def _test_alpha_dropout(self, cls, input):
mean = input.mean()
std = input.std()
for p in [0.2, 0.5, 0.8]:
module = cls(p)
input_var = input.detach().clone().requires_grad_()
output = module(input_var)
# output mean should be close to input mean
self.assertLess(abs(output.data.mean() - mean), 0.1)
# output std should be close to input std
self.assertLess(abs(output.data.std() - std), 0.1)
output.backward(input)
def test_AlphaDropout(self):
# generate random tensor with zero mean and unit std
input = torch.randn(5000)
self._test_alpha_dropout(nn.AlphaDropout, input)
def test_FeatureAlphaDropout(self):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
d = random.randint(1, 2)
num_features = 1000
input = torch.randn(num_features, b, d, w, h)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
# no batch dims
input = torch.randn(50, 20, 64, 64)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
@unittest.skipIf(
not (TEST_CUDA or TEST_PRIVATEUSE1), "CUDA and PRIVATEUSE1 unavailable"
)
def test_native_dropout_corner_case(self):
if TEST_CUDA:
device = "cuda"
elif TEST_PRIVATEUSE1:
device = torch._C._get_privateuse1_backend_name()
for train in [True, False]:
for p in [0.0, 1.0]:
for current_device in [device, "cpu"]:
x = torch.randn(5).to(device=current_device).requires_grad_()
x_ref = x.detach().requires_grad_()
o = torch.native_dropout(x, p, train)[0]
o_ref = torch.dropout(x_ref, p, train)
o.sum().backward()
o_ref.sum().backward()
assert o.equal(o_ref)
assert x.grad.equal(x_ref.grad)
def test_invalid_dropout_p(self):
v = torch.ones(1)
self.assertRaises(ValueError, lambda: nn.Dropout(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout1d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout1d(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(1.1))
self.assertRaises(ValueError, lambda: F.dropout(v, -0.1))
self.assertRaises(ValueError, lambda: F.dropout(v, 1.1))
| TestDropoutNN |
python | huggingface__transformers | src/transformers/models/siglip2/configuration_siglip2.py | {
"start": 9776,
"end": 12737
} | class ____(PreTrainedConfig):
r"""
[`Siglip2Config`] is the configuration class to store the configuration of a [`Siglip2Model`]. It is used to
instantiate a Siglip2 model according to the specified arguments, defining the text model and vision model configs.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Siglip2
[google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`Siglip2TextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`Siglip2VisionConfig`].
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import Siglip2Config, Siglip2Model
>>> # Initializing a Siglip2Config with google/siglip2-base-patch16-224 style configuration
>>> configuration = Siglip2Config()
>>> # Initializing a Siglip2Model (with random weights) from the google/siglip2-base-patch16-224 style configuration
>>> model = Siglip2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a Siglip2Config from a Siglip2TextConfig and a Siglip2VisionConfig
>>> from transformers import Siglip2TextConfig, Siglip2VisionConfig
>>> # Initializing a Siglip2Text and Siglip2Vision configuration
>>> config_text = Siglip2TextConfig()
>>> config_vision = Siglip2VisionConfig()
>>> config = Siglip2Config(text_config=config_text, vision_config=config_vision)
```"""
model_type = "siglip2"
sub_configs = {"text_config": Siglip2TextConfig, "vision_config": Siglip2VisionConfig}
def __init__(self, text_config=None, vision_config=None, **kwargs):
if text_config is None:
text_config = Siglip2TextConfig()
logger.info("`text_config` is `None`. Initializing the `Siglip2TextConfig` with default values.")
elif isinstance(text_config, dict):
text_config = Siglip2TextConfig(**text_config)
if vision_config is None:
vision_config = Siglip2VisionConfig()
logger.info("`vision_config` is `None`. initializing the `Siglip2VisionConfig` with default values.")
elif isinstance(vision_config, dict):
vision_config = Siglip2VisionConfig(**vision_config)
self.text_config = text_config
self.vision_config = vision_config
self.initializer_factor = 1.0
super().__init__(**kwargs)
__all__ = ["Siglip2Config", "Siglip2TextConfig", "Siglip2VisionConfig"]
| Siglip2Config |
python | pypa__hatch | docs/.hooks/render_default_test_env.py | {
"start": 1435,
"end": 1788
} | class ____(Preprocessor):
def run(self, lines): # noqa: PLR6301
return (
"\n".join(lines)
.replace(MARKER_DEPENDENCIES, get_dependencies_toml())
.replace(MARKER_MATRIX, get_matrix_toml())
.replace(MARKER_SCRIPTS, get_scripts_toml())
.splitlines()
)
| TestEnvDefaultsPreprocessor |
python | jazzband__django-simple-history | simple_history/template_utils.py | {
"start": 855,
"end": 6325
} | class ____:
"""
Class containing various utilities for formatting the template context for
a historical record.
"""
DEFAULT_MAX_DISPLAYED_DELTA_CHANGE_CHARS: Final = 100
def __init__(
self,
model: type[Model],
historical_record: HistoricalChanges,
*,
max_displayed_delta_change_chars=DEFAULT_MAX_DISPLAYED_DELTA_CHANGE_CHARS,
):
self.model = model
self.record = historical_record
self.max_displayed_delta_change_chars = max_displayed_delta_change_chars
def context_for_delta_changes(self, delta: ModelDelta) -> list[dict[str, Any]]:
"""
Return the template context for ``delta.changes``.
By default, this is a list of dicts with the keys ``"field"``,
``"old"`` and ``"new"`` -- corresponding to the fields of ``ModelChange``.
:param delta: The result from calling ``diff_against()`` with another historical
record. Its ``old_record`` or ``new_record`` field should have been
assigned to ``self.record``.
"""
context_list = []
for change in delta.changes:
formatted_change = self.format_delta_change(change)
context_list.append(
{
"field": formatted_change.field,
"old": formatted_change.old,
"new": formatted_change.new,
}
)
return context_list
def format_delta_change(self, change: ModelChange) -> ModelChange:
"""
Return a ``ModelChange`` object with fields formatted for being used as
template context.
"""
old = self.prepare_delta_change_value(change, change.old)
new = self.prepare_delta_change_value(change, change.new)
old, new = self.stringify_delta_change_values(change, old, new)
field_meta = self.model._meta.get_field(change.field)
return dataclasses.replace(
change,
field=capfirst(field_meta.verbose_name),
old=old,
new=new,
)
def prepare_delta_change_value(
self,
change: ModelChange,
value: ModelChangeValue,
) -> Any:
"""
Return the prepared value for the ``old`` and ``new`` fields of ``change``,
before it's passed through ``stringify_delta_change_values()`` (in
``format_delta_change()``).
For example, if ``value`` is a list of M2M related objects, it could be
"prepared" by replacing the related objects with custom string representations.
:param change:
:param value: Either ``change.old`` or ``change.new``.
"""
field_meta = self.model._meta.get_field(change.field)
if isinstance(field_meta, ManyToManyField):
reverse_field_name = get_m2m_reverse_field_name(field_meta)
# Display a list of only the instances of the M2M field's related model
display_value = [
obj_values_dict[reverse_field_name] for obj_values_dict in value
]
else:
display_value = value
return display_value
def stringify_delta_change_values(
self, change: ModelChange, old: Any, new: Any
) -> tuple[SafeString, SafeString]:
"""
Called by ``format_delta_change()`` after ``old`` and ``new`` have been
prepared by ``prepare_delta_change_value()``.
Return a tuple -- ``(old, new)`` -- where each element has been
escaped/sanitized and turned into strings, ready to be displayed in a template.
These can be HTML strings (remember to pass them through ``mark_safe()`` *after*
escaping).
If ``old`` or ``new`` are instances of ``list``, the default implementation will
use each list element's ``__str__()`` method, and also reapply ``mark_safe()``
if all the passed elements are safe strings.
"""
def stringify_value(value: Any) -> Union[str, SafeString]:
# If `value` is a list, stringify each element using `str()` instead of
# `repr()` (the latter is the default when calling `list.__str__()`)
if isinstance(value, list):
string = f"[{', '.join(map(conditional_str, value))}]"
# If all elements are safe strings, reapply `mark_safe()`
if all(map(is_safe_str, value)):
string = mark_safe(string) # nosec
else:
string = conditional_str(value)
return string
old_str, new_str = stringify_value(old), stringify_value(new)
diff_display = self.get_obj_diff_display()
old_short, new_short = diff_display.common_shorten_repr(old_str, new_str)
# Escape *after* shortening, as any shortened, previously safe HTML strings have
# likely been mangled. Other strings that have not been shortened, should have
# their "safeness" unchanged.
return conditional_escape(old_short), conditional_escape(new_short)
def get_obj_diff_display(self) -> "ObjDiffDisplay":
"""
Return an instance of ``ObjDiffDisplay`` that will be used in
``stringify_delta_change_values()`` to display the difference between
the old and new values of a ``ModelChange``.
"""
return ObjDiffDisplay(max_length=self.max_displayed_delta_change_chars)
| HistoricalRecordContextHelper |
python | huggingface__transformers | tests/models/mgp_str/test_modeling_mgp_str.py | {
"start": 4108,
"end": 7915
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (MgpstrForSceneTextRecognition,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": MgpstrForSceneTextRecognition, "image-feature-extraction": MgpstrModel}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = MgpstrModelTester(self)
self.config_tester = ConfigTester(self, config_class=MgpstrConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_batching_equivalence(self, atol=1e-4, rtol=1e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
@unittest.skip(reason="MgpstrModel does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
@unittest.skip(reason="MgpstrModel does not support feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_gradient_checkpointing_backward_compatibility(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if not model_class.supports_gradient_checkpointing:
continue
config.gradient_checkpointing = True
model = model_class(config)
self.assertTrue(model.is_gradient_checkpointing)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.patch_embeds_hidden_size, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
# We will verify our results on an image from the IIIT-5k dataset
def prepare_img():
url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
return im
@require_vision
@require_torch
| MgpstrModelTest |
python | pytorch__pytorch | torch/distributed/_tools/mem_tracker.py | {
"start": 4639,
"end": 11633
} | class ____:
"""
Manages memory statistics and device attributes for tensor storages.
"""
def __init__(
self, size: int, element_size: int, device: torch.device, reftype: _RefType
) -> None:
"""
Initializes the ``_WeakRefInfo`` object with tensor storage properties.
Args:
size (int): The number of elements in the tensor storage.
element_size (int): The size of each element in the tensor storage.
device (torch.device): The device on which the tensor is allocated.
reftype (_RefType): The reference type of the tensor.
"""
self.size = size
self.element_size = element_size
self.reftype = reftype
# pyrefly: ignore [read-only]
self.device = device
self.mem_consumed = self._calculate_mem_consumed()
def _calculate_mem_consumed(self) -> int:
"""
Calculates the memory consumed by the tensor storage, considering device-specific allocation rules.
Returns:
int: The memory consumed in bytes.
"""
mem = self.size * self.element_size
if self.device.type == "cuda":
return math.ceil((mem) / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE
return mem
def update_mem_consumed(self, st: torch.UntypedStorage) -> int:
"""
Updates and returns the memory consumed if the storage size has changed.
Args:
st (torch.UntypedStorage): The tensor storage to check for size updates.
Returns:
int: The updated memory consumed in bytes.
"""
if st.size() != self.size:
self.size = st.size()
self.mem_consumed = self._calculate_mem_consumed()
return self.mem_consumed
@classmethod
def create_winfo(
cls,
st: torch.UntypedStorage,
device: torch.device,
reftype: _RefType,
callback: Optional[Callable[[Self, weakref.ref], Any]] = None,
) -> tuple[Self, weakref.ref]:
"""
Creates a new ``_WeakRefInfo`` instance and a weak reference to a ``torch.UntypedStorage`` object,
optionally attaching a callback to the weak reference.
Args:
st (torch.UntypedStorage): The storage object for which to create the weak reference info.
device (torch.device): The device associated with the storage object.
reftype (_RefType): The type of reference, used to categorize the storage.
callback (Optional[Callable[[Self, weakref.ref]]]): A callback function that is called when
the storage object is about to be finalized (garbage collected). The callback function
should accept two arguments: the ``_WeakRefInfo`` instance and the weak reference to the storage.
Returns:
Tuple[Self, weakref.ref]: A tuple containing the newly created ``_WeakRefInfo`` instance and the
weak reference to the storage object. The weak reference may have an attached callback if provided.
"""
winfo = cls(st.size(), st.element_size(), device, reftype)
w_st = weakref.ref(st, partial(callback, winfo) if callback else None)
return winfo, w_st
def _get_mem_divisor(units: str) -> int:
unit_dict = {"B": 1, "KiB": 2**10, "MiB": 2**20, "GiB": 2**30}
if units in unit_dict:
return unit_dict[units]
else:
raise ValueError(
f"Unsupported unit: {units}. Supported units are: {', '.join(unit_dict.keys())}"
)
def _rounding_fn(value: int, divisor: int, precision: int) -> Union[float, int]:
return value if divisor == 1 else round(value / divisor, precision)
def _print_snapshot(snapshot: dict[torch.device, dict[str, int]], units: str) -> None:
if len(snapshot) == 0:
print("No memory tracked.")
return
divisor = _get_mem_divisor(units)
for dev, dev_snap in snapshot.items():
if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
continue
print(
f"Device: {dev}",
*(
f"\t{k.value}: {_rounding_fn(v, divisor, 2)} {units}"
if isinstance(k, _RefType)
else f"\t{k}: {_rounding_fn(v, divisor, 2)} {units}"
for k, v in dev_snap.items()
),
sep="\n",
)
def _print_snapshot_tabular(
snapshot: dict[torch.device, dict[str, int]], units: str
) -> None:
if len(snapshot) == 0:
print("No memory tracked.")
return
try:
from tabulate import tabulate
except ImportError as err:
raise ImportError(
"Please install tabulate to use the tabulate option."
) from err
divisor = _get_mem_divisor(units)
table_data = []
key_list = list(next(iter(snapshot.values())).keys())
headers = ["Device"] + [
f"{key.value}" if isinstance(key, _RefType) else f"{key}" for key in key_list
]
for dev, dev_snap in snapshot.items():
if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
continue
row = [str(dev)]
row.extend(f"{_rounding_fn(v, divisor, 2)} {units}" for v in dev_snap.values())
table_data.append(row)
print(tabulate(table_data, headers=headers, tablefmt="rst"))
def _print_state_snapshots(
snapshots: dict[_State, list[dict[torch.device, dict[str, int]]]], units: str
) -> None:
for state, snapshot_list in snapshots.items():
print(f"{state.value}")
for i, snapshot in enumerate(snapshot_list):
print(f"# {i + 1}:")
_print_snapshot(snapshot, units)
print()
def _print_state_snapshots_tabular(
snapshots: dict[_State, list[dict[torch.device, dict[str, int]]]], units: str
) -> None:
try:
from tabulate import tabulate
except ImportError as err:
raise ImportError(
"Please install tabulate to use the tabulate option."
) from err
table_data = []
last_state_call = None
divisor = _get_mem_divisor(units)
for state, snapshot_list in snapshots.items():
for i, snapshot in enumerate(snapshot_list):
state_call = f"{state.value} # {i + 1}"
for dev, dev_snap in snapshot.items():
if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
continue
row = {
"State & Call": (
state_call if state_call != last_state_call else ""
),
"Device": str(dev),
}
last_state_call = state_call
for k, v in dev_snap.items():
row[f"{k.value}" if isinstance(k, _RefType) else f"{k}"] = (
f"{_rounding_fn(v, divisor, 2)} {units}"
)
table_data.append(row)
print(tabulate(table_data, headers="keys", tablefmt="rst"))
| _WeakRefInfo |
python | Textualize__textual | docs/examples/guide/content/renderables.py | {
"start": 538,
"end": 883
} | class ____(App):
"""App to demonstrate Rich renderables in Textual."""
def compose(self) -> ComposeResult:
with open(__file__) as self_file:
code = self_file.read()
code_view = CodeView()
code_view.code = code
yield code_view
if __name__ == "__main__":
app = CodeApp()
app.run()
| CodeApp |
python | pytorch__pytorch | torch/masked/maskedtensor/_ops_refs.py | {
"start": 1803,
"end": 2739
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, input):
if not is_masked_tensor(input):
raise ValueError("MaskedToDense forward: input must be a MaskedTensor.")
if input.layout == torch.strided:
return input
ctx.layout = input.layout
data = input.get_data()
mask = input.get_mask()
return MaskedTensor(data.to_dense(), mask.to_dense())
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
layout = ctx.layout
if layout == torch.sparse_coo:
return grad_output.to_sparse_coo()
elif layout == torch.sparse_csr:
return grad_output.to_sparse_csr()
elif layout == torch.strided:
return grad_output.to_dense()
raise ValueError("to_dense: Unsupported input layout: ", layout)
| _MaskedToDense |
python | doocs__leetcode | lcp/LCP 10. 二叉树任务调度/Solution.py | {
"start": 0,
"end": 356
} | class ____:
def minimalExecTime(self, root: TreeNode) -> float:
def dfs(root: TreeNode) -> Tuple[int, int]:
if not root:
return 0, 0
s1, t1 = dfs(root.left)
s2, t2 = dfs(root.right)
return s1 + s2 + root.val, max(t1, t2, (s1 + s2) / 2) + root.val
return dfs(root)[1]
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vision.py | {
"start": 7911,
"end": 8671
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.get_product.return_value = {}
op = CloudVisionGetProductOperator(location=LOCATION_TEST, product_id=PRODUCT_ID_TEST, task_id="id")
op.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_product.assert_called_once_with(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudVisionProductGet |
python | getsentry__sentry | src/sentry/integrations/jira_server/actions/create_ticket.py | {
"start": 371,
"end": 1559
} | class ____(TicketEventAction):
id = "sentry.integrations.jira_server.notify_action.JiraServerCreateTicketAction"
label = "Create a Jira Server issue in {integration} with these "
ticket_type = "a Jira Server issue"
link = "https://docs.sentry.io/product/integrations/issue-tracking/jira/#issue-sync"
provider = IntegrationProviderSlug.JIRA_SERVER.value
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
fix_versions = self.data.get("fixVersions")
if fix_versions and not isinstance(fix_versions, list):
self.data["fixVersions"] = [fix_versions]
def generate_footer(self, rule_url: str) -> str:
return "This ticket was automatically created by Sentry via [{}|{}]".format(
self.rule.label,
absolute_uri(rule_url),
)
def translate_integration(self, integration: RpcIntegration) -> str:
return integration.metadata.get("domain_name", integration.name)
def get_form_instance(self) -> JiraServerNotifyServiceForm:
return JiraServerNotifyServiceForm(self.data, integrations=self.get_integrations())
| JiraServerCreateTicketAction |
python | huggingface__transformers | src/transformers/models/patchtst/modeling_patchtst.py | {
"start": 32578,
"end": 34445
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
mask (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`, *optional*):
Bool masked tensor indicating which patches are masked
loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length
scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length
patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
Patched input to the Transformer
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
mask: Optional[torch.FloatTensor] = None
loc: Optional[torch.FloatTensor] = None
scale: Optional[torch.FloatTensor] = None
patch_input: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`PatchTSTForPretraining`].
"""
)
| PatchTSTModelOutput |
python | celery__celery | celery/exceptions.py | {
"start": 6540,
"end": 6603
} | class ____(CeleryError):
"""Task related errors."""
| TaskError |
python | kamyu104__LeetCode-Solutions | Python/delete-nodes-and-return-forest.py | {
"start": 233,
"end": 1034
} | class ____(object):
def delNodes(self, root, to_delete):
"""
:type root: TreeNode
:type to_delete: List[int]
:rtype: List[TreeNode]
"""
def delNodesHelper(to_delete_set, root, is_root, result):
if not root:
return None
is_deleted = root.val in to_delete_set
if is_root and not is_deleted:
result.append(root)
root.left = delNodesHelper(to_delete_set, root.left, is_deleted, result)
root.right = delNodesHelper(to_delete_set, root.right, is_deleted, result)
return None if is_deleted else root
result = []
to_delete_set = set(to_delete)
delNodesHelper(to_delete_set, root, True, result)
return result
| Solution |
python | pytorch__pytorch | test/test_optim.py | {
"start": 1880,
"end": 101851
} | class ____(TestCase):
"""
This test class validates the core optimizers and is structured as the correctness of:
- The update algorithms (forloop implementation)
* Every optimizer's algorithm is most readably implemented through a big for-loop
over all the parameters, which is what we refer to as the forloop or single tensor
implementation. These algorithms are manually validated by comparing to the paper
and systematically validated by assuring that the loss goes the right direction
when the optimizer has been applied.
* This implementation should compose with optimizer hyperparameters well, such as
supporting Tensor LRs, the capturable API, and sparse and complex parameters.
- Each varying implementation
* We then have implementations that improve upon the performance of the forloop
implementation by leveraging fusion, namely our foreach (mult_tensor) and fused
implementations.
* These variations are validated numerically by comparing with the forloop version
of the optimizer. In fact, we test most variations this way--we see the forloop
implementation as the ground truth and expect that improvements to it in any way
should be just as correct.
* Both params and optimizer states should be validated numerically.
- state_dict APIs
* The optimizer instance should be serializable
* Calling save and load should be deterministic
* Moving between devices should be seamless
* BC - load_state_dict should be able to handle older optimizer states
- Hook APIs (everything should fire in the right order)
- LR Scheduler integration (composing should not error + should go the right direction)
- Parameter groups (should be equivalent to having multiple optimizers)
- Erroring (what should error should error)
We also cover different ways of generating parameters and grads:
- With parameters, we either generate them randomly given specific shapes or we take
them from a sample NN module.
* Variety is important here because NN modules have type Parameter and randomly
generated tensors have type Tensor.
* Parameters can be sparse for a subset of the optimizers (check out OptimizerInfo)
* Complex parameters should be handled using view_as_real
* Parameters can be spread across different devices and different dtypes for any
given optimizer
* Parameters can be contiguous and noncontiguous
- With grads, we follow suit from the parameters.
* Grads can also be None, empty, or zero-valued, and this should not disrupt training.
"""
@onlyCPU
@optims(optim_db)
def test_optim_infos_do_not_specify_global_cliquey_kwargs(
self, device, dtype, optim_info
):
global_cliquey_flags = ["foreach", "fused", "differentiable"]
for optim_input in optim_info.optim_inputs_func(device=device):
self.assertFalse(
any(f for f in global_cliquey_flags if f in optim_input.kwargs)
)
@optims([optim for optim in optim_db if optim.optim_error_inputs_func is not None])
def test_errors(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
error_inputs = optim_info.optim_error_inputs_func(device=device, dtype=dtype)
for error_input in error_inputs:
optim_input = error_input.optimizer_error_input
params, kwargs = optim_input.params, optim_input.kwargs
if error_input.error_on == OptimizerErrorEnum.CONSTRUCTION_ERROR:
if issubclass(error_input.error_type, Warning):
with self.assertWarnsRegex(
error_input.error_type, error_input.error_regex
):
optim_cls(params, **kwargs)
else:
with self.assertRaisesRegex(
error_input.error_type, error_input.error_regex
):
optim_cls(params, **kwargs)
elif error_input.error_on == OptimizerErrorEnum.STEP_ERROR:
optim = optim_cls(params, **kwargs)
if issubclass(error_input.error_type, Warning):
with self.assertWarnsRegex(
error_input.error_type, error_input.error_regex
):
optim.step()
else:
with self.assertRaisesRegex(
error_input.error_type, error_input.error_regex
):
optim.step()
else:
raise NotImplementedError(f"Unknown error type {error_input.error_on}")
@parametrize("contiguous", [True, False])
@parametrize("with_lrsched", [True, False])
@optims(optim_db, dtypes=[torch.float32])
def test_forloop_goes_right_direction(
self, device, dtype, optim_info, contiguous, with_lrsched
):
optim_cls = optim_info.optim_cls
schedulers_constructors = (
optim_info.scheduler_inputs if with_lrsched else [None]
)
for schedulers_constructor in schedulers_constructors:
# with tensor LR we need fresh inputs for each scheduler
# or mutating it will carry across iters
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
if "foreach" in optim_info.supported_impls:
optim_input.kwargs["foreach"] = False # force forloop
if contiguous:
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
bias = Parameter(torch.randn((10), device=device, dtype=dtype))
else:
weight = Parameter(
torch.randn((10, 5, 2), device=device, dtype=dtype)[..., 0]
)
bias = Parameter(
torch.randn((10, 2), device=device, dtype=dtype)[..., 0]
)
input = torch.randn(5, device=device, dtype=dtype)
params = [weight, bias] if optim_cls.__name__ != "Muon" else [weight]
optimizer = optim_cls(params, **optim_input.kwargs)
schedulers = [
s(optimizer)
for s in (schedulers_constructor if schedulers_constructor else [])
]
def closure():
optimizer.zero_grad()
wo = (
weight.mv(input)
if optim_cls.__name__ == "Muon"
else weight.mv(input) + bias
)
loss = wo.pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
if optim_info.step_requires_closure:
loss = optimizer.step(closure)
else:
loss = closure()
optimizer.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
@onlyCUDA
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@parametrize("with_lrsched", [True, False])
@optims(optim_db, dtypes=[torch.float32])
def test_forloop_goes_right_direction_multigpu(
self, device, dtype, optim_info, with_lrsched
):
optim_cls = optim_info.optim_cls
schedulers_constructors = (
optim_info.scheduler_inputs if with_lrsched else [None]
)
for schedulers_constructor in schedulers_constructors:
# We need a fresh set of inputs if we have a tensor LR
# to not carry mutations across iterations.
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
if "foreach" in optim_info.supported_impls:
optim_input.kwargs["foreach"] = False # force forloop
weight = Parameter(torch.randn((10, 5), device="cuda:0", dtype=dtype))
bias = Parameter(torch.randn((10), device="cuda:1", dtype=dtype))
inpt = torch.randn(5, device="cuda:0", dtype=dtype)
params = [weight, bias] if optim_cls.__name__ != "Muon" else [weight]
optimizer = optim_cls(params, **optim_input.kwargs)
schedulers = [
s(optimizer)
for s in (schedulers_constructor if schedulers_constructor else [])
]
def closure():
optimizer.zero_grad()
wo = (
weight.mv(inpt).cuda(1)
if optim_cls.__name__ == "Muon"
else weight.mv(inpt).cuda(1) + bias
)
loss = wo.pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
loss = optimizer.step(closure)
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
@optims(optim_db, dtypes=[torch.float32])
def test_param_group_with_lrscheduler_goes_right_direction(
self, device, dtype, optim_info
):
optim_cls = optim_info.optim_cls
for schedulers_c in optim_info.scheduler_inputs:
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
weight2 = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
inpt = torch.randn(5, device=device, dtype=dtype)
# avoid endless recompiles by wrapping LR in a tensor if we're compiling
lr = torch.tensor(0.01) if torch.compiler.is_compiling() else 0.01
optimizer = optim_cls(
[{"params": [weight]}, {"params": [weight2], "lr": lr}]
)
schedulers = [scheduler_c(optimizer) for scheduler_c in schedulers_c]
def closure():
optimizer.zero_grad()
loss = (weight.mv(inpt) + weight2.mv(inpt)).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
weight2.grad = weight2.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
loss = optimizer.step(closure)
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
self.assertLess(closure().item(), initial_value)
@parametrize("num_dim", [0, 1, 2])
@optims(optim_db, dtypes=[torch.float32])
def test_tensor_lr(self, device, dtype, optim_info, num_dim):
optim_cls = optim_info.optim_cls
lr_devices = [device]
if _get_device_type(device) != "cpu":
lr_devices.append("cpu")
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
for optim_input, lr_device in product(all_optim_inputs, lr_devices):
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
weight_c = weight.detach().clone().requires_grad_(True)
bias = Parameter(torch.randn((10), device=device, dtype=dtype))
bias_c = bias.detach().clone().requires_grad_(True)
inpt = torch.randn(5, device=device, dtype=dtype)
kwargs = optim_input.kwargs
if "lr" in kwargs:
del kwargs["lr"]
params = [weight, bias] if optim_cls.__name__ != "Muon" else [weight]
kwargs["lr"] = 1.0 if optim_info.step_requires_closure else 1e-3
optimizer_r = optim_cls(params, **kwargs)
try:
kwargs["lr"] = (
torch.tensor(kwargs["lr"]).reshape([1] * num_dim).to(lr_device)
)
params_c = [weight_c, bias_c]
if optim_cls.__name__ == "Muon":
params_c = [weight_c]
optimizer = optim_cls(params_c, **kwargs)
except ValueError as e:
self.assertRegex(str(e), ".*lr as a Tensor is not supported.*")
continue
def closure(optim, w, b, i):
optim.zero_grad()
wo = w.mv(i) if optim_cls.__name__ == "Muon" else w.mv(i) + b
loss = wo.pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
w.grad = w.grad.to_sparse()
b.grad = b.grad.to_sparse()
return loss
for _ in range(5):
if optim_info.step_requires_closure:
optimizer_r.step(
functools.partial(closure, optimizer_r, weight, bias, inpt)
)
optimizer.step(
functools.partial(closure, optimizer, weight_c, bias_c, inpt)
)
else:
closure(optimizer_r, weight, bias, inpt)
optimizer_r.step()
closure(optimizer, weight_c, bias_c, inpt)
optimizer.step()
self.assertEqual(weight, weight_c)
if optim_cls.__name__ != "Muon":
self.assertEqual(bias, bias_c)
@parametrize("with_lrsched", [True, False])
@optims(
[o for o in optim_db if o.supports_sparse or o.only_supports_sparse_grads],
dtypes=[torch.float64],
)
def test_rosenbrock_sparse(self, device, dtype, optim_info, with_lrsched):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# Fused impls do not support sparse gradients
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable", "fused")
)
kwarg_updates, schedulers_constructors = optim_info.metadata_for_sparse
if with_lrsched and len(schedulers_constructors) == 0:
return
supported_inputs = []
if len(kwarg_updates) != 0:
seen = set()
for i in all_optim_inputs:
for k in kwarg_updates:
if k in i.kwargs:
del i.kwargs[k]
hashable_kwargs = tuple(sorted(i.kwargs.items()))
if len(i.kwargs) > 0 and hashable_kwargs not in seen:
supported_inputs.append(i)
seen.add(hashable_kwargs)
if "lr" in kwarg_updates:
i.kwargs["lr"] = kwarg_updates["lr"]
else:
supported_inputs = all_optim_inputs
for optim_input in supported_inputs:
kwargs = optim_input.kwargs
multi_tensor = kwargs.get("foreach", False)
# For rosenbrock tests, it is mandated that the param is a tensor with 2 numbers
if multi_tensor:
params_t = [
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5], dtype=dtype),
]
else:
params_t = [torch.tensor([1.5, 1.5])]
params = [Parameter(param_t) for param_t in params_t]
optimizer = optim_cls(params, **kwargs)
schedulers = [
s(optimizer) for s in (schedulers_constructors if with_lrsched else [])
]
if not optim_info.only_supports_sparse_grads:
params_c = [Parameter(param_t.clone()) for param_t in params_t]
optimizer_c = optim_cls(params_c, **kwargs)
schedulers_c = [
s(optimizer_c)
for s in (schedulers_constructors if with_lrsched else [])
]
solution = torch.tensor([1, 1])
with torch.no_grad():
initial_dist = sum(param.dist(solution) for param in params)
def get_grad(param, sparse_grad, w):
grad = drosenbrock(param)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
# Depending on w, provide only the x or y gradient
if sparse_grad:
if w:
i = torch.tensor([[0, 0]], dtype=torch.int64)
x = grad[0]
v = torch.tensor([x / 4.0, x - x / 4.0])
else:
i = torch.tensor([[1, 1]], dtype=torch.int64)
y = grad[1]
v = torch.tensor([y - y / 4.0, y / 4.0])
grad_out = torch.sparse_coo_tensor(i, v, (2,), dtype=v.dtype)
else:
if w:
grad_out = torch.tensor([grad[0], 0], dtype=param.dtype)
else:
grad_out = torch.tensor([0, grad[1]], dtype=param.dtype)
return grad_out
def eval(params, sparse_grad, w):
optimizer.zero_grad()
if multi_tensor:
loss = sum(rosenbrock(param) for param in params)
else:
loss = rosenbrock(params[0])
loss.backward()
grads_out = [get_grad(param, sparse_grad, w) for param in params]
with torch.no_grad():
params[0].grad = grads_out[0]
if multi_tensor:
params[1].grad = grads_out[1].to(dtype=dtype)
return loss
for i in range(1800):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, True, w))
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params[0]))
else:
scheduler.step()
if not optim_info.only_supports_sparse_grads:
optimizer_c.step(functools.partial(eval, params_c, False, w))
for scheduler in schedulers_c:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params_c[0]))
else:
scheduler.step()
# Tolerance is increased due to floating point error from different
# code path for dense case: x v.s. x - x / 4.0 + x / 4.0
self.assertEqual(params, params_c, atol=5e-6, rtol=5e-6)
if not kwargs.get("maximize", False):
self.assertLessEqual(
sum(param.dist(solution) for param in params), initial_dist
)
else:
self.assertGreaterEqual(
sum(rosenbrock(param) for param in params),
sum(rosenbrock(param_t) for param_t in params_t),
)
@skipMPS
@optims([o for o in optim_db if o.supports_complex], dtypes=[torch.complex64])
def test_complex(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# Also skip fused, since our fused kernels do not support complex
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable", "fused")
)
for optim_input in all_optim_inputs:
# Last param is intentionally real to test that we can mix real and complex
complex_params = [
torch.randn(10, 5, device=device, dtype=dtype, requires_grad=True),
torch.randn(10, device=device, dtype=dtype, requires_grad=True),
torch.randn(
10, 5, device=device, dtype=torch.float32, requires_grad=True
),
]
real_params = [
(
torch.view_as_real(param).detach().clone().requires_grad_()
if param.is_complex()
else param.detach().clone().requires_grad_()
)
for param in complex_params
]
complex_optimizer = optim_cls(complex_params, **optim_input.kwargs)
real_optimizer = optim_cls(real_params, **optim_input.kwargs)
real_steps = []
complex_steps = []
grads_losses = []
def real_closure():
for param in real_params:
grad = torch.randn_like(param)
param.grad = grad
real_steps.append(param.detach().clone())
grads_losses.append(grad.clone())
loss = torch.randn(1)
grads_losses.append(loss.clone())
return loss
def complex_closure():
for param in complex_params:
if torch.is_complex(param):
grad = torch.view_as_complex(grads_losses.pop(0))
complex_steps.append(torch.view_as_real_copy(param.detach()))
else:
grad = grads_losses.pop(0)
complex_steps.append(param.detach().clone())
param.grad = grad
return grads_losses.pop(0)
for _ in range(3):
if optim_info.step_requires_closure:
# LBFGS, for example, requires closure and calls it internally
real_optimizer.step(real_closure)
complex_optimizer.step(complex_closure)
else:
# For other optimizers, we call closure explicitly to set the gradients
real_closure()
complex_closure()
real_optimizer.step()
complex_optimizer.step()
# Final Parameters should be the same
complex_params_asreal = [
torch.view_as_real(param) if param.is_complex() else param
for param in complex_params
]
self.assertEqual(real_params, complex_params_asreal)
# All intermediate steps should also be the same
# also checks steps taken within for example a line search
self.assertEqual(complex_steps, real_steps)
@skipMPS
@optims([o for o in optim_db if o.supports_complex], dtypes=[torch.complex64])
def test_complex_2d(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# Also skip fused, since our fused kernels do not support complex
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable", "fused")
)
for optim_input in all_optim_inputs:
if optim_info.step_requires_closure:
# Why? The way we implement complex is by turning complex params into view_as_real
# alternatives. For example, an size (M,N) tensor will become (M,N,2). In this test,
# we break apart a tensor into its real and imaginary parts, which would be 2x(M,N).
# For other pointwise optimizers, this distinction is trivial, but for LBFGS where
# there are reductions across all parameters (and all the grads get flattened into
# one long Tensor), this ordering matters. Why? Reductions are not deterministic
# because addition between floating point numbers is not associative, i.e.,
# a + b + c != a + c + b. Thus, we add a seed here to control the discrepancy that
# will happen with LBFGS. Note that in test_complex above, there is no need for a seed
# nor for increased tolerance, because results should be bitwise equivalent.
torch.manual_seed(2024)
a1 = torch.randn(2, device=device, dtype=dtype, requires_grad=True)
a1_real = a1.real.detach().clone()
a1_imag = a1.imag.detach().clone()
a1_real.requires_grad_()
a1_imag.requires_grad_()
optim1 = optim_cls([a1], **optim_input.kwargs)
optim2 = optim_cls([a1_real, a1_imag], **optim_input.kwargs)
a1_reals = TensorTracker()
a1_imags = TensorTracker()
a1_grad_reals = TensorTracker()
a1_grad_imags = TensorTracker()
losses = TensorTracker()
def closure1():
optim1.zero_grad()
loss = rosenbrock(a1).abs()
loss.backward()
# Track clones to best test accuracy
a1_reals.add(a1.real)
a1_imags.add(a1.imag)
a1_grad_reals.add(a1.grad.real)
a1_grad_imags.add(a1.grad.imag)
losses.add(loss)
return loss
def closure2():
optim2.zero_grad()
a1_reals.pop_check_set(a1_real, self)
a1_imags.pop_check_set(a1_imag, self)
a2 = torch.complex(a1_real, a1_imag)
loss = rosenbrock(a2).abs()
losses.pop_check_set(loss, self)
loss.backward()
a1_grad_reals.pop_check_set(a1_real.grad, self)
a1_grad_imags.pop_check_set(a1_imag.grad, self)
return loss
for _ in range(3):
if optim_info.step_requires_closure:
# LBFGS, for example, requires closure and calls it internally
optim1.step(closure1)
optim2.step(closure2)
else:
closure1()
closure2()
optim1.step()
optim2.step()
self.assertEqual(a1.real, a1_real)
self.assertEqual(a1.imag, a1_imag)
self.assertTrue(a1_reals.all_popped())
self.assertTrue(a1_imags.all_popped())
self.assertTrue(a1_grad_reals.all_popped())
self.assertTrue(a1_grad_imags.all_popped())
self.assertTrue(losses.all_popped())
def test_adamw_serialization(self, device):
model = torch.nn.Linear(5, 5).to(device)
optim = torch.optim.AdamW(model.parameters())
loaded_dict = optim.state_dict()
# Test that Adam respects the decoupled_weight_decay key
new_optim = torch.optim.Adam(model.parameters())
new_optim.load_state_dict(loaded_dict)
self.assertTrue(new_optim.param_groups[0]["decoupled_weight_decay"])
# Test that decoupled_weight_decay is always True for AdamW
adam_optim = torch.optim.Adam(model.parameters())
adam_state_dict = adam_optim.state_dict()
self.assertFalse(adam_state_dict["param_groups"][0]["decoupled_weight_decay"])
new_optim = torch.optim.AdamW(model.parameters())
new_optim.load_state_dict(adam_state_dict)
self.assertTrue(new_optim.param_groups[0]["decoupled_weight_decay"])
# Test that state_dicts from the old AdamW (with no decoupled_weight_decay key)
# will have decoupled_weight_decay=True in new AdamW:
old_adamw_dict = deepcopy(loaded_dict)
del old_adamw_dict["param_groups"][0]["decoupled_weight_decay"]
self.assertFalse("decoupled_weight_decay" in old_adamw_dict["param_groups"][0])
new_optim = torch.optim.AdamW(model.parameters())
new_optim.load_state_dict(old_adamw_dict)
self.assertTrue(new_optim.param_groups[0]["decoupled_weight_decay"])
def _compare_between(
self, inputs, models, optimizers, assert_eq_kwargs=None, assert_step_dtype=None
):
# why 7? iteration 7 is where we start to see differences for RAdam
# params interacting with the small eps value, because that's right
# after rho_t becomes greater than 5 in step 6.
if assert_eq_kwargs is None:
assert_eq_kwargs = {}
kIterations = 7
tracker = TensorTracker(assert_eq_kwargs)
for i in range(kIterations):
state, updated_params = [], []
if not isinstance(inputs, list):
inputs = [inputs, inputs]
for input, model, optimizer in zip(inputs, models, optimizers):
optimizer.zero_grad()
if i == 3:
# Freeze a layer to test if the step of this layer in 'fused' or 'foreach'
# is same as the step in 'forloop'.
model[2].requires_grad_(False)
if i == 5:
# Unfreeze the layer after 2 iters.
model[2].requires_grad_(True)
# Test that step behaves as expected (a no-op) when grads are set to None
if i != 2:
output = model(input)
loss = output.sum()
loss.backward()
optimizer.step()
state.append(optimizer.state)
updated_params.append(model.parameters())
og_state, new_state = state
for og_p, new_p in zip(updated_params[0], updated_params[1]):
tracker.add(og_p)
tracker.pop_check_set(new_p, self)
# check that optimizer states are the same
og_p_state = og_state[og_p]
new_p_state = new_state[new_p]
if assert_step_dtype is not None:
if torch.is_tensor(og_p_state.get("step", None)):
self.assertEqual(og_p_state["step"].dtype, assert_step_dtype)
if torch.is_tensor(new_p_state.get("step", None)):
self.assertEqual(new_p_state["step"].dtype, assert_step_dtype)
for k in og_p_state:
tracker.add(og_p_state[k])
tracker.pop_check_set(new_p_state[k], self)
self.assertTrue(tracker.all_popped())
def _test_derived_optimizers(
self,
device,
dtype,
optim_info,
flag,
reduced_precision=False,
assert_step_dtype=None,
):
"""
Given a flag 'fused' or 'foreach', test for parity of optimizer state
and updated parameters between when the flag is set to True and False
for provided optimizer configurations.
"""
assert flag in ("foreach", "fused")
assert_eq_kwargs = {} if not reduced_precision else FP16_REDUCED_PRECISION
optim_inputs = optim_info.optim_inputs_func(device=device, dtype=dtype)
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
models, optimizers = [], []
kwargs = deepcopy(optim_input.kwargs)
if kwargs.get("capturable", False) and _get_device_type(device) == "cpu":
# capturable is not supported on CPU
continue
for flag_value in (False, True):
kwargs[flag] = flag_value
input = torch.tensor(
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=dtype, device=device
).reshape(3, 2)
torch.manual_seed(1)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
)
model.to(dtype=dtype, device=device)
# foreach/fused optimizers should be tested with a
# zero_size tensor as its last param.
# ref: https://github.com/pytorch/pytorch/issues/100701
empty_param = torch.empty(
(), device=device, dtype=dtype, requires_grad=True
)
empty_param.grad = torch.rand_like(empty_param)
params = list(model.parameters()) + [empty_param]
optimizer = optim_cls(params, **kwargs)
models.append(model)
optimizers.append(optimizer)
self._compare_between(
input, models, optimizers, assert_eq_kwargs, assert_step_dtype
)
@skipMPS # MPS doesn't support torch.float64, see https://github.com/pytorch/pytorch/issues/115350
@optims(
[optim for optim in optim_db if "foreach" in optim.supported_impls],
dtypes=[torch.float64],
)
def test_foreach_matches_forloop(self, device, dtype, optim_info):
self._test_derived_optimizers(device, dtype, optim_info, "foreach")
@onlyCUDA
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@parametrize("impl", ["foreach", "fused"])
@optims(
[
optim
for optim in optim_db
if "foreach" in optim.supported_impls or "fused" in optim.supported_impls
]
)
def test_mixed_device_dtype(self, device, dtype, optim_info, impl):
"""
Similar in essence to _test_derived_optimizers above. The main difference is that
_test_derived_optimizers uses model parameters whereas we randomly pass in
parameters of different dtypes and devices here. We need multiple GPUs (vs just a
CPU and GPU) because fused adam only works on GPUs. (Thus we only run the tests
that call into this helper when TEST_MULTIGPU.)
"""
assert impl in ("foreach", "fused")
if impl == "foreach" and "foreach" not in optim_info.supported_impls:
return unittest.skip(
f"foreach not supported for {optim_info.optim_cls.__name__}"
)
elif impl == "fused" and "cuda" not in optim_info.supports_fused_on:
return unittest.skip(
f"fused not supported for {optim_info.optim_cls.__name__} on cuda"
)
params = [
torch.rand(2, 3, dtype=torch.float64, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float64, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device="cuda:1", requires_grad=True),
torch.randint(
1024, (2, 3), dtype=torch.int64, device="cuda:1", requires_grad=False
),
]
for p in params:
if p.requires_grad:
p.grad = torch.rand_like(p, device=p.device, dtype=p.dtype)
kIterations = 7 if impl == "foreach" else 1
optim_inputs = optim_info.optim_inputs_func(device=device)
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
updated_params, state = [], []
kwargs = deepcopy(optim_input.kwargs)
if kwargs.get("capturable", False) and _get_device_type(device) == "cpu":
# capturable is not supported on CPU
continue
for use_impl in (False, True):
kwargs[impl] = use_impl
params_clone = []
for p in params:
p_clone = p.detach().clone()
if p.requires_grad:
p_clone.requires_grad = True
p_clone.grad = p.grad.detach().clone()
params_clone.append(p_clone)
optimizer = optim_cls(params_clone, **kwargs)
for _ in range(kIterations):
optimizer.step()
state.append(optimizer.state)
updated_params.append(params_clone)
og_state, new_state = state
for og_p, new_p in zip(updated_params[0], updated_params[1]):
# Increasing the tolerance as we are collating lots of ops together for optimizers and
# the designated tolerances are for single op only.
single_rtol, single_atol = torch.testing._comparison.get_tolerances(
new_p.dtype, rtol=None, atol=None
)
rtol = 5 * single_rtol
atol = 5 * single_atol
self.assertEqual(og_p, new_p, rtol=rtol, atol=atol)
# check that optimizer states are the same
og_p_state = og_state[og_p]
new_p_state = new_state[new_p]
for k in og_p_state:
actual = new_p_state[k]
self.assertEqual(og_p_state[k], actual, rtol=rtol, atol=atol)
    @onlyCUDA
    @optims(
        [optim for optim in optim_db if "foreach" in optim.supported_impls],
        dtypes=[torch.float64],
    )
    def test_set_default_dtype_works_with_foreach(self, device, dtype, optim_info):
        """Foreach impls must respect the global default dtype for the `step` state.

        Regression test for https://github.com/pytorch/pytorch/issues/110940:
        `step` is coerced to float32 unless the default dtype is the higher
        precision float64.
        """
        # We coerce step to always be float32 unless the
        # default dtype is higher prec float64
        old_default_dtype = torch.get_default_dtype()
        for default_dtype in [torch.float64, torch.float16]:
            try:
                torch.set_default_dtype(default_dtype)
                self._test_derived_optimizers(
                    device,
                    dtype,
                    optim_info,
                    "foreach",
                    # A float16 default lowers intermediate precision, so the
                    # helper compares against the forloop impl with looser tolerances.
                    reduced_precision=default_dtype == torch.float16,
                    assert_step_dtype=(
                        torch.float64
                        if default_dtype == torch.float64
                        else torch.float32
                    ),
                )
            finally:
                # Restore the process-global default dtype even if the helper raises,
                # so later tests are unaffected.
                torch.set_default_dtype(old_default_dtype)
@onlyCUDA
@largeTensorTest("72GB", "cuda")
@optims(
[optim for optim in optim_db if "foreach" in optim.supported_impls],
dtypes=[torch.float16],
)
def test_foreach_large_tensor(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
params = [torch.ones(2**32, device=device, dtype=dtype)]
params[0].grad = torch.zeros_like(params[0])
optimizer = optim_cls(params, foreach=True, **optim_input.kwargs)
optimizer.step()
    @onlyCUDA
    @optims(
        [optim for optim in optim_db if "foreach" in optim.supported_impls],
        dtypes=[torch.float32],
    )
    def test_peak_memory_foreach(self, device, dtype, optim_info):
        """Bound the peak-memory overhead of foreach impls vs. single-tensor impls.

        Runs each optimizer config once with foreach=False and once with
        foreach=True, measures CUDA peak memory of a steady-state step, and
        asserts the multi-tensor peak stays within the single-tensor peak plus
        an expected budget of per-optimizer intermediate buffers.
        """
        nparams = 10
        optim_inputs = optim_info.optim_inputs_func(device=device)
        optim_cls = optim_info.optim_cls
        for optim_input in optim_inputs:
            kwargs = deepcopy(optim_input.kwargs)
            max_mems = []
            for flag_value in (False, True):
                kwargs["foreach"] = flag_value
                # The 16 * 8 = 128 is critical here! Our CUDACachingAllocator allocates in blocks
                # of 512, meaning any tensor that occupies <512 bytes of memory will allocate a
                # whole 512 bytes anyway. We use 128 (cuz datasize would be 4 bytes) so that param
                # is size 512 exactly, making our later calculations for intermediate_size easy.
                param = torch.rand(16, 8, device=device, dtype=dtype)
                params = [torch.rand_like(param) for _ in range(nparams)]
                optimizer = optim_cls(params, **kwargs)
                for p in params:
                    p.grad = torch.rand_like(p)
                # Warm-up step: lazily-created optimizer state gets allocated here,
                # before the peak-memory window below.
                optimizer.step()
                import gc
                gc.collect()
                torch.cuda.reset_peak_memory_stats()
                optimizer.step()
                gc.collect()
                max_mems.append(torch.cuda.max_memory_allocated())
            st_max_mem, mt_max_mem = max_mems
            # Total bytes of one "wave" of foreach intermediates across all params.
            intermediate_size = nparams * param.nelement() * param.element_size()
            nintermediates = 1  # we expect a budget of 1 intermediate most of the time
            # Check the param group directly to handle if the compiler set capturable
            if optimizer.param_groups[0].get(
                "capturable", False
            ) or optim_cls.__name__ in ["Adadelta", "ASGD", "RAdam"]:
                # with capturable in Adam(W), we have 2 extra intermediates for the bias_corrections
                # with Adadelta, we have 2 extra for (acc_delta + eps) and (square_avg + eps)
                # ASGD allocates axs, 2x mus, 2x etas, and grads at the same time
                nintermediates = 3
                if optim_cls.__name__ == "NAdam":
                    # with capturable in NAdam, we have 3 extra intermediates for the
                    # bias_correction, mus, and mu_nexts
                    if TEST_WITH_TORCHDYNAMO:
                        # With dynamo, the eager/FX backend appears to hold memory longer than
                        # vanilla eager: https://github.com/pytorch/pytorch/issues/125511
                        nintermediates = 8
                    else:
                        nintermediates = 5
                if optim_cls.__name__ == "RAdam":
                    # RAdam has four intermediates with capturable
                    # num, unrect_step_size, buffer, grouped_grads
                    if TEST_WITH_TORCHDYNAMO:
                        # With dynamo, the eager/FX backend appears to hold memory than
                        # vanilla eager: https://github.com/pytorch/pytorch/issues/125511
                        nintermediates = 6
                    else:
                        nintermediates = 4
            elif optim_cls.__name__ in ["NAdam", "Adagrad", "RMSprop", "Adafactor"]:
                # NAdam uses two intermediates at the same time (grads & exp_avg_sq_sqrt)
                # Adagrad uses std and grads at the same time
                # RMSprop uses avg and grads
                # Adafactor uses row/col var and its mean
                nintermediates = 2
                if optim_cls.__name__ == "Adafactor" and kwargs.get("maximize", False):
                    # When maximize is True, Adafactor also tracks device_grad
                    nintermediates = 3
            # Dynamo ST uses less mem than eager in the case of Adam/Adagrad/Nadam/RAdam
            # which makes the foreach memory check fail
            if TEST_WITH_TORCHDYNAMO:
                st_max_mem += 6000
            expected_max_mem = st_max_mem + intermediate_size * nintermediates
            # hipcc currently can't generate efficient code for the small buffer optimization
            # code path (see Note [small buffer optimization] for details), thus we always
            # dynamically allocate the tensor metadata for ROCM. Adjusting the expected max
            # memory usage to account for this.
            if TEST_WITH_ROCM:
                expected_max_mem *= 1.02
            self.assertLessEqual(mt_max_mem, expected_max_mem)
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=floating_types_and(
torch.bfloat16,
torch.float16,
),
)
def test_fused_matches_forloop(self, device, dtype, optim_info):
if _get_device_type(device) not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
if _get_device_type(device) == "mps" and dtype not in (
torch.float16,
torch.float32,
torch.bfloat16,
):
self.skipTest(
"MPS supports only torch.float16, torch.float32 and torch.bfloat16"
)
self._test_derived_optimizers(device, dtype, optim_info, "fused")
    @optims(
        [optim for optim in optim_db if "fused" in optim.supported_impls],
        dtypes=(torch.float32,),
    )
    def test_fused_error_on_params_on_meta(self, device, dtype, optim_info):
        """fused=True must raise a clear error for meta-device params, then step
        fine once the params are materialized on a real device."""
        if _get_device_type(device) not in optim_info.supports_fused_on:
            self.skipTest(
                f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
            )
        # Build the model — and later its grads — entirely on the meta device.
        with torch.device("meta"):
            model = torch.nn.Sequential(
                torch.nn.Linear(2, 3),
                torch.nn.Sigmoid(),
                torch.nn.Linear(3, 1),
                torch.nn.Sigmoid(),
            ).to(dtype)
        optimizer = optim_info.optim_cls(model.parameters(), fused=True)
        with torch.device("meta"):
            for p in model.parameters():
                p.grad = torch.rand_like(p)
        with self.assertRaisesRegex(
            RuntimeError,
            "`fused=True` requires all the params to be floating point Tensors",
        ):
            optimizer.step()
        # After materializing storage on the real device, the same optimizer
        # instance must be usable.
        optimizer.zero_grad(set_to_none=True)
        model.to_empty(device=device)
        for p in model.parameters():
            p.grad = torch.rand_like(p)
        optimizer.step()
@onlyNativeDeviceTypes
@largeTensorTest("64GB")
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=[torch.float16],
)
def test_fused_large_tensor(self, device, dtype, optim_info):
if device not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
params = [torch.ones(2**32, device=device, dtype=dtype)]
params[0].grad = torch.zeros_like(params[0])
optimizer = optim_cls(params, fused=True, **optim_input.kwargs)
optimizer.step()
@onlyCUDA
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=[torch.float32],
)
def test_fused_does_not_step_if_foundinf(self, device, dtype, optim_info):
if device not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device=device)
num_params = 5
for optim_input in optim_inputs:
for no_grad_scale in (False, True):
params = [
torch.ones((1,), device=device, dtype=dtype)
for _ in range(num_params)
]
params_c = [param.detach().clone() for param in params]
for p in params:
p.grad = torch.ones_like(p)
optimizer = optim_cls(params, fused=True, **optim_input.kwargs)
optimizer.grad_scale = (
None
if no_grad_scale
else torch.ones((1,), dtype=dtype, device=device)
)
optimizer.found_inf = torch.ones((), dtype=dtype, device=device)
optimizer.step()
for p in params:
if "step" in optimizer.state[p]:
self.assertEqual(
torch.zeros((), dtype=dtype, device=device),
optimizer.state[p]["step"],
)
self.assertEqual(params, params_c)
@parametrize("impl", ["fused", "capturable"])
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=[torch.float32],
)
def test_cpu_load_state_dict(self, device, dtype, impl, optim_info):
# NOTE: This SIMULATES a fused/capturable optimizer with state moved to CPU, issue 103256
# How do we get there? Users typically create CUDA models on fused optimizers and then
# store checkpoints on CPU as CUDA memory is limited with torch.load(...map_location="cpu").
# Since this is a unit test, it is more expedient to simulate what the state_dict
# would look like, which is basically CPU tensors with fused/capturable flag = True.
optim_cls = optim_info.optim_cls
opt_name = optim_cls.__name__
if opt_name in ("SGD", "Adagrad") and impl == "capturable":
# Capturable SGD/Adagrad does not exist
self.skipTest(f"{opt_name} does not currently support capturable")
if _get_device_type(device) == "cpu":
self.skipTest("Test is only for non-cpu devices")
elif (
impl == "fused"
and _get_device_type(device) not in optim_info.supports_fused_on
):
self.skipTest(f"{device} is not supported for fused on {opt_name}")
elif impl == "capturable" and _get_device_type(device) == "mps":
self.skipTest("MPS does not support capturable")
cpu_optim_inputs = optim_info.optim_inputs_func(device="cpu")
for optim_input in cpu_optim_inputs:
param = torch.tensor([0.1, 0.2], dtype=dtype, device="cpu")
optimizer = optim_cls([param], **optim_input.kwargs)
param.grad = torch.rand_like(param)
optimizer.step()
optim_state_dict_cpu = deepcopy(optimizer.state_dict())
optim_state_dict_cpu["param_groups"][0][impl] = True
# load
optim_input.kwargs[impl] = True
param_device = param.detach().clone().to(device=device)
optimizer_device = optim_cls([param_device], **optim_input.kwargs)
optimizer_device.load_state_dict(optim_state_dict_cpu)
optimizer_device.zero_grad()
param_device.grad = torch.rand_like(param_device)
optimizer_device.step()
    @optims(optim_db, dtypes=[torch.float32])
    def test_param_groups_weight_decay(self, device, dtype, optim_info):
        """Per-param-group weight_decay overrides are honored and loss improves."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info, skip=("differentiable",)
        )
        for optim_input in all_optim_inputs:
            # Group 0 keeps the configured weight_decay; group 1 forces it to 0.
            weight_kwargs = optim_input.kwargs
            weight2_kwargs = deepcopy(optim_input.kwargs)
            weight2_kwargs["weight_decay"] = 0.0
            weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
            weight2 = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
            input = torch.randn(5, device=device, dtype=dtype)
            optimizer = optim_cls(
                [
                    dict(params=[weight], **weight_kwargs),
                    dict(params=[weight2], **weight2_kwargs),
                ]
            )
            loss = (weight.mv(input) + weight2.mv(input)).pow(2).sum()
            initial_value = loss.item()
            for _ in range(20):
                optimizer.zero_grad()
                loss = (weight.mv(input) + weight2.mv(input)).pow(2).sum()
                loss.backward()
                if optim_info.only_supports_sparse_grads:
                    # For this test, we naively convert the Tensor layout, which we know does
                    # NOT represent the expected use case for optims like SparseAdam!
                    weight.grad = weight.grad.to_sparse()
                    weight2.grad = weight2.grad.to_sparse()
                optimizer.step()
            # Test that the direction of loss moved appropriately
            if optim_input.kwargs.get("maximize", False):
                self.assertGreater(loss.item(), initial_value)
            else:
                self.assertLess(loss.item(), initial_value)
    @optims(optim_db, dtypes=[torch.float32])
    def test_param_groups_lr(self, device, dtype, optim_info):
        """A per-group lr overrides the optimizer-level lr: the trained group must
        make progress while a group inheriting a near-zero outer lr stays put."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info, skip=("differentiable",)
        )
        for optim_input in all_optim_inputs:
            # optim_input.kwargs will be the param group kwargs, which should have >0 lr
            if "lr" not in optim_input.kwargs or optim_input.kwargs["lr"] == 0:
                optim_input.kwargs["lr"] = 1e-3
            # Near-zero lr for the default group: its params should not move.
            outer_kwargs = {"lr": 1e-28}
            if optim_cls.__name__ == "Rprop":
                # Allow min step size to be 0
                outer_kwargs["step_sizes"] = (0, 50)
            weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
            bias = Parameter(torch.randn((10), device=device, dtype=dtype))
            irrelevant = Parameter(torch.randn((2, 2), device=device, dtype=dtype))
            irrelevant_clone = irrelevant.clone()
            input = torch.randn(5, device=device, dtype=dtype)
            # Muon only accepts 2D parameters, so it cannot take the 1D bias.
            params = [weight, bias] if optim_cls.__name__ != "Muon" else [weight]
            optimizer = optim_cls(
                [
                    dict(params=params, **optim_input.kwargs),
                    dict(params=[irrelevant]),
                ],
                **outer_kwargs,
            )
            wo = (
                weight.mv(input)
                if optim_cls.__name__ == "Muon"
                else weight.mv(input) + bias
            )
            loss = wo.pow(2).sum()
            initial_value = loss.item()
            for _ in range(20):
                optimizer.zero_grad()
                wo = (
                    weight.mv(input)
                    if optim_cls.__name__ == "Muon"
                    else weight.mv(input) + bias
                )
                loss = wo.pow(2).sum()
                loss.backward()
                irrelevant.grad = torch.rand_like(irrelevant)
                if optim_info.only_supports_sparse_grads:
                    # For this test, we naively convert the Tensor layout, which we know does
                    # NOT represent the expected use case for optims like SparseAdam!
                    weight.grad = weight.grad.to_sparse()
                    bias.grad = bias.grad.to_sparse()
                    irrelevant.grad = irrelevant.grad.to_sparse()
                optimizer.step()
            # Test that the direction of loss moved appropriately
            if optim_input.kwargs.get("maximize", False):
                self.assertGreater(loss.item(), initial_value)
            else:
                self.assertLess(loss.item(), initial_value)
            # Test that irrelevant parameters were not updated since lr was almost 0
            self.assertEqual(irrelevant, irrelevant_clone)
@optims(optim_db, dtypes=[torch.float32])
def test_step_is_noop_when_params_have_no_grad(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
params = [
torch.randn(2, 3, requires_grad=False, device=device, dtype=dtype)
for _ in range(2)
]
def closure():
return torch.tensor([1], device=device, dtype=dtype)
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
optimizer.step(closure)
    @optims(optim_db, dtypes=[torch.float32])
    def test_step_is_noop_for_zero_grads(self, device, dtype, optim_info):
        """A step over all-zero grads must leave the parameter unchanged."""
        optim_cls = optim_info.optim_cls
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info
        )
        param = torch.randn((5, 1), device=device, dtype=dtype, requires_grad=True)
        old_param = param.detach().clone()
        def closure():
            # Second-order optimizers (e.g. LBFGS) require a loss-returning closure.
            return torch.tensor([1], device=device, dtype=dtype)
        for optim_input in all_optim_inputs:
            kwargs = optim_input.kwargs
            # params will decay even if grads are empty if weight_decay != 0,
            # and capturable doesn't work for CPU tensors
            if kwargs.get("weight_decay", 0) != 0:
                continue
            # AdamW/Muon params will be updated regardless of grads due to lr, so make lr smaller
            if optim_cls.__name__ == "AdamW" or optim_cls.__name__ == "Muon":
                kwargs["lr"] = (
                    torch.tensor(1e-5)
                    if isinstance(kwargs.get("lr", 1e-5), torch.Tensor)
                    else 1e-5
                )
            if kwargs.get("differentiable", False):
                params = [param.detach()]
            else:
                params = [param]
            optimizer = optim_cls(params, **kwargs)
            if optim_info.only_supports_sparse_grads:
                # Intentionally construct a multidimensional empty v for the sparse grad
                # Single dim v passes the test while multidim correctly repros the issue
                # https://github.com/pytorch/pytorch/issues/82486
                i = torch.empty((1, 0), device=device, dtype=dtype)
                v = torch.empty((0, 1), device=device, dtype=dtype)
                params[0].grad = torch.sparse_coo_tensor(
                    i, v, (5, 1), device=device, dtype=dtype
                )
            else:
                params[0].grad = torch.zeros_like(params[0])
            optimizer.step(closure)
            self.assertEqual(old_param, params[0])
    @optims(optim_db, dtypes=[torch.float32])
    def test_grads_are_never_inplaced_into(self, device, dtype, optim_info):
        """Stepping must never mutate .grad in place — verified via the tensor
        version counter staying constant across several steps."""
        optim_cls = optim_info.optim_cls
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info
        )
        param = torch.randn((5, 1), device=device, dtype=dtype, requires_grad=True)
        def closure():
            # Second-order optimizers (e.g. LBFGS) require a loss-returning closure.
            return torch.tensor([1], device=device, dtype=dtype)
        for optim_input in all_optim_inputs:
            kwargs = optim_input.kwargs
            if kwargs.get("differentiable", False):
                params = [param.detach()]
            else:
                params = [param]
            optimizer = optim_cls(params, **kwargs)
            if optim_info.only_supports_sparse_grads:
                # Intentionally construct a multidimensional empty v for the sparse grad
                # Single dim v passes the test while multidim correctly repros the issue
                # https://github.com/pytorch/pytorch/issues/82486
                i = torch.empty((1, 0), device=device, dtype=dtype)
                v = torch.empty((0, 1), device=device, dtype=dtype)
                params[0].grad = torch.sparse_coo_tensor(
                    i, v, (5, 1), device=device, dtype=dtype
                )
            else:
                params[0].grad = torch.rand_like(params[0])
            # _version increments on every in-place mutation of the grad tensor.
            old_version = params[0].grad._version
            for _ in range(5):
                optimizer.step(closure)
            self.assertEqual(params[0].grad._version, old_version)
@optims(optim_db, dtypes=[torch.float32])
def test_optimizer_can_be_printed(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
params = [
Parameter(torch.randn(2, 3, requires_grad=True, device=device, dtype=dtype))
for _ in range(2)
]
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
optimizer.__repr__()
    @parametrize("is_named_optim0", [True, False])
    @parametrize("is_named_optim1", [True, False])
    @optims(optim_db, dtypes=[torch.float32])
    def test_state_dict_deterministic(
        self, device, dtype, optim_info, is_named_optim0, is_named_optim1
    ):
        """Loading a state_dict into a fresh optimizer over cloned params must keep
        the two optimizers in lockstep, and state_dicts of equal (not identical)
        params must compare equal. Covers named and unnamed param registration."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info, skip=("differentiable",)
        )
        weight = Parameter(
            torch.randn(2, 3, requires_grad=True, device=device, dtype=dtype)
        )
        bias = Parameter(torch.randn(2, requires_grad=True, device=device, dtype=dtype))
        input = torch.randn(3, requires_grad=True, device=device, dtype=dtype)
        params = [weight, bias]
        if optim_cls.__name__ == "Muon":
            # Muon only accepts 2D parameters, so drop the 1D bias.
            params = [weight]
        def make_named_param(param, is_named):
            # Optionally wrap params as (name, tensor) pairs.
            if not is_named:
                return param
            return [(f"name{i}", p) for i, p in enumerate(param)]
        def without_param_names(state_dict):
            # param_names are optional metadata; strip them for comparisons.
            new_state_dict = deepcopy(state_dict)
            for pg in new_state_dict["param_groups"]:
                pg.pop("param_names", None)
            return new_state_dict
        def fwd_bwd(optim, w, b, i):
            # One forward/backward pass; returns the loss for closure-based optims.
            optim.zero_grad()
            wo = w.mv(i) if optim_cls.__name__ == "Muon" else w.mv(i) + b
            loss = wo.pow(2).sum()
            loss.backward()
            if optim_info.only_supports_sparse_grads:
                if w.grad is not None:
                    w.grad = w.grad.to_sparse()
                if b.grad is not None:
                    b.grad = b.grad.to_sparse()
            return loss
        for optim_input in all_optim_inputs:
            params_in = make_named_param(params, is_named=is_named_optim0)
            optimizer = optim_cls(params_in, **optim_input.kwargs)
            closure = functools.partial(fwd_bwd, optimizer, weight, bias, input)
            # Prime the optimizer
            for _ in range(10):
                if optim_info.step_requires_closure:
                    optimizer.step(closure)
                else:
                    closure()
                    optimizer.step()
            # Clone the weights and construct a new optimizer for them
            with torch.no_grad():
                weight_c = Parameter(weight.clone())
                bias_c = Parameter(bias.clone())
            params_c_list = (
                [weight_c, bias_c] if optim_cls.__name__ != "Muon" else [weight_c]
            )
            params_c = make_named_param(params_c_list, is_named=is_named_optim1)
            optimizer_c = optim_cls(params_c, **optim_input.kwargs)
            closure_c = functools.partial(fwd_bwd, optimizer_c, weight_c, bias_c, input)
            # Load the state dict from the original optimizer into the new one
            optimizer_c.load_state_dict(deepcopy(optimizer.state_dict()))
            # Run both optimizers in parallel
            for _ in range(10):
                if optim_info.step_requires_closure:
                    optimizer.step(closure)
                    optimizer_c.step(closure_c)
                else:
                    closure()
                    closure_c()
                    optimizer.step()
                    optimizer_c.step()
            self.assertEqual(weight, weight_c)
            if optim_cls.__name__ != "Muon":
                self.assertEqual(bias, bias_c)
            # Make sure state dict is deterministic with equal (not identical) parameters
            # Param names are optional and not needed to be the consistent.
            self.assertEqual(
                without_param_names(optimizer.state_dict()),
                without_param_names(optimizer_c.state_dict()),
            )
            # Make sure repeated parameters have identical representation (see #36831)
            optimizer_c.param_groups.extend(optimizer_c.param_groups)
            self.assertEqual(
                without_param_names(optimizer.state_dict())["param_groups"][-1],
                without_param_names(optimizer_c.state_dict())["param_groups"][-1],
            )
    @optims(optim_db, dtypes=[torch.float32])
    def test_can_load_older_state_dict(self, device, dtype, optim_info):
        """A state_dict from an older release — i.e. one missing the newer param-group
        flags — must load, and the optimizer must still be able to step."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info, skip=("differentiable",)
        )
        def _get_model_and_input_tensor(device, dtype, optim_cls):
            # Build a small model + matching input for this optimizer.
            if optim_cls.__name__ == "Muon":
                # Muon only accepts 2D parameter.
                model = torch.nn.Linear(10, 4, bias=False)
                input = torch.rand(10, device=device, dtype=dtype)
            else:
                model = torch.nn.Sequential(
                    torch.nn.Conv2d(4, 2, 1, stride=2),
                    torch.nn.BatchNorm2d(2, eps=1e-05, momentum=0.1),
                )
                input = torch.rand(1, 4, 16, 16, device=device, dtype=dtype)
            model.to(dtype=dtype, device=device)
            return model, input
        for optim_input in all_optim_inputs:
            torch.manual_seed(1)
            model, input = _get_model_and_input_tensor(device, dtype, optim_cls)
            optimizer = optim_cls(model.parameters(), **optim_input.kwargs)
            def fwd_bwd(optim, mod, i):
                # One forward/backward pass; returns the loss for closure-based optims.
                optim.zero_grad()
                loss = mod(i).sum()
                loss.backward()
                return loss
            for _ in range(3):
                if optim_info.step_requires_closure:
                    optimizer.step(functools.partial(fwd_bwd, optimizer, model, input))
                else:
                    fwd_bwd(optimizer, model, input)
                    optimizer.step()
            # old_state_dict has all new flags del'd
            old_state_dict = deepcopy(optimizer.state_dict())
            old_state_dict_pg = old_state_dict["param_groups"]
            for group in old_state_dict_pg:
                for flag in optim_info.not_og_supported_flags:
                    if flag in group:
                        del group[flag]
            optimizer.load_state_dict(old_state_dict)
            # Make sure we can still step
            if optim_info.step_requires_closure:
                optimizer.step(functools.partial(fwd_bwd, optimizer, model, input))
            else:
                fwd_bwd(optimizer, model, input)
                optimizer.step()
    @parametrize("is_named_optim0", [True, False])
    @parametrize("is_named_optim1", [True, False])
    @optims(
        [o for o in optim_db if not o.only_supports_sparse_grads],
        dtypes=[torch.float32],
    )
    def test_can_load_from_to_named_state_dict(
        self, device, dtype, optim_info, is_named_optim0, is_named_optim1
    ):
        """State dicts must round-trip between optimizers built from parameters()
        and named_parameters(), in every combination, and param_names must be
        preserved when at least one side supplied them."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info, skip=("differentiable",)
        )
        def _get_model_and_input_tensor(device, dtype, optim_cls):
            # Build a small model + matching input for this optimizer.
            if optim_cls.__name__ == "Muon":
                # Muon only accepts 2D parameter.
                model = torch.nn.Linear(10, 4, bias=False)
                input = torch.rand(10, device=device, dtype=dtype)
            else:
                model = torch.nn.Sequential(
                    torch.nn.Conv2d(4, 2, 1, stride=2),
                    torch.nn.BatchNorm2d(2, eps=1e-05, momentum=0.1),
                )
                input = torch.rand(1, 4, 16, 16, device=device, dtype=dtype)
            model.to(dtype=dtype, device=device)
            return model, input
        for optim_input in all_optim_inputs:
            torch.manual_seed(1)
            model, input = _get_model_and_input_tensor(device, dtype, optim_cls)
            def fwd_bwd(optim, mod, i):
                # One forward/backward pass; returns the loss for closure-based optims.
                optim.zero_grad()
                loss = mod(i).sum()
                loss.backward()
                return loss
            # test for parameters, named_parameters, and 2 groups:
            params_to_optimizer = (
                model.named_parameters() if is_named_optim0 else model.parameters()
            )
            optimizer = optim_cls(params_to_optimizer, **optim_input.kwargs)
            for _ in range(3):
                if optim_info.step_requires_closure:
                    optimizer.step(functools.partial(fwd_bwd, optimizer, model, input))
                else:
                    fwd_bwd(optimizer, model, input)
                    optimizer.step()
            # old_state_dict has all new flags del'd
            old_state_dict = deepcopy(optimizer.state_dict())
            params_to_optimizer2 = (
                model.named_parameters() if is_named_optim1 else model.parameters()
            )
            optimizer2 = optim_cls(params_to_optimizer2, **optim_input.kwargs)
            optimizer2.load_state_dict(old_state_dict)
            # Make sure we can still step
            if optim_info.step_requires_closure:
                optimizer2.step(functools.partial(fwd_bwd, optimizer2, model, input))
            else:
                fwd_bwd(optimizer2, model, input)
                optimizer2.step()
            ref_names = [p[0] for p in model.named_parameters()]
            # Make sure that param_names are preserved when provided to at least one of the optimizers
            if is_named_optim0 or is_named_optim1:
                self.assertEqual(
                    optimizer2.state_dict()["param_groups"][0]["param_names"],
                    ref_names,
                )
    @parametrize("is_named_optim", [True, False])
    @optims(optim_db, dtypes=[torch.float32])
    def test_save_load_equality_with_weights_only(
        self, device, dtype, optim_info, is_named_optim
    ):
        """An optimizer state_dict must survive torch.save/torch.load unchanged,
        both with the default loader and with weights_only=True."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info, skip=("differentiable",)
        )
        weight = Parameter(
            torch.randn(2, 3, requires_grad=True, device=device, dtype=dtype)
        )
        bias = Parameter(torch.randn(2, requires_grad=True, device=device, dtype=dtype))
        input = torch.randn(3, requires_grad=True, device=device, dtype=dtype)
        # Muon only accepts 2D parameters, so drop the 1D bias for it.
        params = [weight, bias] if optim_cls.__name__ != "Muon" else [weight]
        def make_named_param(param, is_named):
            # Optionally wrap params as (name, tensor) pairs.
            if not is_named:
                return param
            return [(f"name{i}", p) for i, p in enumerate(param)]
        def fwd_bwd(optim, w, b, i):
            # One forward/backward pass; returns the loss for closure-based optims.
            optim.zero_grad()
            wo = w.mv(i) if optim_cls.__name__ == "Muon" else w.mv(i) + b
            loss = wo.pow(2).sum()
            loss.backward()
            if optim_info.only_supports_sparse_grads:
                weight.grad = weight.grad.to_sparse()
                bias.grad = bias.grad.to_sparse()
            return loss
        for optim_input in all_optim_inputs:
            params_in = make_named_param(params, is_named=is_named_optim)
            optimizer = optim_cls(params_in, **optim_input.kwargs)
            closure = functools.partial(fwd_bwd, optimizer, weight, bias, input)
            # Prime the optimizer
            for _ in range(3):
                optimizer.step(closure)
            sd = optimizer.state_dict()
            # === Check saved/loaded state_dict are the same (including weights_only load). ===
            with tempfile.TemporaryFile() as f:
                torch.save(sd, f)
                f.seek(0)
                sd_copy = torch.load(f)
                self.assertEqual(sd_copy, sd)
                del sd_copy
                f.seek(0)
                sd_copy_wo = torch.load(f, weights_only=True)
                self.assertEqual(sd_copy_wo, sd)
    @optims(optim_db, dtypes=[torch.float32])
    def test_load_nontensor_step(self, device, dtype, optim_info):
        """Loading a state_dict whose 'step' entries are plain Python numbers
        (not tensors) must work and allow further stepping."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            device, dtype, optim_info, skip=("differentiable",)
        )
        params = [
            Parameter(torch.randn(2, 3, device=device, dtype=dtype)) for _ in range(2)
        ]
        for p in params:
            p.grad = torch.rand_like(p)
            if optim_info.only_supports_sparse_grads:
                # For this test, we naively convert the Tensor layout, which we know does
                # NOT represent the expected use case for optims like SparseAdam!
                p.grad = p.grad.to_sparse()
        # Needed for second order optims like LBFGS
        closure_loss = torch.rand(1, device=device, dtype=dtype)
        def closure():
            return closure_loss if optim_info.step_requires_closure else None
        for optim_input in all_optim_inputs:
            optimizer = optim_cls(params, **optim_input.kwargs)
            for _ in range(3):
                optimizer.step(closure)
            state_dict = deepcopy(optimizer.state_dict())
            # Downgrade every tensor 'step' to a plain number, as older
            # checkpoints would contain.
            for p_state in state_dict["state"].values():
                if "step" in p_state and torch.is_tensor(p_state["step"]):
                    p_state["step"] = p_state["step"].item()
            optimizer.load_state_dict(state_dict)
            optimizer.step(closure)
    @onlyCUDA
    @optims(optim_db, dtypes=[torch.float32])
    def test_state_dict_with_cuda_params(self, device, dtype, optim_info):
        """A CPU-built state_dict loaded into a CUDA optimizer must leave the source
        dict untouched, keep 'step' on CPU (unless capturable/fused), and keep the
        CPU and CUDA optimizers in lockstep afterwards."""
        optim_cls = optim_info.optim_cls
        # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
        # We limit our configs to CPU only, because we will be moving them to CUDA later
        cpu_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
            "cpu", dtype, optim_info, skip=("differentiable",)
        )
        # Needed for second order optims like LBFGS
        closure_loss = torch.rand(1, device=device, dtype=dtype)
        def closure():
            return closure_loss if optim_info.step_requires_closure else None
        for optim_input in cpu_optim_inputs:
            if (
                "fused" in optim_input.kwargs
                and "cuda" not in optim_info.supports_fused_on
            ):
                self.skipTest(
                    f"cuda is not supported for fused on {optim_cls.__name__}"
                )
            params = [
                Parameter(torch.randn(2, 3, device="cpu", dtype=dtype))
                for _ in range(2)
            ]
            for p in params:
                p.grad = torch.randn_like(p)
                if optim_info.only_supports_sparse_grads:
                    # For this test, we naively convert the Tensor layout, which we know does
                    # NOT represent the expected use case for optims like SparseAdam!
                    p.grad = p.grad.to_sparse()
            optimizer = optim_cls(params, **optim_input.kwargs)
            for _ in range(3):
                optimizer.step(closure)
            with torch.no_grad():
                params_cuda = [p.to(device="cuda") for p in params]
                for i, p in enumerate(params_cuda):
                    p.grad = params[i].grad.to(device="cuda")
            optimizer_cuda = optim_cls(params_cuda, **optim_input.kwargs)
            state_dict_cpu = deepcopy(optimizer.state_dict())
            state_dict_cuda = deepcopy(optimizer.state_dict())
            optimizer_cuda.load_state_dict(state_dict_cuda)
            # Make sure state_dict_cuda isn't modified by merely calling load_state_dict
            self.assertEqual(state_dict_cpu, state_dict_cuda)
            # Make sure that device of state['step'] is still CPU _unless_ torch.compile() added a capturable!
            capturable = state_dict_cpu["param_groups"][0].get("capturable", False)
            fused = state_dict_cpu["param_groups"][0].get("fused", False)
            new_state_dict = optimizer_cuda.state_dict()
            for state_cpu, state_cuda in zip(
                state_dict_cpu["state"].values(), new_state_dict["state"].values()
            ):
                if "step" in state_cpu and torch.is_tensor(state_cpu["step"]):
                    self.assertEqual(
                        state_cuda["step"].device.type,
                        "cuda" if capturable or fused else "cpu",
                    )
            # Run both optimizers in parallel and check they stay in lockstep.
            for _ in range(5):
                optimizer.step(closure)
                optimizer_cuda.step(closure)
                self.assertEqual(params, params_cuda)
                self.assertEqual(optimizer.state_dict(), optimizer_cuda.state_dict())
    @staticmethod
    def _state_dict_pre_hook(optimizer: Optimizer) -> None:
        # Pre-hook marker: stash a sentinel entry in the optimizer state so the
        # post-hook (and the tests below) can detect that this hook ran before
        # state_dict() serialized the state.
        optimizer.state["test"] = 1
@staticmethod
def _state_dict_post_hook(
optimizer: Optimizer, state_dict: dict[str, Any]
) -> dict[str, Any]:
if "test" in state_dict["state"]:
state_dict["state"].pop("test")
state_dict["ran_state_dict_pre_hook"] = True
else:
state_dict["ran_state_dict_pre_hook"] = False
return state_dict
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_pre_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_state_dict_pre_hook(self.__class__._state_dict_pre_hook)
state_dict = optim.state_dict()
self.assertEqual(state_dict["state"]["test"], 1)
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_state_dict_post_hook(self.__class__._state_dict_post_hook)
state_dict = optim.state_dict()
self.assertFalse(state_dict["ran_state_dict_pre_hook"])
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_pre_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_state_dict_pre_hook(self.__class__._state_dict_pre_hook)
optim.register_state_dict_post_hook(self.__class__._state_dict_post_hook)
state_dict = optim.state_dict()
self.assertFalse("test" in state_dict["state"])
self.assertTrue(state_dict["ran_state_dict_pre_hook"])
@staticmethod
def _load_state_dict_pre_hook1(
optimizer: Optimizer, state_dict: dict[str, Any]
) -> None:
state_dict["param_groups"][0]["lr"] = 0.002
@staticmethod
def _load_state_dict_pre_hook2(
optimizer: Optimizer, state_dict: dict[str, Any]
) -> dict[str, Any]:
# The typical use case for returning a state dict is to drastically modify the state dict.
# I will simulate by simply making a deep copy and ensuring that my_state_dict still gets used
my_state_dict = deepcopy(state_dict)
my_state_dict["param_groups"][0]["lr"] = 0.003
return my_state_dict
@staticmethod
def _load_state_dict_post_hook(optimizer: Optimizer) -> None:
optimizer.state["ran_load_state_dict_pre_hook2"] = (
optimizer.param_groups[0]["lr"] == 0.003
)
optimizer.state["ran_load_state_dict_post_hook"] = True
@optims(optim_db, dtypes=[torch.float32])
def test_load_state_dict_pre_hook_and_prepend(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
state_dict = optim.state_dict()
# usually one would have a new optim instance here, but it's all the same here
optim.register_load_state_dict_pre_hook(
self.__class__._load_state_dict_pre_hook1
)
optim.load_state_dict(state_dict)
self.assertEqual(optim.param_groups[0]["lr"], 0.002)
optim.register_load_state_dict_pre_hook(
self.__class__._load_state_dict_pre_hook2, prepend=True
)
optim.load_state_dict(state_dict)
# If prepend were False would be 0.003 but since prepend is True, the other hook overrides
self.assertEqual(optim.param_groups[0]["lr"], 0.002)
@optims(optim_db, dtypes=[torch.float32])
def test_load_state_dict_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_load_state_dict_post_hook(
self.__class__._load_state_dict_post_hook
)
optim.load_state_dict(optim.state_dict())
self.assertFalse(optim.state["ran_load_state_dict_pre_hook2"])
self.assertTrue(optim.state["ran_load_state_dict_post_hook"])
@optims(optim_db, dtypes=[torch.float32])
def test_load_state_dict_pre_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_load_state_dict_pre_hook(
self.__class__._load_state_dict_pre_hook2
)
optim.register_load_state_dict_post_hook(
self.__class__._load_state_dict_post_hook
)
optim.load_state_dict(optim.state_dict())
self.assertTrue(optim.state["ran_load_state_dict_pre_hook2"])
self.assertTrue(optim.state["ran_load_state_dict_post_hook"])
@optims(optim_db, dtypes=[torch.float32])
def test_step_post_hook(self, device, dtype, optim_info):
def post_hook(opt: Optimizer, args: tuple[Any, ...], kwargs: dict[Any, Any]):
nonlocal data
data += 2
params = [torch.tensor([[1, 1]], device=device, dtype=dtype)]
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
data = 2
hook_handle = optim.register_step_post_hook(post_hook)
optim.step(closure)
optim.step(closure)
# check if post hooks were registered
self.assertEqual(data, 6)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
optim.step(closure)
self.assertEqual(data, 6)
@optims(optim_db, dtypes=[torch.float32])
def test_step_pre_hook(self, device, dtype, optim_info):
def pre_hook(opt: Optimizer, args: tuple[Any, ...], kwargs: dict[Any, Any]):
nonlocal data
data += 2
# Create a random 2D tensor for compatibility with Muon.
params = [torch.tensor([[1, 1]], device=device, dtype=dtype)]
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
data = 5
hook_handle = optim.register_step_pre_hook(pre_hook)
optim.step(closure)
optim.step(closure)
# check if pre hooks were registered
self.assertEqual(data, 9)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
optim.step(closure)
self.assertEqual(data, 9)
@optims(optim_db, dtypes=[torch.float32])
def test_step_all_hooks(self, device, dtype, optim_info):
def global_pre_hook(
opt: Optimizer, args: tuple[Any, ...], kwargs: dict[Any, Any]
):
nonlocal data
data.append(0)
def global_post_hook(
opt: Optimizer, args: tuple[Any, ...], kwargs: dict[Any, Any]
):
nonlocal data
data.append(5)
def local_pre_hook(
opt: Optimizer, args: tuple[Any, ...], kwargs: dict[Any, Any]
):
nonlocal data
data.append(1)
def local_post_hook(
opt: Optimizer, args: tuple[Any, ...], kwargs: dict[Any, Any]
):
nonlocal data
data.append(2)
params = [torch.tensor([[1, 1]], device=device, dtype=dtype)]
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
optim2 = SGD(params)
data = []
# register global hooks to both optimizers
global_pre_handle = register_optimizer_step_pre_hook(global_pre_hook)
global_post_handle = register_optimizer_step_post_hook(global_post_hook)
# register local hooks
first_pre_handle = optim.register_step_pre_hook(local_pre_hook)
first_post_handle = optim.register_step_post_hook(local_post_hook)
second_pre_handle = optim2.register_step_pre_hook(local_pre_hook)
second_post_handle = optim2.register_step_post_hook(local_post_hook)
optim.step(closure)
self.assertListEqual(data, [0, 1, 2, 5])
optim2.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5])
optim.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
# remove all hooks
global_pre_handle.remove()
global_post_handle.remove()
first_pre_handle.remove()
first_post_handle.remove()
second_pre_handle.remove()
second_post_handle.remove()
optim.step(closure)
optim2.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
@optims(optim_db, dtypes=[torch.float32])
def test_deepcopy_copies_all_public_attrs(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
params = [
Parameter(torch.randn(2, 3, device=device, dtype=dtype)) for _ in range(2)
]
for p in params:
p.grad = torch.rand_like(p)
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
p.grad = p.grad.to_sparse()
# Needed for second order optims like LBFGS
def closure():
return 1 if optim_info.step_requires_closure else None
def getPublicAttrs(obj):
return {k for k in obj.__dict__ if not k.startswith("_")}
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
# Make some state
for _ in range(3):
if optim_info.step_requires_closure:
optimizer.step(closure)
else:
closure()
optimizer.step()
self.assertEqual(
getPublicAttrs(optimizer), getPublicAttrs(deepcopy(optimizer))
)
@optims(
[optim for optim in optim_db if optim.step_requires_closure],
dtypes=[torch.float32],
)
def test_second_order_optims_return_consistent_types(
self, device, dtype, optim_info
):
# Motivated by #7586
optim_cls = optim_info.optim_cls
params = [
torch.randn(10, 5, device=device, dtype=dtype),
torch.randn(10, device=device, dtype=dtype),
]
def closure():
return torch.tensor([10], device=device, dtype=dtype)
for optim_input in optim_info.optim_inputs_func(device=device):
# Currently, the only second order optim is LBFGS, so we just go ahead and modify
# "tolerance_grad", but this may not scale if we add second order optims in the future
kwargs = optim_input.kwargs
kwargs["tolerance_grad"] = math.inf
optim_inf = optim_cls(params, **kwargs)
kwargs["tolerance_grad"] = -math.inf
optim_neg_inf = optim_cls(params, **kwargs)
res1 = optim_inf.step(closure)
res2 = optim_neg_inf.step(closure)
self.assertEqual(type(res1), type(res2))
@onlyCUDA
@optims(
[
optim
for optim in optim_db
if "cpu" in optim.supports_fused_on and "cuda" in optim.supports_fused_on
],
dtypes=floating_types_and(
torch.bfloat16,
torch.float16,
),
)
def test_fused_cpu_matches_cuda(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device="cpu")
for optim_input in optim_inputs:
inpts, models, optimizers = [], [], []
for dev in ("cpu", "cuda"):
kwargs = optim_input.kwargs
kwargs["fused"] = True
inpt = torch.tensor(
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=dtype, device=dev
).reshape(3, 2)
torch.manual_seed(1)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
)
model.to(dtype=dtype, device=dev)
# foreach/fused optimizers should be tested with a
# zero_size tensor as its last param.
# ref: https://github.com/pytorch/pytorch/issues/100701
empty_param = torch.empty(
(), device=dev, dtype=dtype, requires_grad=True
)
empty_param.grad = torch.rand_like(empty_param)
params = list(model.parameters()) + [empty_param]
optimizer = optim_cls(params, **kwargs)
inpts.append(inpt)
models.append(model)
optimizers.append(optimizer)
self._compare_between(inpts, models, optimizers)
@onlyCUDA
@optims(
[
o
for o in optim_db
if ("foreach" in o.supported_impls and o.optim_cls.__name__ != "Adafactor")
],
dtypes=[torch.float32],
)
def test_defaults_changed_to_foreach(self, device, dtype, optim_info):
# Test that the default implementations for optimizers are changed to foreach
# except Adafactor, which defaults to the single tensor impl for memory efficiency.
from torch.optim import Adam, AdamW
optim_cls = optim_info.optim_cls
model = torch.nn.Linear(5, 5)
model.to(dtype=dtype, device=device)
inpt = torch.rand(2, 5, dtype=dtype, device=device)
import inspect
# AdamW dispatches to superclass' adam
if optim_cls is AdamW:
module = inspect.getmodule(Adam)
module_name = "_multi_tensor_adam"
else:
module = inspect.getmodule(optim_cls)
module_name = f"_multi_tensor_{optim_cls.__name__.lower()}"
for optim_input in optim_info.optim_inputs_func(device=device):
optim = optim_cls(model.parameters(), **optim_input.kwargs)
optim.zero_grad()
output = model(inpt)
loss = output.sum()
loss.backward()
with patch.object(module, module_name) as mocked_foreach_impl:
optim.step()
self.assertTrue(mocked_foreach_impl.called)
@optims(optim_db, dtypes=[torch.float32])
def test_non_empty_state(self, device, dtype, optim_info):
# There are internal tests that check that the state is not empty
optim_cls = optim_info.optim_cls
# Muon only accepts 2D parameter.
model = torch.nn.Linear(5, 5, bias=False)
model.to(dtype=dtype, device=device)
inpt = torch.rand(2, 5, dtype=dtype, device=device)
for optim_input in optim_info.optim_inputs_func(device=device):
optim = optim_cls(model.parameters(), **optim_input.kwargs)
optim.zero_grad()
output = model(inpt)
loss = output.sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
for param in model.parameters():
if param.grad is not None:
param.grad = param.grad.to_sparse()
if optim_info.step_requires_closure:
optim.step(lambda: 1.0)
else:
optim.step()
for state in optim.state.values():
self.assertGreater(len(state), 0)
@parametrize("dtype", [torch.float32])
def test_step_iteration(self, device, dtype):
def _get_model_and_input_tensor(device, dtype):
model = torch.nn.Sequential(
torch.nn.Conv2d(4, 2, 1, stride=2),
torch.nn.BatchNorm2d(2, eps=1e-05, momentum=0.1),
)
input = torch.rand(1, 4, 16, 16, device=device, dtype=dtype)
model.to(dtype=dtype, device=device)
return model, input
counter = 0
def fwd_bwd(optim, mod, i):
nonlocal counter
counter += 1
optim.zero_grad()
loss = mod(i).sum()
loss.backward()
return loss
model, input = _get_model_and_input_tensor(device, dtype)
optimizer = torch.optim.LBFGS(
model.parameters(), max_iter=1, max_eval=5, line_search_fn="strong_wolfe"
)
optimizer.step(functools.partial(fwd_bwd, optimizer, model, input))
self.assertEqual(counter, 6)
instantiate_device_type_tests(TestOptimRenewed, globals(), allow_mps=True)
if __name__ == "__main__":
run_tests()
| TestOptimRenewed |
python | pytorch__pytorch | test/quantization/core/test_quantized_functional.py | {
"start": 542,
"end": 10477
} | class ____(QuantizationTestCase):
def test_relu_api(self):
X = torch.arange(-5, 5, dtype=torch.float)
scale = 2.0
zero_point = 1
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
qY = torch.relu(qX)
qY_hat = F.relu(qX)
self.assertEqual(qY, qY_hat)
def _test_conv_api_impl(
self, qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups, kernel_size,
stride, padding, dilation, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_channelwise,
):
for i in range(len(kernel_size)):
assume(input_feature_map_size[i] + 2 * padding[i]
>= dilation[i] * (kernel_size[i] - 1) + 1)
(X, X_q, W, W_q, b) = _make_conv_test_input(
batch_size, in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, X_scale,
X_zero_point, W_scale, W_zero_point, use_bias, use_channelwise)
Y_exp = conv_fn(X, W, b, stride, padding, dilation, groups)
Y_exp = torch.quantize_per_tensor(
Y_exp, scale=Y_scale, zero_point=Y_zero_point, dtype=torch.quint8)
Y_act = qconv_fn(
X_q, W_q, b, stride, padding, dilation, groups,
padding_mode="zeros", scale=Y_scale, zero_point=Y_zero_point)
# Make sure the results match
# assert_array_almost_equal compares using the following formula:
# abs(desired-actual) < 1.5 * 10**(-decimal)
# (https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_almost_equal.html)
# We use decimal = 0 to ignore off-by-1 differences between reference
# and test. Off-by-1 differences arise due to the order of round and
# zero_point addition operation, i.e., if addition followed by round is
# used by reference and round followed by addition is used by test, the
# results may differ by 1.
# For example, the result of round(2.5) + 1 is 3 while round(2.5 + 1) is
# 4 assuming the rounding mode is round-to-nearest, ties-to-even.
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_act.int_repr().numpy(), decimal=0)
@given(batch_size=st.integers(1, 3),
in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
L=st.integers(4, 16),
out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 4),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
def test_conv1d_api(
self, batch_size, in_channels_per_group, L, out_channels_per_group,
groups, kernel, stride, pad, dilation,
X_scale, X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_channelwise, qengine,
):
# Tests the correctness of the conv1d function.
if qengine not in torch.backends.quantized.supported_engines:
return
if qengine == 'qnnpack':
if IS_PPC:
return
use_channelwise = False
input_feature_map_size = (L, )
kernel_size = (kernel, )
stride = (stride, )
padding = (pad, )
dilation = (dilation, )
with override_quantized_engine(qengine):
qconv_fn = qF.conv1d
conv_fn = F.conv1d
self._test_conv_api_impl(
qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups,
kernel_size, stride, padding, dilation, X_scale, X_zero_point,
W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise)
@given(batch_size=st.integers(1, 3),
in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
H=st.integers(4, 16),
W=st.integers(4, 16),
out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 4),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
def test_conv2d_api(
self, batch_size, in_channels_per_group, H, W, out_channels_per_group,
groups, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation,
X_scale, X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_channelwise, qengine,
):
# Tests the correctness of the conv2d function.
if qengine not in torch.backends.quantized.supported_engines:
return
if qengine == 'qnnpack':
if IS_PPC:
return
input_feature_map_size = (H, W)
kernel_size = (kernel_h, kernel_w)
stride = (stride_h, stride_w)
padding = (pad_h, pad_w)
dilation = (dilation, dilation)
with override_quantized_engine(qengine):
qconv_fn = qF.conv2d
conv_fn = F.conv2d
self._test_conv_api_impl(
qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups,
kernel_size, stride, padding, dilation, X_scale, X_zero_point,
W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise)
@given(batch_size=st.integers(1, 3),
in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
D=st.integers(4, 8),
H=st.integers(4, 8),
W=st.integers(4, 8),
out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 4),
kernel_d=st.integers(1, 4),
kernel_h=st.integers(1, 4),
kernel_w=st.integers(1, 4),
stride_d=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("fbgemm",)))
def test_conv3d_api(
self, batch_size, in_channels_per_group, D, H, W,
out_channels_per_group, groups, kernel_d, kernel_h, kernel_w,
stride_d, stride_h, stride_w, pad_d, pad_h, pad_w, dilation, X_scale,
X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise, qengine,
):
# Tests the correctness of the conv3d function.
# Currently conv3d only supports FbGemm engine
if qengine not in torch.backends.quantized.supported_engines:
return
input_feature_map_size = (D, H, W)
kernel_size = (kernel_d, kernel_h, kernel_w)
stride = (stride_d, stride_h, stride_w)
padding = (pad_d, pad_h, pad_w)
dilation = (dilation, dilation, dilation)
with override_quantized_engine(qengine):
qconv_fn = qF.conv3d
conv_fn = F.conv3d
self._test_conv_api_impl(
qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups,
kernel_size, stride, padding, dilation, X_scale, X_zero_point,
W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise)
@given(N=st.integers(1, 10),
C=st.integers(1, 10),
H=st.integers(4, 8),
H_out=st.integers(4, 8),
W=st.integers(4, 8),
W_out=st.integers(4, 8),
scale=st.floats(.1, 2),
zero_point=st.integers(0, 4))
def test_grid_sample(self, N, C, H, H_out, W, W_out, scale, zero_point):
X = torch.rand(N, C, H, W)
X_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
grid = torch.rand(N, H_out, W_out, 2)
out = F.grid_sample(X_q, grid)
out_exp = torch.quantize_per_tensor(F.grid_sample(X, grid), scale=scale, zero_point=zero_point, dtype=torch.quint8)
np.testing.assert_array_almost_equal(
out.int_repr().numpy(), out_exp.int_repr().numpy(), decimal=0)
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
| TestQuantizedFunctionalOps |
python | gevent__gevent | src/greentest/3.14/test__interpreters.py | {
"start": 14172,
"end": 16439
} | class ____(TestBase):
def setUp(self):
super().setUp()
self.id = _interpreters.create()
def test_signatures(self):
# See https://github.com/python/cpython/issues/126654
msg = r'_interpreters.exec\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.exec(self.id, 'a', 1)
with self.assertRaisesRegex(TypeError, msg):
_interpreters.exec(self.id, 'a', shared=1)
msg = r'_interpreters.run_string\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_string(self.id, 'a', shared=1)
msg = r'_interpreters.run_func\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_func(self.id, lambda: None, shared=1)
# See https://github.com/python/cpython/issues/135855
msg = r'_interpreters.set___main___attrs\(\) argument 2 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.set___main___attrs(self.id, 1)
def test_invalid_shared_none(self):
msg = r'must be dict, not None'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.exec(self.id, 'a', shared=None)
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_string(self.id, 'a', shared=None)
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_func(self.id, lambda: None, shared=None)
with self.assertRaisesRegex(TypeError, msg):
_interpreters.set___main___attrs(self.id, None)
def test_invalid_shared_encoding(self):
# See https://github.com/python/cpython/issues/127196
bad_shared = {"\uD82A": 0}
msg = 'surrogates not allowed'
with self.assertRaisesRegex(UnicodeEncodeError, msg):
_interpreters.exec(self.id, 'a', shared=bad_shared)
with self.assertRaisesRegex(UnicodeEncodeError, msg):
_interpreters.run_string(self.id, 'a', shared=bad_shared)
with self.assertRaisesRegex(UnicodeEncodeError, msg):
_interpreters.run_func(self.id, lambda: None, shared=bad_shared)
| CommonTests |
python | getsentry__sentry-python | sentry_sdk/client.py | {
"start": 5282,
"end": 7806
} | class ____:
"""
.. versionadded:: 2.0.0
The basic definition of a client that is used for sending data to Sentry.
"""
spotlight = None # type: Optional[SpotlightClient]
def __init__(self, options=None):
# type: (Optional[Dict[str, Any]]) -> None
self.options = options if options is not None else DEFAULT_OPTIONS # type: Dict[str, Any]
self.transport = None # type: Optional[Transport]
self.monitor = None # type: Optional[Monitor]
self.log_batcher = None # type: Optional[LogBatcher]
self.metrics_batcher = None # type: Optional[MetricsBatcher]
def __getstate__(self, *args, **kwargs):
# type: (*Any, **Any) -> Any
return {"options": {}}
def __setstate__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
pass
@property
def dsn(self):
# type: () -> Optional[str]
return None
@property
def parsed_dsn(self):
# type: () -> Optional[Dsn]
return None
def should_send_default_pii(self):
# type: () -> bool
return False
def is_active(self):
# type: () -> bool
"""
.. versionadded:: 2.0.0
Returns whether the client is active (able to send data to Sentry)
"""
return False
def capture_event(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[str]
return None
def _capture_log(self, log):
# type: (Log) -> None
pass
def _capture_metric(self, metric):
# type: (Metric) -> None
pass
def capture_session(self, *args, **kwargs):
# type: (*Any, **Any) -> None
return None
if TYPE_CHECKING:
@overload
def get_integration(self, name_or_class):
# type: (str) -> Optional[Integration]
...
@overload
def get_integration(self, name_or_class):
# type: (type[I]) -> Optional[I]
...
def get_integration(self, name_or_class):
# type: (Union[str, type[Integration]]) -> Optional[Integration]
return None
def close(self, *args, **kwargs):
# type: (*Any, **Any) -> None
return None
def flush(self, *args, **kwargs):
# type: (*Any, **Any) -> None
return None
def __enter__(self):
# type: () -> BaseClient
return self
def __exit__(self, exc_type, exc_value, tb):
# type: (Any, Any, Any) -> None
return None
| BaseClient |
python | joblib__joblib | joblib/compressor.py | {
"start": 2278,
"end": 3443
} | class ____:
"""A wrapper around a compressor file object.
Attributes
----------
obj: a file-like object
The object must implement the buffer interface and will be used
internally to compress/decompress the data.
prefix: bytestring
A bytestring corresponding to the magic number that identifies the
file format associated to the compressor.
extension: str
The file extension used to automatically select this compressor during
a dump to a file.
"""
def __init__(self, obj, prefix=b"", extension=""):
self.fileobj_factory = obj
self.prefix = prefix
self.extension = extension
def compressor_file(self, fileobj, compresslevel=None):
"""Returns an instance of a compressor file object."""
if compresslevel is None:
return self.fileobj_factory(fileobj, "wb")
else:
return self.fileobj_factory(fileobj, "wb", compresslevel=compresslevel)
def decompressor_file(self, fileobj):
"""Returns an instance of a decompressor file object."""
return self.fileobj_factory(fileobj, "rb")
| CompressorWrapper |
python | huggingface__transformers | src/transformers/models/aria/modeling_aria.py | {
"start": 5819,
"end": 8267
} | class ____(nn.Module):
"""
Aria Projector module.
This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.
Args:
config (`AriaConfig`):
Configuration object for the model.
"""
def __init__(
self,
config: AriaConfig,
):
super().__init__()
self.patch_to_query_dict = config.projector_patch_to_query_dict
self.in_features = config.vision_config.hidden_size
self.num_heads = config.vision_config.num_attention_heads
self.kv_dim = config.vision_config.hidden_size
self.hidden_features = config.text_config.hidden_size
self.output_dim = config.text_config.hidden_size
self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features))
self.cross_attn = AriaCrossAttention(config)
self.layer_norm = nn.LayerNorm(self.in_features)
self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim)
def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
"""
Forward pass of the Projector module.
Args:
key_value_states (`torch.Tensor`):
Input tensor of shape (batch_size, num_patches, kv_dim).
attn_mask (`torch.Tensor`, *optional*, default is None):
Attention mask.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
"""
batch_size, num_patches = key_value_states.shape[0], key_value_states.shape[1]
if num_patches not in self.patch_to_query_dict:
raise KeyError(
f"Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}."
)
query_num = self.patch_to_query_dict[num_patches]
queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)
if attn_mask is not None:
attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)
attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)
attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)
out = self.feed_forward(self.layer_norm(attention_out))
return out
| AriaProjector |
python | plotly__plotly.py | plotly/graph_objs/histogram2d/colorbar/_title.py | {
"start": 233,
"end": 3999
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2d.colorbar"
_path_str = "histogram2d.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2d.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.histogram2d.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2d.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | pytorch__pytorch | test/package/test_dependency_hooks.py | {
"start": 329,
"end": 3970
} | class ____(PackageTestCase):
"""Dependency management hooks API tests.
- register_mock_hook()
- register_extern_hook()
"""
def test_single_hook(self):
buffer = BytesIO()
my_externs = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
exporter.register_extern_hook(my_extern_hook)
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_externs, {"module_a"})
def test_multiple_extern_hooks(self):
buffer = BytesIO()
my_externs = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_extern_hook2(package_exporter, module_name):
my_externs.remove(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
exporter.register_extern_hook(my_extern_hook)
exporter.register_extern_hook(my_extern_hook2)
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_externs, set())
def test_multiple_mock_hooks(self):
buffer = BytesIO()
my_mocks = set()
def my_mock_hook(package_exporter, module_name):
my_mocks.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_mock_hook2(package_exporter, module_name):
my_mocks.remove(module_name)
with PackageExporter(buffer) as exporter:
exporter.mock(["package_a.subpackage", "module_a"])
exporter.register_mock_hook(my_mock_hook)
exporter.register_mock_hook(my_mock_hook2)
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_mocks, set())
def test_remove_hooks(self):
buffer = BytesIO()
my_externs = set()
my_externs2 = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
def my_extern_hook2(package_exporter, module_name):
my_externs2.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
handle = exporter.register_extern_hook(my_extern_hook)
exporter.register_extern_hook(my_extern_hook2)
handle.remove()
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_externs, set())
self.assertEqual(my_externs2, {"module_a"})
def test_extern_and_mock_hook(self):
buffer = BytesIO()
my_externs = set()
my_mocks = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
def my_mock_hook(package_exporter, module_name):
my_mocks.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern("module_a")
exporter.mock("package_a")
exporter.register_extern_hook(my_extern_hook)
exporter.register_mock_hook(my_mock_hook)
exporter.save_source_string("foo", "import module_a; import package_a")
self.assertEqual(my_externs, {"module_a"})
self.assertEqual(my_mocks, {"package_a"})
if __name__ == "__main__":
run_tests()
| TestDependencyHooks |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/run_request.py | {
"start": 1927,
"end": 2595
} | class ____(NamedTuple("_SkipReason", [("skip_message", PublicAttr[Optional[str]])])):
"""Represents a skipped evaluation, where no runs are requested. May contain a message to indicate
why no runs were requested.
Args:
skip_message (Optional[str]): A message displayed in the Dagster UI for why this evaluation resulted
in no requested runs.
"""
def __new__(cls, skip_message: Optional[str] = None):
return super().__new__(
cls,
skip_message=check.opt_str_param(skip_message, "skip_message"),
)
@public
@whitelist_for_serdes(kwargs_fields={"asset_graph_subset"})
@record_custom
| SkipReason |
python | patrys__httmock | httmock.py | {
"start": 3991,
"end": 7776
} | class ____(object):
"""
Acts as a context manager to allow mocking
"""
STATUS_CODE = 200
def __init__(self, *handlers):
self.handlers = handlers
def __enter__(self):
self._real_session_send = requests.Session.send
self._real_session_prepare_request = requests.Session.prepare_request
for handler in self.handlers:
handler_clean_call(handler)
def _fake_send(session, request, **kwargs):
response = self.intercept(request, **kwargs)
if isinstance(response, requests.Response):
# this is pasted from requests to handle redirects properly:
kwargs.setdefault('stream', session.stream)
kwargs.setdefault('verify', session.verify)
kwargs.setdefault('cert', session.cert)
kwargs.setdefault('proxies', session.proxies)
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
timeout = kwargs.get('timeout')
verify = kwargs.get('verify')
cert = kwargs.get('cert')
proxies = kwargs.get('proxies')
gen = session.resolve_redirects(
response,
request,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies)
history = [resp for resp in gen] if allow_redirects else []
if history:
history.insert(0, response)
response = history.pop()
response.history = tuple(history)
session.cookies.update(response.cookies)
return response
return self._real_session_send(session, request, **kwargs)
def _fake_prepare_request(session, request):
"""
Fake this method so the `PreparedRequest` objects contains
an attribute `original` of the original request.
"""
prep = self._real_session_prepare_request(session, request)
prep.original = request
return prep
requests.Session.send = _fake_send
requests.Session.prepare_request = _fake_prepare_request
return self
def __exit__(self, exc_type, exc_val, exc_tb):
requests.Session.send = self._real_session_send
requests.Session.prepare_request = self._real_session_prepare_request
def intercept(self, request, **kwargs):
url = urlparse.urlsplit(request.url)
res = first_of(self.handlers, url, request)
if isinstance(res, requests.Response):
return res
elif isinstance(res, dict):
return response(res.get('status_code'),
res.get('content'),
res.get('headers'),
res.get('reason'),
res.get('elapsed', 0),
request,
stream=kwargs.get('stream', False),
http_vsn=res.get('http_vsn', 11))
elif isinstance(res, (text_type, binary_type)):
return response(content=res, stream=kwargs.get('stream', False))
elif res is None:
return None
else:
raise TypeError(
"Dont know how to handle response of type {0}".format(type(res)))
def with_httmock(*handlers):
mock = HTTMock(*handlers)
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
with mock:
return func(*args, **kwargs)
return inner
return decorator
| HTTMock |
python | jazzband__django-oauth-toolkit | tests/test_oauth2_validators.py | {
"start": 1688,
"end": 17433
} | class ____(TransactionTestCase):
def setUp(self):
self.user = UserModel.objects.create_user("user", "test@example.com", "123456")
self.request = mock.MagicMock(wraps=Request)
self.request.user = self.user
self.request.grant_type = "not client"
self.validator = OAuth2Validator()
self.application = Application.objects.create(
client_id="client_id",
client_secret=CLEARTEXT_SECRET,
user=self.user,
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_PASSWORD,
)
self.request.client = self.application
self.blank_secret_request = mock.MagicMock(wraps=Request)
self.blank_secret_request.user = self.user
self.blank_secret_request.grant_type = "not client"
self.blank_secret_application = Application.objects.create(
client_id="blank_secret_client_id",
client_secret=CLEARTEXT_BLANK_SECRET,
user=self.user,
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_PASSWORD,
)
self.blank_secret_request.client = self.blank_secret_application
def tearDown(self):
self.application.delete()
def test_authenticate_request_body(self):
self.request.client_id = "client_id"
self.assertFalse(self.validator._authenticate_request_body(self.request))
self.request.client_secret = ""
self.assertFalse(self.validator._authenticate_request_body(self.request))
self.request.client_secret = "wrong_client_secret"
self.assertFalse(self.validator._authenticate_request_body(self.request))
self.request.client_secret = CLEARTEXT_SECRET
self.assertTrue(self.validator._authenticate_request_body(self.request))
self.blank_secret_request.client_id = "blank_secret_client_id"
self.assertTrue(self.validator._authenticate_request_body(self.blank_secret_request))
self.blank_secret_request.client_secret = CLEARTEXT_BLANK_SECRET
self.assertTrue(self.validator._authenticate_request_body(self.blank_secret_request))
self.blank_secret_request.client_secret = "wrong_client_secret"
self.assertFalse(self.validator._authenticate_request_body(self.blank_secret_request))
def test_authenticate_request_body_unhashed_secret(self):
self.application.client_secret = CLEARTEXT_SECRET
self.application.hash_client_secret = False
self.application.save()
self.request.client_id = "client_id"
self.request.client_secret = CLEARTEXT_SECRET
self.assertTrue(self.validator._authenticate_request_body(self.request))
self.application.hash_client_secret = True
self.application.save()
def test_extract_basic_auth(self):
self.request.headers = {"HTTP_AUTHORIZATION": "Basic 123456"}
self.assertEqual(self.validator._extract_basic_auth(self.request), "123456")
self.request.headers = {}
self.assertIsNone(self.validator._extract_basic_auth(self.request))
self.request.headers = {"HTTP_AUTHORIZATION": "Dummy 123456"}
self.assertIsNone(self.validator._extract_basic_auth(self.request))
self.request.headers = {"HTTP_AUTHORIZATION": "Basic"}
self.assertIsNone(self.validator._extract_basic_auth(self.request))
self.request.headers = {"HTTP_AUTHORIZATION": "Basic 123456 789"}
self.assertEqual(self.validator._extract_basic_auth(self.request), "123456 789")
def test_authenticate_basic_auth_hashed_secret(self):
self.request.encoding = "utf-8"
self.request.headers = get_basic_auth_header("client_id", CLEARTEXT_SECRET)
self.assertTrue(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_unhashed_secret(self):
self.application.client_secret = CLEARTEXT_SECRET
self.application.hash_client_secret = False
self.application.save()
self.request.encoding = "utf-8"
self.request.headers = get_basic_auth_header("client_id", CLEARTEXT_SECRET)
self.assertTrue(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_default_encoding(self):
self.request.encoding = None
self.request.headers = get_basic_auth_header("client_id", CLEARTEXT_SECRET)
self.assertTrue(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_wrong_client_id(self):
self.request.encoding = "utf-8"
self.request.headers = get_basic_auth_header("wrong_id", CLEARTEXT_SECRET)
self.assertFalse(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_wrong_client_secret(self):
self.request.encoding = "utf-8"
self.request.headers = get_basic_auth_header("client_id", "wrong_secret")
self.assertFalse(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_not_b64_auth_string(self):
self.request.encoding = "utf-8"
# Can"t b64decode
self.request.headers = {"HTTP_AUTHORIZATION": "Basic not_base64"}
self.assertFalse(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_invalid_b64_string(self):
self.request.encoding = "utf-8"
self.request.headers = {"HTTP_AUTHORIZATION": "Basic ZHVtbXk=:ZHVtbXk=\n"}
self.assertFalse(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_not_utf8(self):
self.request.encoding = "utf-8"
# b64decode("test") will become b"\xb5\xeb-", it can"t be decoded as utf-8
self.request.headers = {"HTTP_AUTHORIZATION": "Basic test"}
self.assertFalse(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_basic_auth_public_app_with_device_code(self):
self.request.grant_type = "urn:ietf:params:oauth:grant-type:device_code"
self.request.headers = get_basic_auth_header("client_id", CLEARTEXT_SECRET)
self.application.client_type = Application.CLIENT_PUBLIC
self.assertTrue(self.validator._authenticate_basic_auth(self.request))
def test_authenticate_check_secret(self):
hashed = make_password(CLEARTEXT_SECRET)
self.assertTrue(self.validator._check_secret(CLEARTEXT_SECRET, CLEARTEXT_SECRET))
self.assertTrue(self.validator._check_secret(CLEARTEXT_SECRET, hashed))
self.assertFalse(self.validator._check_secret(hashed, hashed))
self.assertFalse(self.validator._check_secret(hashed, CLEARTEXT_SECRET))
def test_authenticate_client_id(self):
self.assertTrue(self.validator.authenticate_client_id("client_id", self.request))
def test_authenticate_client_id_fail(self):
self.application.client_type = Application.CLIENT_CONFIDENTIAL
self.application.save()
self.assertFalse(self.validator.authenticate_client_id("client_id", self.request))
self.assertFalse(self.validator.authenticate_client_id("fake_client_id", self.request))
def test_client_authentication_required(self):
self.request.headers = {"HTTP_AUTHORIZATION": "Basic 123456"}
self.assertTrue(self.validator.client_authentication_required(self.request))
self.request.headers = {}
self.request.client_id = "client_id"
self.request.client_secret = CLEARTEXT_SECRET
self.assertTrue(self.validator.client_authentication_required(self.request))
self.request.client_secret = ""
self.assertFalse(self.validator.client_authentication_required(self.request))
self.application.client_type = Application.CLIENT_CONFIDENTIAL
self.application.save()
self.request.client = ""
self.assertTrue(self.validator.client_authentication_required(self.request))
def test_load_application_loads_client_id_when_request_has_no_client(self):
self.request.client = None
application = self.validator._load_application("client_id", self.request)
self.assertEqual(application, self.application)
def test_load_application_uses_cached_when_request_has_valid_client_matching_client_id(self):
self.request.client = self.application
application = self.validator._load_application("client_id", self.request)
self.assertIs(application, self.application)
self.assertIs(self.request.client, self.application)
def test_load_application_succeeds_when_request_has_invalid_client_valid_client_id(self):
self.request.client = 'invalid_client'
application = self.validator._load_application("client_id", self.request)
self.assertEqual(application, self.application)
self.assertEqual(self.request.client, self.application)
def test_load_application_overwrites_client_on_client_id_mismatch(self):
another_application = Application.objects.create(
client_id="another_client_id",
client_secret=CLEARTEXT_SECRET,
user=self.user,
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_PASSWORD,
)
self.request.client = another_application
application = self.validator._load_application("client_id", self.request)
self.assertEqual(application, self.application)
self.assertEqual(self.request.client, self.application)
another_application.delete()
@mock.patch.object(Application, "is_usable")
def test_load_application_returns_none_when_client_not_usable_cached(self, mock_is_usable):
mock_is_usable.return_value = False
self.request.client = self.application
application = self.validator._load_application("client_id", self.request)
self.assertIsNone(application)
self.assertIsNone(self.request.client)
@mock.patch.object(Application, "is_usable")
def test_load_application_returns_none_when_client_not_usable_db_lookup(self, mock_is_usable):
mock_is_usable.return_value = False
self.request.client = None
application = self.validator._load_application("client_id", self.request)
self.assertIsNone(application)
self.assertIsNone(self.request.client)
def test_rotate_refresh_token__is_true(self):
self.assertTrue(self.validator.rotate_refresh_token(mock.MagicMock()))
def test_save_bearer_token__without_user__raises_fatal_client(self):
token = {}
with self.assertRaises(FatalClientError):
self.validator.save_bearer_token(token, mock.MagicMock())
def test_save_bearer_token__with_existing_tokens__does_not_create_new_tokens(self):
rotate_token_function = mock.MagicMock()
rotate_token_function.return_value = False
self.validator.rotate_refresh_token = rotate_token_function
access_token = AccessToken.objects.create(
token="123",
user=self.user,
expires=timezone.now() + datetime.timedelta(seconds=60),
application=self.application,
)
refresh_token = RefreshToken.objects.create(
access_token=access_token, token="abc", user=self.user, application=self.application
)
self.request.refresh_token_instance = refresh_token
token = {
"scope": "foo bar",
"refresh_token": "abc",
"access_token": "123",
}
self.assertEqual(1, RefreshToken.objects.count())
self.assertEqual(1, AccessToken.objects.count())
self.validator.save_bearer_token(token, self.request)
self.assertEqual(1, RefreshToken.objects.count())
self.assertEqual(1, AccessToken.objects.count())
def test_save_bearer_token__checks_to_rotate_tokens(self):
rotate_token_function = mock.MagicMock()
rotate_token_function.return_value = False
self.validator.rotate_refresh_token = rotate_token_function
access_token = AccessToken.objects.create(
token="123",
user=self.user,
expires=timezone.now() + datetime.timedelta(seconds=60),
application=self.application,
)
refresh_token = RefreshToken.objects.create(
access_token=access_token, token="abc", user=self.user, application=self.application
)
self.request.refresh_token_instance = refresh_token
token = {
"scope": "foo bar",
"refresh_token": "abc",
"access_token": "123",
}
self.validator.save_bearer_token(token, self.request)
rotate_token_function.assert_called_once_with(self.request)
def test_save_bearer_token__with_new_token__creates_new_tokens(self):
token = {
"scope": "foo bar",
"refresh_token": "abc",
"access_token": "123",
}
self.assertEqual(0, RefreshToken.objects.count())
self.assertEqual(0, AccessToken.objects.count())
self.validator.save_bearer_token(token, self.request)
self.assertEqual(1, RefreshToken.objects.count())
self.assertEqual(1, AccessToken.objects.count())
def test_save_bearer_token__with_new_token_equal_to_existing_token__revokes_old_tokens(self):
access_token = AccessToken.objects.create(
token="123",
user=self.user,
expires=timezone.now() + datetime.timedelta(seconds=60),
application=self.application,
)
refresh_token = RefreshToken.objects.create(
access_token=access_token, token="abc", user=self.user, application=self.application
)
self.request.refresh_token_instance = refresh_token
token = {
"scope": "foo bar",
"refresh_token": "abc",
"access_token": "123",
}
self.assertEqual(1, RefreshToken.objects.count())
self.assertEqual(1, AccessToken.objects.count())
self.validator.save_bearer_token(token, self.request)
self.assertEqual(1, RefreshToken.objects.filter(revoked__isnull=True).count())
self.assertEqual(1, AccessToken.objects.count())
def test_save_bearer_token__with_no_refresh_token__creates_new_access_token_only(self):
token = {
"scope": "foo bar",
"access_token": "123",
}
self.validator.save_bearer_token(token, self.request)
self.assertEqual(0, RefreshToken.objects.count())
self.assertEqual(1, AccessToken.objects.count())
def test_save_bearer_token__with_new_token__calls_methods_to_create_access_and_refresh_tokens(self):
token = {
"scope": "foo bar",
"refresh_token": "abc",
"access_token": "123",
}
# Mock private methods to create access and refresh tokens
create_access_token_mock = mock.MagicMock()
create_refresh_token_mock = mock.MagicMock()
self.validator._create_refresh_token = create_refresh_token_mock
self.validator._create_access_token = create_access_token_mock
self.validator.save_bearer_token(token, self.request)
assert create_access_token_mock.call_count == 1
assert create_refresh_token_mock.call_count == 1
def test_get_or_create_user_from_content(self):
content = {"username": "test_user"}
UserModel.objects.filter(username=content["username"]).delete()
user = self.validator.get_or_create_user_from_content(content)
self.assertIsNotNone(user)
self.assertEqual(content["username"], user.username)
| TestOAuth2Validator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 680134,
"end": 680619
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "repository", "teams")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
repository = sgqlc.types.Field("Repository", graphql_name="repository")
teams = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null("Team")), graphql_name="teams"
)
| UpdateTeamsRepositoryPayload |
python | apache__airflow | task-sdk/tests/task_sdk/api/test_client.py | {
"start": 54722,
"end": 58990
} | class ____:
def test_add_response(self) -> None:
ti_id = uuid7()
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path in (f"/hitlDetails/{ti_id}"):
return httpx.Response(
status_code=201,
json={
"ti_id": str(ti_id),
"options": ["Approval", "Reject"],
"subject": "This is subject",
"body": "This is body",
"defaults": ["Approval"],
"params": None,
"multiple": False,
},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.hitl.add_response(
ti_id=ti_id,
options=["Approval", "Reject"],
subject="This is subject",
body="This is body",
defaults=["Approval"],
params=None,
multiple=False,
)
assert isinstance(result, HITLDetailRequest)
assert result.ti_id == ti_id
assert result.options == ["Approval", "Reject"]
assert result.subject == "This is subject"
assert result.body == "This is body"
assert result.defaults == ["Approval"]
assert result.params is None
assert result.multiple is False
assert result.assigned_users is None
def test_update_response(self, time_machine: TimeMachineFixture) -> None:
time_machine.move_to(datetime(2025, 7, 3, 0, 0, 0))
ti_id = uuid7()
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path in (f"/hitlDetails/{ti_id}"):
return httpx.Response(
status_code=200,
json={
"chosen_options": ["Approval"],
"params_input": {},
"responded_by_user": {"id": "admin", "name": "admin"},
"response_received": True,
"responded_at": "2025-07-03T00:00:00Z",
},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.hitl.update_response(
ti_id=ti_id,
chosen_options=["Approve"],
params_input={},
)
assert isinstance(result, HITLDetailResponse)
assert result.response_received is True
assert result.chosen_options == ["Approval"]
assert result.params_input == {}
assert result.responded_by_user == HITLUser(id="admin", name="admin")
assert result.responded_at == timezone.datetime(2025, 7, 3, 0, 0, 0)
def test_get_detail_response(self, time_machine: TimeMachineFixture) -> None:
time_machine.move_to(datetime(2025, 7, 3, 0, 0, 0))
ti_id = uuid7()
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path in (f"/hitlDetails/{ti_id}"):
return httpx.Response(
status_code=200,
json={
"chosen_options": ["Approval"],
"params_input": {},
"responded_by_user": {"id": "admin", "name": "admin"},
"response_received": True,
"responded_at": "2025-07-03T00:00:00Z",
},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.hitl.get_detail_response(ti_id=ti_id)
assert isinstance(result, HITLDetailResponse)
assert result.response_received is True
assert result.chosen_options == ["Approval"]
assert result.params_input == {}
assert result.responded_by_user == HITLUser(id="admin", name="admin")
assert result.responded_at == timezone.datetime(2025, 7, 3, 0, 0, 0)
| TestHITLOperations |
python | ApeWorX__ape | src/ape_ethereum/trace.py | {
"start": 21234,
"end": 27441
} | class ____(Trace):
tx: dict
"""
Transaction data. Is a dictionary to allow traces to easily
be created near sending the request.
"""
arguments: list[Any] = []
"""
Remaining eth-call arguments, minus the transaction.
"""
call_trace_approach: TraceApproach = TraceApproach.GETH_STRUCT_LOG_PARSE
"""debug_traceCall must use the struct-log tracer."""
supports_debug_trace_call: Optional[bool] = None
@field_validator("tx", mode="before")
@classmethod
def _tx_to_dict(cls, value):
if isinstance(value, TransactionAPI):
return value.model_dump(by_alias=True)
return value
@property
def raw_trace_frames(self) -> Iterator[dict]:
yield from self._traced_call.get("structLogs", [])
@property
def return_value(self) -> Any:
return self._traced_call.get("returnValue", "")
@cached_property
def _traced_call(self) -> dict:
if self.supports_debug_trace_call is True:
return self._debug_trace_call()
elif self.supports_debug_trace_call is False:
return {}
# else: is None - need to figure out what this node supports.
try:
result = self._debug_trace_call()
except Exception:
self._set_supports_trace_call(False)
return {}
self._set_supports_trace_call(True)
return result
@property
def transaction(self) -> dict:
return self.tx
def get_calltree(self) -> CallTreeNode:
calltree = self._debug_trace_transaction_struct_logs_to_call()
calltree.gas_cost = self._traced_call.get("gas", calltree.gas_cost)
calltree.failed = self._traced_call.get("failed", calltree.failed)
return calltree
def _set_supports_trace_call(self, value: bool):
self.supports_debug_trace_call = value
if hasattr(self.provider, "_supports_debug_trace_call"):
self.provider._supports_debug_trace_call = True
def _debug_trace_call(self):
arguments = [self.transaction, *self.arguments]
# Block ID is required, at least for regular geth nodes.
if len(arguments) == 1:
arguments.append("latest")
return self.provider.make_request("debug_traceCall", arguments)
def parse_rich_tree(call: dict, verbose: bool = False) -> Tree:
tree = _create_tree(call, verbose=verbose)
for event in call.get("events", []):
if "calldata" not in event and "name" not in event:
# Not sure; or not worth showing.
logger.debug(f"Unknown event data: '{event}'.")
continue
event_tree = _create_event_tree(event)
tree.add(event_tree)
for sub_call in call.get("calls", []):
sub_tree = parse_rich_tree(sub_call, verbose=verbose)
tree.add(sub_tree)
return tree
def _events_to_trees(events: list[dict]) -> list[Tree]:
event_counter = defaultdict(list)
for evt in events:
name = evt.get("name")
calldata = evt.get("calldata")
if not name or not calldata:
# Not sure; or not worth showing.
logger.debug(f"Unknown event data: '{evt}'.")
continue
tuple_key = (
name,
",".join(f"{k}={v}" for k, v in calldata.items()),
)
event_counter[tuple_key].append(evt)
result = []
for evt_tup, events in event_counter.items():
count = len(events)
evt = events[0]
if "name" not in evt and "calldata" not in evt:
# Not sure; or not worth showing.
logger.debug(f"Unknown event data: '{evt}'.")
continue
# NOTE: Using similar style to gas-cost on purpose.
suffix = f"[[{TraceStyles.GAS_COST}]x{count}[/]]" if count > 1 else ""
evt_tree = _create_event_tree(evt, suffix=suffix)
result.append(evt_tree)
return result
def _create_event_tree(event: dict, suffix: str = "") -> Tree:
signature = _event_to_str(event, stylize=True, suffix=suffix)
return Tree(signature)
def _call_to_str(call: dict, stylize: bool = False, verbose: bool = False) -> str:
is_create = "CREATE" in call.get("call_type", "")
method = (
"__new__"
if is_create and call["method_id"] and is_0x_prefixed(call["method_id"])
else str(call.get("method_id") or "")
)
contract = str(call.get("contract_id", ""))
signature = prettify_function(
method,
call.get("calldata", ""),
returndata=call.get("returndata", ""),
contract=contract,
stylize=stylize,
is_create=is_create,
)
if call.get("call_type") is not None and call["call_type"].upper() == "DELEGATECALL":
delegate = "(delegate)"
if stylize:
delegate = f"[orange]{delegate}[/]"
signature = f"{delegate} {signature}"
if call.get("value"):
value = str(call["value"])
if stylize:
value = f"[{TraceStyles.VALUE}]{value}[/]"
signature += f" {value}"
if call.get("gas_cost"):
gas_value = f"[{call['gas_cost']} gas]"
if stylize:
gas_value = f"[{TraceStyles.GAS_COST}]{gas_value}[/]"
signature += f" {gas_value}"
if verbose:
verbose_items = {k: v for k, v in call.items() if type(v) in (int, str, bytes, float)}
extra = json.dumps(verbose_items, indent=2)
signature = f"{signature}\n{extra}"
return signature
def _event_to_str(event: dict, stylize: bool = False, suffix: str = "") -> str:
# NOTE: Some of the styles are matching others parts of the trace,
# even though the 'name' is a bit misleading.
event_name = event.get("name", "ANONYMOUS_EVENT")
name = f"[{TraceStyles.METHODS}]{event_name}[/]" if stylize else event_name
arguments_str = prettify_inputs(event.get("calldata", "0x"), stylize=stylize)
prefix = f"[{TraceStyles.CONTRACTS}]log[/]" if stylize else "log"
return f"{prefix} {name}{arguments_str}{suffix}"
def _create_tree(call: dict, verbose: bool = False) -> Tree:
signature = _call_to_str(call, stylize=True, verbose=verbose)
return Tree(signature)
| CallTrace |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 943830,
"end": 944590
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for RepositoryRuleset."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("RepositoryRulesetEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("RepositoryRuleset"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| RepositoryRulesetConnection |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/model_tests/model_handler.py | {
"start": 12622,
"end": 14722
} | class ____(_ModelHandlerBase):
"""Runs a model in TF2."""
@property
def graph_func(self):
try:
return self._graph_func
except:
graph_func = load_graph_func(
saved_model_dir=self.model_config.saved_model_dir,
saved_model_tags=self.model_config.saved_model_tags,
saved_model_signature_key=self.model_config.saved_model_signature_key)
self._graph_func = convert_to_constants.convert_variables_to_constants_v2(
graph_func)
return self._graph_func
@property
def input_tensor_names(self):
return [tensor.name for tensor in self.graph_func.inputs]
@property
def output_tensor_names(self):
return [tensor.name for tensor in self.graph_func.outputs]
def generate_random_inputs(self,
batch_size: Optional[int] = None
) -> Sequence[framework_ops.Tensor]:
batch_size = batch_size or self.model_config.default_batch_size
return [
_generate_random_tensor_v2(tensor, batch_size)
for tensor in self.graph_func.inputs
]
def run(self,
inputs: Optional[Sequence[framework_ops.Tensor]] = None,
warmup_iterations=10,
benchmark_iterations=100,
enable_gpu=True) -> TestResult:
inputs = inputs or self.generate_random_inputs()
try:
device = "/device:gpu:0" if enable_gpu else "/device:cpu:0"
with framework_ops.device(device):
for _ in range(warmup_iterations):
self.graph_func(*inputs)
latency = []
for _ in range(benchmark_iterations):
before = time.time()
outputs = self.graph_func(*inputs)
latency.append(time.time() - before)
except Exception as exc:
raise RuntimeError("Failed to run model inference! "
"Model information: {}".format(str(self))) from exc
return TestResult(
model_config=self.model_config,
enable_gpu=enable_gpu,
model_latency=latency,
output_names=self.output_tensor_names,
output_tensors=outputs)
| ModelHandlerV2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.