language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
getsentry__sentry-python
|
sentry_sdk/tracing_utils.py
|
{
"start": 1483,
"end": 11670
}
|
class ____(Mapping):  # type: ignore
    """Read-only mapping view over HTTP headers stored in a WSGI environ.

    Keys are looked up case-insensitively: ``headers["X-Foo"]`` maps to
    ``environ["HTTP_X_FOO"]`` (dashes become underscores, upper-cased,
    prefixed). Iteration yields the normalized names with the prefix removed.
    """

    def __init__(
        self,
        environ,  # type: Mapping[str, str]
        prefix="HTTP_",  # type: str
    ):
        # type: (...) -> None
        self.environ = environ
        self.prefix = prefix

    def __getitem__(self, key):
        # type: (str) -> Optional[Any]
        normalized = key.replace("-", "_").upper()
        return self.environ[self.prefix + normalized]

    def __len__(self):
        # type: () -> int
        # Count by iterating: only prefixed string keys are part of the view.
        return sum(1 for _ in self)

    def __iter__(self):
        # type: () -> Generator[str, None, None]
        prefix_len = len(self.prefix)
        for raw_key in self.environ:
            # Non-string keys can never match the prefix convention.
            if not isinstance(raw_key, str):
                continue
            normalized = raw_key.replace("-", "_").upper()
            if normalized.startswith(self.prefix):
                yield normalized[prefix_len:]
def has_tracing_enabled(options):
    # type: (Optional[Dict[str, Any]]) -> bool
    """Return True when tracing is configured and not explicitly disabled.

    Tracing counts as enabled when ``enable_tracing`` is anything but
    ``False`` and at least one of ``traces_sample_rate`` or
    ``traces_sampler`` is set to a non-None value.
    """
    if options is None:
        return False
    if options.get("enable_tracing") is False:
        return False
    has_rate = options.get("traces_sample_rate") is not None
    has_sampler = options.get("traces_sampler") is not None
    return has_rate or has_sampler
@contextlib.contextmanager
def record_sql_queries(
    cursor,  # type: Any
    query,  # type: Any
    params_list,  # type: Any
    paramstyle,  # type: Optional[str]
    executemany,  # type: bool
    record_cursor_repr=False,  # type: bool
    span_origin="manual",  # type: str
):
    # type: (...) -> Generator[sentry_sdk.tracing.Span, None, None]
    """Record a DB query as a breadcrumb and yield a DB span for it."""
    # TODO: Bring back capturing of params by default
    experiments = sentry_sdk.get_client().options["_experiments"]
    if not experiments.get("record_sql_params", False):
        # Parameter capture is opt-in; drop params entirely by default.
        params_list = None
        paramstyle = None
    else:
        if not params_list or params_list == [None]:
            params_list = None
        if paramstyle == "pyformat":
            paramstyle = "format"

    query = _format_sql(cursor, query)

    data = {}
    if params_list is not None:
        data["db.params"] = params_list
    if paramstyle is not None:
        data["db.paramstyle"] = paramstyle
    if executemany:
        data["db.executemany"] = True
    if record_cursor_repr and cursor is not None:
        data["db.cursor"] = cursor

    # Breadcrumb emission must never break the instrumented query.
    with capture_internal_exceptions():
        sentry_sdk.add_breadcrumb(message=query, category="query", data=data)

    with sentry_sdk.start_span(
        op=OP.DB,
        name=query,
        origin=span_origin,
    ) as span:
        for key, value in data.items():
            span.set_data(key, value)
        yield span
def maybe_create_breadcrumbs_from_span(scope, span):
    # type: (sentry_sdk.Scope, sentry_sdk.tracing.Span) -> None
    """Add a breadcrumb to *scope* for redis/http/subprocess spans; otherwise no-op."""
    if span.op == OP.DB_REDIS:
        scope.add_breadcrumb(
            message=span.description, type="redis", category="redis", data=span._tags
        )
        return

    if span.op == OP.HTTP_CLIENT:
        # Map HTTP status to a breadcrumb level: 5xx -> error, 4xx -> warning.
        level = None
        status_code = span._data.get(SPANDATA.HTTP_STATUS_CODE)
        if status_code:
            if 500 <= status_code <= 599:
                level = "error"
            elif 400 <= status_code <= 499:
                level = "warning"
        if level is None:
            scope.add_breadcrumb(type="http", category="httplib", data=span._data)
        else:
            scope.add_breadcrumb(
                type="http", category="httplib", data=span._data, level=level
            )
        return

    if span.op == "subprocess":
        scope.add_breadcrumb(
            type="subprocess",
            category="subprocess",
            message=span.description,
            data=span._data,
        )
def _get_frame_module_abs_path(frame):
    # type: (FrameType) -> Optional[str]
    """Best-effort: return the filename backing *frame*'s code object, else None."""
    try:
        path = frame.f_code.co_filename
    except Exception:
        path = None
    return path
def _should_be_included(
    is_sentry_sdk_frame,  # type: bool
    namespace,  # type: Optional[str]
    in_app_include,  # type: Optional[list[str]]
    in_app_exclude,  # type: Optional[list[str]]
    abs_path,  # type: Optional[str]
    project_root,  # type: Optional[str]
):
    # type: (...) -> bool
    """Decide whether a stack frame counts as application code for span data."""
    # Frames inside the SDK itself are never reported.
    if is_sentry_sdk_frame:
        return False
    # in_app_include takes precedence over in_app_exclude
    if _module_in_list(namespace, in_app_include):
        return True
    excluded = _is_external_source(abs_path) or _module_in_list(
        namespace, in_app_exclude
    )
    return _is_in_project_root(abs_path, project_root) and not excluded
def add_source(span, project_root, in_app_include, in_app_exclude):
    # type: (sentry_sdk.tracing.Span, Optional[str], Optional[list[str]], Optional[list[str]]) -> None
    """
    Adds OTel compatible source code information to the span
    """
    # Find the correct frame
    # Walk up the stack from here until the first frame that qualifies as
    # application code (not the SDK, not excluded, inside the project root).
    frame = sys._getframe()  # type: Union[FrameType, None]
    while frame is not None:
        abs_path = _get_frame_module_abs_path(frame)
        try:
            namespace = frame.f_globals.get("__name__")  # type: Optional[str]
        except Exception:
            namespace = None
        is_sentry_sdk_frame = namespace is not None and namespace.startswith(
            "sentry_sdk."
        )
        should_be_included = _should_be_included(
            is_sentry_sdk_frame=is_sentry_sdk_frame,
            namespace=namespace,
            in_app_include=in_app_include,
            in_app_exclude=in_app_exclude,
            abs_path=abs_path,
            project_root=project_root,
        )
        if should_be_included:
            break
        frame = frame.f_back
    else:
        # while/else: the loop exhausted the stack without a `break`, so
        # `frame` is already None here; the assignment is redundant but harmless.
        frame = None
    # Set the data
    if frame is not None:
        try:
            lineno = frame.f_lineno
        except Exception:
            lineno = None
        if lineno is not None:
            # NOTE(review): re-reads frame.f_lineno instead of using `lineno`;
            # equivalent unless the frame advances between the two reads.
            span.set_data(SPANDATA.CODE_LINENO, frame.f_lineno)
        try:
            namespace = frame.f_globals.get("__name__")
        except Exception:
            namespace = None
        if namespace is not None:
            span.set_data(SPANDATA.CODE_NAMESPACE, namespace)
        filepath = _get_frame_module_abs_path(frame)
        if filepath is not None:
            # Prefer a module-relative path, then a project-root-relative
            # path, and finally fall back to the absolute path.
            if namespace is not None:
                in_app_path = filename_for_module(namespace, filepath)
            elif project_root is not None and filepath.startswith(project_root):
                in_app_path = filepath.replace(project_root, "").lstrip(os.sep)
            else:
                in_app_path = filepath
            span.set_data(SPANDATA.CODE_FILEPATH, in_app_path)
        try:
            code_function = frame.f_code.co_name
        except Exception:
            code_function = None
        if code_function is not None:
            span.set_data(SPANDATA.CODE_FUNCTION, frame.f_code.co_name)
def add_query_source(span):
    # type: (sentry_sdk.tracing.Span) -> None
    """
    Adds OTel compatible source code information to a database query span
    """
    client = sentry_sdk.get_client()
    if not client.is_active():
        return
    if span.timestamp is None or span.start_timestamp is None:
        return
    if not client.options.get("enable_db_query_source", True):
        return

    # Only annotate queries slower than the configured threshold (milliseconds).
    elapsed_ms = (span.timestamp - span.start_timestamp) / timedelta(milliseconds=1)
    threshold_ms = client.options.get("db_query_source_threshold_ms", 0)
    if elapsed_ms <= threshold_ms:
        return

    add_source(
        span=span,
        project_root=client.options["project_root"],
        in_app_include=client.options.get("in_app_include"),
        in_app_exclude=client.options.get("in_app_exclude"),
    )
def add_http_request_source(span):
    # type: (sentry_sdk.tracing.Span) -> None
    """
    Adds OTel compatible source code information to a span for an outgoing HTTP request
    """
    client = sentry_sdk.get_client()
    if not client.is_active():
        return
    if span.timestamp is None or span.start_timestamp is None:
        return
    if not client.options.get("enable_http_request_source", True):
        return

    # Only annotate requests slower than the configured threshold (milliseconds).
    elapsed_ms = (span.timestamp - span.start_timestamp) / timedelta(milliseconds=1)
    threshold_ms = client.options.get("http_request_source_threshold_ms", 0)
    if elapsed_ms <= threshold_ms:
        return

    add_source(
        span=span,
        project_root=client.options["project_root"],
        in_app_include=client.options.get("in_app_include"),
        in_app_exclude=client.options.get("in_app_exclude"),
    )
def extract_sentrytrace_data(header):
    # type: (Optional[str]) -> Optional[Dict[str, Union[str, bool, None]]]
    """
    Given a `sentry-trace` header string, return a dictionary of data.
    """
    if not header:
        return None

    # Strip a W3C-traceparent-style "00-...-00" wrapper, if present.
    if header.startswith("00-") and header.endswith("-00"):
        header = header[3:-3]

    match = SENTRY_TRACE_REGEX.match(header)
    if match is None:
        return None

    trace_id, parent_span_id, sampled_str = match.groups()

    if trace_id:
        # Normalize to 32 lowercase hex characters.
        trace_id = "{:032x}".format(int(trace_id, 16))
    if parent_span_id:
        # Normalize to 16 lowercase hex characters.
        parent_span_id = "{:016x}".format(int(parent_span_id, 16))

    parent_sampled = (sampled_str != "0") if sampled_str else None

    return {
        "trace_id": trace_id,
        "parent_span_id": parent_span_id,
        "parent_sampled": parent_sampled,
    }
def _format_sql(cursor, sql):
    # type: (Any, str) -> Optional[str]
    """Render *sql* for display, preferring the cursor's own formatting."""
    # If we're using psycopg2, it could be that we're
    # looking at a query that uses Composed objects. Use psycopg2's mogrify
    # function to format the query. We lose per-parameter trimming but gain
    # accuracy in formatting.
    real_sql = None
    try:
        if hasattr(cursor, "mogrify"):
            real_sql = cursor.mogrify(sql)
            if isinstance(real_sql, bytes):
                # mogrify may return bytes; decode using the connection's encoding.
                real_sql = real_sql.decode(cursor.connection.encoding)
    except Exception:
        real_sql = None

    # Fall back to a plain string rendering of the original query.
    return real_sql or to_string(sql)
|
EnvironHeaders
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/softsign_op_test.py
|
{
"start": 1110,
"end": 3064
}
|
class ____(test.TestCase):
    """Tests for the softsign op: softsign(x) = x / (1 + |x|)."""

    def _npSoftsign(self, np_features):
        # NumPy reference implementation of softsign.
        return np_features / (1 + np.abs(np_features))

    def _testSoftsign(self, np_features, atol, rtol, use_gpu=False):
        expected = self._npSoftsign(np_features)
        with self.cached_session(use_gpu=use_gpu):
            softsign = nn_ops.softsign(np_features)
            computed = self.evaluate(softsign)
        self.assertAllClose(expected, computed, rtol=rtol, atol=atol)
        self.assertShapeEqual(expected, softsign)

    def testNumbers(self):
        # Same value grid for every dtype; looser tolerances for bfloat16.
        grid = np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]])
        for dtype, atol, rtol in [
            (np.float16, 1e-6, 1e-6),
            (np.float32, 1e-6, 1e-6),
            (np.float64, 1e-6, 1e-6),
            (dtypes.bfloat16.as_numpy_dtype, 1e-2, 1e-2),
        ]:
            for use_gpu in (False, True):
                self._testSoftsign(grid.astype(dtype), atol, rtol, use_gpu=use_gpu)

    @test_util.run_deprecated_v1
    def testGradient(self):
        # Check the analytic gradient against a numeric estimate.
        with self.cached_session():
            x = constant_op.constant(
                [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
                shape=[2, 5],
                name="x")
            y = nn_ops.softsign(x, name="softsign")
            x_init = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float32,
                order="F")
            grad_err = gradient_checker.compute_gradient_error(
                x, [2, 5], y, [2, 5], x_init_value=x_init)
        print("softsign (float) gradient err = ", grad_err)
        self.assertLess(grad_err, 1e-4)

    @test_util.run_deprecated_v1
    def testNoInts(self):
        # Integer inputs must be rejected by the op's type constraints.
        with self.cached_session():
            with self.assertRaisesRegex(
                TypeError,
                "'features' has DataType int32 not in list of allowed values"):
                nn_ops.softsign(constant_op.constant(7)).eval()
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    test.main()
|
SoftsignTest
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/trainer/trainer.py
|
{
"start": 4189,
"end": 79058
}
|
class ____:
@_defaults_from_env_vars
def __init__(
    self,
    *,
    accelerator: Union[str, Accelerator] = "auto",
    strategy: Union[str, Strategy] = "auto",
    devices: Union[list[int], str, int] = "auto",
    num_nodes: int = 1,
    precision: Optional[_PRECISION_INPUT] = None,
    logger: Optional[Union[Logger, Iterable[Logger], bool]] = None,
    callbacks: Optional[Union[list[Callback], Callback]] = None,
    fast_dev_run: Union[int, bool] = False,
    max_epochs: Optional[int] = None,
    min_epochs: Optional[int] = None,
    max_steps: int = -1,
    min_steps: Optional[int] = None,
    max_time: Optional[Union[str, timedelta, dict[str, int]]] = None,
    limit_train_batches: Optional[Union[int, float]] = None,
    limit_val_batches: Optional[Union[int, float]] = None,
    limit_test_batches: Optional[Union[int, float]] = None,
    limit_predict_batches: Optional[Union[int, float]] = None,
    overfit_batches: Union[int, float] = 0.0,
    val_check_interval: Optional[Union[int, float, str, timedelta, dict[str, int]]] = None,
    check_val_every_n_epoch: Optional[int] = 1,
    num_sanity_val_steps: Optional[int] = None,
    log_every_n_steps: Optional[int] = None,
    enable_checkpointing: Optional[bool] = None,
    enable_progress_bar: Optional[bool] = None,
    enable_model_summary: Optional[bool] = None,
    accumulate_grad_batches: int = 1,
    gradient_clip_val: Optional[Union[int, float]] = None,
    gradient_clip_algorithm: Optional[str] = None,
    deterministic: Optional[Union[bool, _LITERAL_WARN]] = None,
    benchmark: Optional[bool] = None,
    inference_mode: bool = True,
    use_distributed_sampler: bool = True,
    profiler: Optional[Union[Profiler, str]] = None,
    detect_anomaly: bool = False,
    barebones: bool = False,
    plugins: Optional[Union[_PLUGIN_INPUT, list[_PLUGIN_INPUT]]] = None,
    sync_batchnorm: bool = False,
    reload_dataloaders_every_n_epochs: int = 0,
    default_root_dir: Optional[_PATH] = None,
    enable_autolog_hparams: bool = True,
    model_registry: Optional[str] = None,
) -> None:
    r"""Customize every aspect of training via flags.

    Args:
        accelerator: Supports passing different accelerator types ("cpu", "gpu", "tpu", "hpu", "mps", "auto")
            as well as custom accelerator instances.
        strategy: Supports different training strategies with aliases as well custom strategies.
            Default: ``"auto"``.
        devices: The devices to use. Can be set to a positive number (int or str), a sequence of device indices
            (list or str), the value ``-1`` to indicate all available devices should be used, or ``"auto"`` for
            automatic selection based on the chosen accelerator. Default: ``"auto"``.
        num_nodes: Number of GPU nodes for distributed training.
            Default: ``1``.
        precision: Double precision (64, '64' or '64-true'), full precision (32, '32' or '32-true'),
            16bit mixed precision (16, '16', '16-mixed') or bfloat16 mixed precision ('bf16', 'bf16-mixed').
            Can be used on CPU, GPU, TPUs, or HPUs.
            Default: ``'32-true'``.
        logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses
            the default ``TensorBoardLogger`` if it is installed, otherwise ``CSVLogger``.
            ``False`` will disable logging. If multiple loggers are provided, local files
            (checkpoints, profiler traces, etc.) are saved in the ``log_dir`` of the first logger.
            Default: ``True``.
        callbacks: Add a callback or list of callbacks.
            Default: ``None``.
        fast_dev_run: Runs n if set to ``n`` (int) else 1 if set to ``True`` batch(es)
            of train, val and test to find any bugs (ie: a sort of unit test).
            Default: ``False``.
        max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).
            If both max_epochs and max_steps are not specified, defaults to ``max_epochs = 1000``.
            To enable infinite training, set ``max_epochs = -1``.
        min_epochs: Force training for at least these many epochs. Disabled by default (None).
        max_steps: Stop training after this number of steps. Disabled by default (-1). If ``max_steps = -1``
            and ``max_epochs = None``, will default to ``max_epochs = 1000``. To enable infinite training, set
            ``max_epochs`` to ``-1``.
        min_steps: Force training for at least these number of steps. Disabled by default (``None``).
        max_time: Stop training after this amount of time has passed. Disabled by default (``None``).
            The time duration can be specified in the format DD:HH:MM:SS (days, hours, minutes seconds), as a
            :class:`datetime.timedelta`, or a dictionary with keys that will be passed to
            :class:`datetime.timedelta`.
        limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches).
            Value is per device. Default: ``1.0``.
        limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches).
            Value is per device. Default: ``1.0``.
        limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches).
            Value is per device. Default: ``1.0``.
        limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).
            Value is per device. Default: ``1.0``.
        overfit_batches: Overfit a fraction of training/validation data (float) or a set number of batches (int).
            Default: ``0.0``.
        val_check_interval: How often to check the validation set. Pass a ``float`` in the range [0.0, 1.0] to check
            after a fraction of the training epoch. Pass an ``int`` to check after a fixed number of training
            batches. An ``int`` value can only be higher than the number of training batches when
            ``check_val_every_n_epoch=None``, which validates after every ``N`` training batches
            across epochs or during iteration-based training. Additionally, accepts a time-based duration
            as a string "DD:HH:MM:SS", a :class:`datetime.timedelta`, or a dict of kwargs to
            :class:`datetime.timedelta`. When time-based, validation triggers once the elapsed wall-clock time
            since the last validation exceeds the interval; the check occurs after the current batch
            completes, the validation loop runs, and the timer is reset.
            Default: ``1.0``.
        check_val_every_n_epoch: Perform a validation loop after every `N` training epochs. If ``None``,
            validation will be done solely based on the number of training batches, requiring ``val_check_interval``
            to be an integer value. When used together with a time-based ``val_check_interval`` and
            ``check_val_every_n_epoch`` > 1, validation is aligned to epoch multiples: if the interval elapses
            before the next multiple-N epoch, validation runs at the start of that epoch (after the first batch)
            and the timer resets; if it elapses during a multiple-N epoch, validation runs after the current batch.
            For ``None`` or ``1`` cases, the time-based behavior of ``val_check_interval`` applies without
            additional alignment.
            Default: ``1``.
        num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.
            Set it to `-1` to run all batches in all validation dataloaders.
            Default: ``2``.
        log_every_n_steps: How often to log within steps.
            Default: ``50``.
        enable_checkpointing: If ``True``, enable checkpointing.
            It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.callbacks`.
            Default: ``True``.
        enable_progress_bar: Whether to enable to progress bar by default.
            Default: ``True``.
        enable_model_summary: Whether to enable model summarization by default.
            Default: ``True``.
        accumulate_grad_batches: Accumulates gradients over k batches before stepping the optimizer.
            Default: 1.
        gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=None`` disables
            gradient clipping. If using Automatic Mixed Precision (AMP), the gradients will be unscaled before.
            Default: ``None``.
        gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm="value"``
            to clip by value, and ``gradient_clip_algorithm="norm"`` to clip by norm. By default it will
            be set to ``"norm"``.
        deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.
            Set to ``"warn"`` to use deterministic algorithms whenever possible, throwing warnings on operations
            that don't support deterministic mode. If not set, defaults to ``False``. Default: ``None``.
        benchmark: The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to.
            The value for ``torch.backends.cudnn.benchmark`` set in the current session will be used
            (``False`` if not manually set). If :paramref:`~lightning.pytorch.trainer.trainer.Trainer.deterministic`
            is set to ``True``, this will default to ``False``. Override to manually set a different value.
            Default: ``None``.
        inference_mode: Whether to use :func:`torch.inference_mode` or :func:`torch.no_grad` during
            evaluation (``validate``/``test``/``predict``).
        use_distributed_sampler: Whether to wrap the DataLoader's sampler with
            :class:`torch.utils.data.DistributedSampler`. If not specified this is toggled automatically for
            strategies that require it. By default, it will add ``shuffle=True`` for the train sampler and
            ``shuffle=False`` for validation/test/predict samplers. If you want to disable this logic, you can pass
            ``False`` and add your own distributed sampler in the dataloader hooks. If ``True`` and a distributed
            sampler was already added, Lightning will not replace the existing one. For iterable-style datasets,
            we don't do this automatically.
        profiler: To profile individual steps during training and assist in identifying bottlenecks.
            Default: ``None``.
        detect_anomaly: Enable anomaly detection for the autograd engine.
            Default: ``False``.
        barebones: Whether to run in "barebones mode", where all features that may impact raw speed are
            disabled. This is meant for analyzing the Trainer overhead and is discouraged during regular training
            runs. The following features are deactivated:
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.enable_checkpointing`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.logger`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.enable_progress_bar`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.log_every_n_steps`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.enable_model_summary`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.num_sanity_val_steps`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.fast_dev_run`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.detect_anomaly`,
            :paramref:`~lightning.pytorch.trainer.trainer.Trainer.profiler`,
            :meth:`~lightning.pytorch.core.LightningModule.log`,
            :meth:`~lightning.pytorch.core.LightningModule.log_dict`.
        plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.
            Default: ``None``.
        sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
            Default: ``False``.
        reload_dataloaders_every_n_epochs: Set to a positive integer to reload dataloaders every n epochs.
            Default: ``0``.
        default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.
            Default: ``os.getcwd()``.
            Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
        enable_autolog_hparams: Whether to log hyperparameters at the start of a run.
            Default: ``True``.
        model_registry: The name of the model being uploaded to Model hub.

    Raises:
        TypeError:
            If ``gradient_clip_val`` is not an int or float.
        MisconfigurationException:
            If ``gradient_clip_algorithm`` is invalid.
    """
    super().__init__()
    log.debug(f"{self.__class__.__name__}: Initializing trainer with parameters: {locals()}")
    if default_root_dir is not None:
        default_root_dir = os.fspath(default_root_dir)
    # remove version if accidentally passed
    self._model_registry = model_registry.split(":")[0] if model_registry else None
    self.barebones = barebones
    if barebones:
        # opt-outs
        # In barebones mode, explicitly enabling any speed-impacting feature
        # is an error; unset (None/False) values are coerced to "disabled".
        if enable_checkpointing:
            raise ValueError(
                f"`Trainer(barebones=True, enable_checkpointing={enable_checkpointing!r})` was passed."
                " Checkpointing can impact raw speed so it is disabled in barebones mode."
            )
        enable_checkpointing = False
        if logger is not None and logger is not False:
            raise ValueError(
                f"`Trainer(barebones=True, logger={logger!r})` was passed."
                " Logging can impact raw speed so it is disabled in barebones mode."
            )
        logger = False
        if enable_progress_bar:
            raise ValueError(
                f"`Trainer(barebones=True, enable_progress_bar={enable_progress_bar!r})` was passed."
                " The progress bar can impact raw speed so it is disabled in barebones mode."
            )
        enable_progress_bar = False
        if log_every_n_steps is not None and log_every_n_steps != 0:
            raise ValueError(
                f"`Trainer(barebones=True, log_every_n_steps={log_every_n_steps!r})` was passed."
                " Logging can impact raw speed so it is disabled in barebones mode."
            )
        log_every_n_steps = 0
        if enable_model_summary:
            raise ValueError(
                f"`Trainer(barebones=True, enable_model_summary={enable_model_summary!r})` was passed."
                " Model summary can impact raw speed so it is disabled in barebones mode."
            )
        enable_model_summary = False
        if num_sanity_val_steps is not None and num_sanity_val_steps != 0:
            raise ValueError(
                f"`Trainer(barebones=True, num_sanity_val_steps={num_sanity_val_steps!r})` was passed."
                " Sanity checking can impact raw speed so it is disabled in barebones mode."
            )
        num_sanity_val_steps = 0
        # opt-ins
        if fast_dev_run is not False and fast_dev_run != 0:
            raise ValueError(
                f"`Trainer(barebones=True, fast_dev_run={fast_dev_run!r})` was passed."
                " Development run is not meant for raw speed evaluation so it is disabled in barebones mode."
            )
        if detect_anomaly:
            raise ValueError(
                f"`Trainer(barebones=True, detect_anomaly={detect_anomaly!r})` was passed."
                " Anomaly detection can impact raw speed so it is disabled in barebones mode."
            )
        if profiler is not None:
            raise ValueError(
                f"`Trainer(barebones=True, profiler={profiler!r})` was passed."
                " Profiling can impact raw speed so it is disabled in barebones mode."
            )
        deactivated = (
            " - Checkpointing: `Trainer(enable_checkpointing=True)`",
            " - Progress bar: `Trainer(enable_progress_bar=True)`",
            " - Model summary: `Trainer(enable_model_summary=True)`",
            " - Logging: `Trainer(logger=True)`, `Trainer(log_every_n_steps>0)`,"
            " `LightningModule.log(...)`, `LightningModule.log_dict(...)`",
            " - Sanity checking: `Trainer(num_sanity_val_steps>0)`",
            " - Development run: `Trainer(fast_dev_run=True)`",
            " - Anomaly detection: `Trainer(detect_anomaly=True)`",
            " - Profiling: `Trainer(profiler=...)`",
        )
        rank_zero_info(
            "You are running in `Trainer(barebones=True)` mode. All features that may impact raw speed have been"
            " disabled to facilitate analyzing the Trainer overhead. Specifically, the following features are"
            f" deactivated:{os.linesep}{os.linesep.join(deactivated)}"
        )
    else:
        # set the opt-out defaults
        if enable_checkpointing is None:
            enable_checkpointing = True
        if logger is None:
            logger = True
        if enable_progress_bar is None:
            enable_progress_bar = True
        if log_every_n_steps is None:
            log_every_n_steps = 50
        if enable_model_summary is None:
            enable_model_summary = True
        if num_sanity_val_steps is None:
            num_sanity_val_steps = 2
    # init connectors
    self._data_connector = _DataConnector(self)
    self._accelerator_connector = _AcceleratorConnector(
        devices=devices,
        accelerator=accelerator,
        strategy=strategy,
        num_nodes=num_nodes,
        sync_batchnorm=sync_batchnorm,
        benchmark=benchmark,
        use_distributed_sampler=use_distributed_sampler,
        deterministic=deterministic,
        precision=precision,
        plugins=plugins,
    )
    self._logger_connector = _LoggerConnector(self)
    self._callback_connector = _CallbackConnector(self)
    self._checkpoint_connector = _CheckpointConnector(self)
    self._signal_connector = _SignalConnector(self)
    # init loops
    self.fit_loop = _FitLoop(self, min_epochs=min_epochs, max_epochs=max_epochs)
    self.fit_loop.epoch_loop = _TrainingEpochLoop(self, min_steps=min_steps, max_steps=max_steps)
    self.validate_loop = _EvaluationLoop(
        self, TrainerFn.VALIDATING, RunningStage.VALIDATING, inference_mode=inference_mode
    )
    self.test_loop = _EvaluationLoop(self, TrainerFn.TESTING, RunningStage.TESTING, inference_mode=inference_mode)
    self.predict_loop = _PredictionLoop(self, inference_mode=inference_mode)
    self.accumulate_grad_batches = accumulate_grad_batches
    # init callbacks
    # Declare attributes to be set in _callback_connector on_trainer_init
    self._callback_connector.on_trainer_init(
        callbacks,
        enable_checkpointing,
        enable_progress_bar,
        default_root_dir,
        enable_model_summary,
        max_time,
    )
    # init data flags
    self.check_val_every_n_epoch: Optional[int]
    self._data_connector.on_trainer_init(
        val_check_interval,
        reload_dataloaders_every_n_epochs,
        check_val_every_n_epoch,
    )
    # gradient clipping
    if gradient_clip_val is not None and not isinstance(gradient_clip_val, (int, float)):
        raise TypeError(f"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.")
    if gradient_clip_algorithm is not None and not GradClipAlgorithmType.supported_type(
        gradient_clip_algorithm.lower()
    ):
        raise MisconfigurationException(
            f"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. "
            f"Allowed algorithms: {GradClipAlgorithmType.supported_types()}."
        )
    self.gradient_clip_val: Optional[Union[int, float]] = gradient_clip_val
    self.gradient_clip_algorithm: Optional[GradClipAlgorithmType] = (
        GradClipAlgorithmType(gradient_clip_algorithm.lower()) if gradient_clip_algorithm is not None else None
    )
    if detect_anomaly:
        rank_zero_info(
            "You have turned on `Trainer(detect_anomaly=True)`. This will significantly slow down compute speed and"
            " is recommended only for model debugging."
        )
    self._detect_anomaly: bool = detect_anomaly
    setup._log_device_info(self)
    self.should_stop = False
    self.state = TrainerState()
    # configure profiler
    setup._init_profiler(self, profiler)
    # init logger flags
    self._loggers: list[Logger]
    self._logger_connector.on_trainer_init(logger, log_every_n_steps)
    # init debugging flags
    self.val_check_batch: Optional[Union[int, float]] = None
    self.val_check_interval: Union[int, float]
    self.num_sanity_val_steps: Union[int, float]
    self.limit_train_batches: Union[int, float]
    self.limit_val_batches: Union[int, float]
    self.limit_test_batches: Union[int, float]
    self.limit_predict_batches: Union[int, float]
    setup._init_debugging_flags(
        self,
        limit_train_batches,
        limit_val_batches,
        limit_test_batches,
        limit_predict_batches,
        fast_dev_run,
        overfit_batches,
        val_check_interval,
        num_sanity_val_steps,
    )
    self.enable_autolog_hparams = enable_autolog_hparams
def fit(
    self,
    model: "pl.LightningModule",
    train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
    val_dataloaders: Optional[EVAL_DATALOADERS] = None,
    datamodule: Optional[LightningDataModule] = None,
    ckpt_path: Optional[_PATH] = None,
    weights_only: Optional[bool] = None,
) -> None:
    r"""Runs the full optimization routine.

    Args:
        model: Model to fit.
        train_dataloaders: An iterable or collection of iterables specifying training samples.
            Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
            the :class:`~lightning.pytorch.core.hooks.DataHooks.train_dataloader` hook.
        val_dataloaders: An iterable or collection of iterables specifying validation samples.
        datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
            the :class:`~lightning.pytorch.core.hooks.DataHooks.train_dataloader` hook.
        ckpt_path: Path/URL of the checkpoint from which training is resumed. Could also be one of three special
            keywords ``"last"``, ``"hpc"`` and ``"registry"``.
            Otherwise, if there is no checkpoint file at the path, an exception is raised.

            - best: the best model checkpoint from the previous ``trainer.fit`` call will be loaded
            - last: the last model checkpoint from the previous ``trainer.fit`` call will be loaded
            - registry: the model will be downloaded from the Lightning Model Registry with following notations:

                - ``'registry'``: uses the latest/default version of default model set
                  with ``Trainer(..., model_registry="my-model")``
                - ``'registry:model-name'``: uses the latest/default version of this model `model-name`
                - ``'registry:model-name:version:v2'``: uses the specific version 'v2' of the model `model-name`
                - ``'registry:version:v2'``: uses the default model set
                  with ``Trainer(..., model_registry="my-model")`` and version 'v2'
        weights_only: Defaults to ``None``. If ``True``, restricts loading to ``state_dicts`` of plain
            ``torch.Tensor`` and other primitive types. If loading a checkpoint from a trusted source that contains
            an ``nn.Module``, use ``weights_only=False``. If loading checkpoint from an untrusted source, we
            recommend using ``weights_only=True``. For more information, please refer to the
            `PyTorch Developer Notes on Serialization Semantics <https://docs.pytorch.org/docs/main/notes/serialization.html#id3>`_.

    For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.

    :rtype: :py:obj:`None`

    Raises:
        TypeError:
            If ``model`` is not :class:`~lightning.pytorch.core.LightningModule` for torch version less than
            2.0.0 and if ``model`` is not :class:`~lightning.pytorch.core.LightningModule` or
            :class:`torch._dynamo.OptimizedModule` for torch versions greater than or equal to 2.0.0 .
    """
    # Unwrap a torch.compile-d module back to its LightningModule, if applicable.
    model = _maybe_unwrap_optimized(model)
    self.strategy._lightning_module = model
    _verify_strategy_supports_compile(model, self.strategy)
    # Move the trainer into the FITTING/RUNNING state before dispatching.
    self.state.fn = TrainerFn.FITTING
    self.state.status = TrainerStatus.RUNNING
    self.training = True
    self.should_stop = False
    # Route through the interrupt handler so e.g. KeyboardInterrupt triggers
    # a graceful teardown instead of a raw traceback.
    call._call_and_handle_interrupt(
        self,
        self._fit_impl,
        model,
        train_dataloaders,
        val_dataloaders,
        datamodule,
        ckpt_path,
        weights_only,
    )
    def _fit_impl(
        self,
        model: "pl.LightningModule",
        train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
        val_dataloaders: Optional[EVAL_DATALOADERS] = None,
        datamodule: Optional[LightningDataModule] = None,
        ckpt_path: Optional[_PATH] = None,
        weights_only: Optional[bool] = None,
    ) -> None:
        """Inner implementation of ``Trainer.fit``, executed under the interrupt handler.

        Normalizes the dataloader/datamodule arguments, attaches the data to the trainer,
        resolves ``ckpt_path`` and delegates to :meth:`_run`.
        """
        log.debug(f"{self.__class__.__name__}: trainer fit stage")
        # if a datamodule comes in as the second arg, then fix it for the user
        if isinstance(train_dataloaders, LightningDataModule):
            datamodule = train_dataloaders
            train_dataloaders = None
        # If you supply a datamodule you can't supply train_dataloader or val_dataloaders
        if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:
            raise MisconfigurationException(
                "You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`"
            )
        # links data to the trainer
        self._data_connector.attach_data(
            model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
        )
        assert self.state.fn is not None
        # "registry:" checkpoint paths refer to the Lightning Model Registry; fetch the file first
        if _is_registry(ckpt_path) and module_available("litmodels"):
            download_model_from_registry(ckpt_path, self)
        ckpt_path = self._checkpoint_connector._select_ckpt_path(
            self.state.fn,
            ckpt_path,
            model_provided=True,
            model_connected=self.lightning_module is not None,
        )
        self._run(model, ckpt_path=ckpt_path, weights_only=weights_only)
        assert self.state.stopped
        self.training = False
        return
    def validate(
        self,
        model: Optional["pl.LightningModule"] = None,
        dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
        ckpt_path: Optional[_PATH] = None,
        verbose: bool = True,
        datamodule: Optional[LightningDataModule] = None,
        weights_only: Optional[bool] = None,
    ) -> _EVALUATE_OUTPUT:
        r"""Perform one evaluation epoch over the validation set.
        Args:
            model: The model to validate.
            dataloaders: An iterable or collection of iterables specifying validation samples.
                Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.val_dataloader` hook.
            ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"``, ``"registry"`` or path to the checkpoint you wish
                to validate. If ``None`` and the model instance was passed, use the current weights.
                Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
                if a checkpoint callback is configured.
            verbose: If True, prints the validation results.
            datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.val_dataloader` hook.
            weights_only: Defaults to ``None``. If ``True``, restricts loading to ``state_dicts`` of plain
                ``torch.Tensor`` and other primitive types. If loading a checkpoint from a trusted source that contains
                an ``nn.Module``, use ``weights_only=False``. If loading checkpoint from an untrusted source, we
                recommend using ``weights_only=True``. For more information, please refer to the
                `PyTorch Developer Notes on Serialization Semantics <https://docs.pytorch.org/docs/main/notes/serialization.html#id3>`_.
        For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.
        Returns:
            List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks
            like :meth:`~lightning.pytorch.LightningModule.validation_step` etc.
            The length of the list corresponds to the number of validation dataloaders used.
        Raises:
            TypeError:
                If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run.
                If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`.
            MisconfigurationException:
                If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these.
            RuntimeError:
                If a compiled ``model`` is passed and the strategy is not supported.
        """
        if model is None:
            # do we still have a reference from a previous call?
            if self.lightning_module is None:
                raise TypeError(
                    "`Trainer.validate()` requires a `LightningModule` when it hasn't been passed in a previous run"
                )
        else:
            # unwrap a torch.compile'd module and attach the module to the strategy
            model = _maybe_unwrap_optimized(model)
            self.strategy._lightning_module = model
        _verify_strategy_supports_compile(self.lightning_module, self.strategy)
        # mark the trainer as running the validation stage
        self.state.fn = TrainerFn.VALIDATING
        self.state.status = TrainerStatus.RUNNING
        self.validating = True
        # route through the interrupt handler so interrupts are handled uniformly
        return call._call_and_handle_interrupt(
            self, self._validate_impl, model, dataloaders, ckpt_path, verbose, datamodule, weights_only
        )
    def _validate_impl(
        self,
        model: Optional["pl.LightningModule"] = None,
        dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
        ckpt_path: Optional[_PATH] = None,
        verbose: bool = True,
        datamodule: Optional[LightningDataModule] = None,
        weights_only: Optional[bool] = None,
    ) -> Optional[Union[_PREDICT_OUTPUT, _EVALUATE_OUTPUT]]:
        """Inner implementation of ``Trainer.validate``, executed under the interrupt handler."""
        # --------------------
        # SETUP HOOK
        # --------------------
        log.debug(f"{self.__class__.__name__}: trainer validate stage")
        # if a datamodule comes in as the second arg, then fix it for the user
        if isinstance(dataloaders, LightningDataModule):
            datamodule = dataloaders
            dataloaders = None
        # If you supply a datamodule you can't supply val_dataloaders
        if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `trainer.validate(dataloaders=..., datamodule=...)`")
        # fall back to the module attached in a previous call when none is given
        if model is None:
            model = self.lightning_module
            model_provided = False
        else:
            model_provided = True
        self.validate_loop.verbose = verbose
        # links data to the trainer
        self._data_connector.attach_data(model, val_dataloaders=dataloaders, datamodule=datamodule)
        assert self.state.fn is not None
        # "registry:" checkpoint paths refer to the Lightning Model Registry; fetch the file first
        if _is_registry(ckpt_path) and module_available("litmodels"):
            download_model_from_registry(ckpt_path, self)
        ckpt_path = self._checkpoint_connector._select_ckpt_path(
            self.state.fn, ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
        )
        results = self._run(model, ckpt_path=ckpt_path, weights_only=weights_only)
        # remove the tensors from the validation results
        results = convert_tensors_to_scalars(results)
        assert self.state.stopped
        self.validating = False
        return results
    def test(
        self,
        model: Optional["pl.LightningModule"] = None,
        dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
        ckpt_path: Optional[_PATH] = None,
        verbose: bool = True,
        datamodule: Optional[LightningDataModule] = None,
        weights_only: Optional[bool] = None,
    ) -> _EVALUATE_OUTPUT:
        r"""Perform one evaluation epoch over the test set. It's separated from fit to make sure you never run on your
        test set until you want to.
        Args:
            model: The model to test.
            dataloaders: An iterable or collection of iterables specifying test samples.
                Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.test_dataloader` hook.
            ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"``, ``"registry"`` or path to the checkpoint you wish
                to test. If ``None`` and the model instance was passed, use the current weights.
                Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
                if a checkpoint callback is configured.
            verbose: If True, prints the test results.
            datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.test_dataloader` hook.
            weights_only: Defaults to ``None``. If ``True``, restricts loading to ``state_dicts`` of plain
                ``torch.Tensor`` and other primitive types. If loading a checkpoint from a trusted source that contains
                an ``nn.Module``, use ``weights_only=False``. If loading checkpoint from an untrusted source, we
                recommend using ``weights_only=True``. For more information, please refer to the
                `PyTorch Developer Notes on Serialization Semantics <https://docs.pytorch.org/docs/main/notes/serialization.html#id3>`_.
        For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.
        Returns:
            List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks
            like :meth:`~lightning.pytorch.LightningModule.test_step` etc.
            The length of the list corresponds to the number of test dataloaders used.
        Raises:
            TypeError:
                If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run.
                If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`.
            MisconfigurationException:
                If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these.
            RuntimeError:
                If a compiled ``model`` is passed and the strategy is not supported.
        """
        if model is None:
            # do we still have a reference from a previous call?
            if self.lightning_module is None:
                raise TypeError(
                    "`Trainer.test()` requires a `LightningModule` when it hasn't been passed in a previous run"
                )
        else:
            # unwrap a torch.compile'd module and attach the module to the strategy
            model = _maybe_unwrap_optimized(model)
            self.strategy._lightning_module = model
        _verify_strategy_supports_compile(self.lightning_module, self.strategy)
        # mark the trainer as running the test stage
        self.state.fn = TrainerFn.TESTING
        self.state.status = TrainerStatus.RUNNING
        self.testing = True
        # route through the interrupt handler so interrupts are handled uniformly
        return call._call_and_handle_interrupt(
            self, self._test_impl, model, dataloaders, ckpt_path, verbose, datamodule, weights_only
        )
    def _test_impl(
        self,
        model: Optional["pl.LightningModule"] = None,
        dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
        ckpt_path: Optional[_PATH] = None,
        verbose: bool = True,
        datamodule: Optional[LightningDataModule] = None,
        weights_only: Optional[bool] = None,
    ) -> Optional[Union[_PREDICT_OUTPUT, _EVALUATE_OUTPUT]]:
        """Inner implementation of ``Trainer.test``, executed under the interrupt handler."""
        # --------------------
        # SETUP HOOK
        # --------------------
        log.debug(f"{self.__class__.__name__}: trainer test stage")
        # if a datamodule comes in as the second arg, then fix it for the user
        if isinstance(dataloaders, LightningDataModule):
            datamodule = dataloaders
            dataloaders = None
        # If you supply a datamodule you can't supply test_dataloaders
        if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `trainer.test(dataloaders=..., datamodule=...)`")
        # fall back to the module attached in a previous call when none is given
        if model is None:
            model = self.lightning_module
            model_provided = False
        else:
            model_provided = True
        self.test_loop.verbose = verbose
        # links data to the trainer
        self._data_connector.attach_data(model, test_dataloaders=dataloaders, datamodule=datamodule)
        assert self.state.fn is not None
        # "registry:" checkpoint paths refer to the Lightning Model Registry; fetch the file first
        if _is_registry(ckpt_path) and module_available("litmodels"):
            download_model_from_registry(ckpt_path, self)
        ckpt_path = self._checkpoint_connector._select_ckpt_path(
            self.state.fn, ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
        )
        results = self._run(model, ckpt_path=ckpt_path, weights_only=weights_only)
        # remove the tensors from the test results
        results = convert_tensors_to_scalars(results)
        assert self.state.stopped
        self.testing = False
        return results
    def predict(
        self,
        model: Optional["pl.LightningModule"] = None,
        dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
        datamodule: Optional[LightningDataModule] = None,
        return_predictions: Optional[bool] = None,
        ckpt_path: Optional[_PATH] = None,
        weights_only: Optional[bool] = None,
    ) -> Optional[_PREDICT_OUTPUT]:
        r"""Run inference on your data. This will call the model forward function to compute predictions. Useful to
        perform distributed and batched predictions. Logging is disabled in the predict hooks.
        Args:
            model: The model to predict with.
            dataloaders: An iterable or collection of iterables specifying predict samples.
                Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.predict_dataloader` hook.
            datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.predict_dataloader` hook.
            return_predictions: Whether to return predictions.
                ``True`` by default except when an accelerator that spawns processes is used (not supported).
            ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"``, ``"registry"`` or path to the checkpoint you wish
                to predict. If ``None`` and the model instance was passed, use the current weights.
                Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
                if a checkpoint callback is configured.
            weights_only: Defaults to ``None``. If ``True``, restricts loading to ``state_dicts`` of plain
                ``torch.Tensor`` and other primitive types. If loading a checkpoint from a trusted source that contains
                an ``nn.Module``, use ``weights_only=False``. If loading checkpoint from an untrusted source, we
                recommend using ``weights_only=True``. For more information, please refer to the
                `PyTorch Developer Notes on Serialization Semantics <https://docs.pytorch.org/docs/main/notes/serialization.html#id3>`_.
        For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.
        Returns:
            Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.
        Raises:
            TypeError:
                If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run.
                If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`.
            MisconfigurationException:
                If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these.
            RuntimeError:
                If a compiled ``model`` is passed and the strategy is not supported.
        See :ref:`Lightning inference section<deploy/production_basic:Predict step with your LightningModule>` for more.
        """
        if model is None:
            # do we still have a reference from a previous call?
            if self.lightning_module is None:
                raise TypeError(
                    "`Trainer.predict()` requires a `LightningModule` when it hasn't been passed in a previous run"
                )
        else:
            # unwrap a torch.compile'd module and attach the module to the strategy
            model = _maybe_unwrap_optimized(model)
            self.strategy._lightning_module = model
        _verify_strategy_supports_compile(self.lightning_module, self.strategy)
        # mark the trainer as running the prediction stage
        self.state.fn = TrainerFn.PREDICTING
        self.state.status = TrainerStatus.RUNNING
        self.predicting = True
        # route through the interrupt handler so interrupts are handled uniformly
        return call._call_and_handle_interrupt(
            self,
            self._predict_impl,
            model,
            dataloaders,
            datamodule,
            return_predictions,
            ckpt_path,
            weights_only,
        )
    def _predict_impl(
        self,
        model: Optional["pl.LightningModule"] = None,
        dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
        datamodule: Optional[LightningDataModule] = None,
        return_predictions: Optional[bool] = None,
        ckpt_path: Optional[_PATH] = None,
        weights_only: Optional[bool] = None,
    ) -> Optional[_PREDICT_OUTPUT]:
        """Inner implementation of ``Trainer.predict``, executed under the interrupt handler."""
        # --------------------
        # SETUP HOOK
        # --------------------
        log.debug(f"{self.__class__.__name__}: trainer predict stage")
        self.predict_loop.return_predictions = return_predictions
        # if a datamodule comes in as the second arg, then fix it for the user
        if isinstance(dataloaders, LightningDataModule):
            datamodule = dataloaders
            dataloaders = None
        if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `trainer.predict(dataloaders=..., datamodule=...)`")
        # fall back to the module attached in a previous call when none is given
        if model is None:
            model = self.lightning_module
            model_provided = False
        else:
            model_provided = True
        # links data to the trainer
        self._data_connector.attach_data(model, predict_dataloaders=dataloaders, datamodule=datamodule)
        assert self.state.fn is not None
        # "registry:" checkpoint paths refer to the Lightning Model Registry; fetch the file first
        if _is_registry(ckpt_path) and module_available("litmodels"):
            download_model_from_registry(ckpt_path, self)
        ckpt_path = self._checkpoint_connector._select_ckpt_path(
            self.state.fn, ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
        )
        results = self._run(model, ckpt_path=ckpt_path, weights_only=weights_only)
        assert self.state.stopped
        self.predicting = False
        return results
def _run(
self,
model: "pl.LightningModule",
ckpt_path: Optional[_PATH] = None,
weights_only: Optional[bool] = None,
) -> Optional[Union[_EVALUATE_OUTPUT, _PREDICT_OUTPUT]]:
if self.state.fn == TrainerFn.FITTING:
min_epochs, max_epochs = _parse_loop_limits(
self.min_steps, self.max_steps, self.min_epochs, self.max_epochs, self
)
self.fit_loop.min_epochs = min_epochs
self.fit_loop.max_epochs = max_epochs
if self.barebones:
# no progress bar in barebones can make it look like the Trainer hung
rank_zero_info(
"`Trainer(barebones=True)` started running. The progress bar is disabled so you might want to"
" manually print the progress in your model."
)
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# attach model to the strategy
self.strategy.connect(model)
self._callback_connector._attach_model_callbacks()
self._callback_connector._attach_model_logging_functions()
_verify_loop_configurations(self)
# ----------------------------
# SET UP THE TRAINER
# ----------------------------
log.debug(f"{self.__class__.__name__}: setting up strategy environment")
self.strategy.setup_environment()
self.__setup_profiler()
log.debug(f"{self.__class__.__name__}: preparing data")
self._data_connector.prepare_data()
call._call_setup_hook(self) # allow user to set up LightningModule in accelerator environment
log.debug(f"{self.__class__.__name__}: configuring model")
call._call_configure_model(self)
# check if we should delay restoring checkpoint till later
if not self.strategy.restore_checkpoint_after_setup:
log.debug(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._checkpoint_connector._restore_modules_and_callbacks(ckpt_path, weights_only)
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
# strategy will configure model and move it to the device
self.strategy.setup(self)
# hook
if self.state.fn == TrainerFn.FITTING:
call._call_callback_hooks(self, "on_fit_start")
call._call_lightning_module_hook(self, "on_fit_start")
# only log hparams if enabled
if self.enable_autolog_hparams:
_log_hyperparams(self)
if self.strategy.restore_checkpoint_after_setup:
log.debug(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._checkpoint_connector._restore_modules_and_callbacks(ckpt_path)
# restore optimizers, etc.
log.debug(f"{self.__class__.__name__}: restoring training state")
self._checkpoint_connector.restore_training_state()
self._checkpoint_connector.resume_end()
self._signal_connector.register_signal_handlers()
# ----------------------------
# RUN THE TRAINER
# ----------------------------
results = self._run_stage()
# ----------------------------
# POST-Training CLEAN UP
# ----------------------------
log.debug(f"{self.__class__.__name__}: trainer tearing down")
self._teardown()
if self.state.fn == TrainerFn.FITTING:
call._call_callback_hooks(self, "on_fit_end")
call._call_lightning_module_hook(self, "on_fit_end")
log.debug(f"{self.__class__.__name__}: calling teardown hooks")
call._call_teardown_hook(self)
self.state.status = TrainerStatus.FINISHED
self.state.stage = None
return results
def _teardown(self) -> None:
"""This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and Callback;
those are handled by :meth:`_call_teardown_hook`."""
self.strategy.teardown()
loop = self._active_loop
# loop should never be `None` here but it can because we don't know the trainer stage with `ddp_spawn`
if loop is not None:
loop.teardown()
self._logger_connector.teardown()
self._signal_connector.teardown()
    def _run_stage(self) -> Optional[Union[_PREDICT_OUTPUT, _EVALUATE_OUTPUT]]:
        """Dispatch to the loop matching the current stage and return its results, if any."""
        # wait for all to join if on distributed
        self.strategy.barrier("run-stage")
        self.lightning_module.zero_grad()
        if self.evaluating:
            return self._evaluation_loop.run()
        if self.predicting:
            return self.predict_loop.run()
        if self.training:
            # run the sanity check with the RNG state isolated so it doesn't affect training randomness
            with isolate_rng():
                self._run_sanity_check()
            with torch.autograd.set_detect_anomaly(self._detect_anomaly):
                self.fit_loop.run()
            return None
        raise RuntimeError(f"Unexpected state {self.state}")
    def _run_sanity_check(self) -> None:
        """Run a small number of validation batches before training to surface errors early."""
        val_loop = self.fit_loop.epoch_loop.val_loop
        should_sanity_check = (
            self.enable_validation
            and self.num_sanity_val_steps > 0
            # do not sanity check if restarting because it would mess up the loaded state
            and not val_loop.restarting
        )
        # run tiny validation (if validation defined)
        # to make sure program won't crash during val
        if should_sanity_check:
            # remember the current stage so it can be restored afterwards
            stage = self.state.stage
            self.sanity_checking = True
            # reset logger connector
            self._logger_connector.reset_results()
            self._logger_connector.reset_metrics()
            call._call_callback_hooks(self, "on_sanity_check_start")
            # run eval step
            val_loop.run()
            call._call_callback_hooks(self, "on_sanity_check_end")
            # reset logger connector
            self._logger_connector.reset_results()
            self._logger_connector.reset_metrics()
            # reset the progress tracking state after sanity checking. we don't need to set the state before
            # because sanity check only runs when we are not restarting
            _reset_progress(val_loop)
            # restore the previous stage when the sanity check if finished
            self.state.stage = stage
    def __setup_profiler(self) -> None:
        """Attach the lightning module to the profiler and run its ``setup`` for the current stage."""
        assert self.state.fn is not None
        # only pass a local rank in multi-process runs; single-process runs get `None`
        local_rank = self.local_rank if self.world_size > 1 else None
        # NOTE(review): `proxy` is presumably `weakref.proxy` to avoid a reference cycle — confirm the import
        self.profiler._lightning_module = proxy(self.lightning_module)
        self.profiler.setup(stage=self.state.fn, local_rank=local_rank, log_dir=self.log_dir)
@contextmanager
def init_module(self, empty_init: Optional[bool] = None) -> Generator:
"""Tensors that you instantiate under this context manager will be created on the device right away and have
the right data type depending on the precision setting in the Trainer.
The parameters and tensors get created on the device and with the right data type right away without wasting
memory being allocated unnecessarily.
Args:
empty_init: Whether to initialize the model with empty weights (uninitialized memory).
If ``None``, the strategy will decide. Some strategies may not support all options.
Set this to ``True`` if you are loading a checkpoint into a large model.
"""
if is_overridden("model_sharded_context", self.strategy, parent=Strategy):
# warning instead of error so that code changes are not required when changing strategies
# this is a limitation because processes are not expected to have been launched when this is called
rank_zero_warn(
f"`trainer.init_module` cannot fully support proper instantiation of your model with the"
f" `{type(self.strategy).__name__}` strategy. Please instantiate your model inside the"
f"`LightningModule.configure_model` hook instead",
# ideally we would check if `configure_model` is already overridden, but we don't have a reliable
# reference to the model yet
category=PossibleUserWarning,
)
with self.strategy.tensor_init_context(empty_init=empty_init):
yield
def print(self, *args: Any, **kwargs: Any) -> None:
"""Print something only on the first process. If running on multiple machines, it will print from the first
process in each machine.
Arguments passed to this method are forwarded to the Python built-in :func:`print` function.
"""
if self.local_rank == 0:
print(*args, **kwargs)
"""
Accelerator properties
"""
    @property
    def accelerator(self) -> Accelerator:
        """The accelerator instance attached to the current strategy."""
        assert self.strategy.accelerator
        return self.strategy.accelerator
    @property
    def strategy(self) -> Strategy:
        """The strategy resolved by the accelerator connector."""
        return self._accelerator_connector.strategy
    @property
    def precision_plugin(self) -> Precision:
        """The precision plugin of the current strategy."""
        return self.strategy.precision_plugin
    @property
    def global_rank(self) -> int:
        """The global rank of this process, as reported by the strategy."""
        return self.strategy.global_rank
    @property
    def local_rank(self) -> int:
        """The rank of this process within its node; ``0`` when the strategy defines none."""
        # some strategies define a local rank
        return getattr(self.strategy, "local_rank", 0)
    @property
    def node_rank(self) -> int:
        """The rank of this node; ``0`` when the strategy defines none."""
        # some strategies define a node rank
        return getattr(self.strategy, "node_rank", 0)
    @property
    def world_size(self) -> int:
        """The total number of processes, ``1`` when the strategy defines none."""
        # some strategies define a world size
        return getattr(self.strategy, "world_size", 1)
    @property
    def num_nodes(self) -> int:
        """The number of nodes, ``1`` when the strategy defines none."""
        return getattr(self.strategy, "num_nodes", 1)
@property
def device_ids(self) -> list[int]:
"""List of device indexes per node."""
devices = (
self.strategy.parallel_devices
if isinstance(self.strategy, ParallelStrategy)
else [self.strategy.root_device]
)
assert devices is not None
device_ids = []
for idx, device in enumerate(devices):
if isinstance(device, torch.device):
device_ids.append(device.index or idx)
elif isinstance(device, int):
device_ids.append(device)
return device_ids
    @property
    def num_devices(self) -> int:
        """Number of devices the trainer uses per node."""
        return len(self.device_ids)
    @property
    def lightning_module(self) -> "pl.LightningModule":
        """The LightningModule currently attached to the strategy."""
        # TODO: this is actually an optional return
        return self.strategy.lightning_module # type: ignore[return-value]
    @property
    def optimizers(self) -> list[Optimizer]:
        """The optimizers held by the strategy."""
        return self.strategy.optimizers
    @optimizers.setter
    def optimizers(self, new_optims: list[Optimizer]) -> None:
        # replaces the strategy's optimizer list wholesale
        self.strategy.optimizers = new_optims
    @property
    def lr_scheduler_configs(self) -> list[LRSchedulerConfig]:
        """The learning-rate scheduler configurations held by the strategy."""
        return self.strategy.lr_scheduler_configs
    @property
    def precision(self) -> _PRECISION_INPUT_STR:
        """The precision string of the strategy's precision plugin."""
        return self.strategy.precision_plugin.precision
    @property
    def scaler(self) -> Optional[Any]:
        """The gradient scaler of the precision plugin, or ``None`` if it defines none."""
        return getattr(self.precision_plugin, "scaler", None)
    @property
    def model(self) -> Optional[torch.nn.Module]:
        """The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.
        To access the pure LightningModule, use
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.lightning_module` instead.
        """
        return self.strategy.model
"""
General properties
"""
@property
def log_dir(self) -> Optional[str]:
"""The directory for the current experiment. Use this to save images to, etc...
.. note:: You must call this on all processes. Failing to do so will cause your program to stall forever.
.. code-block:: python
def training_step(self, batch, batch_idx):
img = ...
save_img(img, self.trainer.log_dir)
"""
if len(self.loggers) > 0:
if not isinstance(self.loggers[0], (TensorBoardLogger, CSVLogger)):
dirpath = self.loggers[0].save_dir
else:
dirpath = self.loggers[0].log_dir
else:
dirpath = self.default_root_dir
dirpath = self.strategy.broadcast(dirpath)
return dirpath
    @property
    def is_global_zero(self) -> bool:
        """Whether this process is the global zero in multi-node training.
        .. code-block:: python
            def training_step(self, batch, batch_idx):
                if self.trainer.is_global_zero:
                    print("in node 0, accelerator 0")
        """
        return self.strategy.is_global_zero
    @property
    def distributed_sampler_kwargs(self) -> Optional[dict[str, Any]]:
        """Keyword arguments for distributed samplers; ``None`` unless a parallel strategy is used."""
        if isinstance(self.strategy, ParallelStrategy):
            return self.strategy.distributed_sampler_kwargs
        return None
    @property
    def enable_validation(self) -> bool:
        """Check if we should run validation during training."""
        # requires a validation data source, an overridden `validation_step`, and a positive batch limit
        return (
            self.fit_loop.epoch_loop.val_loop._data_source.is_defined()
            and is_overridden("validation_step", self.lightning_module)
            and self.limit_val_batches > 0
        )
    @property
    def default_root_dir(self) -> str:
        """The default location to save artifacts of loggers, checkpoints etc.
        It is used as a fallback if logger or checkpoint callback do not define specific save paths.
        """
        # normalize/expand only for local filesystem paths; remote URLs pass through untouched
        if _is_local_file_protocol(self._default_root_dir):
            return os.path.normpath(os.path.expanduser(self._default_root_dir))
        return self._default_root_dir
    @property
    def early_stopping_callback(self) -> Optional[EarlyStopping]:
        """The first :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback in the
        Trainer.callbacks list, or ``None`` if it doesn't exist."""
        callbacks = self.early_stopping_callbacks
        return callbacks[0] if len(callbacks) > 0 else None
    @property
    def early_stopping_callbacks(self) -> list[EarlyStopping]:
        """A list of all instances of :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` found in the
        Trainer.callbacks list."""
        return [c for c in self.callbacks if isinstance(c, EarlyStopping)]
    @property
    def checkpoint_callback(self) -> Optional[Checkpoint]:
        """The first :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callback in the
        Trainer.callbacks list, or ``None`` if it doesn't exist."""
        callbacks = self.checkpoint_callbacks
        return callbacks[0] if len(callbacks) > 0 else None
    @property
    def checkpoint_callbacks(self) -> list[Checkpoint]:
        """A list of all instances of :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` found in
        the Trainer.callbacks list."""
        return [c for c in self.callbacks if isinstance(c, Checkpoint)]
@property
def progress_bar_callback(self) -> Optional[ProgressBar]:
"""An instance of :class:`~lightning.pytorch.callbacks.progress.progress_bar.ProgressBar` found in the
Trainer.callbacks list, or ``None`` if one doesn't exist."""
for c in self.callbacks:
if isinstance(c, ProgressBar):
return c
return None
    @property
    def ckpt_path(self) -> Optional[_PATH]:
        """Set to the path/URL of a checkpoint loaded via :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit`,
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate`,
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.test`, or
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict`.
        ``None`` otherwise.
        """
        return self._checkpoint_connector._ckpt_path
    @ckpt_path.setter
    def ckpt_path(self, ckpt_path: Optional[_PATH]) -> None:
        """Allows you to manage which checkpoint is loaded statefully.
        .. code-block:: python
            trainer = Trainer()
            trainer.ckpt_path = "my/checkpoint/file.ckpt"
            trainer.fit(model)
            ...
            # you will be in charge of resetting this
            trainer.ckpt_path = None
            trainer.test(model)
        """
        self._checkpoint_connector._ckpt_path = ckpt_path
        # a truthy path marks the checkpoint as user-managed; setting ``None`` returns control to the Trainer
        self._checkpoint_connector._user_managed = bool(ckpt_path)
    def save_checkpoint(
        self, filepath: _PATH, weights_only: Optional[bool] = None, storage_options: Optional[Any] = None
    ) -> None:
        r"""Runs routine to create a checkpoint.
        This method needs to be called on all processes in case the selected strategy is handling distributed
        checkpointing.
        Args:
            filepath: Path where checkpoint is saved.
            weights_only: If ``True``, will only save the model weights.
            storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin
        Raises:
            AttributeError:
                If the model is not attached to the Trainer before calling this method.
        """
        if self.model is None:
            raise AttributeError(
                "Saving a checkpoint is only possible if a model is attached to the Trainer. Did you call"
                " `Trainer.save_checkpoint()` before calling `Trainer.{fit,validate,test,predict}`?"
            )
        with self.profiler.profile("save_checkpoint"):
            # the connector assembles the checkpoint dict; the strategy owns the actual write
            checkpoint = self._checkpoint_connector.dump_checkpoint(weights_only)
            self.strategy.save_checkpoint(checkpoint, filepath, storage_options=storage_options)
            # all processes wait until the checkpoint has been written
            self.strategy.barrier("Trainer.save_checkpoint")
"""
State properties
"""
    @property
    def interrupted(self) -> bool:
        """Whether the trainer's status is ``INTERRUPTED``."""
        return self.state.status == TrainerStatus.INTERRUPTED
    @property
    def training(self) -> bool:
        """Whether the trainer is currently in the ``TRAINING`` stage."""
        return self.state.stage == RunningStage.TRAINING
    @training.setter
    def training(self, val: bool) -> None:
        # setting False only clears the stage if we are currently in it
        if val:
            self.state.stage = RunningStage.TRAINING
        elif self.training:
            self.state.stage = None
    @property
    def testing(self) -> bool:
        """Whether the trainer is currently in the ``TESTING`` stage."""
        return self.state.stage == RunningStage.TESTING
    @testing.setter
    def testing(self, val: bool) -> None:
        # setting False only clears the stage if we are currently in it
        if val:
            self.state.stage = RunningStage.TESTING
        elif self.testing:
            self.state.stage = None
    @property
    def predicting(self) -> bool:
        """Whether the trainer is currently in the ``PREDICTING`` stage."""
        return self.state.stage == RunningStage.PREDICTING
    @predicting.setter
    def predicting(self, val: bool) -> None:
        # setting False only clears the stage if we are currently in it
        if val:
            self.state.stage = RunningStage.PREDICTING
        elif self.predicting:
            self.state.stage = None
    @property
    def validating(self) -> bool:
        """Whether the trainer is currently in the ``VALIDATING`` stage."""
        return self.state.stage == RunningStage.VALIDATING
    @validating.setter
    def validating(self, val: bool) -> None:
        # setting False only clears the stage if we are currently in it
        if val:
            self.state.stage = RunningStage.VALIDATING
        elif self.validating:
            self.state.stage = None
    @property
    def evaluating(self) -> bool:
        """Whether the current stage reports itself as an evaluation stage."""
        return self.state.stage is not None and self.state.stage.evaluating
    @property
    def sanity_checking(self) -> bool:
        """Whether sanity checking is running.
        Useful to disable some hooks, logging or callbacks during the sanity checking.
        """
        return self.state.stage == RunningStage.SANITY_CHECKING
    @sanity_checking.setter
    def sanity_checking(self, val: bool) -> None:
        # setting False only clears the stage if we are currently in it
        if val:
            self.state.stage = RunningStage.SANITY_CHECKING
        elif self.sanity_checking:
            self.state.stage = None
    @property
    def received_sigterm(self) -> bool:
        """Whether a ``signal.SIGTERM`` signal was received.
        For example, this can be checked to exit gracefully.
        """
        return self._signal_connector.received_sigterm
"""
Loop properties
"""
@property
def global_step(self) -> int:
"""The number of optimizer steps taken (does not reset each epoch).
This includes multiple optimizers (if enabled).
"""
return self.fit_loop.epoch_loop.global_step
@property
def current_epoch(self) -> int:
"""The current epoch, updated after the epoch end hooks are run."""
return self.fit_loop.epoch_progress.current.completed
@property
def max_epochs(self) -> Optional[int]:
return self.fit_loop.max_epochs
@property
def min_epochs(self) -> Optional[int]:
return self.fit_loop.min_epochs
@property
def max_steps(self) -> int:
return self.fit_loop.max_steps
@property
def min_steps(self) -> Optional[int]:
return self.fit_loop.min_steps
@property
def is_last_batch(self) -> bool:
"""Whether trainer is executing the last batch."""
return self.fit_loop.epoch_loop.batch_progress.is_last_batch
@property
def train_dataloader(self) -> Optional[TRAIN_DATALOADERS]:
"""The training dataloader(s) used during ``trainer.fit()``."""
if (combined_loader := self.fit_loop._combined_loader) is not None:
return combined_loader.iterables
return None
@property
def val_dataloaders(self) -> Optional[EVAL_DATALOADERS]:
"""The validation dataloader(s) used during ``trainer.fit()`` or ``trainer.validate()``."""
if (combined_loader := self.fit_loop.epoch_loop.val_loop._combined_loader) is not None or (
combined_loader := self.validate_loop._combined_loader
) is not None:
return combined_loader.iterables
return None
@property
def test_dataloaders(self) -> Optional[EVAL_DATALOADERS]:
"""The test dataloader(s) used during ``trainer.test()``."""
if (combined_loader := self.test_loop._combined_loader) is not None:
return combined_loader.iterables
return None
@property
def predict_dataloaders(self) -> Optional[EVAL_DATALOADERS]:
"""The prediction dataloader(s) used during ``trainer.predict()``."""
if (combined_loader := self.predict_loop._combined_loader) is not None:
return combined_loader.iterables
return None
@property
def num_training_batches(self) -> Union[int, float]:
"""The number of training batches that will be used during ``trainer.fit()``."""
return self.fit_loop.max_batches
@property
def num_sanity_val_batches(self) -> list[Union[int, float]]:
"""The number of validation batches that will be used during the sanity-checking part of ``trainer.fit()``."""
max_batches = self.fit_loop.epoch_loop.val_loop.max_batches
# re-compute the `min` in case this is called outside the sanity-checking stage
return [min(self.num_sanity_val_steps, batches) for batches in max_batches]
@property
def num_val_batches(self) -> list[Union[int, float]]:
"""The number of validation batches that will be used during ``trainer.fit()`` or ``trainer.validate()``."""
if self.state.fn == TrainerFn.VALIDATING:
return self.validate_loop.max_batches
# if no trainer.fn is set, assume fit's validation
# use the protected access, because it shouldn't return the sanity_val batches
return self.fit_loop.epoch_loop.val_loop._max_batches
@property
def num_test_batches(self) -> list[Union[int, float]]:
"""The number of test batches that will be used during ``trainer.test()``."""
return self.test_loop.max_batches
@property
def num_predict_batches(self) -> list[Union[int, float]]:
"""The number of prediction batches that will be used during ``trainer.predict()``."""
return self.predict_loop.max_batches
@property
def _evaluation_loop(self) -> _EvaluationLoop:
if self.state.fn == TrainerFn.FITTING:
return self.fit_loop.epoch_loop.val_loop
if self.state.fn == TrainerFn.VALIDATING:
return self.validate_loop
if self.state.fn == TrainerFn.TESTING:
return self.test_loop
raise RuntimeError("The `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope")
@property
def _active_loop(self) -> Optional[Union[_FitLoop, _EvaluationLoop, _PredictionLoop]]:
if self.training:
return self.fit_loop
if self.sanity_checking or self.evaluating:
return self._evaluation_loop
if self.predicting:
return self.predict_loop
return None
"""
Logging properties
"""
@property
def logger(self) -> Optional[Logger]:
"""The first :class:`~lightning.pytorch.loggers.logger.Logger` being used."""
return self.loggers[0] if len(self.loggers) > 0 else None
@logger.setter
def logger(self, logger: Optional[Logger]) -> None:
if not logger:
self.loggers = []
else:
self.loggers = [logger]
@property
def loggers(self) -> list[Logger]:
"""The list of :class:`~lightning.pytorch.loggers.logger.Logger` used.
.. code-block:: python
for logger in trainer.loggers:
logger.log_metrics({"foo": 1.0})
"""
return self._loggers
@loggers.setter
def loggers(self, loggers: Optional[list[Logger]]) -> None:
self._loggers = loggers if loggers else []
@property
def callback_metrics(self) -> _OUT_DICT:
"""The metrics available to callbacks.
.. code-block:: python
def training_step(self, batch, batch_idx):
self.log("a_val", 2.0)
callback_metrics = trainer.callback_metrics
assert callback_metrics["a_val"] == 2.0
"""
return self._logger_connector.callback_metrics
@property
def logged_metrics(self) -> _OUT_DICT:
"""The metrics sent to the loggers.
This includes metrics logged via :meth:`~lightning.pytorch.core.LightningModule.log` with the
:paramref:`~lightning.pytorch.core.LightningModule.log.logger` argument set.
"""
return self._logger_connector.logged_metrics
@property
def progress_bar_metrics(self) -> _PBAR_DICT:
"""The metrics sent to the progress bar.
This includes metrics logged via :meth:`~lightning.pytorch.core.LightningModule.log` with the
:paramref:`~lightning.pytorch.core.LightningModule.log.prog_bar` argument set.
"""
return self._logger_connector.progress_bar_metrics
@property
def _results(self) -> Optional[_ResultCollection]:
active_loop = self._active_loop
if active_loop is not None:
return active_loop._results
return None
"""
Other
"""
@property
def estimated_stepping_batches(self) -> Union[int, float]:
r"""The estimated number of batches that will ``optimizer.step()`` during training.
This accounts for gradient accumulation and the current trainer configuration. This might be used when setting
up your training dataloader, if it hasn't been set up already.
.. code-block:: python
def configure_optimizers(self):
optimizer = ...
stepping_batches = self.trainer.estimated_stepping_batches
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3, total_steps=stepping_batches)
return [optimizer], [scheduler]
Raises:
MisconfigurationException:
If estimated stepping batches cannot be computed due to different `accumulate_grad_batches`
at different epochs.
"""
# infinite training
if self.max_epochs == -1:
return float("inf") if self.max_steps == -1 else self.max_steps
if self.train_dataloader is None:
rank_zero_info("Loading `train_dataloader` to estimate number of stepping batches.")
self.fit_loop.setup_data()
total_batches = self.num_training_batches
# iterable dataset
if total_batches == float("inf"):
return self.max_steps
assert self.max_epochs is not None
max_estimated_steps = math.ceil(total_batches / self.accumulate_grad_batches) * max(self.max_epochs, 1)
max_estimated_steps = min(max_estimated_steps, self.max_steps) if self.max_steps != -1 else max_estimated_steps
return max_estimated_steps
|
Trainer
|
python
|
openai__openai-python
|
src/openai/resources/beta/realtime/realtime.py
|
{
"start": 7156,
"end": 7633
}
|
class ____:
def __init__(self, realtime: AsyncRealtime) -> None:
self._realtime = realtime
@cached_property
def sessions(self) -> AsyncSessionsWithRawResponse:
return AsyncSessionsWithRawResponse(self._realtime.sessions)
@cached_property
def transcription_sessions(self) -> AsyncTranscriptionSessionsWithRawResponse:
return AsyncTranscriptionSessionsWithRawResponse(self._realtime.transcription_sessions)
|
AsyncRealtimeWithRawResponse
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/angellist/provider.py
|
{
"start": 442,
"end": 930
}
|
class ____(OAuth2Provider):
id = "angellist"
name = "AngelList"
account_class = AngelListAccount
oauth2_adapter_class = AngelListOAuth2Adapter
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return dict(
email=data.get("email"),
username=data.get("angellist_url").split("/")[-1],
name=data.get("name"),
)
provider_classes = [AngelListProvider]
|
AngelListProvider
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 18314,
"end": 18610
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (
GrapheneMessageEvent,
GrapheneDisplayableEvent,
GrapheneStepEvent,
GrapheneMarkerEvent,
GrapheneErrorEvent,
)
name = "EngineEvent"
|
GrapheneEngineEvent
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-twilio/dagster_twilio/resources.py
|
{
"start": 266,
"end": 1371
}
|
class ____(ConfigurableResource):
"""This resource is for connecting to Twilio."""
account_sid: str = Field(
description=(
"Twilio Account SID, created with yout Twilio account. This can be found on your Twilio"
" dashboard, see"
" https://www.twilio.com/blog/twilio-access-tokens-python"
),
)
auth_token: str = Field(
description=(
"Twilio Authentication Token, created with yout Twilio account. This can be found on"
" your Twilio dashboard, see https://www.twilio.com/blog/twilio-access-tokens-python"
),
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def create_client(self) -> Client:
return Client(self.account_sid, self.auth_token)
@dagster_maintained_resource
@resource(
config_schema=TwilioResource.to_config_schema(),
description="This resource is for connecting to Twilio",
)
def twilio_resource(context: InitResourceContext) -> Client:
return TwilioResource.from_resource_context(context).create_client()
|
TwilioResource
|
python
|
kamyu104__LeetCode-Solutions
|
Python/largest-perimeter-triangle.py
|
{
"start": 33,
"end": 329
}
|
class ____(object):
def largestPerimeter(self, A):
"""
:type A: List[int]
:rtype: int
"""
A.sort()
for i in reversed(xrange(len(A) - 2)):
if A[i] + A[i+1] > A[i+2]:
return A[i] + A[i+1] + A[i+2]
return 0
|
Solution
|
python
|
donnemartin__system-design-primer
|
solutions/object_oriented_design/online_chat/online_chat.py
|
{
"start": 2353,
"end": 2443
}
|
class ____(Enum):
UNREAD = 0
READ = 1
ACCEPTED = 2
REJECTED = 3
|
RequestStatus
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/cpp_utils.py
|
{
"start": 6222,
"end": 9314
}
|
class ____(_CppPrinter):
def doprint(self, expr, *, simplify: bool = True, p=True):
# TODO: why are people passing strings to the printer here :think:
if simplify and isinstance(expr, sympy.Expr) and hasattr(V.graph, "sizevars"):
expr = V.graph.sizevars.simplify(expr)
return super().doprint(expr)
def parenthesize(self, item: sympy.Expr, level: int, strict: bool = False) -> str:
if isinstance(item, sympy.Mod):
# use parenthesis to enforce precedence.
# in sympy 1.13.3, -2*Mod(x,y) becomes -2*x%y, which is wrong.
return f"({self._print(item)})"
else:
return super().parenthesize(item, level, strict)
# A function to print, useful for printing sympy symbols.
cexpr = CppPrinter().doprint
def cexpr_index(index):
return f"static_cast<{INDEX_TYPE}>({cexpr(index)})"
def value_to_cpp(value, cpp_type):
if value == float("-inf"):
return f"-std::numeric_limits<{cpp_type}>::infinity()"
elif value == float("inf"):
return f"std::numeric_limits<{cpp_type}>::infinity()"
elif isinstance(value, bool):
return f"static_cast<{cpp_type}>({str(value).lower()})"
elif math.isnan(value):
return f"std::numeric_limits<{cpp_type}>::quiet_NaN()"
else:
return f"static_cast<{cpp_type}>({repr(value)})"
def rewrite_index_for_function(
localize_buffer_handler: "LocalizeBufferHandler",
index: sympy.Expr,
global_buf_name: str,
):
# Local buffer at the inner dimensions
snode = V.graph.scheduler.name_to_buf[global_buf_name].defining_op
assert snode is not None
local_buf = localize_buffer_handler.global_to_local[global_buf_name]
scheduler_nodes = snode.get_nodes()
_, (group, reduction_group) = max(
scheduler_nodes, key=lambda x: int(x.is_reduction())
).group
call_ranges = tuple(group) + tuple(reduction_group)
indices_to_keep = [
f"x{len(call_ranges) - (idx + 1)}"
for idx in range(len(local_buf.get_layout().size))
]
sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name) # type: ignore[attr-defined]
replacements = {}
for x in sorted_symbols:
if x.name.startswith("x") and x.name not in indices_to_keep: # type: ignore[attr-defined]
# Only keep index used by local buffer
replacements[x] = sympy.core.numbers.Zero()
index = sympy_subs(index, replacements) # type: ignore[arg-type]
return index
def rewrite_index_for_nodes(
localize_buffer_handler: "LocalizeBufferHandler",
index: sympy.Expr,
global_buf_name: str,
):
used_vars = OrderedSet(
s for s in index.free_symbols if symbol_is_type(s, SymT.INDEX)
)
index_vars = []
local_buf = localize_buffer_handler.global_to_local[global_buf_name]
for i in range(len(local_buf.get_size())):
var = sympy_index_symbol_with_prefix(SymT.INDEX, i)
index_vars.append(var if var in used_vars else 0)
index = local_buf.get_layout().make_indexer()(index_vars)
return index
|
CppPrinter
|
python
|
astropy__astropy
|
astropy/units/tests/test_quantity_array_methods.py
|
{
"start": 2762,
"end": 5545
}
|
class ____:
"""Test different ndarray methods that alter the array shape
tests: reshape, squeeze, ravel, flatten, transpose, swapaxes
"""
def test_reshape(self):
q = np.arange(6.0) * u.m
q_reshape = q.reshape(3, 2)
assert isinstance(q_reshape, u.Quantity)
assert q_reshape.unit == q.unit
assert np.all(q_reshape.value == q.value.reshape(3, 2))
def test_squeeze(self):
q = np.arange(6.0).reshape(6, 1) * u.m
q_squeeze = q.squeeze()
assert isinstance(q_squeeze, u.Quantity)
assert q_squeeze.unit == q.unit
assert np.all(q_squeeze.value == q.value.squeeze())
def test_ravel(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_ravel = q.ravel()
assert isinstance(q_ravel, u.Quantity)
assert q_ravel.unit == q.unit
assert np.all(q_ravel.value == q.value.ravel())
def test_flatten(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_flatten = q.flatten()
assert isinstance(q_flatten, u.Quantity)
assert q_flatten.unit == q.unit
assert np.all(q_flatten.value == q.value.flatten())
def test_transpose(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_transpose = q.transpose()
assert isinstance(q_transpose, u.Quantity)
assert q_transpose.unit == q.unit
assert np.all(q_transpose.value == q.value.transpose())
def test_swapaxes(self):
q = np.arange(6.0).reshape(3, 1, 2) * u.m
q_swapaxes = q.swapaxes(0, 2)
assert isinstance(q_swapaxes, u.Quantity)
assert q_swapaxes.unit == q.unit
assert np.all(q_swapaxes.value == q.value.swapaxes(0, 2))
def test_flat_attributes(self):
"""While ``flat`` doesn't make a copy, it changes the shape."""
q = np.arange(6.0).reshape(3, 1, 2) * u.m
qf = q.flat
# flat shape is same as before reshaping
assert len(qf) == 6
# see TestQuantityArrayCopy.test_flat for tests of iteration
# and slicing and setting. Here we test the properties and methods to
# match `numpy.ndarray.flatiter`
assert qf.base is q
# testing the indices -- flat and full -- into the array
assert qf.coords == (0, 0, 0) # to start
assert qf.index == 0
# now consume the iterator
endindices = [(qf.index, qf.coords) for x in qf][-2] # next() oversteps
assert endindices[0] == 5
assert endindices[1] == (2, 0, 1) # shape of q - 1
# also check q_flat copies properly
q_flat_copy = qf.copy()
assert all(q_flat_copy == q.flatten())
assert isinstance(q_flat_copy, u.Quantity)
assert not np.may_share_memory(q_flat_copy, q)
|
TestQuantityReshapeFuncs
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/streamplot.py
|
{
"start": 15947,
"end": 17794
}
|
class ____:
"""
Mask to keep track of discrete regions crossed by streamlines.
The resolution of this grid determines the approximate spacing between
trajectories. Streamlines are only allowed to pass through zeroed cells:
When a streamline enters a cell, that cell is set to 1, and no new
streamlines are allowed to enter.
"""
def __init__(self, density):
try:
self.nx, self.ny = (30 * np.broadcast_to(density, 2)).astype(int)
except ValueError as err:
raise ValueError("'density' must be a scalar or be of length "
"2") from err
if self.nx < 0 or self.ny < 0:
raise ValueError("'density' must be positive")
self._mask = np.zeros((self.ny, self.nx))
self.shape = self._mask.shape
self._current_xy = None
def __getitem__(self, args):
return self._mask[args]
def _start_trajectory(self, xm, ym, broken_streamlines=True):
"""Start recording streamline trajectory"""
self._traj = []
self._update_trajectory(xm, ym, broken_streamlines)
def _undo_trajectory(self):
"""Remove current trajectory from mask"""
for t in self._traj:
self._mask[t] = 0
def _update_trajectory(self, xm, ym, broken_streamlines=True):
"""
Update current trajectory position in mask.
If the new position has already been filled, raise `InvalidIndexError`.
"""
if self._current_xy != (xm, ym):
if self[ym, xm] == 0:
self._traj.append((ym, xm))
self._mask[ym, xm] = 1
self._current_xy = (xm, ym)
else:
if broken_streamlines:
raise InvalidIndexError
else:
pass
|
StreamMask
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_trace_item_attributes.py
|
{
"start": 701,
"end": 1636
}
|
class ____(APITestCase, SnubaTestCase):
feature_flags: dict[str, bool]
item_type: SupportedTraceItemType
viewname = "sentry-api-0-organization-trace-item-attributes"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def do_request(self, query=None, features=None, **kwargs):
if query is None:
query = {}
if "itemType" not in query:
query["itemType"] = self.item_type.value
if "attributeType" not in query:
query["attributeType"] = "string"
if features is None:
features = self.feature_flags
with self.feature(features):
url = reverse(
self.viewname,
kwargs={"organization_id_or_slug": self.organization.slug},
)
return self.client.get(url, query, format="json", **kwargs)
|
OrganizationTraceItemAttributesEndpointTestBase
|
python
|
Netflix__metaflow
|
metaflow/plugins/aws/batch/batch_client.py
|
{
"start": 24477,
"end": 25468
}
|
class ____(object):
def __init__(self, delta_in_secs=1, num_tries=20):
self.delta_in_secs = delta_in_secs
self.num_tries = num_tries
self._now = None
self._reset()
def _reset(self):
self._tries_left = self.num_tries
self._wait = self.delta_in_secs
def __call__(self, func):
def wrapped(*args, **kwargs):
now = time.time()
if self._now is None or (now - self._now > self._wait):
self._now = now
try:
func(*args, **kwargs)
self._reset()
except TriableException as ex:
self._tries_left -= 1
if self._tries_left == 0:
raise ex.ex
self._wait = (self.delta_in_secs * 1.2) ** (
self.num_tries - self._tries_left
) + random.randint(0, 3 * self.delta_in_secs)
return wrapped
|
Throttle
|
python
|
keras-team__keras
|
keras/src/optimizers/schedules/learning_rate_schedule_test.py
|
{
"start": 4595,
"end": 6237
}
|
class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.PolynomialDecay(
initial_learning_rate=0.1,
decay_steps=100,
end_learning_rate=0.005,
power=1.0,
cycle=False,
name="my_ld",
)
)
def test_halfway(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_end(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_halfway_with_end(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end_with_cycle(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, cycle=True)
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
|
LinearDecayTest
|
python
|
huggingface__transformers
|
src/transformers/models/aria/modeling_aria.py
|
{
"start": 24530,
"end": 25343
}
|
class ____(PreTrainedModel):
config: AriaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["AriaDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (dynamic slicing)
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": AriaTextDecoderLayer,
"attentions": AriaTextAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaProjector):
init.trunc_normal_(module.query, std=self.config.initializer_range)
|
AriaPreTrainedModel
|
python
|
joblib__joblib
|
joblib/parallel.py
|
{
"start": 26711,
"end": 37786
}
|
class ____(object):
"""Callback to keep track of completed results and schedule the next tasks.
This callable is executed by the parent process whenever a worker process
has completed a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
It is assumed that this callback will always be triggered by the backend
right after the end of a task, in case of success as well as in case of
failure.
"""
##########################################################################
# METHODS CALLED BY THE MAIN THREAD #
##########################################################################
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
self.parallel_call_id = parallel._call_id
self._completion_timeout_counter = None
# Internals to keep track of the status and outcome of the task.
# Used to hold a reference to the future-like object returned by the
# backend after launching this task
# This will be set later when calling `register_job`, as it is only
# created once the task has been submitted.
self.job = None
if not parallel._backend.supports_retrieve_callback:
# The status is only used for asynchronous result retrieval in the
# callback.
self.status = None
else:
# The initial status for the job is TASK_PENDING.
# Once it is done, it will be either TASK_DONE, or TASK_ERROR.
self.status = TASK_PENDING
def register_job(self, job):
"""Register the object returned by `submit`."""
self.job = job
def get_result(self, timeout):
"""Returns the raw result of the task that was submitted.
If the task raised an exception rather than returning, this same
exception will be raised instead.
If the backend supports the retrieval callback, it is assumed that this
method is only called after the result has been registered. It is
ensured by checking that `self.status(timeout)` does not return
TASK_PENDING. In this case, `get_result` directly returns the
registered result (or raise the registered exception).
For other backends, there are no such assumptions, but `get_result`
still needs to synchronously retrieve the result before it can
return it or raise. It will block at most `self.timeout` seconds
waiting for retrieval to complete, after that it raises a TimeoutError.
"""
backend = self.parallel._backend
if backend.supports_retrieve_callback:
# We assume that the result has already been retrieved by the
# callback thread, and is stored internally. It's just waiting to
# be returned.
return self._return_or_raise()
# For other backends, the main thread needs to run the retrieval step.
try:
result = backend.retrieve_result(self.job, timeout=timeout)
outcome = dict(result=result, status=TASK_DONE)
except BaseException as e:
outcome = dict(result=e, status=TASK_ERROR)
self._register_outcome(outcome)
return self._return_or_raise()
def _return_or_raise(self):
try:
if self.status == TASK_ERROR:
raise self._result
return self._result
finally:
del self._result
def get_status(self, timeout):
"""Get the status of the task.
This function also checks if the timeout has been reached and register
the TimeoutError outcome when it is the case.
"""
if timeout is None or self.status != TASK_PENDING:
return self.status
# The computation are running and the status is pending.
# Check that we did not wait for this jobs more than `timeout`.
now = time.time()
if self._completion_timeout_counter is None:
self._completion_timeout_counter = now
if (now - self._completion_timeout_counter) > timeout:
outcome = dict(result=TimeoutError(), status=TASK_ERROR)
self._register_outcome(outcome)
return self.status
##########################################################################
# METHODS CALLED BY CALLBACK THREADS #
##########################################################################
def __call__(self, *args, **kwargs):
"""Function called by the callback thread after a job is completed."""
# If the backend doesn't support callback retrievals, the next batch of
# tasks is dispatched regardless. The result will be retrieved by the
# main thread when calling `get_result`.
if not self.parallel._backend.supports_retrieve_callback:
self._dispatch_new()
return
# If the backend supports retrieving the result in the callback, it
# registers the task outcome (TASK_ERROR or TASK_DONE), and schedules
# the next batch if needed.
with self.parallel._lock:
# Edge case where while the task was processing, the `parallel`
# instance has been reset and a new call has been issued, but the
# worker managed to complete the task and trigger this callback
# call just before being aborted by the reset.
if self.parallel._call_id != self.parallel_call_id:
return
# When aborting, stop as fast as possible and do not retrieve the
# result as it won't be returned by the Parallel call.
if self.parallel._aborting:
return
# Retrieves the result of the task in the main process and dispatch
# a new batch if needed.
job_succeeded = self._retrieve_result(*args, **kwargs)
if job_succeeded:
self._dispatch_new()
def _dispatch_new(self):
"""Schedule the next batch of tasks to be processed."""
# This steps ensure that auto-batching works as expected.
this_batch_duration = time.time() - self.dispatch_timestamp
self.parallel._backend.batch_completed(self.batch_size, this_batch_duration)
# Schedule the next batch of tasks.
with self.parallel._lock:
self.parallel.n_completed_tasks += self.batch_size
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
def _retrieve_result(self, out):
"""Fetch and register the outcome of a task.
Return True if the task succeeded, False otherwise.
This function is only called by backends that support retrieving
the task result in the callback thread.
"""
try:
result = self.parallel._backend.retrieve_result_callback(out)
outcome = dict(status=TASK_DONE, result=result)
except BaseException as e:
# Avoid keeping references to parallel in the error.
e.__traceback__ = None
outcome = dict(result=e, status=TASK_ERROR)
self._register_outcome(outcome)
return outcome["status"] != TASK_ERROR
##########################################################################
# This method can be called either in the main thread #
# or in the callback thread. #
##########################################################################
def _register_outcome(self, outcome):
"""Register the outcome of a task.
This method can be called only once, future calls will be ignored.
"""
# Covers the edge case where the main thread tries to register a
# `TimeoutError` while the callback thread tries to register a result
# at the same time.
with self.parallel._lock:
if self.status not in (TASK_PENDING, None):
return
self.status = outcome["status"]
self._result = outcome["result"]
# Once the result and the status are extracted, the last reference to
# the job can be deleted.
self.job = None
# As soon as an error as been spotted, early stopping flags are sent to
# the `parallel` instance.
if self.status == TASK_ERROR:
self.parallel._exception = True
self.parallel._aborting = True
if self.parallel.return_ordered:
return
with self.parallel._lock:
# For `return_as=generator_unordered`, append the job to the queue
# in the order of completion instead of submission.
self.parallel._jobs.append(self)
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
"""Register a new Parallel backend factory.
The new backend can then be selected by passing its name as the backend
argument to the :class:`~Parallel` class. Moreover, the default backend can
be overwritten globally by setting make_default=True.
The factory can be any callable that takes no argument and return an
instance of ``ParallelBackendBase``.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
BACKENDS[name] = factory
if make_default:
global DEFAULT_BACKEND
DEFAULT_BACKEND = name
def effective_n_jobs(n_jobs=-1):
"""Determine the number of jobs that can actually run in parallel
n_jobs is the number of workers requested by the callers. Passing n_jobs=-1
means requesting all available workers for instance matching the number of
CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that can
actually perform work concurrently with the currently enabled default
backend. The primary use case is to make it possible for the caller to know
in how many chunks to slice the work.
In general working on larger data chunks is more efficient (less scheduling
overhead and better use of CPU cache prefetching heuristics) as long as all
the workers have enough work to do.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
if n_jobs == 1:
return 1
backend, backend_n_jobs = get_active_backend()
if n_jobs is None:
n_jobs = backend_n_jobs
return backend.effective_n_jobs(n_jobs=n_jobs)
###############################################################################
|
BatchCompletionCallBack
|
python
|
Pylons__pyramid
|
tests/test_config/test_assets.py
|
{
"start": 35891,
"end": 36063
}
|
class ____:
def __init__(self):
self.registered = []
def register_loader_type(self, typ, inst):
self.registered.append((typ, inst))
|
DummyPkgResources
|
python
|
django__django
|
tests/model_forms/models.py
|
{
"start": 10543,
"end": 10717
}
|
class ____(models.Model):
name = models.CharField(max_length=50)
def __iter__(self):
yield from range(5)
def __str__(self):
return self.name
|
Color
|
python
|
rq__rq
|
tests/test_callbacks.py
|
{
"start": 400,
"end": 4402
}
|
class ____(RQTestCase):
def test_enqueue_with_success_callback(self):
"""Test enqueue* methods with on_success"""
queue = Queue(connection=self.connection)
# Only functions and builtins are supported as callback
with self.assertRaises(ValueError):
queue.enqueue(say_hello, on_success=Job.fetch)
job = queue.enqueue(say_hello, on_success=print)
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.success_callback, print)
job = queue.enqueue_in(timedelta(seconds=10), say_hello, on_success=print)
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.success_callback, print)
# test string callbacks
job = queue.enqueue(say_hello, on_success=Callback('print'))
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.success_callback, print)
job = queue.enqueue_in(timedelta(seconds=10), say_hello, on_success=Callback('print'))
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.success_callback, print)
def test_enqueue_with_failure_callback(self):
"""queue.enqueue* methods with on_failure is persisted correctly"""
queue = Queue(connection=self.connection)
# Only functions and builtins are supported as callback
with self.assertRaises(ValueError):
queue.enqueue(say_hello, on_failure=Job.fetch)
job = queue.enqueue(say_hello, on_failure=print)
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.failure_callback, print)
job = queue.enqueue_in(timedelta(seconds=10), say_hello, on_failure=print)
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.failure_callback, print)
# test string callbacks
job = queue.enqueue(say_hello, on_failure=Callback('print'))
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.failure_callback, print)
job = queue.enqueue_in(timedelta(seconds=10), say_hello, on_failure=Callback('print'))
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.failure_callback, print)
def test_enqueue_with_stopped_callback(self):
"""queue.enqueue* methods with on_stopped is persisted correctly"""
queue = Queue(connection=self.connection)
# Only functions and builtins are supported as callback
with self.assertRaises(ValueError):
queue.enqueue(say_hello, on_stopped=Job.fetch)
job = queue.enqueue(long_process, on_stopped=print)
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.stopped_callback, print)
job = queue.enqueue_in(timedelta(seconds=10), long_process, on_stopped=print)
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.stopped_callback, print)
# test string callbacks
job = queue.enqueue(long_process, on_stopped=Callback('print'))
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.stopped_callback, print)
job = queue.enqueue_in(timedelta(seconds=10), long_process, on_stopped=Callback('print'))
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.stopped_callback, print)
def test_enqueue_many_callback(self):
queue = Queue('example', connection=self.connection)
job_data = Queue.prepare_data(
func=say_hello, on_success=print, on_failure=save_exception, on_stopped=save_result_if_not_stopped
)
jobs = queue.enqueue_many([job_data])
assert jobs[0].success_callback == job_data.on_success
assert jobs[0].failure_callback == job_data.on_failure
assert jobs[0].stopped_callback == job_data.on_stopped
|
QueueCallbackTestCase
|
python
|
astropy__astropy
|
astropy/io/ascii/fastbasic.py
|
{
"start": 8443,
"end": 9243
}
|
class ____(FastBasic):
"""
A faster version of the ordinary :class:`Csv` writer that uses the
optimized C parsing engine. Note that this reader will append empty
field values to the end of any row with not enough columns, while
:class:`FastBasic` simply raises an error.
"""
_format_name = "fast_csv"
_description = "Comma-separated values table using the fast C engine"
_fast = True
fill_extra_cols = True
def __init__(self, **kwargs):
super().__init__({"delimiter": ",", "comment": None}, **kwargs)
def write(self, table, output):
"""
Override the default write method of `FastBasic` to
output masked values as empty fields.
"""
self._write(table, output, {"fill_values": [(core.masked, "")]})
|
FastCsv
|
python
|
neetcode-gh__leetcode
|
python/0371-sum-of-two-integers.py
|
{
"start": 0,
"end": 537
}
|
class ____:
def getSum(self, a: int, b: int) -> int:
def add(a, b):
if not a or not b:
return a or b
return add(a ^ b, (a & b) << 1)
if a * b < 0: # assume a < 0, b > 0
if a > 0:
return self.getSum(b, a)
if add(~a, 1) == b: # -a == b
return 0
if add(~a, 1) < b: # -a < b
return add(~add(add(~a, 1), add(~b, 1)), 1) # -add(-a, -b)
return add(a, b) # a*b >= 0 or (-a) > b > 0
|
Solution
|
python
|
pypa__warehouse
|
tests/unit/packaging/test_views.py
|
{
"start": 17114,
"end": 19631
}
|
class ____:
def test_get_render_form(self, pyramid_request):
project = pretend.stub()
form_obj = pretend.stub()
form_class = pretend.call_recorder(lambda d, **kw: form_obj)
result = views.submit_malware_observation(
project, pyramid_request, _form_class=form_class
)
assert result == {"project": project, "form": form_obj}
assert form_class.calls == [pretend.call(pyramid_request.POST)]
def test_post_invalid_form(self, pyramid_request):
project = pretend.stub()
form_obj = pretend.stub()
form_obj.validate = pretend.call_recorder(lambda: False)
form_class = pretend.call_recorder(lambda d, **kw: form_obj)
pyramid_request.method = "POST"
result = views.submit_malware_observation(
project, pyramid_request, _form_class=form_class
)
assert result == {"project": project, "form": form_obj}
assert form_obj.validate.calls == [pretend.call()]
def test_post_valid_form(self, db_request):
user = UserFactory.create()
project = ProjectFactory.create()
form_obj = pretend.stub()
form_obj.inspector_link = pretend.stub(
data=f"https://inspector.pypi.io/project/{project.name}/"
)
form_obj.summary = pretend.stub(data="Bad stuff in here")
form_obj.validate = pretend.call_recorder(lambda: True)
form_class = pretend.call_recorder(lambda d, **kw: form_obj)
db_request.method = "POST"
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: f"/project/{project.name}/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = user
result = views.submit_malware_observation(
project, db_request, _form_class=form_class
)
assert isinstance(result, HTTPMovedPermanently)
assert result.headers["Location"] == f"/project/{project.name}/"
assert form_obj.validate.calls == [pretend.call()]
assert db_request.session.flash.calls == [
pretend.call(
"Your report has been recorded. Thank you for your help.",
queue="success",
)
]
assert db_request.route_path.calls == [
pretend.call("packaging.project", name=project.name)
]
assert len(project.observations) == 1
|
TestProjectSubmitMalwareObservation
|
python
|
django__django
|
django/contrib/gis/gdal/srs.py
|
{
"start": 11724,
"end": 12392
}
|
class ____(GDALBase):
"The coordinate system transformation object."
destructor = capi.destroy_ct
def __init__(self, source, target):
"Initialize on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(
target, SpatialReference
):
raise TypeError("source and target must be of type SpatialReference")
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
|
CoordTransform
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/execution/plan/objects.py
|
{
"start": 2591,
"end": 5050
}
|
class ____(
NamedTuple(
"_StepFailureData",
[
("error", Optional[SerializableErrorInfo]),
("user_failure_data", Optional[UserFailureData]),
("error_source", ErrorSource),
],
)
):
def __new__(cls, error, user_failure_data, error_source=None):
return super().__new__(
cls,
error=truncate_event_error_info(
check.opt_inst_param(error, "error", SerializableErrorInfo)
),
user_failure_data=check.opt_inst_param(
user_failure_data, "user_failure_data", UserFailureData
),
error_source=check.opt_inst_param(
error_source, "error_source", ErrorSource, default=ErrorSource.FRAMEWORK_ERROR
),
)
@property
def error_display_string(self) -> str:
"""Creates a display string that hides framework frames if the error arose in user code."""
from dagster._core.errors import DagsterRedactedUserCodeError
if not self.error:
return ""
if self.error_source == ErrorSource.USER_CODE_ERROR:
# For a redacted error, just return the redacted message without any
# internal user code error.
if self.error.cls_name == DagsterRedactedUserCodeError.__name__:
return self.error.to_string()
user_code_error = self.error.cause
check.invariant(
user_code_error,
"User code error is missing cause. User code errors are expected to have a"
" causes, which are the errors thrown from user code.",
)
return (
self.error.message.strip() + ":\n\n" + check.not_none(user_code_error).to_string()
)
else:
return self.error.to_string()
def step_failure_event_from_exc_info(
step_context: "StepExecutionContext",
exc_info: ExcInfo,
user_failure_data: Optional[UserFailureData] = None,
error_source: Optional[ErrorSource] = None,
):
from dagster._core.events import DagsterEvent
return DagsterEvent.step_failure_event(
step_context=step_context,
step_failure_data=StepFailureData(
error=serializable_error_info_from_exc_info(exc_info),
user_failure_data=user_failure_data,
error_source=error_source,
),
)
@whitelist_for_serdes
|
StepFailureData
|
python
|
huggingface__transformers
|
tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
|
{
"start": 2349,
"end": 19314
}
|
class ____:
supports_sdpa = False
def get_encoder_decoder_model(self, config, decoder_config):
pass
def prepare_config_and_inputs(self):
pass
def get_pretrained_model_and_inputs(self):
pass
def check_encoder_decoder_model_from_pretrained_configs(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
self.assertTrue(encoder_decoder_config.decoder.is_decoder)
enc_dec_model = VisionEncoderDecoderModel(encoder_decoder_config)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
def check_encoder_decoder_model(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
self.assertTrue(enc_dec_model.config.decoder.is_decoder)
self.assertTrue(enc_dec_model.config.decoder.add_cross_attention)
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_hidden_states=True,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
encoder_outputs = BaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1])
outputs_encoder_decoder = enc_dec_model(
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
def check_encoder_decoder_model_from_pretrained(
self,
config,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
return_dict,
pixel_values=None,
**kwargs,
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict}
enc_dec_model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_hidden_states=True,
return_dict=True,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
def check_save_and_load(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
enc_dec_model.save_pretrained(tmpdirname)
enc_dec_model = VisionEncoderDecoderModel.from_pretrained(tmpdirname)
enc_dec_model.to(torch_device)
after_outputs = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def check_save_and_load_encoder_decoder_model(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with (
tempfile.TemporaryDirectory() as encoder_tmp_dirname,
tempfile.TemporaryDirectory() as decoder_tmp_dirname,
):
enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname)
enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname)
VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=encoder_tmp_dirname,
decoder_pretrained_model_name_or_path=decoder_tmp_dirname,
)
after_outputs = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def check_encoder_decoder_model_output_attentions(
self,
config,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
labels=None,
pixel_values=None,
**kwargs,
):
# force eager attention to support output attentions
config._attn_implementation = "eager"
decoder_config._attn_implementation = "eager"
# make the decoder inputs a different shape from the encoder inputs to harden the test
decoder_input_ids = decoder_input_ids[:, :-1]
decoder_attention_mask = decoder_attention_mask[:, :-1]
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_attentions=True,
)
encoder_attentions = outputs_encoder_decoder["encoder_attentions"]
self.assertEqual(len(encoder_attentions), config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(encoder_model.config.image_size)
patch_size = to_2tuple(encoder_model.config.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len))
decoder_attentions = outputs_encoder_decoder["decoder_attentions"]
num_decoder_layers = (
decoder_config.num_decoder_layers
if hasattr(decoder_config, "num_decoder_layers")
else decoder_config.num_hidden_layers
)
self.assertEqual(len(decoder_attentions), num_decoder_layers)
self.assertEqual(
decoder_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]),
)
cross_attentions = outputs_encoder_decoder["cross_attentions"]
self.assertEqual(len(cross_attentions), num_decoder_layers)
cross_attention_input_seq_len = decoder_input_ids.shape[-1]
self.assertEqual(
cross_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len),
)
def check_encoder_decoder_model_generate(self, config, decoder_config, pixel_values=None, **kwargs):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
# Generate until max length
if hasattr(enc_dec_model.config, "eos_token_id"):
enc_dec_model.config.eos_token_id = None
if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"):
enc_dec_model.config.decoder.eos_token_id = None
if hasattr(enc_dec_model.generation_config, "eos_token_id"):
enc_dec_model.generation_config.eos_token_id = None
enc_dec_model.to(torch_device)
inputs = pixel_values
# Bert does not have a bos token id, so use pad_token_id instead
generated_output = enc_dec_model.generate(
inputs,
decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id,
max_length=enc_dec_model.generation_config.max_length,
)
self.assertEqual(generated_output.shape, (inputs.shape[0],) + (enc_dec_model.generation_config.max_length,))
def test_encoder_decoder_model(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model(**input_ids_dict)
def test_encoder_decoder_model_from_pretrained_configs(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict)
def test_encoder_decoder_model_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False)
def test_encoder_decoder_model_from_pretrained_return_dict(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True)
def test_save_and_load_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_save_and_load(**input_ids_dict)
def test_save_and_load_from_encoder_decoder_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_save_and_load_encoder_decoder_model(**input_ids_dict)
def test_encoder_decoder_model_output_attentions(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_output_attentions(**input_ids_dict)
def test_encoder_decoder_model_generate(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_generate(**input_ids_dict)
def test_training_gradient_checkpointing(self):
inputs_dict = self.prepare_config_and_inputs()
encoder_model, decoder_model = self.get_encoder_decoder_model(
inputs_dict["config"], inputs_dict["decoder_config"]
)
model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
model.to(torch_device)
model.train()
model.gradient_checkpointing_enable()
model.config.decoder_start_token_id = 0
model.config.pad_token_id = 0
model_inputs = {
"pixel_values": inputs_dict["pixel_values"],
"labels": inputs_dict["labels"],
"decoder_input_ids": inputs_dict["decoder_input_ids"],
}
loss = model(**model_inputs).loss
loss.backward()
@slow
def test_real_model_save_load_from_pretrained(self):
model_2, inputs = self.get_pretrained_model_and_inputs()
model_2.to(torch_device)
with torch.no_grad():
outputs = model_2(**inputs)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmp_dirname:
model_2.save_pretrained(tmp_dirname)
model_1 = VisionEncoderDecoderModel.from_pretrained(tmp_dirname)
model_1.to(torch_device)
after_outputs = model_1(**inputs)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
@unittest.skip("TODO Arthur I have to skip for now because I don't understand it")
def test_sdpa_can_dispatch_composite_models(self):
if not self.supports_sdpa:
self.skipTest("SDPA is not supported")
inputs_dict = self.prepare_config_and_inputs()
encoder_config, decoder_config = inputs_dict["config"], inputs_dict["decoder_config"]
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
encoder_config=encoder_config, decoder_config=decoder_config
)
model = VisionEncoderDecoderModel(config=config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = VisionEncoderDecoderModel.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
# see https://github.com/huggingface/transformers/pull/32238
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
encoder_attn = "sdpa" if model.encoder._supports_sdpa else "eager"
decoder_attn = "sdpa" if model.decoder._supports_sdpa else "eager"
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model_sdpa.encoder.config._attn_implementation == encoder_attn)
self.assertTrue(model_sdpa.decoder.config._attn_implementation == decoder_attn)
# Also test that nothing break if we request SDPA explicitly, when both sub-parts support it.
# If the model supports sdpa (i.e. all of sub-models supports it) we'll dispatch safely
# Otherwise we should raise error that SDPA is not supported, as some of the sub-models doesn't support
if encoder_attn == "sdpa" and decoder_attn == "sdpa":
model_sdpa_explicit = VisionEncoderDecoderModel.from_pretrained(tmpdirname, attn_implementation="sdpa")
model_sdpa_explicit = model_sdpa_explicit.eval().to(torch_device)
self.assertTrue(model_sdpa_explicit.config._attn_implementation == "sdpa")
else:
with self.assertRaises(ValueError):
model_sdpa_explicit = VisionEncoderDecoderModel.from_pretrained(
tmpdirname, attn_implementation="sdpa"
)
model_eager = VisionEncoderDecoderModel.from_pretrained(
tmpdirname,
attn_implementation="eager",
)
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.encoder.config._attn_implementation == "eager")
self.assertTrue(model_eager.decoder.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
raise ValueError("The eager model should not have SDPA attention layers")
@require_torch
|
EncoderDecoderMixin
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/operators/actor_pool_map_operator.py
|
{
"start": 24161,
"end": 26398
}
|
class ____:
"""An actor worker for MapOperator."""
def __init__(
self,
ctx: DataContext,
src_fn_name: str,
map_transformer: MapTransformer,
logical_actor_id: str,
actor_location_tracker: ray.actor.ActorHandle[ActorLocationTracker],
):
self.src_fn_name: str = src_fn_name
self._map_transformer = map_transformer
# Initialize the data context for this actor after setting the src_fn_name in order to not
# break __repr__. It's possible that logging setup fails.
DataContext._set_current(ctx)
# Initialize state for this actor.
self._map_transformer.init()
self._logical_actor_id = logical_actor_id
actor_location_tracker.update_actor_location.remote(
self._logical_actor_id, ray.get_runtime_context().get_node_id()
)
def get_location(self) -> NodeIdStr:
return ray.get_runtime_context().get_node_id()
def submit(
self,
data_context: DataContext,
ctx: TaskContext,
*blocks: Block,
slices: Optional[List[BlockSlice]] = None,
**kwargs: Dict[str, Any],
) -> Iterator[Union[Block, List[BlockMetadata]]]:
yield from _map_task(
self._map_transformer,
data_context,
ctx,
*blocks,
slices=slices,
**kwargs,
)
def __repr__(self):
# Use getattr to handle case where __init__ failed before src_fn_name was set.
# This can happen during actor restarts or initialization failures.
return f"MapWorker({getattr(self, 'src_fn_name', '<initializing>')})"
def on_exit(self):
"""Called when the actor is about to exist.
This enables performing cleanup operations via `UDF.__del__`.
Note, this only ensures cleanup is performed when the job exists gracefully.
If the driver or the actor is forcefully killed, `__del__` will not be called.
"""
# `_map_actor_context` is a global variable that references the UDF object.
# Delete it to trigger `UDF.__del__`.
del ray.data._map_actor_context
ray.data._map_actor_context = None
@dataclass
|
_MapWorker
|
python
|
getsentry__sentry
|
src/sentry/models/groupinbox.py
|
{
"start": 4278,
"end": 4455
}
|
class ____(TypedDict):
until: str | None # datetime str
count: int | None
window: int | None
user_count: int | None
user_window: int | None
|
InboxReasonDetails
|
python
|
pytorch__pytorch
|
torch/_inductor/select_algorithm.py
|
{
"start": 95226,
"end": 97860
}
|
class ____(RuntimeError):
pass
@functools.cache
def get_num_workers() -> int:
if "TORCHINDUCTOR_COMPILE_THREADS" in os.environ:
return int(os.environ["TORCHINDUCTOR_COMPILE_THREADS"])
cpu_count = (
len(os.sched_getaffinity(0))
if hasattr(os, "sched_getaffinity")
else os.cpu_count()
)
assert cpu_count
# Divide the number of CPUs by the number of GPUs for distributed workloads
if (
config.is_fbcode()
and torch.cuda.is_available()
and torch.cuda.device_count() > 0
):
cpu_count = cpu_count // torch.cuda.device_count()
return cpu_count
def create_inputs_key(input_nodes) -> str:
return repr([AlgorithmSelectorCache.key_of(x) for x in input_nodes])
def create_precompile_key(
name: str, inputs_key: str, choices: list[ChoiceCaller]
) -> str:
return ":".join(
[
name,
inputs_key,
torch.get_float32_matmul_precision(),
]
+ [choice.kernel_hash_key() for choice in choices]
)
# Args to FeedbackFunctions
# timings: mapping from choices to the benchmark time
# name: name of the op
# input_nodes: list of input ir.py Nodes
# choices: list of choices
# profiled time: Callable that returns a dict mapping from choices to the profiled time
FeedbackFunction = Callable[
[
dict[ChoiceCaller, float],
str,
list[Any],
list[ChoiceCaller],
Callable[[], dict[ChoiceCaller, float]],
],
None,
]
# Args to PreprocessingFunctions
# choices: list of ChoiceCaller objects to preprocess
# Returns: modified list of ChoiceCaller objects
PreprocessingFunction = Callable[[list[ChoiceCaller]], list[ChoiceCaller]]
def filter_choices_by_name_regex(choices: list[ChoiceCaller]) -> list[ChoiceCaller]:
"""Filter choices based on autotune_choice_name_regex config."""
if config.test_configs.autotune_choice_name_regex is not None:
return [
c
for c in choices
if re.search(
config.test_configs.autotune_choice_name_regex,
c.name,
)
]
return choices
def filter_choices_by_desc_regex(choices: list[ChoiceCaller]) -> list[ChoiceCaller]:
"""Filter choices based on autotune_choice_desc_regex config."""
if config.test_configs.autotune_choice_desc_regex is not None:
return [
c
for c in choices
if re.search(
config.test_configs.autotune_choice_desc_regex,
c.description,
)
]
return choices
|
NoValidChoicesError
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-vertex/llama_index/llms/vertex/base.py
|
{
"start": 1652,
"end": 19162
}
|
class ____(FunctionCallingLLM):
"""
Vertext LLM.
Examples:
`pip install llama-index-llms-vertex`
```python
from llama_index.llms.vertex import Vertex
# Set up necessary variables
credentials = {
"project_id": "INSERT_PROJECT_ID",
"api_key": "INSERT_API_KEY",
}
# Create an instance of the Vertex class
llm = Vertex(
model="text-bison",
project=credentials["project_id"],
credentials=credentials,
context_window=4096,
)
# Access the complete method from the instance
response = llm.complete("Hello world!")
print(str(response))
```
"""
model: str = Field(description="The vertex model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
context_window: int = Field(
default=4096, description="The context window to use for sampling."
)
max_tokens: int = Field(description="The maximum number of tokens to generate.")
examples: Optional[Sequence[ChatMessage]] = Field(
description="Example messages for the chat model."
)
max_retries: int = Field(default=10, description="The maximum number of retries.")
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Vertex."
)
iscode: bool = Field(
default=False, description="Flag to determine if current model is a Code Model"
)
_is_gemini: bool = PrivateAttr()
_is_chat_model: bool = PrivateAttr()
_client: Any = PrivateAttr()
_chat_client: Any = PrivateAttr()
_safety_settings: Dict[str, Any] = PrivateAttr()
def __init__(
self,
model: str = "text-bison",
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[Any] = None,
examples: Optional[Sequence[ChatMessage]] = None,
temperature: float = 0.1,
max_tokens: int = 512,
context_window: int = 4096,
max_retries: int = 10,
iscode: bool = False,
safety_settings: Optional[SafetySettingsType] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
init_vertexai(project=project, location=location, credentials=credentials)
safety_settings = safety_settings or {}
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
super().__init__(
temperature=temperature,
context_window=context_window,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
model=model,
examples=examples,
iscode=iscode,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._safety_settings = safety_settings
self._is_gemini = False
self._is_chat_model = False
if model in CHAT_MODELS:
from vertexai.language_models import ChatModel
self._chat_client = ChatModel.from_pretrained(model)
self._is_chat_model = True
elif model in CODE_CHAT_MODELS:
from vertexai.language_models import CodeChatModel
self._chat_client = CodeChatModel.from_pretrained(model)
iscode = True
self._is_chat_model = True
elif model in CODE_MODELS:
from vertexai.language_models import CodeGenerationModel
self._client = CodeGenerationModel.from_pretrained(model)
iscode = True
elif model in TEXT_MODELS:
from vertexai.language_models import TextGenerationModel
self._client = TextGenerationModel.from_pretrained(model)
elif is_gemini_model(model):
self._client = create_gemini_client(model, self._safety_settings)
self._chat_client = self._client
self._is_gemini = True
self._is_chat_model = True
else:
raise (ValueError(f"Model {model} not found, please verify the model name"))
@classmethod
def class_name(cls) -> str:
return "Vertex"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
num_output=self.max_tokens,
context_window=self.context_window,
is_chat_model=self._is_chat_model,
is_function_calling_model=self._is_gemini,
model_name=self.model,
system_role=(
MessageRole.USER if self._is_gemini else MessageRole.SYSTEM
), # Gemini does not support the default: MessageRole.SYSTEM
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"temperature": self.temperature,
"max_output_tokens": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
def _get_content_and_tool_calls(self, response: Any) -> Tuple[str, List]:
tool_calls = []
if response.candidates[0].function_calls:
for tool_call in response.candidates[0].function_calls:
tool_calls.append(tool_call)
try:
content = response.text
except Exception:
content = ""
return content, tool_calls
    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Run a synchronous chat completion.

        The final message is sent as the prompt; all earlier messages are
        passed as chat history.
        """
        # Gemini rejects consecutive messages with the same role, so they are
        # merged first; other models receive the messages untouched.
        merged_messages = (
            merge_neighboring_same_role_messages(messages)
            if self._is_gemini
            else messages
        )
        question = _parse_message(merged_messages[-1], self._is_gemini)
        chat_history = _parse_chat_history(merged_messages[:-1], self._is_gemini)
        chat_params = {**chat_history}
        kwargs = kwargs if kwargs else {}
        # Per-call kwargs override the model-level defaults.
        params = {**self._model_kwargs, **kwargs}
        if self.iscode and "candidate_count" in params:
            raise (ValueError("candidate_count is not supported by the codey model's"))
        # Few-shot examples may only come from the constructor; passing them
        # per call is rejected.
        if self.examples and "examples" not in params:
            chat_params["examples"] = _parse_examples(self.examples)
        elif "examples" in params:
            raise (
                ValueError(
                    "examples are not supported in chat generation pass them as a constructor parameter"
                )
            )
        generation = completion_with_retry(
            client=self._chat_client,
            prompt=question,
            chat=True,
            stream=False,
            is_gemini=self._is_gemini,
            params=chat_params,
            max_retries=self.max_retries,
            **params,
        )
        content, tool_calls = self._get_content_and_tool_calls(generation)
        return ChatResponse(
            message=ChatMessage(
                role=MessageRole.ASSISTANT,
                content=content,
                additional_kwargs={"tool_calls": tool_calls},
            ),
            raw=generation.__dict__,
        )
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
if self.iscode and "candidate_count" in params:
raise (ValueError("candidate_count is not supported by the codey model's"))
completion = completion_with_retry(
self._client,
prompt,
max_retries=self.max_retries,
is_gemini=self._is_gemini,
**params,
)
return CompletionResponse(text=completion.text, raw=completion.__dict__)
    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        """Run a streaming chat completion, yielding cumulative responses."""
        # Gemini rejects consecutive same-role messages; merge them first.
        merged_messages = (
            merge_neighboring_same_role_messages(messages)
            if self._is_gemini
            else messages
        )
        question = _parse_message(merged_messages[-1], self._is_gemini)
        chat_history = _parse_chat_history(merged_messages[:-1], self._is_gemini)
        chat_params = {**chat_history}
        kwargs = kwargs if kwargs else {}
        # Per-call kwargs override the model-level defaults.
        params = {**self._model_kwargs, **kwargs}
        if self.iscode and "candidate_count" in params:
            raise (ValueError("candidate_count is not supported by the codey model's"))
        # Few-shot examples may only come from the constructor.
        if self.examples and "examples" not in params:
            chat_params["examples"] = _parse_examples(self.examples)
        elif "examples" in params:
            raise (
                ValueError(
                    "examples are not supported in chat generation pass them as a constructor parameter"
                )
            )
        response = completion_with_retry(
            client=self._chat_client,
            prompt=question,
            chat=True,
            stream=True,
            is_gemini=self._is_gemini,
            params=chat_params,
            max_retries=self.max_retries,
            **params,
        )
        def gen() -> ChatResponseGen:
            # Accumulate text so each yielded ChatResponse carries the full
            # content so far plus the latest delta.
            content = ""
            role = MessageRole.ASSISTANT
            for r in response:
                content_delta = r.text
                content += content_delta
                yield ChatResponse(
                    message=ChatMessage(role=role, content=content),
                    delta=content_delta,
                    raw=r.__dict__,
                )
        return gen()
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
if "candidate_count" in params:
raise (ValueError("candidate_count is not supported by the streaming"))
completion = completion_with_retry(
client=self._client,
prompt=prompt,
stream=True,
is_gemini=self._is_gemini,
max_retries=self.max_retries,
**params,
)
def gen() -> CompletionResponseGen:
content = ""
for r in completion:
content_delta = r.text
content += content_delta
yield CompletionResponse(
text=content, delta=content_delta, raw=r.__dict__
)
return gen()
    @llm_chat_callback()
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        """Async variant of ``chat``."""
        # Gemini rejects consecutive same-role messages; merge them first.
        merged_messages = (
            merge_neighboring_same_role_messages(messages)
            if self._is_gemini
            else messages
        )
        question = _parse_message(merged_messages[-1], self._is_gemini)
        chat_history = _parse_chat_history(merged_messages[:-1], self._is_gemini)
        chat_params = {**chat_history}
        kwargs = kwargs if kwargs else {}
        params = {**self._model_kwargs, **kwargs}
        if self.iscode and "candidate_count" in params:
            raise (ValueError("candidate_count is not supported by the codey model's"))
        # Few-shot examples may only come from the constructor.
        if self.examples and "examples" not in params:
            chat_params["examples"] = _parse_examples(self.examples)
        elif "examples" in params:
            raise (
                ValueError(
                    "examples are not supported in chat generation pass them as a constructor parameter"
                )
            )
        generation = await acompletion_with_retry(
            client=self._chat_client,
            prompt=question,
            chat=True,
            is_gemini=self._is_gemini,
            params=chat_params,
            max_retries=self.max_retries,
            **params,
        )
        # Workaround for a Vertex AI SDK bug: codey models return an
        # awaitable from the async call, so it must be awaited a second time.
        if self.iscode:
            generation = await generation
        content, tool_calls = self._get_content_and_tool_calls(generation)
        return ChatResponse(
            message=ChatMessage(
                role=MessageRole.ASSISTANT,
                content=content,
                additional_kwargs={"tool_calls": tool_calls},
            ),
            raw=generation.__dict__,
        )
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
if self.iscode and "candidate_count" in params:
raise (ValueError("candidate_count is not supported by the codey model's"))
completion = await acompletion_with_retry(
client=self._client,
prompt=prompt,
max_retries=self.max_retries,
is_gemini=self._is_gemini,
**params,
)
return CompletionResponse(text=completion.text, raw=completion.__dict__)
    @llm_chat_callback()
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        """Async streaming chat is not supported by this integration."""
        # NOTE(review): NotImplementedError would be more idiomatic, but
        # switching the exception type could break callers catching ValueError.
        raise (ValueError("Not Implemented"))
    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        """Async streaming completion is not supported by this integration."""
        # NOTE(review): NotImplementedError would be more idiomatic, but
        # switching the exception type could break callers catching ValueError.
        raise (ValueError("Not Implemented"))
def _prepare_chat_with_tools(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False, # theoretically supported, but not implemented
tool_required: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
"""Prepare the arguments needed to let the LLM chat with tools."""
chat_history = chat_history or []
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
chat_history.append(user_msg)
tool_dicts = []
for tool in tools:
tool_dicts.append(
{
"name": tool.metadata.name,
"description": tool.metadata.description,
"parameters": tool.metadata.get_parameters_dict(),
}
)
tool_config = (
{"tool_config": self._to_function_calling_config(tool_required)}
if self._is_gemini
else {}
)
print("tool_config", tool_config)
return {
"messages": chat_history,
"tools": tool_dicts or None,
**tool_config,
**kwargs,
}
def _to_function_calling_config(self, tool_required: bool) -> ToolConfig:
return ToolConfig(
function_calling_config=ToolConfig.FunctionCallingConfig(
mode=ToolConfig.FunctionCallingConfig.Mode.ANY
if tool_required
else ToolConfig.FunctionCallingConfig.Mode.AUTO,
allowed_function_names=None,
)
)
def _validate_chat_with_tools_response(
self,
response: ChatResponse,
tools: List["BaseTool"],
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> ChatResponse:
"""Validate the response from chat_with_tools."""
if not allow_parallel_tool_calls:
force_single_tool_call(response)
return response
def get_tool_calls_from_response(
self,
response: "ChatResponse",
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
"""Predict and call the tool."""
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
response_dict = tool_call.to_dict()
if "args" not in response_dict or "name" not in response_dict:
raise ValueError("Invalid tool call.")
argument_dict = response_dict["args"]
tool_selections.append(
ToolSelection(
tool_id="None",
tool_name=tool_call.name,
tool_kwargs=argument_dict,
)
)
return tool_selections
|
Vertex
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/vimeo/provider.py
|
{
"start": 262,
"end": 708
}
|
class ____(OAuthProvider):
    """allauth provider definition for Vimeo's OAuth1 API."""

    id = "vimeo"
    name = "Vimeo"
    account_class = VimeoAccount
    oauth_adapter_class = VimeoOAuthAdapter

    def get_default_scope(self):
        # Vimeo needs no extra OAuth scopes by default.
        return []

    def extract_uid(self, data):
        # The numeric Vimeo user id is the stable unique key.
        return data["id"]

    def extract_common_fields(self, data):
        return dict(name=data.get("display_name"), username=data.get("username"))
# Registry hook: allauth discovers providers through this module-level list.
provider_classes = [VimeoProvider]
|
VimeoProvider
|
python
|
apache__airflow
|
providers/microsoft/azure/tests/unit/microsoft/azure/test_utils.py
|
{
"start": 3993,
"end": 9658
}
|
class ____:
    """Unit tests for AzureIdentityCredentialAdapter."""

    @mock.patch(f"{MODULE}.PipelineRequest")
    @mock.patch(f"{MODULE}.BearerTokenCredentialPolicy")
    @mock.patch(f"{MODULE}.DefaultAzureCredential")
    def test_signed_session(self, mock_default_azure_credential, mock_policy, mock_request):
        # The adapter should pull the bearer token off the pipeline request
        # headers and expose it as a signed-session token dict.
        mock_request.return_value.http_request.headers = {"Authorization": "Bearer token"}
        adapter = AzureIdentityCredentialAdapter()
        mock_default_azure_credential.assert_called_once()
        mock_policy.assert_called_once()
        adapter.signed_session()
        assert adapter.token == {"access_token": "token"}

    @mock.patch(f"{MODULE}.PipelineRequest")
    @mock.patch(f"{MODULE}.BearerTokenCredentialPolicy")
    @mock.patch(f"{MODULE}.DefaultAzureCredential")
    def test_init_with_identity(self, mock_default_azure_credential, mock_policy, mock_request):
        # Identity kwargs must be forwarded verbatim to DefaultAzureCredential.
        mock_request.return_value.http_request.headers = {"Authorization": "Bearer token"}
        adapter = AzureIdentityCredentialAdapter(
            managed_identity_client_id="managed_identity_client_id",
            workload_identity_tenant_id="workload_identity_tenant_id",
            additionally_allowed_tenants=["workload_identity_tenant_id"],
        )
        mock_default_azure_credential.assert_called_once_with(
            managed_identity_client_id="managed_identity_client_id",
            workload_identity_tenant_id="workload_identity_tenant_id",
            additionally_allowed_tenants=["workload_identity_tenant_id"],
        )
        mock_policy.assert_called_once()
        adapter.signed_session()
        assert adapter.token == {"access_token": "token"}
@pytest.mark.parametrize(
    ("host", "login", "expected_url"),
    [
        (None, None, "https://None.blob.core.windows.net/"),  # to maintain existing behaviour
        (None, "storage_account", "https://storage_account.blob.core.windows.net/"),
        ("testaccountname.blob.core.windows.net", None, "https://testaccountname.blob.core.windows.net"),
        (
            "testaccountname.blob.core.windows.net",
            "service_principal_id",
            "https://testaccountname.blob.core.windows.net",
        ),
        (
            "https://testaccountname.blob.core.windows.net",
            None,
            "https://testaccountname.blob.core.windows.net",
        ),
        (
            "https://testaccountname.blob.core.windows.net",
            "service_principal_id",
            "https://testaccountname.blob.core.windows.net",
        ),
    ],
)
def test_parse_blob_account_url(host, login, expected_url):
    """Explicit hosts are normalized to https; otherwise login is used as the account name."""
    assert parse_blob_account_url(host, login) == expected_url
def get_airflow_connection(
    conn_id: str,
    host: str = "graph.microsoft.com",
    login: str = "client_id",
    password: str = "client_secret",
    tenant_id: str = "tenant-id",
    azure_tenant_id: str | None = None,
    proxies: dict | None = None,
    scopes: list[str] | None = None,
    api_version: APIVersion | str | None = APIVersion.v1.value,
    authority: str | None = None,
    disable_instance_discovery: bool = False,
):
    """Build an Airflow Connection fixture for Microsoft Graph tests."""
    from airflow.models import Connection

    extra = {
        "api_version": api_version,
        "proxies": proxies or {},
        "verify": False,
        "scopes": scopes or [],
        "authority": authority,
        "disable_instance_discovery": disable_instance_discovery,
    }
    # camelCase tenantId takes precedence when explicitly provided.
    if azure_tenant_id:
        extra["tenantId"] = azure_tenant_id
    else:
        extra["tenant_id"] = tenant_id
    return Connection(
        schema="https",
        conn_id=conn_id,
        conn_type="http",
        host=host,
        port=80,
        login=login,
        password=password,
        extra=extra,
    )
def mock_connection(schema: str | None = None, host: str | None = None):
    """Return a Connection-spec'd MagicMock exposing only schema and host."""
    from airflow.models import Connection

    mocked = MagicMock(spec=Connection)
    mocked.schema = schema
    mocked.host = host
    return mocked
def mock_json_response(status_code, *contents) -> Response:
    """Build a MagicMock httpx Response whose ``.json()`` replays *contents* in order."""
    resp = MagicMock(spec=Response)
    resp.status_code = status_code
    resp.headers = Headers({})
    resp.content = b""
    if not contents:
        resp.json.return_value = None
    else:
        resp.json.side_effect = list(contents)
    return resp
def mock_response(status_code, content: Any = None, headers: dict | None = None) -> Response:
    """Build a MagicMock httpx Response carrying raw *content*; ``.json()`` always fails."""
    resp = MagicMock(spec=Response)
    resp.status_code = status_code
    resp.headers = Headers(headers or {})
    resp.content = content
    # Non-JSON payload: decoding attempts must raise, as httpx would.
    resp.json.side_effect = JSONDecodeError("", "", 0)
    return resp
@contextmanager
def patch_hook(side_effect: Callable = get_airflow_connection):
    """Patch BaseHook connection lookups (sync and, when present, async).

    Yields the list of active mock objects so callers can make assertions.
    """
    from asgiref.sync import sync_to_async
    with ExitStack() as stack:
        # aget_connection only exists on newer Airflow versions; use a None
        # placeholder (filtered out below) when it is absent.
        patches = [
            patch.object(BaseHook, "get_connection", side_effect=side_effect),
            patch.object(BaseHook, "aget_connection", side_effect=sync_to_async(side_effect))
            if hasattr(BaseHook, "aget_connection")
            else None,
        ]
        entered = [stack.enter_context(p) for p in patches if p is not None]
        yield entered  # expose entered mocks to the caller
@contextmanager
def patch_hook_and_request_adapter(response):
    """Patch connection lookup and the HTTP layer in one context.

    *response* may be an Exception instance (raised by the adapter) or a
    response object (returned by it).
    """
    with patch_hook() as hook_mocks:
        with patch.object(HttpxRequestAdapter, "get_http_response_message") as mock_get_http_response:
            if isinstance(response, Exception):
                mock_get_http_response.side_effect = response
            else:
                mock_get_http_response.return_value = response
            yield [*hook_mocks, mock_get_http_response]
|
TestAzureIdentityCredentialAdapter
|
python
|
scrapy__scrapy
|
tests/test_utils_log.py
|
{
"start": 3398,
"end": 4771
}
|
class ____:
    def test_redirect(self):
        """print() routed through StreamLogger should emit ERROR log records."""
        logger = logging.getLogger("test")
        logger.setLevel(logging.WARNING)
        old_stdout = sys.stdout
        sys.stdout = StreamLogger(logger, logging.ERROR)
        try:
            with LogCapture() as log:
                print("test log msg")
            log.check(("test", "ERROR", "test log msg"))
        finally:
            # Always restore stdout, even when the check above fails, so a
            # failure here cannot swallow the output of later tests.
            sys.stdout = old_stdout
@pytest.mark.parametrize(
    ("base_extra", "log_extra", "expected_extra"),
    [
        (
            {"spider": "test"},
            {"extra": {"log_extra": "info"}},
            {"extra": {"log_extra": "info", "spider": "test"}},
        ),
        (
            {"spider": "test"},
            {"extra": None},
            {"extra": {"spider": "test"}},
        ),
        (
            {"spider": "test"},
            {"extra": {"spider": "test2"}},
            {"extra": {"spider": "test"}},
        ),
    ],
)
def test_spider_logger_adapter_process(
    base_extra: Mapping[str, Any], log_extra: MutableMapping, expected_extra: dict
) -> None:
    """The adapter merges its base extra into each call's extra; base values
    win on key collisions (third case)."""
    logger = logging.getLogger("test")
    spider_logger_adapter = SpiderLoggerAdapter(logger, base_extra)
    log_message = "test_log_message"
    result_message, result_kwargs = spider_logger_adapter.process(
        log_message, log_extra
    )
    assert result_message == log_message
    assert result_kwargs == expected_extra
|
TestStreamLogger
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-child-who-has-the-ball-after-k-seconds.py
|
{
"start": 36,
"end": 290
}
|
class ____(object):
    def numberOfChild(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: int
        """
        # The ball sweeps across the n-1 gaps; after `passes` full sweeps plus
        # `offset` steps it sits at offset (even sweep, moving right) or
        # (n-1)-offset (odd sweep, moving left).
        passes, offset = divmod(k, n - 1)
        if passes % 2 == 0:
            return offset
        return (n - 1) - offset
# Time: O(1)
# Space: O(1)
# math
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/ops_test.py
|
{
"start": 86671,
"end": 96406
}
|
class ____(test_util.TensorFlowTestCase):
  """Tests for Graph.control_dependencies: basic wiring, eager mode, nesting,
  clearing with None, dominance-based pruning, and the monitoring attribute."""

  @test_util.run_deprecated_v1
  def testBasic(self):
    g = ops.Graph()
    with g.as_default():
      # Creating unregistered ops with _apply_op() doesn't work with the C API
      # TODO(skyewm): address this more consistently. Possible solutions are
      # to use registered ops in all tests, create a way to register ops in
      # Python tests, or conditionally disable the op registration check in
      # the C API.
      a = constant_op.constant(1.0)
      b = constant_op.constant(1.0)
      with g.control_dependencies([a]):
        c = constant_op.constant(1.0)
        d = array_ops.identity(b)
        e = array_ops.identity(c)
    self.assertEqual(c.op.control_inputs, [a.op])
    self.assertEqual(d.op.control_inputs, [a.op])
    # e should be dominated by c.
    self.assertEqual(e.op.control_inputs, [])

  @test_util.run_in_graph_and_eager_modes
  def testEager(self):
    # In eager mode, callables passed as control deps are invoked exactly once.
    def future():
      future.calls += 1
      return constant_op.constant(2.0)

    future.calls = 0

    if context.executing_eagerly():
      a = constant_op.constant(1.0)
      b = future
      with ops.control_dependencies([a, b]):
        c = constant_op.constant(3.0)
      self.assertEqual(future.calls, 1)
    else:
      g = ops.Graph()
      with g.as_default():
        a = constant_op.constant(1.0)
        b = future()
        with g.control_dependencies([a, b]):
          c = constant_op.constant(3.0)
      self.assertEqual(c.op.control_inputs, [a.op, b.op])
      self.assertEqual(future.calls, 1)

  def testBasicWithConversion(self):
    # Objects exposing _as_graph_element are accepted as control inputs.
    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    class ConvertibleObj(object):

      def _as_graph_element(self):
        return a

    with g.control_dependencies([ConvertibleObj()]):
      c = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    self.assertEqual(c.op.control_inputs, [a.op])

  def testNested(self):
    # Nested scopes accumulate deps; one scope with all deps is equivalent.
    g = ops.Graph()
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    with g.control_dependencies([a_1, a_2, a_3, a_4]):
      b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with g.control_dependencies([a_3]):
          with g.control_dependencies([a_4]):
            b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
                          b_1.op.control_inputs)
    self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)

  def testClear(self):
    # control_dependencies(None) clears all deps accumulated so far.
    g = ops.Graph()
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with g.control_dependencies(None):
          with g.control_dependencies([a_3]):
            with g.control_dependencies([a_4]):
              # deps [a_3, a_4]
              b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
            # deps = [a_3]
            b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
          # deps back to None
          b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
        # deps back to [a_1, a_2]
        b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      # deps back to [a_1]
      b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies(None):
        # deps are None again
        b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
    self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
    self.assertItemsEqual([], b_none.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([], b_none2.op.control_inputs)

  def testComplex(self):
    g = ops.Graph()

    # Usage pattern:
    # * Nodes a_i are constants defined at the outermost scope, and are used
    #   as control inputs for the ith nested scope.
    # * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
    # * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
    # * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
    # * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    with g.control_dependencies([a_1]):
      b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                      [dtypes.float32])
      c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                      [dtypes.float32])
      d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
                      [dtypes.float32])
      e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies([a_2]):
        b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                        [dtypes.float32])
        c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                        [dtypes.float32])
        d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
                        [dtypes.float32])
        e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
                        [dtypes.float32])
        with g.control_dependencies([a_3]):
          b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                          [dtypes.float32])
          c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                          [dtypes.float32])
          d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
                          [dtypes.float32])
          e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
                          [dtypes.float32])
          with g.control_dependencies([a_4]):
            b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                            [dtypes.float32])
            c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                            [dtypes.float32])
            d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
                            [dtypes.float32])
            e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
                            [dtypes.float32])

    # Deps already implied by data inputs are pruned (dominance).
    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
    self.assertItemsEqual([], c_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
    self.assertItemsEqual([], d_1.op.control_inputs)
    self.assertItemsEqual([], d_2.op.control_inputs)
    self.assertItemsEqual([], d_3.op.control_inputs)
    self.assertItemsEqual([], d_4.op.control_inputs)
    self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
    self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
    self.assertItemsEqual([a_4.op], e_4.op.control_inputs)

  def testRepeatedDependency(self):
    # Two outputs of the same op collapse to a single control input.
    g = ops.Graph()
    a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
    a_0, a_1 = a.outputs
    with g.control_dependencies([a_0]):
      b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies([a_1]):
        c = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    self.assertEqual(b.op.control_inputs, [a])
    self.assertEqual(c.op.control_inputs, [a])

  def testNoControlDependencyWithDataDependency(self):
    # A data edge already orders the ops, so no control edge is added.
    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a]):
      b = _apply_op(g, "Identity", [a], [dtypes.float32])

    self.assertEqual(b.op.control_inputs, [])

  def testMonitoringAttributeAddedWhenUsingManualControlDep(self):
    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a]):
      c = _apply_op(g, "Identity", [b], [dtypes.float32])
    with g.control_dependencies([b]):
      d = _apply_op(g, "Identity", [b], [dtypes.float32])

    # Validate that the monitoring attribute is set to track usage of the
    # `control_dependencies(...)` API.
    self.assertEqual(c.op.control_inputs, [a.op])
    with self.assertRaises(ValueError):
      c.op.get_attr("_has_manual_control_dependencies")
    self.assertEqual(a.op.get_attr("_has_manual_control_dependencies"), True)

    # Validate that the monitoring attribute is set to track usage of the
    # `control_dependencies(...)` API even when the manual control deps actually
    # happened to be pruned at runtime.
    self.assertEqual(d.op.control_inputs, [])
    with self.assertRaises(ValueError):
      d.op.get_attr("_has_manual_control_dependencies")
    self.assertEqual(b.op.get_attr("_has_manual_control_dependencies"), True)
|
ControlDependenciesTest
|
python
|
huggingface__transformers
|
src/transformers/pipelines/pt_utils.py
|
{
"start": 502,
"end": 6627
}
|
class ____(IterableDataset):
    """Lazily applies ``infer`` to every item of ``loader``, optionally
    unrolling batched model outputs into batch_size-1 items (see __init__)."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        """
        Roughly equivalent to

        ```
        for item in loader:
            yield infer(item, **params)
        ```

                Arguments:
                    loader (`torch.utils.data.DataLoader` or `Iterable`):
                        The iterator that will be used to apply `infer` on.
                    infer (any function):
                        The function to apply of each element of `loader`.
                    params (`dict`):
                        The parameters passed to `infer` along with every item
                    loader_batch_size (`int`, *optional*):
                        If specified, the items of `loader` are supposed to come as batch, and are loader_batched here
                        making it roughly behave as


        ```
        for items in loader:
            for i in loader_batch_size:
                item = items[i]
                yield infer(item, **params)
        ```"""
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """
        Return item located at `loader_batch_index` within the current `loader_batch_data`.
        """
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index].unsqueeze(0)
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k == "past_key_values":
                    # Cache objects are not sliceable per item; skipped entirely.
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            elif isinstance(processed, tuple):
                first_tensor = processed[0]
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed[0] if isinstance(processed, tuple) else processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
|
PipelineIterator
|
python
|
ray-project__ray
|
python/ray/dashboard/modules/job/tests/test_common.py
|
{
"start": 331,
"end": 8600
}
|
class ____:
    """Field-level validation tests for JobSubmitRequest."""

    def test_validate_entrypoint(self):
        # entrypoint is required and must be a string.
        r = validate_request_type({"entrypoint": "abc"}, JobSubmitRequest)
        assert r.entrypoint == "abc"

        with pytest.raises(TypeError, match="required positional argument"):
            validate_request_type({}, JobSubmitRequest)

        with pytest.raises(TypeError, match="must be a string"):
            validate_request_type({"entrypoint": 123}, JobSubmitRequest)

    def test_validate_submission_id(self):
        # submission_id is optional; when present it must be a string.
        r = validate_request_type({"entrypoint": "abc"}, JobSubmitRequest)
        assert r.entrypoint == "abc"
        assert r.submission_id is None

        r = validate_request_type(
            {"entrypoint": "abc", "submission_id": "123"}, JobSubmitRequest
        )
        assert r.entrypoint == "abc"
        assert r.submission_id == "123"

        with pytest.raises(TypeError, match="must be a string"):
            validate_request_type(
                {"entrypoint": 123, "submission_id": 1}, JobSubmitRequest
            )

    def test_validate_runtime_env(self):
        # runtime_env is optional; when present it must be a str-keyed dict.
        r = validate_request_type({"entrypoint": "abc"}, JobSubmitRequest)
        assert r.entrypoint == "abc"
        assert r.runtime_env is None

        r = validate_request_type(
            {"entrypoint": "abc", "runtime_env": {"hi": "hi2"}}, JobSubmitRequest
        )
        assert r.entrypoint == "abc"
        assert r.runtime_env == {"hi": "hi2"}

        with pytest.raises(TypeError, match="must be a dict"):
            validate_request_type(
                {"entrypoint": "abc", "runtime_env": 123}, JobSubmitRequest
            )

        with pytest.raises(TypeError, match="keys must be strings"):
            validate_request_type(
                {"entrypoint": "abc", "runtime_env": {1: "hi"}}, JobSubmitRequest
            )

    def test_validate_metadata(self):
        # metadata is optional; when present it must be a str->str dict.
        r = validate_request_type({"entrypoint": "abc"}, JobSubmitRequest)
        assert r.entrypoint == "abc"
        assert r.metadata is None

        r = validate_request_type(
            {"entrypoint": "abc", "metadata": {"hi": "hi2"}}, JobSubmitRequest
        )
        assert r.entrypoint == "abc"
        assert r.metadata == {"hi": "hi2"}

        with pytest.raises(TypeError, match="must be a dict"):
            validate_request_type(
                {"entrypoint": "abc", "metadata": 123}, JobSubmitRequest
            )

        with pytest.raises(TypeError, match="keys must be strings"):
            validate_request_type(
                {"entrypoint": "abc", "metadata": {1: "hi"}}, JobSubmitRequest
            )

        with pytest.raises(TypeError, match="values must be strings"):
            validate_request_type(
                {"entrypoint": "abc", "metadata": {"hi": 1}}, JobSubmitRequest
            )
def test_uri_to_http_and_back():
    """uri_to_http_components / http_uri_components_to_uri must round-trip
    and reject unknown protocols or non-.zip/.whl package names."""
    assert uri_to_http_components("gcs://hello.zip") == ("gcs", "hello.zip")
    assert uri_to_http_components("gcs://hello.whl") == ("gcs", "hello.whl")

    with pytest.raises(ValueError, match="'blah' is not a valid Protocol"):
        uri_to_http_components("blah://halb.zip")

    with pytest.raises(ValueError, match="does not end in .zip or .whl"):
        assert uri_to_http_components("gcs://hello.not_zip")

    with pytest.raises(ValueError, match="does not end in .zip or .whl"):
        assert uri_to_http_components("gcs://hello")

    assert http_uri_components_to_uri("gcs", "hello.zip") == "gcs://hello.zip"
    assert http_uri_components_to_uri("blah", "halb.zip") == "blah://halb.zip"
    assert http_uri_components_to_uri("blah", "halb.whl") == "blah://halb.whl"

    for original_uri in ["gcs://hello.zip", "gcs://fasdf.whl"]:
        new_uri = http_uri_components_to_uri(*uri_to_http_components(original_uri))
        assert new_uri == original_uri
def test_dynamic_status_message():
    """PENDING JobInfo messages should hint at the likely wait reason:
    resources when any entrypoint resource is requested, runtime env otherwise."""
    info = JobInfo(
        status=JobStatus.PENDING, entrypoint="echo hi", entrypoint_num_cpus=1
    )
    assert "may be waiting for resources" in info.message

    info = JobInfo(
        status=JobStatus.PENDING, entrypoint="echo hi", entrypoint_num_gpus=1
    )
    assert "may be waiting for resources" in info.message

    info = JobInfo(status=JobStatus.PENDING, entrypoint="echo hi", entrypoint_memory=4)
    assert "may be waiting for resources" in info.message

    info = JobInfo(
        status=JobStatus.PENDING,
        entrypoint="echo hi",
        entrypoint_resources={"Custom": 1},
    )
    assert "may be waiting for resources" in info.message

    info = JobInfo(
        status=JobStatus.PENDING, entrypoint="echo hi", runtime_env={"conda": "env"}
    )
    assert "may be waiting for the runtime environment" in info.message
def test_job_info_to_json():
info = JobInfo(
status=JobStatus.PENDING,
entrypoint="echo hi",
entrypoint_num_cpus=1,
entrypoint_num_gpus=1,
entrypoint_memory=4,
entrypoint_resources={"Custom": 1},
runtime_env={"pip": ["pkg"]},
)
expected_items = {
"status": "PENDING",
"message": (
"Job has not started yet. It may be waiting for resources "
"(CPUs, GPUs, memory, custom resources) to become available. "
"It may be waiting for the runtime environment to be set up."
),
"entrypoint": "echo hi",
"entrypoint_num_cpus": 1,
"entrypoint_num_gpus": 1,
"entrypoint_memory": 4,
"entrypoint_resources": {"Custom": 1},
"runtime_env_json": '{"pip": ["pkg"]}',
}
# Check that the expected items are in the JSON.
assert expected_items.items() <= info.to_json().items()
new_job_info = JobInfo.from_json(info.to_json())
assert new_job_info == info
# If `status` is just a string, then operations like status.is_terminal()
# would fail, so we should make sure that it's a JobStatus.
assert isinstance(new_job_info.status, JobStatus)
def test_job_info_json_to_proto():
"""Test that JobInfo JSON can be converted to JobsAPIInfo protobuf."""
info = JobInfo(
status=JobStatus.PENDING,
entrypoint="echo hi",
error_type=JobErrorType.JOB_SUPERVISOR_ACTOR_UNSCHEDULABLE,
start_time=123,
end_time=456,
metadata={"hi": "hi2"},
entrypoint_num_cpus=1,
entrypoint_num_gpus=1,
entrypoint_memory=4,
entrypoint_resources={"Custom": 1},
runtime_env={"pip": ["pkg"]},
driver_agent_http_address="http://localhost:1234",
driver_node_id="node_id",
)
info_json = json.dumps(info.to_json())
info_proto = Parse(info_json, JobsAPIInfo())
assert info_proto.status == "PENDING"
assert info_proto.entrypoint == "echo hi"
assert info_proto.start_time == 123
assert info_proto.end_time == 456
assert info_proto.metadata == {"hi": "hi2"}
assert info_proto.entrypoint_num_cpus == 1
assert info_proto.entrypoint_num_gpus == 1
assert info_proto.entrypoint_memory == 4
assert info_proto.entrypoint_resources == {"Custom": 1}
assert info_proto.runtime_env_json == '{"pip": ["pkg"]}'
assert info_proto.message == (
"Job has not started yet. It may be waiting for resources "
"(CPUs, GPUs, memory, custom resources) to become available. "
"It may be waiting for the runtime environment to be set up."
)
assert info_proto.error_type == "JOB_SUPERVISOR_ACTOR_UNSCHEDULABLE"
assert info_proto.driver_agent_http_address == "http://localhost:1234"
assert info_proto.driver_node_id == "node_id"
minimal_info = JobInfo(status=JobStatus.PENDING, entrypoint="echo hi")
minimal_info_json = json.dumps(minimal_info.to_json())
minimal_info_proto = Parse(minimal_info_json, JobsAPIInfo())
assert minimal_info_proto.status == "PENDING"
assert minimal_info_proto.entrypoint == "echo hi"
for unset_optional_field in [
"entrypoint_num_cpus",
"entrypoint_num_gpus",
"entrypoint_memory",
"runtime_env_json",
"error_type",
"driver_agent_http_address",
"driver_node_id",
]:
assert not minimal_info_proto.HasField(unset_optional_field)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
TestJobSubmitRequestValidation
|
python
|
pydantic__pydantic
|
pydantic/warnings.py
|
{
"start": 3547,
"end": 3868
}
|
class ____(PydanticDeprecationWarning):
"""A specific `PydanticDeprecationWarning` subclass defining functionality deprecated since Pydantic 2.12."""
def __init__(self, message: str, *args: object) -> None:
super().__init__(message, *args, since=(2, 12), expected_removal=(3, 0))
|
PydanticDeprecatedSince212
|
python
|
numba__llvmlite
|
llvmlite/binding/value.py
|
{
"start": 13770,
"end": 13987
}
|
class ____(_ValueIterator):
kind = 'operand'
def _dispose(self):
self._capi.LLVMPY_DisposeOperandsIter(self)
def _next(self):
return ffi.lib.LLVMPY_OperandsIterNext(self)
|
_OperandsIterator
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_formula_results01.py
|
{
"start": 315,
"end": 1708
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("formula_results01.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with formula errors."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_formula("A1", "1+1", None, 2)
worksheet.write_formula("A2", '"Foo"', None, "Foo")
worksheet.write_formula("A3", "IF(B3,FALSE,TRUE)", None, True)
worksheet.write_formula("A4", "IF(B4,TRUE,FALSE)", None, False)
worksheet.write_formula("A5", "#DIV/0!", None, "#DIV/0!")
worksheet.write_formula("A6", "#N/A", None, "#N/A")
worksheet.write_formula("A7", "#NAME?", None, "#NAME?")
worksheet.write_formula("A8", "#NULL!", None, "#NULL!")
worksheet.write_formula("A9", "#NUM!", None, "#NUM!")
worksheet.write_formula("A10", "#REF!", None, "#REF!")
worksheet.write_formula("A11", "#VALUE!", None, "#VALUE!")
worksheet.write_formula("A12", "1/0", None, "#DIV/0!")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
redis__redis-py
|
tests/test_encoding.py
|
{
"start": 2092,
"end": 2657
}
|
class ____:
def test_ignore(self, request):
r = _get_client(
redis.Redis,
request=request,
decode_responses=True,
encoding_errors="ignore",
)
r.set("a", b"foo\xff")
assert r.get("a") == "foo"
def test_replace(self, request):
r = _get_client(
redis.Redis,
request=request,
decode_responses=True,
encoding_errors="replace",
)
r.set("a", b"foo\xff")
assert r.get("a") == "foo\ufffd"
|
TestEncodingErrors
|
python
|
pypa__pipenv
|
pipenv/vendor/click/parser.py
|
{
"start": 4962,
"end": 6770
}
|
class ____:
def __init__(
self,
obj: "CoreOption",
opts: t.Sequence[str],
dest: t.Optional[str],
action: t.Optional[str] = None,
nargs: int = 1,
const: t.Optional[t.Any] = None,
):
self._short_opts = []
self._long_opts = []
self.prefixes: t.Set[str] = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError(f"Invalid start character for option ({opt})")
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = "store"
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self) -> bool:
return self.action in ("store", "append")
def process(self, value: t.Any, state: "ParsingState") -> None:
if self.action == "store":
state.opts[self.dest] = value # type: ignore
elif self.action == "store_const":
state.opts[self.dest] = self.const # type: ignore
elif self.action == "append":
state.opts.setdefault(self.dest, []).append(value) # type: ignore
elif self.action == "append_const":
state.opts.setdefault(self.dest, []).append(self.const) # type: ignore
elif self.action == "count":
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore
else:
raise ValueError(f"unknown action '{self.action}'")
state.order.append(self.obj)
|
Option
|
python
|
run-llama__llama_index
|
llama-index-integrations/retrievers/llama-index-retrievers-superlinked/llama_index/retrievers/superlinked/retriever.py
|
{
"start": 365,
"end": 4299
}
|
class ____(BaseRetriever):
"""
LlamaIndex retriever for Superlinked.
Provides an adapter that executes a Superlinked query and converts results
into LlamaIndex `TextNode` instances with scores.
"""
def __init__(
self,
*,
sl_client: App,
sl_query: QueryDescriptor,
page_content_field: str,
query_text_param: str = "query_text",
metadata_fields: Optional[List[str]] = None,
top_k: int = 4,
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""
Initialize the Superlinked retriever.
Args:
sl_client (Any): A Superlinked `App` instance.
sl_query (Any): A Superlinked `QueryDescriptor` describing the query.
page_content_field (str): Field name in the Superlinked result to use
as the node text.
query_text_param (str, optional): Parameter name in the Superlinked
query for the user text. Defaults to "query_text".
metadata_fields (Optional[List[str]], optional): If `None`, include
all fields except `page_content_field`. Otherwise, include only
the specified fields. Defaults to `None`.
top_k (int, optional): Maximum number of nodes returned (a final cap
is applied client-side). Defaults to `4`.
callback_manager (Optional[CallbackManager], optional): LlamaIndex
callback manager. Defaults to `None`.
"""
self.sl_client = sl_client
self.sl_query = sl_query
self.page_content_field = page_content_field
self.query_text_param = query_text_param
self.metadata_fields = metadata_fields
self.top_k = top_k
# Initialize BaseRetriever
super().__init__(callback_manager=callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""
Execute the Superlinked query and map results to nodes.
Args:
query_bundle (QueryBundle): User query as a `QueryBundle`.
Returns:
List[NodeWithScore]: Retrieved nodes with associated scores.
"""
user_query = getattr(query_bundle, "query_str", str(query_bundle))
# Build query params, allowing overrides via retriever metadata if needed later
query_params: dict[str, Any] = {self.query_text_param: user_query}
try:
result = self.sl_client.query(
query_descriptor=self.sl_query, **query_params
)
except Exception:
return []
nodes: List[NodeWithScore] = []
for entry in getattr(result, "entries", []) or []:
fields = getattr(entry, "fields", None) or {}
if self.page_content_field not in fields:
continue
text = fields[self.page_content_field]
metadata: dict[str, Any] = {"id": getattr(entry, "id", None)}
if self.metadata_fields is None:
for key, val in fields.items():
if key != self.page_content_field:
metadata[key] = val
else:
for key in self.metadata_fields:
if key in fields:
metadata[key] = fields[key]
# Determine score from Superlinked metadata if available
score_value: float = 1.0
entry_metadata = getattr(entry, "metadata", None)
if entry_metadata is not None and hasattr(entry_metadata, "score"):
try:
score_value = float(entry_metadata.score)
except Exception:
score_value = 1.0
node = TextNode(text=text, metadata=metadata)
nodes.append(NodeWithScore(node=node, score=score_value))
return nodes[: self.top_k]
|
SuperlinkedRetriever
|
python
|
joke2k__faker
|
faker/providers/address/de_DE/__init__.py
|
{
"start": 47,
"end": 10331
}
|
class ____(AddressProvider):
city_formats = ("{{city_name}}",)
city_with_postcode_formats = ("{{postcode}} {{city}}",)
street_name_formats = (
"{{first_name}}-{{last_name}}-{{street_suffix_long}}",
"{{last_name}}{{street_suffix_short}}",
)
street_address_formats = ("{{street_name}} {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
# NOTE: Zero itself can be a valid building number in rare cases e.g., Wilhelm-Wisser-Str. 0, Heidhörn
# see: https://www.uniserv.com/wissen/magazin/article/besonderheiten-von-zustelladressen/
building_number_formats = ("#", "%#", "%##", "%###", "%/%", "%#/%#", "%-%", "%#-%#")
street_suffixes_long = (
"Gasse",
"Platz",
"Ring",
"Straße",
"Weg",
"Allee",
)
street_suffixes_short = (
"gasse",
"platz",
"ring",
"straße",
"str.",
"weg",
"allee",
)
postcode_formats = ("#####",)
cities = (
"Aachen",
"Ahaus",
"Altentreptow",
"Altötting",
"Amberg",
"Angermünde",
"Anklam",
"Ansbach",
"Apolda",
"Arnstadt",
"Artern",
"Aschaffenburg",
"Aue",
"Auerbach",
"Augsburg",
"Aurich",
"Backnang",
"Bad Brückenau",
"Bad Freienwalde",
"Bad Kissingen",
"Bad Kreuznach",
"Bad Langensalza",
"Bad Liebenwerda",
"Bad Mergentheim",
"Badalzungen",
"Badibling",
"Badoberan",
"Bamberg",
"Bautzen",
"Bayreuth",
"Beeskow",
"Beilngries",
"Belzig",
"Berchtesgaden",
"Bergzabern",
"Berlin",
"Bernburg",
"Bersenbrück",
"Biedenkopf",
"Bischofswerda",
"Bitterfeld",
"Bogen",
"Borken",
"Borna",
"Brand",
"Brandenburg",
"Bremen",
"Bremervörde",
"Brilon",
"Bruchsal",
"Burg",
"Burgdorf",
"Burglengenfeld",
"Böblingen",
"Büsingen am Hochrhein",
"Bützow",
"Calau",
"Calw",
"Celle",
"Chemnitz",
"Cloppenburg",
"Coburg",
"Cottbus",
"Crailsheim",
"Cuxhaven",
"Dachau",
"Darmstadt",
"Deggendorf",
"Delitzsch",
"Demmin",
"Dessau",
"Dieburg",
"Diepholz",
"Dinkelsbühl",
"Dinslaken",
"Donaueschingen",
"Dresden",
"Duderstadt",
"Döbeln",
"Düren",
"Ebermannstadt",
"Ebern",
"Ebersberg",
"Eberswalde",
"Eckernförde",
"Eggenfelden",
"Eichstätt",
"Eilenburg",
"Einbeck",
"Eisenach",
"Eisenberg",
"Eisenhüttenstadt",
"Eisleben",
"Emmendingen",
"Erbisdorf",
"Erding",
"Erfurt",
"Erkelenz",
"Euskirchen",
"Eutin",
"Fallingbostel",
"Feuchtwangen",
"Finsterwalde",
"Flöha",
"Forchheim",
"Forst",
"Freising",
"Freital",
"Freudenstadt",
"Fulda",
"Fürstenfeldbruck",
"Fürstenwalde",
"Füssen",
"Gadebusch",
"Gardelegen",
"Garmisch-Partenkirchen",
"Geithain",
"Geldern",
"Gelnhausen",
"Genthin",
"Gera",
"Germersheim",
"Gerolzhofen",
"Gießen",
"Gifhorn",
"Goslar",
"Gotha",
"Grafenau",
"Gransee",
"Greifswald",
"Greiz",
"Grevenbroich",
"Grevesmühlen",
"Griesbach Rottal",
"Grimma",
"Grimmen",
"Groß-Gerau",
"Großenhain",
"Gräfenhainichen",
"Guben",
"Gunzenhausen",
"Göppingen",
"Görlitz",
"Göttingen",
"Günzburg",
"Güstrow",
"Gütersloh",
"Hagenow",
"Hainichen",
"Halberstadt",
"Haldensleben",
"Hamburg",
"Hammelburg",
"Hannover",
"Hannoversch Münden",
"Hansestadttralsund",
"Havelberg",
"Hechingen",
"Heiligenstadt",
"Heinsberg",
"Helmstedt",
"Herford",
"Hersbruck",
"Herzberg",
"Hettstedt",
"Hildburghausen",
"Hildesheim",
"Hofgeismar",
"Hohenmölsen",
"Hohenstein-Ernstthal",
"Holzminden",
"Hoyerswerda",
"Husum",
"Höxter",
"Hünfeld",
"Illertissen",
"Ilmenau",
"Ingolstadt",
"Iserlohn",
"Jena",
"Jessen",
"Jülich",
"Jüterbog",
"Kaiserslautern",
"Kamenz",
"Karlsruhe",
"Kassel",
"Kehl",
"Kelheim",
"Kemnath",
"Kitzingen",
"Kleve",
"Klötze",
"Koblenz",
"Konstanz",
"Kronach",
"Kulmbach",
"Kusel",
"Kyritz",
"Königs Wusterhausen",
"Kötzting",
"Leipziger Land",
"Lemgo",
"Lichtenfels",
"Lippstadt",
"Lobenstein",
"Luckau",
"Luckenwalde",
"Ludwigsburg",
"Ludwigslust",
"Lörrach",
"Lübben",
"Lübeck",
"Lübz",
"Lüdenscheid",
"Lüdinghausen",
"Lüneburg",
"Magdeburg",
"Main-Höchst",
"Mainburg",
"Malchin",
"Mallersdorf",
"Marienberg",
"Marktheidenfeld",
"Mayen",
"Meiningen",
"Meißen",
"Melle",
"Mellrichstadt",
"Melsungen",
"Meppen",
"Merseburg",
"Mettmann",
"Miesbach",
"Miltenberg",
"Mittweida",
"Moers",
"Monschau",
"Mühldorf am Inn",
"Mühlhausen",
"München",
"Nabburg",
"Naila",
"Nauen",
"Neu-Ulm",
"Neubrandenburg",
"Neunburg vorm Wald",
"Neuruppin",
"Neuss",
"Neustadt am Rübenberge",
"Neustadtner Waldnaab",
"Neustrelitz",
"Niesky",
"Norden",
"Nordhausen",
"Northeim",
"Nördlingen",
"Nürtingen",
"Oberviechtach",
"Ochsenfurt",
"Olpe",
"Oranienburg",
"Oschatz",
"Osterburg",
"Osterode am Harz",
"Paderborn",
"Parchim",
"Parsberg",
"Pasewalk",
"Passau",
"Pegnitz",
"Peine",
"Perleberg",
"Pfaffenhofen an der Ilm",
"Pinneberg",
"Pirmasens",
"Plauen",
"Potsdam",
"Prenzlau",
"Pritzwalk",
"Pößneck",
"Quedlinburg",
"Querfurt",
"Rastatt",
"Rathenow",
"Ravensburg",
"Recklinghausen",
"Regen",
"Regensburg",
"Rehau",
"Reutlingen",
"Ribnitz-Damgarten",
"Riesa",
"Rochlitz",
"Rockenhausen",
"Roding",
"Rosenheim",
"Rostock",
"Roth",
"Rothenburg ob der Tauber",
"Rottweil",
"Rudolstadt",
"Saarbrücken",
"Saarlouis",
"Sangerhausen",
"Sankt Goar",
"Sankt Goarshausen",
"Saulgau",
"Scheinfeld",
"Schleiz",
"Schlüchtern",
"Schmölln",
"Schongau",
"Schrobenhausen",
"Schwabmünchen",
"Schwandorf",
"Schwarzenberg",
"Schweinfurt",
"Schwerin",
"Schwäbisch Gmünd",
"Schwäbisch Hall",
"Sebnitz",
"Seelow",
"Senftenberg",
"Siegen",
"Sigmaringen",
"Soest",
"Soltau",
"Sondershausen",
"Sonneberg",
"Spremberg",
"Stade",
"Stadtroda",
"Stadtsteinach",
"Staffelstein",
"Starnberg",
"Staßfurt",
"Steinfurt",
"Stendal",
"Sternberg",
"Stollberg",
"Strasburg",
"Strausberg",
"Stuttgart",
"Suhl",
"Sulzbach-Rosenberg",
"Säckingen",
"Sömmerda",
"Tecklenburg",
"Teterow",
"Tirschenreuth",
"Torgau",
"Tuttlingen",
"Tübingen",
"Ueckermünde",
"Uelzen",
"Uffenheim",
"Vechta",
"Viechtach",
"Viersen",
"Vilsbiburg",
"Vohenstrauß",
"Waldmünchen",
"Wanzleben",
"Waren",
"Warendorf",
"Weimar",
"Weißenfels",
"Weißwasser",
"Werdau",
"Wernigerode",
"Wertingen",
"Wesel",
"Wetzlar",
"Wiedenbrück",
"Wismar",
"Wittenberg",
"Wittmund",
"Wittstock",
"Witzenhausen",
"Wolfach",
"Wolfenbüttel",
"Wolfratshausen",
"Wolgast",
"Wolmirstedt",
"Worbis",
"Wunsiedel",
"Wurzen",
"Zerbst",
"Zeulenroda",
"Zossen",
"Zschopau",
)
states = (
"Baden-Württemberg",
"Bayern",
"Berlin",
"Brandenburg",
"Bremen",
"Hamburg",
"Hessen",
"Mecklenburg-Vorpommern",
"Niedersachsen",
"Nordrhein-Westfalen",
"Rheinland-Pfalz",
"Saarland",
"Sachsen",
"Sachsen-Anhalt",
"Schleswig-Holstein",
"Thüringen",
)
def street_suffix_short(self) -> str:
return self.random_element(self.street_suffixes_short)
def street_suffix_long(self) -> str:
return self.random_element(self.street_suffixes_long)
def city_name(self) -> str:
return self.random_element(self.cities)
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit
def city_with_postcode(self) -> str:
pattern: str = self.random_element(self.city_with_postcode_formats)
return self.generator.parse(pattern)
|
Provider
|
python
|
wandb__wandb
|
wandb/sdk/data_types/object_3d.py
|
{
"start": 19043,
"end": 19178
}
|
class ____(_dtypes.Type):
name = "object3D-file"
types = [Object3D]
_dtypes.TypeRegistry.add(_Object3DFileType)
|
_Object3DFileType
|
python
|
walkccc__LeetCode
|
solutions/1632. Rank Transform of a Matrix/1632.py
|
{
"start": 0,
"end": 575
}
|
class ____:
def __init__(self):
self.id = {}
def union(self, u: int, v: int) -> None:
self.id.setdefault(u, u)
self.id.setdefault(v, v)
i = self._find(u)
j = self._find(v)
if i != j:
self.id[i] = j
def getGroupIdToValues(self) -> dict[int, list[int]]:
groupIdToValues = collections.defaultdict(list)
for u in self.id.keys():
groupIdToValues[self._find(u)].append(u)
return groupIdToValues
def _find(self, u: int) -> int:
if self.id[u] != u:
self.id[u] = self._find(self.id[u])
return self.id[u]
|
UnionFind
|
python
|
zarr-developers__zarr-python
|
src/zarr/errors.py
|
{
"start": 2268,
"end": 2442
}
|
class ____(BaseZarrError):
"""Raised when the Zarr metadata is invalid in some way"""
_msg = "Invalid value for '{}'. Expected '{}'. Got '{}'."
|
MetadataValidationError
|
python
|
ray-project__ray
|
python/ray/autoscaler/_private/spark/spark_job_server.py
|
{
"start": 278,
"end": 7271
}
|
class ____(BaseHTTPRequestHandler):
def setup(self) -> None:
super().setup()
self._handler_lock = threading.RLock()
self._created_node_id_set = set()
self._logger = logging.getLogger(__name__)
if "RAY_ON_SPARK_JOB_SERVER_VERBOSE" in os.environ:
self._logger.setLevel(logging.DEBUG)
else:
self._logger.setLevel(logging.WARN)
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
def handle_POST(self, path, data):
path_parts = Path(path).parts[1:]
spark_job_group_id = data["spark_job_group_id"]
if path_parts[0] == "create_node":
assert len(path_parts) == 1, f"Illegal request path: {path}"
spark_job_group_desc = data["spark_job_group_desc"]
using_stage_scheduling = data["using_stage_scheduling"]
ray_head_ip = data["ray_head_ip"]
ray_head_port = data["ray_head_port"]
ray_temp_dir = data["ray_temp_dir"]
num_cpus_per_node = data["num_cpus_per_node"]
num_gpus_per_node = data["num_gpus_per_node"]
heap_memory_per_node = data["heap_memory_per_node"]
object_store_memory_per_node = data["object_store_memory_per_node"]
worker_node_options = data["worker_node_options"]
collect_log_to_path = data["collect_log_to_path"]
node_id = data["node_id"]
self._created_node_id_set.add(node_id)
def start_ray_worker_thread_fn():
try:
err_msg = _start_ray_worker_nodes(
spark_job_server=self.server,
spark_job_group_id=spark_job_group_id,
spark_job_group_desc=spark_job_group_desc,
num_worker_nodes=1,
using_stage_scheduling=using_stage_scheduling,
ray_head_ip=ray_head_ip,
ray_head_port=ray_head_port,
ray_temp_dir=ray_temp_dir,
num_cpus_per_node=num_cpus_per_node,
num_gpus_per_node=num_gpus_per_node,
heap_memory_per_node=heap_memory_per_node,
object_store_memory_per_node=object_store_memory_per_node,
worker_node_options=worker_node_options,
collect_log_to_path=collect_log_to_path,
node_id=node_id,
)
if err_msg:
self._logger.warning(
f"Spark job {spark_job_group_id} hosting Ray worker node "
f"launching failed, error:\n{err_msg}"
)
except Exception:
if spark_job_group_id in self.server.task_status_dict:
self.server.task_status_dict.pop(spark_job_group_id)
msg = (
f"Spark job {spark_job_group_id} hosting Ray worker node exit."
)
if self._logger.level > logging.DEBUG:
self._logger.warning(
f"{msg} To see details, you can set "
"'RAY_ON_SPARK_JOB_SERVER_VERBOSE' environmental variable "
"to '1' before calling 'ray.util.spark.setup_ray_cluster'."
)
else:
# This branch is only for debugging Ray-on-Spark purpose.
# User can configure 'RAY_ON_SPARK_JOB_SERVER_VERBOSE'
# environment variable to make the spark job server logging
# showing full exception stack here.
self._logger.debug(msg, exc_info=True)
threading.Thread(
target=inheritable_thread_target(start_ray_worker_thread_fn),
args=(),
daemon=True,
).start()
self.server.task_status_dict[spark_job_group_id] = "pending"
return {}
elif path_parts[0] == "check_node_id_availability":
node_id = data["node_id"]
with self._handler_lock:
if node_id in self._created_node_id_set:
# If the node with the node id has been created,
# it shouldn't be created twice so fail fast here.
# The case happens when a Ray node is down unexpected
# caused by spark worker node down and spark tries to
# reschedule the spark task, so it triggers node
# creation with duplicated node id.
return {"available": False}
else:
self._created_node_id_set.add(node_id)
return {"available": True}
elif path_parts[0] == "terminate_node":
assert len(path_parts) == 1, f"Illegal request path: {path}"
self.server.spark.sparkContext.cancelJobGroup(spark_job_group_id)
if spark_job_group_id in self.server.task_status_dict:
self.server.task_status_dict.pop(spark_job_group_id)
return {}
elif path_parts[0] == "notify_task_launched":
if spark_job_group_id in self.server.task_status_dict:
# Note that if `spark_job_group_id` not in task_status_dict,
# the task has been terminated
self.server.task_status_dict[spark_job_group_id] = "running"
self._logger.info(f"Spark task in {spark_job_group_id} has started.")
return {}
elif path_parts[0] == "query_task_status":
if spark_job_group_id in self.server.task_status_dict:
return {"status": self.server.task_status_dict[spark_job_group_id]}
else:
return {"status": "terminated"}
elif path_parts[0] == "query_last_worker_err":
return {"last_worker_err": self.server.last_worker_error}
else:
raise ValueError(f"Illegal request path: {path}")
def do_POST(self):
"""Reads post request body"""
self._set_headers()
content_len = int(self.headers["content-length"])
content_type = self.headers["content-type"]
assert content_type == "application/json"
path = self.path
post_body = self.rfile.read(content_len).decode("utf-8")
post_body_json = json.loads(post_body)
with self._handler_lock:
response_body_json = self.handle_POST(path, post_body_json)
response_body = json.dumps(response_body_json)
self.wfile.write(response_body.encode("utf-8"))
def log_request(self, code="-", size="-"):
# Make logs less verbose.
pass
|
SparkJobServerRequestHandler
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-k8s/dagster_k8s/client.py
|
{
"start": 8969,
"end": 41343
}
|
class ____:
def __init__(self, batch_api, core_api, logger, sleeper, timer):
self.batch_api = batch_api
self.core_api = core_api
self.logger = logger
self.sleeper = sleeper
self.timer = timer
@staticmethod
def production_client(batch_api_override=None, core_api_override=None):
return DagsterKubernetesClient(
batch_api=(
batch_api_override or kubernetes.client.BatchV1Api(api_client=PatchedApiClient())
),
core_api=(
core_api_override or kubernetes.client.CoreV1Api(api_client=PatchedApiClient())
),
logger=logging.info,
sleeper=time.sleep,
timer=time.time,
)
### Job operations ###
def wait_for_job(
self,
job_name,
namespace,
wait_timeout=DEFAULT_WAIT_TIMEOUT,
wait_time_between_attempts=DEFAULT_WAIT_BETWEEN_ATTEMPTS,
start_time=None,
):
"""Wait for a job to launch and be running.
Args:
job_name (str): Name of the job to wait for.
namespace (str): Namespace in which the job is located.
wait_timeout (numeric, optional): Timeout after which to give up and raise exception.
Defaults to DEFAULT_WAIT_TIMEOUT. Set to 0 to disable.
wait_time_between_attempts (numeric, optional): Wait time between polling attempts. Defaults
to DEFAULT_WAIT_BETWEEN_ATTEMPTS.
Raises:
DagsterK8sError: Raised when wait_timeout is exceeded or an error is encountered.
"""
check.str_param(job_name, "job_name")
check.str_param(namespace, "namespace")
check.numeric_param(wait_timeout, "wait_timeout")
check.numeric_param(wait_time_between_attempts, "wait_time_between_attempts")
job = None
start = start_time or self.timer()
while not job:
if wait_timeout and (self.timer() - start > wait_timeout):
raise DagsterK8sTimeoutError(
f"Timed out while waiting for job {job_name} to launch"
)
# Get all jobs in the namespace and find the matching job
def _get_jobs_for_namespace():
jobs = self.batch_api.list_namespaced_job(
namespace=namespace, field_selector=f"metadata.name={job_name}"
)
if jobs.items:
check.invariant(
len(jobs.items) == 1,
f'There should only be one k8s job with name "{job_name}", but got multiple'
f' jobs:" {jobs.items}',
)
return jobs.items[0]
else:
return None
job = k8s_api_retry(
_get_jobs_for_namespace, max_retries=3, timeout=wait_time_between_attempts
)
if not job:
self.logger(f'Job "{job_name}" not yet launched, waiting')
self.sleeper(wait_time_between_attempts)
def wait_for_job_to_have_pods(
self,
job_name,
namespace,
wait_timeout=DEFAULT_WAIT_TIMEOUT,
wait_time_between_attempts=5,
start_time=None,
):
start = start_time or self.timer()
def _get_pods():
return self.get_pods_in_job(job_name, namespace)
while True:
if wait_timeout and (self.timer() - start > wait_timeout):
raise DagsterK8sTimeoutError(
f"Timed out while waiting for job {job_name} to have pods"
)
pod_list = k8s_api_retry(_get_pods, max_retries=3, timeout=wait_time_between_attempts)
if pod_list:
return pod_list
self.logger(f'Job "{job_name}" does not yet have pods, waiting')
self.sleeper(wait_time_between_attempts)
def wait_for_job_success(
self,
job_name,
namespace,
instance=None,
run_id=None,
wait_timeout=DEFAULT_WAIT_TIMEOUT,
wait_time_between_attempts=DEFAULT_WAIT_BETWEEN_ATTEMPTS,
num_pods_to_wait_for=DEFAULT_JOB_POD_COUNT,
):
"""Poll a job for successful completion.
Args:
job_name (str): Name of the job to wait for.
namespace (str): Namespace in which the job is located.
wait_timeout (numeric, optional): Timeout after which to give up and raise exception.
Defaults to DEFAULT_WAIT_TIMEOUT. Set to 0 to disable.
wait_time_between_attempts (numeric, optional): Wait time between polling attempts. Defaults
to DEFAULT_WAIT_BETWEEN_ATTEMPTS.
Raises:
DagsterK8sError: Raised when wait_timeout is exceeded or an error is encountered.
"""
check.str_param(job_name, "job_name")
check.str_param(namespace, "namespace")
check.opt_inst_param(instance, "instance", DagsterInstance)
check.opt_str_param(run_id, "run_id")
check.numeric_param(wait_timeout, "wait_timeout")
check.numeric_param(wait_time_between_attempts, "wait_time_between_attempts")
check.int_param(num_pods_to_wait_for, "num_pods_to_wait_for")
start = self.timer()
# Wait for job to be running
self.wait_for_job(
job_name,
namespace,
wait_timeout=wait_timeout,
wait_time_between_attempts=wait_time_between_attempts,
start_time=start,
)
self.wait_for_running_job_to_succeed(
job_name,
namespace,
instance,
run_id,
wait_timeout,
wait_time_between_attempts,
num_pods_to_wait_for,
start_time=start,
)
def wait_for_running_job_to_succeed(
self,
job_name,
namespace,
instance=None,
run_id=None,
wait_timeout=DEFAULT_WAIT_TIMEOUT,
wait_time_between_attempts=DEFAULT_WAIT_BETWEEN_ATTEMPTS,
num_pods_to_wait_for=DEFAULT_JOB_POD_COUNT,
start_time: Optional[float] = None,
):
if wait_timeout:
check.float_param(start_time, "start_time")
# Wait for the job status to be completed. We check the status every
# wait_time_between_attempts seconds
while True:
if wait_timeout and (self.timer() - start_time > wait_timeout):
raise DagsterK8sTimeoutError(
f"Timed out while waiting for job {job_name} to complete"
)
# Reads the status of the specified job. Returns a V1Job object that
# we need to read the status off of.
status = self.get_job_status(
job_name=job_name,
namespace=namespace,
wait_time_between_attempts=wait_time_between_attempts,
)
if not status:
raise DagsterK8sError(f"job {job_name} could not be found")
# status.succeeded represents the number of pods which reached phase Succeeded.
if status.succeeded == num_pods_to_wait_for:
break
# status.failed represents the number of pods which reached phase Failed.
# if there are any active runs do not raise an exception. This happens when the job
# is created with a backoff_limit > 0.
if (
(status.active is None or status.active == 0)
and status.failed
and status.failed > 0
):
raise DagsterK8sError(
f"Encountered failed job pods for job {job_name} with status: {status}, "
f"in namespace {namespace}"
)
if instance and run_id:
dagster_run = instance.get_run_by_id(run_id)
if not dagster_run:
raise DagsterK8sJobStatusException()
dagster_run_status = dagster_run.status
if dagster_run_status != DagsterRunStatus.STARTED:
raise DagsterK8sJobStatusException()
self.sleeper(wait_time_between_attempts)
def get_job_status(
self,
job_name: str,
namespace: str,
wait_time_between_attempts=DEFAULT_WAIT_BETWEEN_ATTEMPTS,
) -> Optional[V1JobStatus]:
def _get_job_status():
try:
job = self.batch_api.read_namespaced_job_status(job_name, namespace=namespace)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
return None
else:
raise
return job.status
return k8s_api_retry(_get_job_status, max_retries=3, timeout=wait_time_between_attempts)
def delete_job(
self,
job_name,
namespace,
):
"""Delete Kubernetes Job.
We also need to delete corresponding pods due to:
https://github.com/kubernetes-client/python/issues/234
Args:
job_name (str): Name of the job to wait for.
namespace (str): Namespace in which the job is located.
"""
check.str_param(job_name, "job_name")
check.str_param(namespace, "namespace")
try:
pod_names = self.get_pod_names_in_job(job_name, namespace)
# Collect all the errors so that we can post-process before raising
pod_names = self.get_pod_names_in_job(job_name, namespace)
errors = []
try:
self.batch_api.delete_namespaced_job(name=job_name, namespace=namespace)
except Exception as e:
errors.append(e)
for pod_name in pod_names:
try:
self.core_api.delete_namespaced_pod(name=pod_name, namespace=namespace)
except Exception as e:
errors.append(e)
if len(errors) > 0:
# Raise first non-expected error. Else, raise first error.
for error in errors:
if not (
isinstance(error, kubernetes.client.rest.ApiException)
and error.status == 404
):
raise error
raise errors[0]
return True
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
return False
raise e
### Pod operations ###
def get_pod_by_name(self, pod_name: str, namespace: str):
"""Get a pod by name.
Args:
pod_name (str): Name of the pod to get.
namespace (str): Namespace in which the pod is located.
"""
check.str_param(pod_name, "pod_name")
check.str_param(namespace, "namespace")
return self.core_api.read_namespaced_pod(pod_name, namespace=namespace)
def get_pods_in_job(self, job_name, namespace):
"""Get the pods launched by the job ``job_name``.
Args:
job_name (str): Name of the job to inspect.
namespace (str): Namespace in which the job is located.
Returns:
List[V1Pod]: List of all pod objects that have been launched by the job ``job_name``.
"""
check.str_param(job_name, "job_name")
check.str_param(namespace, "namespace")
return self.core_api.list_namespaced_pod(
namespace=namespace, label_selector=f"job-name={job_name}"
).items
def get_pod_names_in_job(self, job_name, namespace):
"""Get the names of pods launched by the job ``job_name``.
Args:
job_name (str): Name of the job to inspect.
namespace (str): Namespace in which the job is located.
Returns:
List[str]: List of all pod names that have been launched by the job ``job_name``.
"""
check.str_param(job_name, "job_name")
check.str_param(namespace, "namespace")
pods = self.get_pods_in_job(job_name, namespace)
return [p.metadata.name for p in pods]
def wait_for_pod(
self,
pod_name: str,
namespace: str,
wait_for_state: WaitForPodState = WaitForPodState.Ready,
pod_launch_timeout: float = DEFAULT_WAIT_TIMEOUT,
wait_timeout: float = DEFAULT_WAIT_TIMEOUT,
wait_time_between_attempts: float = DEFAULT_WAIT_BETWEEN_ATTEMPTS,
start_time: Any = None,
ignore_containers: Optional[set] = None,
) -> None:
"""Wait for a pod to launch and be running, or wait for termination (useful for job pods).
Args:
----
pod_name (str): Name of the pod to wait for.
namespace (str): Namespace in which the pod is located.
wait_for_state (WaitForPodState, optional): Whether to wait for pod readiness or
termination. Defaults to waiting for readiness.
pod_launch_timeout (numeric, optional): Timeout after which to give up and raise exception
if the pod never appears. Defaults to DEFAULT_WAIT_TIMEOUT. Set to 0 to disable.
wait_timeout (numeric, optional): Timeout after which to give up and raise exception.
Defaults to DEFAULT_WAIT_TIMEOUT. Set to 0 to disable.
wait_time_between_attempts (numeric, optional): Wait time between polling attempts. Defaults
to DEFAULT_WAIT_BETWEEN_ATTEMPTS.
start_time (numeric, optional): The start time of the wait, used for testing.
ignore_containers (set, optional): The container names that we should ignore
when waiting for the pod to be ready/terminate.
Raises:
------
DagsterK8sError: Raised when wait_timeout is exceeded or an error is encountered
"""
check.str_param(pod_name, "pod_name")
check.str_param(namespace, "namespace")
check.inst_param(wait_for_state, "wait_for_state", WaitForPodState)
check.numeric_param(wait_timeout, "wait_timeout")
check.numeric_param(pod_launch_timeout, "pod_launch_timeout")
check.numeric_param(wait_time_between_attempts, "wait_time_between_attempts")
self.logger(f'Waiting for pod "{pod_name}"')
start = start_time or self.timer()
# A set of container names that have exited.
exited_containers = set()
ready_containers = set()
ignore_containers = ignore_containers or set()
error_logs = []
while True:
pods = self.core_api.list_namespaced_pod(
namespace=namespace, field_selector=f"metadata.name={pod_name}"
).items
pod = pods[0] if pods else None
if pod_launch_timeout and self.timer() - start > pod_launch_timeout:
raise DagsterK8sError(
f"Timed out while waiting for pod to become ready with pod info: {pod!s}"
)
if pod is None:
self.logger(f'Waiting for pod "{pod_name}" to launch...')
self.sleeper(wait_time_between_attempts)
continue
if not pod.status.init_container_statuses and not pod.status.container_statuses:
self.logger(
"Waiting for pod init_container or container status to be set by kubernetes..."
)
self.sleeper(wait_time_between_attempts)
continue
break
while True:
pods = self.core_api.list_namespaced_pod(
namespace=namespace, field_selector=f"metadata.name={pod_name}"
).items
pod = pods[0] if pods else None
if pod is None:
raise DagsterK8sError(f'Pod "{pod_name}" was unexpectedly killed')
if wait_timeout and self.timer() - start > wait_timeout:
raise DagsterK8sError(
f"Timed out while waiting for pod to get to status {wait_for_state.value} with pod info: {pod!s}"
)
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#containerstatus-v1-core
all_statuses = []
all_statuses.extend(pod.status.init_container_statuses or [])
all_statuses.extend(pod.status.container_statuses or [])
initcontainers = set(s.name for s in (pod.status.init_container_statuses or []))
# Filter out ignored containers
all_statuses = [s for s in all_statuses if s.name not in ignore_containers]
# Always get the first status from the list, which will first get the
# init container (if it exists), then will iterate through the loop
# of all containers if we are waiting for termination.
#
# In case we are waiting for the pod to be ready, we will exit after
# the first container in this list is ready.
container_status = next(
s for s in all_statuses if s.name not in exited_containers | ready_containers
)
# State checks below, see:
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#containerstate-v1-core
state = container_status.state
if state.running is not None:
if wait_for_state == WaitForPodState.Ready:
# ready is boolean field of container status
ready = container_status.ready
if not ready:
self.logger(f'Waiting for pod "{pod_name}" to become ready...')
self.sleeper(wait_time_between_attempts)
continue
else:
ready_containers.add(container_status.name)
if container_status.name in initcontainers:
self.logger(
f'Init container "{container_status.name}" is ready, waiting for non-init containers...'
)
continue
if initcontainers.issubset(exited_containers | ready_containers):
self.logger(f'Pod "{pod_name}" is ready, done waiting')
break
else:
check.invariant(
wait_for_state == WaitForPodState.Terminated, "New invalid WaitForPodState"
)
self.sleeper(wait_time_between_attempts)
continue
elif state.waiting is not None:
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#containerstatewaiting-v1-core
if state.waiting.reason == KubernetesWaitingReasons.PodInitializing:
self.logger(f'Waiting for pod "{pod_name}" to initialize...')
self.sleeper(wait_time_between_attempts)
continue
if state.waiting.reason == KubernetesWaitingReasons.CreateContainerConfigError:
self.logger(
f'Pod "{pod_name}" is waiting due to a CreateContainerConfigError with message "{state.waiting.message}"'
" - trying again to see if it recovers"
)
self.sleeper(wait_time_between_attempts)
continue
elif state.waiting.reason == KubernetesWaitingReasons.ContainerCreating:
self.logger("Waiting for container creation...")
self.sleeper(wait_time_between_attempts)
continue
elif state.waiting.reason is None:
self.logger(
f'Pod "{pod_name}" is waiting with reason "None" - this is temporary/transition state'
)
self.sleeper(wait_time_between_attempts)
continue
elif state.waiting.reason in [
KubernetesWaitingReasons.ErrImagePull,
KubernetesWaitingReasons.ImagePullBackOff,
KubernetesWaitingReasons.CrashLoopBackOff,
KubernetesWaitingReasons.RunContainerError,
]:
debug_info = self.get_pod_debug_info(pod_name, namespace, pod=pod)
raise DagsterK8sError(
f'Failed: Reason="{state.waiting.reason}"'
f' Message="{state.waiting.message}"\n{debug_info}'
)
else:
raise DagsterK8sError(f"Unknown issue: {state.waiting}")
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#containerstateterminated-v1-core
elif state.terminated is not None:
container_name = container_status.name
if state.terminated.exit_code != 0:
tail_lines = int(
os.getenv("DAGSTER_K8S_WAIT_FOR_POD_FAILURE_LOG_LINE_COUNT", "100")
)
raw_logs = self.retrieve_pod_logs(
pod_name, namespace, container_name=container_name, tail_lines=tail_lines
)
message = state.terminated.message
msg = (
f'Container "{container_name}" failed with message: "{message}". '
f'Last {tail_lines} log lines: "{raw_logs}"'
)
self.logger(msg)
error_logs.append(msg)
elif container_name in initcontainers:
self.logger(
f"Init container {container_name} in {pod_name} has exited successfully"
)
else:
self.logger(f"Container {container_name} in {pod_name} has exited successfully")
exited_containers.add(container_name)
if len(all_statuses) != len(exited_containers):
continue
if error_logs:
logs = "\n\n".join(error_logs)
raise DagsterK8sError(
f"Pod {pod_name} terminated but some containers exited with errors:\n{logs}"
)
else:
self.logger(f"Pod {pod_name} exited successfully")
break
else:
raise DagsterK8sError("Should not get here, unknown pod state")
def retrieve_pod_logs(
self,
pod_name: str,
namespace: str,
container_name: Optional[str] = None,
**kwargs,
) -> str:
"""Retrieves the raw pod logs for the pod named `pod_name` from Kubernetes.
Args:
pod_name (str): The name of the pod from which to retrieve logs.
namespace (str): The namespace of the pod.
Returns:
str: The raw logs retrieved from the pod.
"""
check.str_param(pod_name, "pod_name")
check.str_param(namespace, "namespace")
# We set _preload_content to False here to prevent the k8 python api from processing the response.
# If the logs happen to be JSON - it will parse in to a dict and then coerce back to a str leaving
# us with invalid JSON as the quotes have been switched to '
#
# https://github.com/kubernetes-client/python/issues/811
return self.core_api.read_namespaced_pod_log(
name=pod_name,
namespace=namespace,
container=container_name,
_preload_content=False,
**kwargs,
).data.decode("utf-8")
def _get_container_status_str(self, container_status):
state = container_status.state
# https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1ContainerState.md
if state.running:
return "Ready" if container_status.ready else "Running but not ready"
elif state.terminated:
return f"Terminated with exit code {state.terminated.exit_code}: " + (
f"{state.terminated.reason}: {state.terminated.message}"
if state.terminated.message
else f"{state.terminated.reason}"
)
elif state.waiting:
return (
f"Waiting: {state.waiting.reason}: {state.waiting.message}"
if state.waiting.message
else f"Waiting: {state.waiting.reason}"
)
def _get_pod_status_str(self, pod):
if not pod.status:
return "Could not determine pod status."
pod_status = [
f"Pod status: {pod.status.phase}"
+ (f": {pod.status.message}" if pod.status.message else "")
]
if pod.status.container_statuses:
pod_status.extend(
[
f"Container '{status.name}' status: {self._get_container_status_str(status)}"
for status in pod.status.container_statuses
]
)
return "\n".join(pod_status)
def retrieve_pod_events(
self,
pod_name: str,
namespace: str,
) -> list[Any]:
# https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/EventsV1Event.md
field_selector = f"involvedObject.name={pod_name}"
return self.core_api.list_namespaced_event(namespace, field_selector=field_selector).items
def _has_container_logs(self, container_status):
# Logs are availalbe if either the container is running or terminated, or it's waiting
# but previously ran or terminated
if container_status.state:
if container_status.state.running or container_status.state.terminated:
return True
if container_status.last_state:
if container_status.last_state.running or container_status.last_state.terminated:
return True
return False
def _get_job_status_str(self, job):
if not job.status:
return "Could not determine job status."
job_status = (
"Job status:"
+ f"\n - start_time: {job.status.start_time.isoformat()}"
+ f"\n - active={job.status.active or 'None'}"
+ f"\n - succeeded={job.status.succeeded or 'None'}"
+ f"\n - failed={job.status.failed or 'None'}"
)
return job_status
def get_job_debug_info(
self,
job_name: str,
namespace: str,
) -> str:
jobs = self.batch_api.list_namespaced_job(
namespace=namespace, field_selector=f"metadata.name={job_name}"
).items
job = jobs[0] if jobs else None
job_status_str = self._get_job_status_str(job) if job else f"Could not find job {job_name}"
event_strs = []
if job:
events = self.core_api.list_namespaced_event(
namespace=namespace,
field_selector=f"involvedObject.name={job_name}",
).items
for event in events:
event_strs.append(f"{event.reason}: {event.message}")
return (
f"Debug information for job {job_name}:"
+ f"\n\n{job_status_str}"
+ "".join(["\n\n" + event_str for event_str in event_strs])
)
def get_pod_debug_info(
self,
pod_name,
namespace,
pod: Optional[kubernetes.client.V1Pod] = None, # the already fetched pod
include_container_logs: Optional[bool] = True,
) -> str:
if pod is None:
pods = self.core_api.list_namespaced_pod(
namespace=namespace, field_selector=f"metadata.name={pod_name}"
).items
pod = pods[0] if pods else None
pod_status_str = self._get_pod_status_str(pod) if pod else f"Could not find pod {pod_name}"
log_strs = []
specific_warnings = []
container_statuses_by_name = (
{status.name: status for status in pod.status.container_statuses}
if pod and pod.status and pod.status.container_statuses
else {}
)
if include_container_logs:
for container in (
pod.spec.containers if (pod and pod.spec and pod.spec.containers) else []
):
container_name = container.name
log_str = ""
container_status = container_statuses_by_name.get(container_name)
if not container_status or not self._has_container_logs(container_status):
log_str = f"No logs for container '{container_name}'."
else:
try:
pod_logs = self.retrieve_pod_logs(
pod_name,
namespace,
container_name,
tail_lines=25,
timestamps=True,
)
# Remove trailing newline if present
pod_logs = pod_logs[:-1] if pod_logs.endswith("\n") else pod_logs
if "exec format error" in pod_logs:
specific_warnings.append(
f"Logs for container '{container_name}' contained `exec format error`, which usually means that your"
" Docker image was built using the wrong architecture.\nTry rebuilding your"
" docker image with the `--platform linux/amd64` flag set."
)
log_str = (
f"Last 25 log lines for container '{container_name}':\n{pod_logs}"
if pod_logs
else f"No logs for container '{container_name}'."
)
except kubernetes.client.rest.ApiException as e:
log_str = f"Failure fetching pod logs for container '{container_name}': {e}"
log_strs.append(log_str)
if not K8S_EVENTS_API_PRESENT:
warning_str = (
"Could not fetch pod events: the k8s events API is not available in the current"
" version of the Python kubernetes client."
)
else:
try:
pod_events = self.retrieve_pod_events(pod_name, namespace)
warning_events = [event for event in pod_events if event.type == "Warning"]
if not warning_events:
warning_str = "No warning events for pod."
else:
event_strs = []
for event in warning_events:
count_str = (
f" (x{event.count})" if (event.count and event.count > 1) else ""
)
event_strs.append(f"{event.reason}: {event.message}{count_str}")
warning_str = "Warning events for pod:\n" + "\n".join(event_strs)
except kubernetes.client.rest.ApiException as e:
warning_str = f"Failure fetching pod events: {e}"
return (
f"Debug information for pod {pod_name}:"
+ f"\n\n{pod_status_str}"
+ "".join(["\n\n" + specific_warning for specific_warning in specific_warnings])
+ "".join(["\n\n" + log_str for log_str in log_strs])
+ f"\n\n{warning_str}"
)
def create_namespaced_job_with_retries(
self,
body: V1Job,
namespace: str,
wait_time_between_attempts: float = DEFAULT_WAIT_BETWEEN_ATTEMPTS,
) -> None:
k8s_api_retry_creation_mutation(
lambda: self.batch_api.create_namespaced_job(
body=body, namespace=namespace, _request_timeout=DEFAULT_JOB_CREATION_TIMEOUT
),
max_retries=3,
timeout=wait_time_between_attempts,
)
|
DagsterKubernetesClient
|
python
|
apache__airflow
|
airflow-core/src/airflow/models/dagrun.py
|
{
"start": 90158,
"end": 93112
}
|
class ____(Base):
"""For storage of arbitrary notes concerning the dagrun instance."""
__tablename__ = "dag_run_note"
user_id: Mapped[str | None] = mapped_column(String(128), nullable=True)
dag_run_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False)
content: Mapped[str | None] = mapped_column(String(1000).with_variant(Text(1000), "mysql"))
created_at: Mapped[datetime] = mapped_column(UtcDateTime, default=timezone.utcnow, nullable=False)
updated_at: Mapped[datetime] = mapped_column(
UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=False
)
dag_run = relationship("DagRun", back_populates="dag_run_note")
__table_args__ = (
PrimaryKeyConstraint("dag_run_id", name="dag_run_note_pkey"),
ForeignKeyConstraint(
(dag_run_id,),
["dag_run.id"],
name="dag_run_note_dr_fkey",
ondelete="CASCADE",
),
)
def __init__(self, content, user_id=None):
self.content = content
self.user_id = user_id
def __repr__(self):
prefix = f"<{self.__class__.__name__}: {self.dag_id}.{self.dagrun_id} {self.run_id}"
if self.map_index != -1:
prefix += f" map_index={self.map_index}"
return prefix + ">"
def get_or_create_dagrun(
*,
dag: SerializedDAG,
run_id: str,
logical_date: datetime | None,
data_interval: tuple[datetime, datetime] | None,
run_after: datetime,
conf: dict | None,
triggered_by: DagRunTriggeredByType,
triggering_user_name: str | None,
start_date: datetime,
session: Session,
) -> DagRun:
"""
Create a DAG run, replacing an existing instance if needed to prevent collisions.
This function is only meant to be used by :meth:`DAG.test` as a helper function.
:param dag: DAG to be used to find run.
:param conf: Configuration to pass to newly created run.
:param start_date: Start date of new run.
:param logical_date: Logical date for finding an existing run.
:param run_id: Run ID for the new DAG run.
:param triggered_by: the entity which triggers the dag_run
:param triggering_user_name: the user name who triggers the dag_run
:return: The newly created DAG run.
"""
dr = session.scalar(
select(DagRun).where(DagRun.dag_id == dag.dag_id, DagRun.logical_date == logical_date)
)
if dr:
session.delete(dr)
session.commit()
dr = dag.create_dagrun(
run_id=run_id,
logical_date=logical_date,
data_interval=data_interval,
run_after=run_after,
conf=conf,
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
triggered_by=triggered_by,
triggering_user_name=triggering_user_name,
start_date=start_date or logical_date,
session=session,
)
log.info("Created dag run.", dagrun=dr)
return dr
|
DagRunNote
|
python
|
huggingface__transformers
|
tests/models/upernet/test_modeling_upernet.py
|
{
"start": 9408,
"end": 11135
}
|
class ____(unittest.TestCase):
def test_inference_swin_backbone(self):
processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
).to(torch_device)
torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_convnext_backbone(self):
processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
).to(torch_device)
torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
|
UperNetModelIntegrationTest
|
python
|
walkccc__LeetCode
|
solutions/2239. Find Closest Number to Zero/2239.py
|
{
"start": 0,
"end": 132
}
|
class ____:
def findClosestNumber(self, nums: list[int]) -> int:
nums.sort(key=lambda x: (abs(x), -x))
return nums[0]
|
Solution
|
python
|
pytorch__pytorch
|
tools/testing/target_determination/heuristics/profiling.py
|
{
"start": 650,
"end": 1146
}
|
class ____(HeuristicInterface):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
test_ratings = get_ratings_for_tests(
ADDITIONAL_CI_FILES_FOLDER / TD_HEURISTIC_PROFILING_FILE
)
test_ratings = {TestRun(k): v for (k, v) in test_ratings.items() if k in tests}
return TestPrioritizations(tests, normalize_ratings(test_ratings, 0.25))
|
Profiling
|
python
|
mlflow__mlflow
|
tests/gateway/tools.py
|
{
"start": 4514,
"end": 7163
}
|
class ____:
# This test utility class is used to validate the internal functionality of the
# AI Gateway within-process so that the provider endpoints can be mocked,
# allowing a nearly end-to-end validation of the entire AI Gateway stack.
# NB: this implementation should only be used for integration testing. Unit tests that
# require validation of the AI Gateway server should use the `Gateway` implementation in
# this module which executes the uvicorn server through gunicorn as a process manager.
def __init__(self, config_path: str | Path, *args, **kwargs):
self.port = get_safe_port()
self.host = "127.0.0.1"
self.url = f"http://{self.host}:{self.port}"
self.config_path = config_path
self.server = None
self.loop = None
self.thread = None
self.stop_event = threading.Event()
def start_server(self):
uvicorn_app = app.create_app_from_path(self.config_path)
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
config = uvicorn.Config(
app=uvicorn_app,
host=self.host,
port=self.port,
lifespan="on",
loop="auto",
log_level="info",
)
self.server = uvicorn.Server(config)
def run():
self.loop.run_until_complete(self.server.serve())
self.thread = threading.Thread(target=run)
self.thread.start()
def request(self, method: str, path: str, *args: Any, **kwargs: Any) -> requests.Response:
return requests.request(method, f"{self.url}/{path}", *args, **kwargs)
def get(self, path: str, *args: Any, **kwargs: Any) -> requests.Response:
return self.request("GET", path, *args, **kwargs)
def assert_health(self):
assert self.get("health").ok
def post(self, path: str, *args: Any, **kwargs: Any) -> requests.Response:
return self.request("POST", path, *args, **kwargs)
def stop(self):
if self.server is not None:
self.server.should_exit = True # Instruct the uvicorn server to stop
self.stop_event.wait() # Wait for the server to actually stop
self.thread.join() # block until thread termination
self.server = None
self.loop = None
self.thread = None
def __enter__(self):
self.start_server()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Stop the server and the thread
if self.server is not None:
self.server.should_exit = True
self.thread.join()
|
UvicornGateway
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/init_ops_v2_test.py
|
{
"start": 10796,
"end": 13716
}
|
class ____(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testTruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="truncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "truncated_normal",
wraps=random_ops.truncated_normal) as mock_truncated_normal:
x = self.evaluate(init(shape))
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="truncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "truncated_normal",
wraps=random_ops.truncated_normal) as mock_truncated_normal:
x = self.evaluate(init(shape))
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUntruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="untruncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "random_normal",
wraps=random_ops.random_normal) as mock_random_normal:
x = self.evaluate(init(shape))
self.assertTrue(mock_random_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUniformDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="uniform")
with test_util.use_gpu():
x = self.evaluate(init(shape))
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_in_graph_and_eager_modes
def testInitializePartition(self):
partition_shape = (100, 100)
shape = [1000, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops_v2.VarianceScaling(distribution="untruncated_normal")
with test_util.use_gpu(), test.mock.patch.object(
random_ops, "random_normal",
wraps=random_ops.random_normal) as mock_random_normal:
x = self.evaluate(init(shape, partition_shape=partition_shape))
self.assertTrue(mock_random_normal.called)
self.assertEqual(x.shape, partition_shape)
self.assertNear(np.mean(x), expect_mean, err=2e-3)
self.assertNear(np.var(x), expect_var, err=2e-3)
|
VarianceScalingInitializerTest
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_set_row04.py
|
{
"start": 315,
"end": 1233
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("set_row03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_row_pixels(0, 1)
worksheet.set_row_pixels(1, 2)
worksheet.set_row_pixels(2, 3)
worksheet.set_row_pixels(3, 4)
worksheet.set_row_pixels(11, 12)
worksheet.set_row_pixels(12, 13)
worksheet.set_row_pixels(13, 14)
worksheet.set_row_pixels(14, 15)
worksheet.set_row_pixels(18, 19)
worksheet.set_row_pixels(20, 21, None, {"hidden": True})
worksheet.set_row_pixels(21, 22)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
getsentry__sentry
|
tests/sentry/notifications/notification_action/metric_alert_registry/test_opsgenie_metric_alert_handler.py
|
{
"start": 920,
"end": 8035
}
|
class ____(MetricAlertHandlerBase):
def setUp(self) -> None:
self.create_models()
self.action = self.create_action(
type=Action.Type.OPSGENIE,
integration_id=1234567890,
config={"target_identifier": "team123", "target_type": ActionTarget.SPECIFIC},
data={"priority": "P1"},
)
self.handler = OpsgenieMetricAlertHandler()
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.handlers.opsgenie_metric_alert_handler.send_incident_alert_notification"
)
def test_send_alert(self, mock_send_incident_alert_notification: mock.MagicMock) -> None:
notification_context = NotificationContext.from_action_model(self.action)
assert self.group_event.occurrence is not None
assert self.group_event.occurrence.priority is not None
alert_context = AlertContext.from_workflow_engine_models(
self.detector,
self.evidence_data,
self.group_event.group.status,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
metric_issue_context = MetricIssueContext.from_group_event(
self.group,
self.evidence_data,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
open_period_context = OpenPeriodContext.from_group(self.group)
notification_uuid = str(uuid.uuid4())
self.handler.send_alert(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
trigger_status=TriggerStatus.ACTIVE,
project=self.detector.project,
organization=self.detector.project.organization,
notification_uuid=notification_uuid,
)
mock_send_incident_alert_notification.assert_called_once_with(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
organization=self.detector.project.organization,
notification_uuid=notification_uuid,
)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.OpsgenieMetricAlertHandler.send_alert"
)
def test_invoke_legacy_registry(self, mock_send_alert: mock.MagicMock) -> None:
self.handler.invoke_legacy_registry(self.event_data, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
assert isinstance(notification_context, NotificationContext)
assert isinstance(alert_context, AlertContext)
assert isinstance(metric_issue_context, MetricIssueContext)
self.assert_notification_context(
notification_context,
integration_id=1234567890,
target_identifier="team123",
target_display=None,
sentry_app_config={"priority": "P1"},
sentry_app_id=None,
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.ABOVE,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[0]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CRITICAL,
metric_value=123.45,
group=self.group_event.group,
title=self.group_event.group.title,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group_event.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.OpsgenieMetricAlertHandler.send_alert"
)
def test_invoke_legacy_registry_with_activity(self, mock_send_alert: mock.MagicMock) -> None:
# Create an Activity instance with evidence data and priority
activity_data = asdict(self.evidence_data)
activity = Activity(
project=self.project,
group=self.group,
type=ActivityType.SET_RESOLVED.value,
data=activity_data,
)
activity.save()
# Create event data with Activity instead of GroupEvent
event_data_with_activity = WorkflowEventData(
event=activity,
workflow_env=self.workflow.environment,
group=self.group,
)
self.handler.invoke_legacy_registry(event_data_with_activity, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
# Verify that the same data is extracted from Activity.data as from GroupEvent.occurrence.evidence_data
self.assert_notification_context(
notification_context,
integration_id=1234567890,
target_identifier="team123",
target_display=None,
sentry_app_config={"priority": "P1"},
sentry_app_id=None,
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.BELOW,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[2]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CLOSED,
metric_value=123.45,
group=self.group,
title=self.group.title,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
|
TestOpsgenieMetricAlertHandler
|
python
|
falconry__falcon
|
tests/test_app_initializers.py
|
{
"start": 83,
"end": 241
}
|
class ____:
def on_get(self, req, resp):
resp.media = {'foo': 'bar'}
def on_post(self, req, resp):
resp.media = req.media
|
MediaResource
|
python
|
scipy__scipy
|
scipy/special/tests/test_spherical_bessel.py
|
{
"start": 2662,
"end": 4700
}
|
class ____:
def test_spherical_yn_exact(self):
# https://dlmf.nist.gov/10.49.E5
# Note: exact expression is numerically stable only for small
# n or z >> n.
x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
assert_allclose(spherical_yn(2, x),
(1/x - 3/x**3)*cos(x) - 3/x**2*sin(x))
def test_spherical_yn_recurrence_real(self):
# https://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 0.12
assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x),
(2*n + 1)/x*spherical_yn(n, x))
def test_spherical_yn_recurrence_complex(self):
# https://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 1.1 + 1.5j
assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x),
(2*n + 1)/x*spherical_yn(n, x))
def test_spherical_yn_inf_real(self):
# https://dlmf.nist.gov/10.52.E3
n = 6
x = np.array([-inf, inf])
assert_allclose(spherical_yn(n, x), np.array([0, 0]))
def test_spherical_yn_inf_complex(self):
# https://dlmf.nist.gov/10.52.E3
n = 7
x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "invalid value encountered in multiply", RuntimeWarning)
assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)]))
def test_spherical_yn_at_zero(self):
# https://dlmf.nist.gov/10.52.E2
n = np.array([0, 1, 2, 5, 10, 100])
x = 0
assert_allclose(spherical_yn(n, x), np.full(n.shape, -inf))
def test_spherical_yn_at_zero_complex(self):
# Consistently with numpy:
# >>> -np.cos(0)/0
# -inf
# >>> -np.cos(0+0j)/(0+0j)
# (-inf + nan*j)
n = np.array([0, 1, 2, 5, 10, 100])
x = 0 + 0j
assert_allclose(spherical_yn(n, x), np.full(n.shape, nan))
|
TestSphericalYn
|
python
|
langchain-ai__langchain
|
libs/langchain/tests/unit_tests/stubs.py
|
{
"start": 544,
"end": 639
}
|
class ____(AIMessageChunk, _AnyIDMixin):
"""AIMessageChunk with any ID."""
|
_AnyIdAIMessageChunk
|
python
|
tensorflow__tensorflow
|
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
|
{
"start": 32275,
"end": 33967
}
|
class ____(test.TestCase):
def _validateReverseSequence(
self, x, batch_axis, seq_axis, seq_lengths, truth, use_gpu=False
):
with self.cached_session(use_gpu=use_gpu):
ans = array_ops.reverse_sequence(
x, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=seq_lengths
)
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
self.assertShapeEqual(truth, ans)
def _testBasic(self, dtype, len_dtype=np.int64):
x = numpy_compat.np_asarray(
[
[[1, 2, 3, 4], [5, 6, 7, 8]],
[[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]],
],
dtype=dtype,
)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = numpy_compat.np_asarray([3, 0, 4], dtype=len_dtype)
truth_orig = numpy_compat.np_asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]],
], # reverse 0:4 (all)
dtype=dtype,
)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._validateReverseSequence(
x, batch_axis, seq_axis, seq_lengths, truth, use_gpu=True
)
def testFloat(self):
self._testBasic(np.float32, len_dtype=np.int32)
self._testBasic(np.float32, len_dtype=np.int64)
|
ReverseSequenceTest
|
python
|
bokeh__bokeh
|
src/bokeh/document/events.py
|
{
"start": 6948,
"end": 8569
}
|
class ____(DocumentChangedEvent, Serializable):
''' A Base class for events that represent updating Bokeh Models and
their properties.
'''
kind: ClassVar[str]
_handlers: ClassVar[dict[str, type[DocumentPatchedEvent]]] = {}
def __init_subclass__(cls):
cls._handlers[cls.kind] = cls
def dispatch(self, receiver: Any) -> None:
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_patched`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_document_patched'):
cast(DocumentPatchedMixin, receiver)._document_patched(self)
def to_serializable(self, serializer: Serializer) -> DocumentPatched:
''' Create a JSON representation of this event suitable for sending
to clients.
*Sub-classes must implement this method.*
Args:
serializer (Serializer):
'''
raise NotImplementedError()
@staticmethod
def handle_event(doc: Document, event_rep: DocumentPatched, setter: Setter | None) -> None:
'''
'''
event_kind = event_rep.pop("kind")
event_cls = DocumentPatchedEvent._handlers.get(event_kind, None)
if event_cls is None:
raise RuntimeError(f"unknown patch event type '{event_kind!r}'")
event = event_cls(document=doc, setter=setter, **event_rep)
event_cls._handle_event(doc, event)
@staticmethod
def _handle_event(doc: Document, event: DocumentPatchedEvent) -> None:
raise NotImplementedError()
|
DocumentPatchedEvent
|
python
|
Textualize__textual
|
docs/examples/how-to/containers08.py
|
{
"start": 130,
"end": 275
}
|
class ____(Placeholder):
"""Example widget."""
DEFAULT_CSS = """
Box {
width: 16;
height: 5;
}
"""
|
Box
|
python
|
django-import-export__django-import-export
|
tests/core/tests/test_resources/test_natural_foreign_key.py
|
{
"start": 140,
"end": 308
}
|
class ____(resources.ModelResource):
class Meta:
model = Book
fields = ["name", "author"]
use_natural_foreign_keys = True
|
BookUsingNaturalKeys
|
python
|
lazyprogrammer__machine_learning_examples
|
rnn_class/srn_language.py
|
{
"start": 546,
"end": 7700
}
|
class ____:
def __init__(self, D, M, V):
self.D = D # dimensionality of word embedding
self.M = M # hidden layer size
self.V = V # vocabulary size
def fit(self, X, learning_rate=1., mu=0.99, reg=1.0, activation=T.tanh, epochs=500, show_fig=False):
N = len(X)
D = self.D
M = self.M
V = self.V
self.f = activation
# initial weights
We = init_weight(V, D)
Wx = init_weight(D, M)
Wh = init_weight(M, M)
bh = np.zeros(M)
h0 = np.zeros(M)
Wo = init_weight(M, V)
bo = np.zeros(V)
# make them theano shared
self.We = theano.shared(We)
self.Wx = theano.shared(Wx)
self.Wh = theano.shared(Wh)
self.bh = theano.shared(bh)
self.h0 = theano.shared(h0)
self.Wo = theano.shared(Wo)
self.bo = theano.shared(bo)
self.params = [self.We, self.Wx, self.Wh, self.bh, self.h0, self.Wo, self.bo]
thX = T.ivector('X')
Ei = self.We[thX] # will be a TxD matrix
thY = T.ivector('Y')
# sentence input:
# [START, w1, w2, ..., wn]
# sentence target:
# [w1, w2, w3, ..., END]
def recurrence(x_t, h_t1):
# returns h(t), y(t)
h_t = self.f(x_t.dot(self.Wx) + h_t1.dot(self.Wh) + self.bh)
y_t = T.nnet.softmax(h_t.dot(self.Wo) + self.bo)
return h_t, y_t
[h, y], _ = theano.scan(
fn=recurrence,
outputs_info=[self.h0, None],
sequences=Ei,
n_steps=Ei.shape[0],
)
py_x = y[:, 0, :]
prediction = T.argmax(py_x, axis=1)
cost = -T.mean(T.log(py_x[T.arange(thY.shape[0]), thY]))
grads = T.grad(cost, self.params)
dparams = [theano.shared(p.get_value()*0) for p in self.params]
updates = []
for p, dp, g in zip(self.params, dparams, grads):
new_dp = mu*dp - learning_rate*g
updates.append((dp, new_dp))
new_p = p + new_dp
updates.append((p, new_p))
self.predict_op = theano.function(inputs=[thX], outputs=prediction)
self.train_op = theano.function(
inputs=[thX, thY],
outputs=[cost, prediction],
updates=updates
)
costs = []
n_total = sum((len(sentence)+1) for sentence in X)
for i in range(epochs):
X = shuffle(X)
n_correct = 0
cost = 0
for j in range(N):
# problem! many words --> END token are overrepresented
# result: generated lines will be very short
# we will try to fix in a later iteration
# BAD! magic numbers 0 and 1...
input_sequence = [0] + X[j]
output_sequence = X[j] + [1]
# we set 0 to start and 1 to end
c, p = self.train_op(input_sequence, output_sequence)
# print "p:", p
cost += c
# print "j:", j, "c:", c/len(X[j]+1)
for pj, xj in zip(p, output_sequence):
if pj == xj:
n_correct += 1
print("i:", i, "cost:", cost, "correct rate:", (float(n_correct)/n_total))
costs.append(cost)
if show_fig:
plt.plot(costs)
plt.show()
def save(self, filename):
np.savez(filename, *[p.get_value() for p in self.params])
@staticmethod
def load(filename, activation):
# TODO: would prefer to save activation to file too
npz = np.load(filename)
We = npz['arr_0']
Wx = npz['arr_1']
Wh = npz['arr_2']
bh = npz['arr_3']
h0 = npz['arr_4']
Wo = npz['arr_5']
bo = npz['arr_6']
V, D = We.shape
_, M = Wx.shape
rnn = SimpleRNN(D, M, V)
rnn.set(We, Wx, Wh, bh, h0, Wo, bo, activation)
return rnn
def set(self, We, Wx, Wh, bh, h0, Wo, bo, activation):
self.f = activation
# redundant - see how you can improve it
self.We = theano.shared(We)
self.Wx = theano.shared(Wx)
self.Wh = theano.shared(Wh)
self.bh = theano.shared(bh)
self.h0 = theano.shared(h0)
self.Wo = theano.shared(Wo)
self.bo = theano.shared(bo)
self.params = [self.We, self.Wx, self.Wh, self.bh, self.h0, self.Wo, self.bo]
thX = T.ivector('X')
Ei = self.We[thX] # will be a TxD matrix
thY = T.ivector('Y')
def recurrence(x_t, h_t1):
# returns h(t), y(t)
h_t = self.f(x_t.dot(self.Wx) + h_t1.dot(self.Wh) + self.bh)
y_t = T.nnet.softmax(h_t.dot(self.Wo) + self.bo)
return h_t, y_t
[h, y], _ = theano.scan(
fn=recurrence,
outputs_info=[self.h0, None],
sequences=Ei,
n_steps=Ei.shape[0],
)
py_x = y[:, 0, :]
prediction = T.argmax(py_x, axis=1)
self.predict_op = theano.function(
inputs=[thX],
outputs=prediction,
allow_input_downcast=True,
)
def generate(self, pi, word2idx):
# convert word2idx -> idx2word
idx2word = {v:k for k,v in iteritems(word2idx)}
V = len(pi)
# generate 4 lines at a time
n_lines = 0
# why? because using the START symbol will always yield the same first word!
X = [ np.random.choice(V, p=pi) ]
print(idx2word[X[0]], end=" ")
while n_lines < 4:
# print "X:", X
P = self.predict_op(X)[-1]
X += [P]
if P > 1:
# it's a real word, not start/end token
word = idx2word[P]
print(word, end=" ")
elif P == 1:
# end token
n_lines += 1
print('')
if n_lines < 4:
X = [ np.random.choice(V, p=pi) ] # reset to start of line
print(idx2word[X[0]], end=" ")
def train_poetry():
sentences, word2idx = get_robert_frost()
rnn = SimpleRNN(30, 30, len(word2idx))
rnn.fit(sentences, learning_rate=1e-4, show_fig=True, activation=T.nnet.relu, epochs=2000)
rnn.save('RNN_D30_M30_epochs2000_relu.npz')
def generate_poetry():
sentences, word2idx = get_robert_frost()
rnn = SimpleRNN.load('RNN_D30_M30_epochs2000_relu.npz', T.nnet.relu)
# determine initial state distribution for starting sentences
V = len(word2idx)
pi = np.zeros(V)
for sentence in sentences:
pi[sentence[0]] += 1
pi /= pi.sum()
rnn.generate(pi, word2idx)
def wikipedia():
sentences, word2idx = get_wikipedia_data()
print("finished retrieving data")
print("vocab size:", len(word2idx), "number of sentences:", len(sentences))
rnn = SimpleRNN(20, 15, len(word2idx))
rnn.fit(sentences, learning_rate=1e-4, show_fig=True, activation=T.nnet.relu)
if __name__ == '__main__':
train_poetry()
generate_poetry()
# wikipedia()
|
SimpleRNN
|
python
|
huggingface__transformers
|
src/transformers/models/parakeet/modeling_parakeet.py
|
{
"start": 4214,
"end": 4970
}
|
class ____(nn.Module):
def __init__(self, config: ParakeetEncoderConfig):
super().__init__()
self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.attention_bias)
self.activation = ACT2FN[config.hidden_act]
self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.attention_bias)
self.activation_dropout = config.activation_dropout
def forward(self, hidden_states):
hidden_states = self.activation(self.linear1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.linear2(hidden_states)
return hidden_states
|
ParakeetEncoderFeedForward
|
python
|
huggingface__transformers
|
src/transformers/models/cpmant/modeling_cpmant.py
|
{
"start": 10516,
"end": 11379
}
|
class ____(nn.Module):
def __init__(self, config: CpmAntConfig):
super().__init__()
self.layernorm_before_ffn = CpmAntLayerNorm(config)
self.ffn = CpmAntFeedForward(config)
if config.dropout_p:
self.dropout = torch.nn.Dropout(config.dropout_p)
else:
self.dropout = None
def forward(
self,
hidden_states: torch.Tensor,
):
"""
Args:
hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):
Hidden states before feed forward layer.
"""
ln_outputs = self.layernorm_before_ffn(hidden_states)
outputs = self.ffn(ln_outputs)
if self.dropout is not None:
outputs = self.dropout(outputs)
hidden_states = hidden_states + outputs
return hidden_states
|
CpmAntFFNBlock
|
python
|
PyCQA__pylint
|
tests/functional/m/missing/missing_docstring_new_style.py
|
{
"start": 127,
"end": 234
}
|
class ____: # [missing-class-docstring]
pass
# pylint: disable=missing-class-docstring
|
UndocumentedClass
|
python
|
apache__airflow
|
airflow-core/src/airflow/models/serialized_dag.py
|
{
"start": 2398,
"end": 12099
}
|
class ____:
"""Resolver that resolves dag dependencies to include asset id and assets link to asset aliases."""
def __init__(self, dag_id_dependencies: Sequence[tuple[str, dict]], session: Session) -> None:
self.dag_id_dependencies = dag_id_dependencies
self.session = session
self.asset_key_to_id: dict[AssetUniqueKey, int] = {}
self.asset_ref_name_to_asset_id_name: dict[str, tuple[int, str]] = {}
self.asset_ref_uri_to_asset_id_name: dict[str, tuple[int, str]] = {}
self.alias_names_to_asset_ids_names: dict[str, list[tuple[int, str]]] = {}
def resolve(self) -> dict[str, list[DagDependency]]:
asset_names_uris, asset_ref_names, asset_ref_uris, asset_alias_names = self.collect_asset_info()
self.asset_key_to_id = self.collect_asset_key_to_ids(asset_names_uris)
self.asset_ref_name_to_asset_id_name = self.collect_asset_name_ref_to_ids_names(asset_ref_names)
self.asset_ref_uri_to_asset_id_name = self.collect_asset_uri_ref_to_ids_names(asset_ref_uris)
self.alias_names_to_asset_ids_names = self.collect_alias_to_assets(asset_alias_names)
dag_depdendencies_by_dag: dict[str, list[DagDependency]] = {}
for dag_id, deps_data in self.dag_id_dependencies:
dag_deps: list[DagDependency] = []
for dep_data in deps_data or {}:
dep_type = dep_data["dependency_type"]
if dep_type == "asset":
dag_deps.append(self.resolve_asset_dag_dep(dep_data))
elif dep_type == "asset-name-ref":
dag_deps.extend(self.resolve_asset_name_ref_dag_dep(dep_data))
elif dep_type == "asset-uri-ref":
dag_deps.extend(self.resolve_asset_uri_ref_dag_dep(dep_data))
elif dep_type == "asset-alias":
dag_deps.extend(self.resolve_asset_alias_dag_dep(dep_data))
else:
# Replace asset_key with asset id if it's in source or target
for node_key in ("source", "target"):
if dep_data[node_key].startswith("asset:"):
unique_key = AssetUniqueKey.from_str(dep_data[node_key].split(":")[1])
asset_id = self.asset_key_to_id[unique_key]
dep_data[node_key] = f"asset:{asset_id}"
break
dep_id = dep_data["dependency_id"]
dag_deps.append(
DagDependency(
source=dep_data["source"],
target=dep_data["target"],
# handle the case that serialized_dag does not have label column (e.g., from 2.x)
label=dep_data.get("label", dep_id),
dependency_type=dep_data["dependency_type"],
dependency_id=dep_id,
)
)
dag_depdendencies_by_dag[dag_id] = dag_deps
return dag_depdendencies_by_dag
def collect_asset_info(self) -> tuple[set, set, set, set]:
asset_names_uris: set[tuple[str, str]] = set()
asset_ref_names: set[str] = set()
asset_ref_uris: set[str] = set()
asset_alias_names: set[str] = set()
for _, deps_data in self.dag_id_dependencies:
for dep_data in deps_data or {}:
dep_type = dep_data["dependency_type"]
dep_id = dep_data["dependency_id"]
if dep_type == "asset":
unique_key = AssetUniqueKey.from_str(dep_id)
asset_names_uris.add((unique_key.name, unique_key.uri))
elif dep_type == "asset-name-ref":
asset_ref_names.add(dep_id)
elif dep_type == "asset-uri-ref":
asset_ref_uris.add(dep_id)
elif dep_type == "asset-alias":
asset_alias_names.add(dep_id)
return asset_names_uris, asset_ref_names, asset_ref_uris, asset_alias_names
def collect_asset_key_to_ids(self, asset_name_uris: set[tuple[str, str]]) -> dict[AssetUniqueKey, int]:
return {
AssetUniqueKey(name=name, uri=uri): asset_id
for name, uri, asset_id in self.session.execute(
select(AssetModel.name, AssetModel.uri, AssetModel.id).where(
tuple_(AssetModel.name, AssetModel.uri).in_(asset_name_uris)
)
)
}
def collect_asset_name_ref_to_ids_names(self, asset_ref_names: set[str]) -> dict[str, tuple[int, str]]:
return {
name: (asset_id, name)
for name, asset_id in self.session.execute(
select(AssetModel.name, AssetModel.id).where(
AssetModel.name.in_(asset_ref_names), AssetModel.active.has()
)
)
}
def collect_asset_uri_ref_to_ids_names(self, asset_ref_uris: set[str]) -> dict[str, tuple[int, str]]:
return {
uri: (asset_id, name)
for uri, name, asset_id in self.session.execute(
select(AssetModel.uri, AssetModel.name, AssetModel.id).where(
AssetModel.uri.in_(asset_ref_uris), AssetModel.active.has()
)
)
}
def collect_alias_to_assets(self, asset_alias_names: set[str]) -> dict[str, list[tuple[int, str]]]:
return {
aam.name: [(am.id, am.name) for am in aam.assets]
for aam in self.session.scalars(
select(AssetAliasModel).where(AssetAliasModel.name.in_(asset_alias_names))
)
}
def resolve_asset_dag_dep(self, dep_data: dict) -> DagDependency:
dep_id = dep_data["dependency_id"]
unique_key = AssetUniqueKey.from_str(dep_id)
return DagDependency(
source=dep_data["source"],
target=dep_data["target"],
# handle the case that serialized_dag does not have label column (e.g., from 2.x)
label=dep_data.get("label", unique_key.name),
dependency_type=dep_data["dependency_type"],
dependency_id=str(self.asset_key_to_id[unique_key]),
)
def resolve_asset_ref_dag_dep(
self, dep_data: dict, ref_type: Literal["asset-name-ref", "asset-uri-ref"]
) -> Iterator[DagDependency]:
if ref_type == "asset-name-ref":
ref_to_asset_id_name = self.asset_ref_name_to_asset_id_name
elif ref_type == "asset-uri-ref":
ref_to_asset_id_name = self.asset_ref_uri_to_asset_id_name
else:
raise ValueError(
f"ref_type {ref_type} is invalid. It should be either asset-name-ref or asset-uri-ref"
)
dep_id = dep_data["dependency_id"]
is_source_ref = dep_data["source"] == ref_type
if dep_id in ref_to_asset_id_name:
# The asset ref can be resolved into a valid asset
asset_id, asset_name = ref_to_asset_id_name[dep_id]
yield DagDependency(
source="asset" if is_source_ref else dep_data["source"],
target=dep_data["target"] if is_source_ref else "asset",
label=asset_name,
dependency_type="asset",
dependency_id=str(asset_id),
)
else:
yield DagDependency(
source=dep_data["source"],
target=dep_data["target"],
# handle the case that serialized_dag does not have label column (e.g., from 2.x)
label=dep_data.get("label", dep_id),
dependency_type=dep_data["dependency_type"],
dependency_id=dep_id,
)
def resolve_asset_name_ref_dag_dep(self, dep_data: dict) -> Iterator[DagDependency]:
return self.resolve_asset_ref_dag_dep(dep_data=dep_data, ref_type="asset-name-ref")
def resolve_asset_uri_ref_dag_dep(self, dep_data: dict) -> Iterator[DagDependency]:
return self.resolve_asset_ref_dag_dep(dep_data=dep_data, ref_type="asset-uri-ref")
def resolve_asset_alias_dag_dep(self, dep_data: dict) -> Iterator[DagDependency]:
dep_id = dep_data["dependency_id"]
assets = self.alias_names_to_asset_ids_names[dep_id]
if assets:
for asset_id, asset_name in assets:
is_source_alias = dep_data["source"] == "asset-alias"
# asset
yield DagDependency(
source="asset" if is_source_alias else f"asset-alias:{dep_id}",
target=f"asset-alias:{dep_id}" if is_source_alias else "asset",
label=asset_name,
dependency_type="asset",
dependency_id=str(asset_id),
)
# asset alias
yield DagDependency(
source=f"asset:{asset_id}" if is_source_alias else dep_data["source"],
target=dep_data["target"] if is_source_alias else f"asset:{asset_id}",
label=dep_id,
dependency_type="asset-alias",
dependency_id=dep_id,
)
else:
yield DagDependency(
source=dep_data["source"],
target=dep_data["target"],
# handle the case that serialized_dag does not have label column (e.g., from 2.x)
label=dep_data.get("label", dep_id),
dependency_type=dep_data["dependency_type"],
dependency_id=dep_id,
)
|
_DagDependenciesResolver
|
python
|
huggingface__transformers
|
src/transformers/models/big_bird/modeling_big_bird.py
|
{
"start": 57911,
"end": 61871
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config, seed=None):
super().__init__()
self.config = config
self.attention_type = config.attention_type
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BigBirdAttention(config, seed=seed)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BigBirdAttention(config, seed=seed)
self.intermediate = BigBirdIntermediate(config)
self.output = BigBirdOutput(config)
def set_attention_type(self, value: str, layer_idx=None):
if value not in ["original_full", "block_sparse"]:
raise ValueError(
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
)
# attention type is already correctly set
if value == self.attention_type:
return
self.attention_type = value
self.attention.set_attention_type(value, layer_idx=layer_idx)
if self.add_cross_attention:
self.crossattention.set_attention_type(value, layer_idx=layer_idx)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
band_mask=None,
from_mask=None,
to_mask=None,
blocked_encoder_mask=None,
past_key_values=None,
output_attentions=False,
cache_position=None,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
band_mask=band_mask,
from_mask=from_mask,
to_mask=to_mask,
from_blocked_mask=blocked_encoder_mask,
to_blocked_mask=blocked_encoder_mask,
cache_position=cache_position,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
" cross-attention layers by setting `config.add_cross_attention=True`"
)
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return (layer_output,) + outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
BigBirdLayer
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/vml/test_write_idmap.py
|
{
"start": 289,
"end": 741
}
|
class ____(unittest.TestCase):
"""
Test the Vml _write_idmap() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_idmap(self):
"""Test the _write_idmap() method"""
self.vml._write_idmap(1)
exp = """<o:idmap v:ext="edit" data="1"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestWriteOidmap
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 35485,
"end": 35763
}
|
class ____(BaseModel):
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional["ErrorResponseStatus"] = Field(default=None, description="")
result: Optional[Any] = Field(default=None, description="")
|
ErrorResponse
|
python
|
pandas-dev__pandas
|
pandas/io/sas/sasreader.py
|
{
"start": 663,
"end": 5447
}
|
class ____(Iterator["DataFrame"], ABC):
"""
Abstract class for XportReader and SAS7BDATReader.
"""
@abstractmethod
def read(self, nrows: int | None = None) -> DataFrame: ...
@abstractmethod
def close(self) -> None: ...
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
@overload
def read_sas(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
*,
format: str | None = ...,
index: Hashable | None = ...,
encoding: str | None = ...,
chunksize: int = ...,
iterator: bool = ...,
compression: CompressionOptions = ...,
) -> SASReader: ...
@overload
def read_sas(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
*,
format: str | None = ...,
index: Hashable | None = ...,
encoding: str | None = ...,
chunksize: None = ...,
iterator: bool = ...,
compression: CompressionOptions = ...,
) -> DataFrame | SASReader: ...
@set_module("pandas")
@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")
def read_sas(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
*,
format: str | None = None,
index: Hashable | None = None,
encoding: str | None = None,
chunksize: int | None = None,
iterator: bool = False,
compression: CompressionOptions = "infer",
) -> DataFrame | SASReader:
"""
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.sas7bdat``.
format : str {{'xport', 'sas7bdat'}} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
{decompression_options}
Returns
-------
DataFrame, SAS7BDATReader, or XportReader
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader, file format is inferred from file extension.
See Also
--------
read_csv : Read a comma-separated values (csv) file into a pandas DataFrame.
read_excel : Read an Excel file into a pandas DataFrame.
read_spss : Read an SPSS file into a pandas DataFrame.
read_orc : Load an ORC object into a pandas DataFrame.
read_feather : Load a feather-format object into a pandas DataFrame.
Examples
--------
>>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
"""
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
"than a string name, you must specify a format string"
)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
raise ValueError(buffer_error_msg)
fname = filepath_or_buffer.lower()
if ".xpt" in fname:
format = "xport"
elif ".sas7bdat" in fname:
format = "sas7bdat"
else:
raise ValueError(
f"unable to infer format of SAS file from filename: {fname!r}"
)
reader: SASReader
if format.lower() == "xport":
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(
filepath_or_buffer,
index=index,
encoding=encoding,
chunksize=chunksize,
compression=compression,
)
elif format.lower() == "sas7bdat":
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(
filepath_or_buffer,
index=index,
encoding=encoding,
chunksize=chunksize,
compression=compression,
)
else:
raise ValueError("unknown SAS format")
if iterator or chunksize:
return reader
with reader:
return reader.read()
|
SASReader
|
python
|
ethereum__web3.py
|
web3/exceptions.py
|
{
"start": 179,
"end": 728
}
|
class ____(Exception):
"""
Exception mixin inherited by all exceptions of web3.py
This allows::
try:
some_call()
except Web3Exception:
# deal with web3 exception
except:
# deal with other exceptions
"""
user_message: str | None = None
def __init__(
self,
*args: Any,
user_message: str | None = None,
):
super().__init__(*args)
# Assign properties of Web3Exception
self.user_message = user_message
|
Web3Exception
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/datafusion.py
|
{
"start": 21388,
"end": 25557
}
|
class ____(GoogleBaseAsyncHook):
"""Class to get asynchronous hook for DataFusion."""
sync_hook_class = DataFusionHook
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
@staticmethod
def _base_url(instance_url: str, namespace: str) -> str:
return urljoin(f"{instance_url}/", f"v3/namespaces/{quote(namespace)}/apps/")
async def _get_link(self, url: str, session):
# Adding sleep generator to catch 404 in case if pipeline was not retrieved during first attempt.
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
async with Token(scopes=self.scopes) as token:
session_aio = AioSession(session)
headers = {
"Authorization": f"Bearer {await token.get()}",
}
try:
pipeline = await session_aio.get(url=url, headers=headers)
break
except Exception as exc:
if "404" in str(exc):
await asyncio.sleep(time_to_wait)
else:
raise
if pipeline:
return pipeline
raise AirflowException("Could not retrieve pipeline. Aborting.")
async def get_pipeline(
self,
instance_url: str,
namespace: str,
pipeline_name: str,
pipeline_id: str,
session,
pipeline_type: DataFusionPipelineType = DataFusionPipelineType.BATCH,
):
program_type = self.sync_hook_class.cdap_program_type(pipeline_type=pipeline_type)
program_id = self.sync_hook_class.cdap_program_id(pipeline_type=pipeline_type)
base_url_link = self._base_url(instance_url, namespace)
url = urljoin(
base_url_link, f"{quote(pipeline_name)}/{program_type}s/{program_id}/runs/{quote(pipeline_id)}"
)
return await self._get_link(url=url, session=session)
async def get_pipeline_status(
self,
pipeline_name: str,
instance_url: str,
pipeline_id: str,
pipeline_type: DataFusionPipelineType = DataFusionPipelineType.BATCH,
namespace: str = "default",
success_states: list[str] | None = None,
) -> str:
"""
Get a Cloud Data Fusion pipeline status asynchronously.
:param pipeline_name: Your pipeline name.
:param instance_url: Endpoint on which the REST APIs is accessible for the instance.
:param pipeline_id: Unique pipeline ID associated with specific pipeline.
:param pipeline_type: Optional pipeline type (by default batch).
:param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param success_states: If provided the operator will wait for pipeline to be in one of
the provided states.
"""
success_states = success_states or SUCCESS_STATES
async with ClientSession() as session:
try:
pipeline = await self.get_pipeline(
instance_url=instance_url,
namespace=namespace,
pipeline_name=pipeline_name,
pipeline_id=pipeline_id,
pipeline_type=pipeline_type,
session=session,
)
pipeline = await pipeline.json(content_type=None)
current_pipeline_state = pipeline["status"]
if current_pipeline_state in success_states:
pipeline_status = "success"
elif current_pipeline_state in FAILURE_STATES:
pipeline_status = "failed"
else:
pipeline_status = "pending"
except OSError:
pipeline_status = "pending"
except Exception as e:
self.log.info("Retrieving pipeline status finished with errors...")
pipeline_status = str(e)
return pipeline_status
|
DataFusionAsyncHook
|
python
|
numba__numba
|
numba/cuda/tests/cudadrv/test_cuda_driver.py
|
{
"start": 6873,
"end": 7663
}
|
class ____(CUDATestCase):
    def test_device_get_uuid(self):
        # A device UUID has the form:
        #
        #   GPU-e6489c45-5b68-3b03-bab7-0e7c8e809643
        #
        # Build a regular expression matching that shape and check the
        # reported UUID against it. Device UUIDs may not follow the
        # RFC 4122 rules for version/variant bits, so those bits are not
        # extracted or validated here.
        hex_run = '[0-9a-f]{%d}'
        groups = tuple(hex_run % width for width in (8, 4, 4, 4, 12))
        uuid_pattern = '^GPU-%s-%s-%s-%s-%s$' % groups
        dev = devices.get_context().device
        self.assertRegex(dev.uuid, uuid_pattern)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
TestDevice
|
python
|
pytorch__pytorch
|
torch/ao/nn/intrinsic/qat/modules/conv_fused.py
|
{
"start": 17315,
"end": 18484
}
|
class ____(ConvBn1d):
    r"""
    A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU,
    attached with FakeQuantize modules for weight,
    used in quantization aware training.
    We combined the interface of :class:`torch.nn.Conv1d` and
    :class:`torch.nn.BatchNorm1d` and :class:`torch.nn.ReLU`.
    Similar to `torch.nn.Conv1d`, with FakeQuantize modules initialized to
    default.
    Attributes:
        weight_fake_quant: fake quant module for weight
    """
    # base class defines _FLOAT_MODULE as "ConvBn1d"
    _FLOAT_MODULE: ClassVar[type[nn.Module]] = nni.ConvBnReLU1d
    _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
    _FLOAT_BN_MODULE: ClassVar[type[nn.BatchNorm1d]] = nn.BatchNorm1d
    _FLOAT_RELU_MODULE: ClassVar[type[nn.Module] | None] = nn.ReLU
    # module class after fusing bn into conv
    _FUSED_FLOAT_MODULE: ClassVar[type[nn.Module] | None] = nni.ConvReLU1d
    def forward(self, input):
        # Fused conv+bn forward from the base class, with ReLU applied on top.
        return F.relu(self._forward(input))
    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        # Conversion logic lives in ConvBn1d.from_float; the class-level
        # attributes above steer it to the ReLU-fused float modules.
        return super().from_float(mod, use_precomputed_fake_quant)
|
ConvBnReLU1d
|
python
|
encode__django-rest-framework
|
tests/test_serializer.py
|
{
"start": 24968,
"end": 27643
}
|
class ____:
    # Tests for how declared serializer fields behave under inheritance.
    # NOTE: field ordering below is produced by the serializer metaclass and
    # depends on the exact declaration order of the classes.
    def test_declared_field_disabling(self):
        # Setting an inherited declared field to None removes it.
        class Parent(serializers.Serializer):
            f1 = serializers.CharField()
            f2 = serializers.CharField()
        class Child(Parent):
            f1 = None
        class Grandchild(Child):
            pass
        assert len(Parent._declared_fields) == 2
        assert len(Child._declared_fields) == 1
        assert len(Grandchild._declared_fields) == 1
    def test_meta_field_disabling(self):
        # Declaratively setting a field on a child class will *not* prevent
        # the ModelSerializer from generating a default field.
        class MyModel(models.Model):
            f1 = models.CharField(max_length=10)
            f2 = models.CharField(max_length=10)
        class Parent(serializers.ModelSerializer):
            class Meta:
                model = MyModel
                fields = ['f1', 'f2']
        class Child(Parent):
            f1 = None
        class Grandchild(Child):
            pass
        assert len(Parent().get_fields()) == 2
        assert len(Child().get_fields()) == 2
        assert len(Grandchild().get_fields()) == 2
    def test_multiple_inheritance(self):
        # With multiple inheritance, the first base in the MRO wins.
        class A(serializers.Serializer):
            field = serializers.CharField()
        class B(serializers.Serializer):
            field = serializers.IntegerField()
        class TestSerializer(A, B):
            pass
        fields = {
            name: type(f) for name, f
            in TestSerializer()._declared_fields.items()
        }
        assert fields == {
            'field': serializers.CharField,
        }
    def test_field_ordering(self):
        class Base(serializers.Serializer):
            f1 = serializers.CharField()
            f2 = serializers.CharField()
        class A(Base):
            f3 = serializers.IntegerField()
        class B(serializers.Serializer):
            f3 = serializers.CharField()
            f4 = serializers.CharField()
        class TestSerializer(A, B):
            f2 = serializers.IntegerField()
            f5 = serializers.CharField()
        fields = {
            name: type(f) for name, f
            in TestSerializer()._declared_fields.items()
        }
        # `IntegerField`s should be the 'winners' in field name conflicts
        # - `TestSerializer.f2` should override `Base.F2`
        # - `A.f3` should override `B.f3`
        assert fields == {
            'f1': serializers.CharField,
            'f2': serializers.IntegerField,
            'f3': serializers.IntegerField,
            'f4': serializers.CharField,
            'f5': serializers.CharField,
        }
|
TestDeclaredFieldInheritance
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
{
"start": 188778,
"end": 191044
}
|
class ____(test_util.TensorFlowTestCase):
  """Tests for `decode_gif`.

  Fix: removed a leftover debug `print(frame_idx)` that spammed one line
  per decoded frame into the test log on every run.
  """

  def _testValid(self, filename):
    """Decode a testdata GIF and check every frame's pixel pattern.

    The test GIFs are 12 frames of HEIGHTxWIDTH RGB in which a
    STRIDE-pixel-wide white stripe advances by STRIDE per frame; once the
    stripe passes the right edge it wraps around as horizontal rows.
    """
    prefix = "tensorflow/core/lib/gif/testdata/"
    WIDTH = 20
    HEIGHT = 40
    STRIDE = 5
    shape = (12, HEIGHT, WIDTH, 3)
    with self.cached_session():
      gif0 = io_ops.read_file(prefix + filename)
      image0 = image_ops.decode_gif(gif0)
      gif0, image0 = self.evaluate([gif0, image0])
      self.assertEqual(image0.shape, shape)
      for frame_idx, frame in enumerate(image0):
        gt = np.zeros(shape[1:], dtype=np.uint8)
        start = frame_idx * STRIDE
        end = (frame_idx + 1) * STRIDE
        if end <= WIDTH:
          # Stripe still fits horizontally: white columns [start, end).
          gt[:, start:end, :] = 255
        else:
          # Stripe wrapped past the right edge: white rows instead.
          start -= WIDTH
          end -= WIDTH
          gt[start:end, :, :] = 255
        self.assertAllClose(frame, gt)

  def testValid(self):
    self._testValid("scan.gif")
    self._testValid("optimized.gif")

  def testShape(self):
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      with self.cached_session():
        gif = constant_op.constant("nonsense")
        image = image_ops.decode_gif(gif)
        # Frame count and spatial dims are unknown statically; depth is 3.
        self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])

  def testAnimatedGif(self):
    # Test if all frames in the animated GIF file is properly decoded.
    with self.cached_session():
      base = "tensorflow/core/lib/gif/testdata"
      gif = io_ops.read_file(os.path.join(base, "pendulum_sm.gif"))
      gt_frame0 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame0.png"))
      gt_frame1 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame1.png"))
      gt_frame2 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame2.png"))
      image = image_ops.decode_gif(gif)
      frame0 = image_ops.decode_png(gt_frame0)
      frame1 = image_ops.decode_png(gt_frame1)
      frame2 = image_ops.decode_png(gt_frame2)
      image, frame0, frame1, frame2 = self.evaluate([image, frame0, frame1,
                                                     frame2])
      # Compare decoded gif frames with ground-truth data.
      self.assertAllEqual(image[0], frame0)
      self.assertAllEqual(image[1], frame1)
      self.assertAllEqual(image[2], frame2)
|
GifTest
|
python
|
matplotlib__matplotlib
|
galleries/examples/units/basic_units.py
|
{
"start": 1179,
"end": 1436
}
|
class ____:
def __init__(self, fn_name, obj):
self.fn_name = fn_name
self.target = obj.proxy_target
def __call__(self, *args):
fn = getattr(self.target, self.fn_name)
ret = fn(*args)
return ret
|
PassThroughProxy
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py
|
{
"start": 3729,
"end": 3909
}
|
class ____(StrictBaseModel):
    """Schema for updating TaskInstance to a target state, excluding terminal and running states."""

    # The non-terminal, non-running state the task instance should move to.
    state: IntermediateTIState
|
TITargetStatePayload
|
python
|
numba__numba
|
numba/cuda/simulator/cudadrv/devices.py
|
{
"start": 1691,
"end": 2689
}
|
class ____:
    '''
    Stub device list containing a single simulated GPU. It also tracks the
    GPU status (whether the context is closed), which may have been set by
    a user call to reset().
    '''

    def __init__(self):
        self.lst = (FakeCUDAContext(0),)
        self.closed = False

    def __getitem__(self, devnum):
        # Touching a device re-opens the context.
        self.closed = False
        return self.lst[devnum]

    def __str__(self):
        return ', '.join(str(device) for device in self.lst)

    def __iter__(self):
        return iter(self.lst)

    def __len__(self):
        return len(self.lst)

    @property
    def current(self):
        # The single device, or None once the context has been closed.
        return None if self.closed else self.lst[0]
# Module-level singleton mirroring the real driver's device list.
gpus = FakeDeviceList()
def reset():
    # Mark the single fake context as closed, like a real device reset.
    gpus[0].closed = True
def get_context(devnum=0):
    # Return a fresh fake context for the requested device number.
    return FakeCUDAContext(devnum)
def require_context(func):
    '''
    In the simulator, a context is always "available", so this is a no-op.
    '''
    return func
|
FakeDeviceList
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 39151,
"end": 40381
}
|
class ____(BaseModel, extra="forbid"):
    """
    All possible payload filtering conditions
    """

    # `key` names the payload field the condition applies to; the remaining
    # optional attributes describe the check to perform on that field.
    key: str = Field(..., description="Payload key")
    match: Optional["Match"] = Field(default=None, description="Check if point has field with a given value")
    range: Optional["RangeInterface"] = Field(default=None, description="Check if points value lies in a given range")
    geo_bounding_box: Optional["GeoBoundingBox"] = Field(
        default=None, description="Check if points geolocation lies in a given area"
    )
    geo_radius: Optional["GeoRadius"] = Field(default=None, description="Check if geo point is within a given radius")
    geo_polygon: Optional["GeoPolygon"] = Field(
        default=None, description="Check if geo point is within a given polygon"
    )
    values_count: Optional["ValuesCount"] = Field(default=None, description="Check number of values of the field")
    is_empty: Optional[bool] = Field(
        default=None, description="Check that the field is empty, alternative syntax for `is_empty: 'field_name'`"
    )
    is_null: Optional[bool] = Field(
        default=None, description="Check that the field is null, alternative syntax for `is_null: 'field_name'`"
    )
|
FieldCondition
|
python
|
numpy__numpy
|
numpy/polynomial/tests/test_printing.py
|
{
"start": 13592,
"end": 15003
}
|
class ____:
def test_polynomial_repr(self):
res = repr(poly.Polynomial([0, 1]))
tgt = (
"Polynomial([0., 1.], domain=[-1., 1.], window=[-1., 1.], "
"symbol='x')"
)
assert_equal(res, tgt)
def test_chebyshev_repr(self):
res = repr(poly.Chebyshev([0, 1]))
tgt = (
"Chebyshev([0., 1.], domain=[-1., 1.], window=[-1., 1.], "
"symbol='x')"
)
assert_equal(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0, 1]))
tgt = (
"Legendre([0., 1.], domain=[-1., 1.], window=[-1., 1.], "
"symbol='x')"
)
assert_equal(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0, 1]))
tgt = (
"Hermite([0., 1.], domain=[-1., 1.], window=[-1., 1.], "
"symbol='x')"
)
assert_equal(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0, 1]))
tgt = (
"HermiteE([0., 1.], domain=[-1., 1.], window=[-1., 1.], "
"symbol='x')"
)
assert_equal(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0, 1]))
tgt = (
"Laguerre([0., 1.], domain=[0., 1.], window=[0., 1.], "
"symbol='x')"
)
assert_equal(res, tgt)
|
TestRepr
|
python
|
spyder-ide__spyder
|
spyder/plugins/completion/providers/snippets/provider.py
|
{
"start": 989,
"end": 2887
}
|
class ____(SpyderCompletionProvider):
COMPLETION_PROVIDER_NAME = 'snippets'
DEFAULT_ORDER = 3
CONF_DEFAULTS = [(lang, SNIPPETS[lang]) for lang in SNIPPETS]
CONF_VERSION = "0.1.0"
CONF_TABS = [SnippetsConfigTab]
def __init__(self, parent, config):
SpyderCompletionProvider.__init__(self, parent, config)
self.snippets_actor = SnippetsActor(self)
self.snippets_actor.sig_snippets_ready.connect(
self.signal_provider_ready)
self.snippets_actor.sig_snippets_response.connect(
lambda _id, resp: self.sig_response_ready.emit(
self.COMPLETION_PROVIDER_NAME, _id, resp))
self.started = False
self.requests = {}
self.config = config
def get_name(self):
return _('Text snippets')
def start_completion_services_for_language(self, language):
return self.started
def start(self):
if not self.started:
self.snippets_actor.start()
self.started = True
def signal_provider_ready(self):
self.update_snippets(self.config)
self.sig_provider_ready.emit(self.COMPLETION_PROVIDER_NAME)
def shutdown(self):
if self.started:
self.snippets_actor.stop()
self.started = False
def send_request(self, language, req_type, req, req_id=None):
request = {
'type': req_type,
'file': req['file'],
'id': req_id,
'msg': req
}
req['language'] = language
self.snippets_actor.sig_mailbox.emit(request)
@on_conf_change
def update_snippets(self, snippets):
self.config = snippets
snippet_info = {}
for language in SUPPORTED_LANGUAGES_PY:
snippet_info[language] = snippets.get(language, {})
self.snippets_actor.sig_update_snippets.emit(snippet_info)
|
SnippetsProvider
|
python
|
pydantic__pydantic
|
pydantic-core/tests/serializers/test_model.py
|
{
"start": 2105,
"end": 12438
}
|
class ____:
    # Class-level constant; ClassVar keeps it out of the instance fields.
    class_var: ClassVar[int] = 1
    foo: int
    bar: str
    spam: bytes
    # Init-only pseudo-field: accepted by __init__ but not stored on the
    # instance nor serialized.
    frog: dataclasses.InitVar[int]
def test_dataclass():
    # Validate into DataClass via a call schema (spam/frog keyword-only),
    # then serialize via a model schema that omits the InitVar `frog`.
    schema = core_schema.call_schema(
        core_schema.arguments_schema(
            [
                core_schema.arguments_parameter('foo', core_schema.int_schema()),
                core_schema.arguments_parameter('bar', core_schema.str_schema()),
                core_schema.arguments_parameter('spam', core_schema.bytes_schema(), mode='keyword_only'),
                core_schema.arguments_parameter('frog', core_schema.int_schema(), mode='keyword_only'),
            ]
        ),
        DataClass,
        serialization=core_schema.model_ser_schema(
            DataClass,
            core_schema.model_fields_schema(
                {
                    'foo': core_schema.model_field(core_schema.int_schema()),
                    'bar': core_schema.model_field(core_schema.str_schema()),
                    'spam': core_schema.model_field(core_schema.bytes_schema()),
                }
            ),
        ),
    )
    # just check validation works as expected
    v = SchemaValidator(schema)
    dc = v.validate_python({'foo': 1, 'bar': 'bar-str', 'spam': 'bite', 'frog': 123})
    assert dc == DataClass(foo=1, bar='bar-str', spam=b'bite', frog=123)
    dc.class_var = 2
    assert dataclasses.is_dataclass(dc)
    s = SchemaSerializer(schema)
    # `frog` (InitVar) and `class_var` (ClassVar) are excluded from output.
    assert dataclasses.asdict(dc) == IsStrictDict(foo=1, bar='bar-str', spam=b'bite')
    assert s.to_python(dc) == IsStrictDict(foo=1, bar='bar-str', spam=b'bite')
    assert s.to_python(dc, mode='json') == {'foo': 1, 'bar': 'bar-str', 'spam': 'bite'}
    assert json.loads(s.to_json(dc)) == {'foo': 1, 'bar': 'bar-str', 'spam': 'bite'}
def test_model_allow_extra():
    # With extra_behavior='allow', __pydantic_extra__ entries are emitted
    # after the declared fields, preserving insertion order.
    s = SchemaSerializer(
        core_schema.model_schema(
            BasicModel,
            core_schema.model_fields_schema(
                {
                    'foo': core_schema.model_field(core_schema.int_schema()),
                    'bar': core_schema.model_field(core_schema.bytes_schema()),
                },
                extra_behavior='allow',
            ),
            extra_behavior='allow',
        )
    )
    assert s.to_python(BasicModel(foo=1, bar=b'more', __pydantic_extra__={})) == IsStrictDict(foo=1, bar=b'more')
    assert s.to_python(BasicModel(bar=b'more', foo=1, __pydantic_extra__={})) == IsStrictDict(bar=b'more', foo=1)
    assert s.to_python(BasicModel(foo=1, __pydantic_extra__=dict(c=3), bar=b'more')) == IsStrictDict(
        foo=1, bar=b'more', c=3
    )
    assert s.to_python(BasicModel(bar=b'more', __pydantic_extra__=dict(c=3, foo=1)), mode='json') == IsStrictDict(
        bar='more', c=3, foo=1
    )
    j = s.to_json(BasicModel(bar=b'more', foo=1, __pydantic_extra__=dict(c=3)))
    # PyPy's dict iteration order in to_json is not byte-stable; compare
    # parsed JSON there instead of raw bytes.
    if on_pypy:
        assert j == IsJson({'bar': 'more', 'foo': 1, 'c': 3})
    else:
        assert j == b'{"bar":"more","foo":1,"c":3}'
def test_model_recursive_in_extra():
    # See https://github.com/pydantic/pydantic/issues/6571
    class Model(BasicModel):
        __slots__ = '__pydantic_extra__'
    s = SchemaSerializer(
        core_schema.model_schema(
            Model, core_schema.model_fields_schema({}, extra_behavior='allow'), extra_behavior='allow'
        )
    )
    # The serializer must handle a Model nested inside its own extras.
    Model.__pydantic_serializer__ = s
    assert s.to_json(Model(__pydantic_extra__=dict(other=Model(__pydantic_extra__={})))) == b'{"other":{}}'
# Each case: serialize BasicModel(a=0, b=1, c=2, d=3) with the given
# include/exclude arguments and compare against the expected dict.
@pytest.mark.parametrize(
    'params',
    [
        dict(include=None, exclude=None, expected={'a': 0, 'b': 1, 'c': 2, 'd': 3}),
        dict(include={'a', 'b'}, exclude=None, expected={'a': 0, 'b': 1}),
        dict(include={'a': ..., 'b': ...}, exclude=None, expected={'a': 0, 'b': 1}),
        dict(include={'a': {1}, 'b': {1}}, exclude=None, expected={'a': 0, 'b': 1}),
        dict(include=None, exclude={'a', 'b'}, expected={'c': 2, 'd': 3}),
        dict(include=None, exclude={'a': ..., 'b': ...}, expected={'c': 2, 'd': 3}),
        dict(include={'a', 'b'}, exclude={'b', 'c'}, expected={'a': 0}),
        dict(include=None, exclude={'d': {1}}, expected={'a': 0, 'b': 1, 'c': 2, 'd': 3}),
        dict(include={'a', 'b'}, exclude={'d': {1}}, expected={'a': 0, 'b': 1}),
        dict(include={'a', 'b'}, exclude={'b': {1}}, expected={'a': 0, 'b': 1}),
        dict(include={'a', 'b'}, exclude={'b': ...}, expected={'a': 0}),
        dict(include=None, exclude={'__all__'}, expected={}),
    ],
)
def test_include_exclude_args(params):
    s = SchemaSerializer(
        core_schema.model_schema(
            BasicModel,
            core_schema.model_fields_schema(
                {
                    'a': core_schema.model_field(core_schema.int_schema()),
                    'b': core_schema.model_field(core_schema.int_schema()),
                    'c': core_schema.model_field(core_schema.int_schema()),
                    'd': core_schema.model_field(core_schema.int_schema()),
                }
            ),
        )
    )
    # use IsStrictDict to check dict order
    include, exclude, expected = params['include'], params['exclude'], IsStrictDict(params['expected'])
    value = BasicModel(a=0, b=1, c=2, d=3)
    assert s.to_python(value, include=include, exclude=exclude) == expected
    assert s.to_python(value, mode='json', include=include, exclude=exclude) == expected
    assert json.loads(s.to_json(value, include=include, exclude=exclude)) == expected
def test_exclude_if():
    # serialization_exclude_if drops a field when its predicate is truthy;
    # an unconditional serialization_exclude wins over the predicate (`c`).
    s = SchemaSerializer(
        core_schema.model_schema(
            BasicModel,
            core_schema.model_fields_schema(
                {
                    'a': core_schema.model_field(core_schema.int_schema(), serialization_exclude_if=lambda x: x > 1),
                    'b': core_schema.model_field(
                        core_schema.str_schema(), serialization_exclude_if=lambda x: 'foo' in x
                    ),
                    'c': core_schema.model_field(
                        core_schema.str_schema(),
                        serialization_exclude=True,
                        serialization_exclude_if=lambda x: 'foo' in x,
                    ),
                }
            ),
        )
    )
    assert s.to_python(BasicModel(a=0, b='bar', c='bar')) == {'a': 0, 'b': 'bar'}
    assert s.to_python(BasicModel(a=2, b='bar', c='bar')) == {'b': 'bar'}
    assert s.to_python(BasicModel(a=0, b='foo', c='bar')) == {'a': 0}
    assert s.to_python(BasicModel(a=2, b='foo', c='bar')) == {}
    assert s.to_json(BasicModel(a=0, b='bar', c='bar')) == b'{"a":0,"b":"bar"}'
    assert s.to_json(BasicModel(a=2, b='bar', c='bar')) == b'{"b":"bar"}'
    assert s.to_json(BasicModel(a=0, b='foo', c='bar')) == b'{"a":0}'
    assert s.to_json(BasicModel(a=2, b='foo', c='bar')) == b'{}'
def test_alias():
    # serialization_alias renames keys only when by_alias=True is passed;
    # fields without an alias keep their own name.
    s = SchemaSerializer(
        core_schema.model_schema(
            BasicModel,
            core_schema.model_fields_schema(
                {
                    'cat': core_schema.model_field(core_schema.int_schema(), serialization_alias='Meow'),
                    'dog': core_schema.model_field(core_schema.int_schema(), serialization_alias='Woof'),
                    'bird': core_schema.model_field(core_schema.int_schema()),
                }
            ),
        )
    )
    value = BasicModel(cat=0, dog=1, bird=2)
    assert s.to_python(value, by_alias=True) == IsStrictDict(Meow=0, Woof=1, bird=2)
def test_model_wrong_warn():
    # Serializing something that is not an instance of the schema's model
    # falls back to best-effort serialization and emits a UserWarning.
    s = SchemaSerializer(
        core_schema.model_schema(
            type('MyModel', (), {}),
            core_schema.model_fields_schema(
                {
                    'foo': core_schema.model_field(core_schema.int_schema()),
                    'bar': core_schema.model_field(core_schema.bytes_schema()),
                }
            ),
        )
    )
    # None passes through silently (no warning).
    assert s.to_python(None) is None
    assert s.to_python(None, mode='json') is None
    assert s.to_json(None) == b'null'
    with pytest.warns(
        UserWarning,
        match=r'Expected `MyModel` - serialized value may not be as expected \[input_value=123, input_type=int\]',
    ):
        assert s.to_python(123) == 123
    with pytest.warns(
        UserWarning,
        match=r'Expected `MyModel` - serialized value may not be as expected \[input_value=123, input_type=int\]',
    ):
        assert s.to_python(123, mode='json') == 123
    with pytest.warns(
        UserWarning,
        match=r'Expected `MyModel` - serialized value may not be as expected \[input_value=123, input_type=int\]',
    ):
        assert s.to_json(123) == b'123'
    with pytest.warns(
        UserWarning,
        match=r"Expected `MyModel` - serialized value may not be as expected \[input_value={'foo': 1, 'bar': b'more'}, input_type=dict\]",
    ):
        assert s.to_python({'foo': 1, 'bar': b'more'}) == {'foo': 1, 'bar': b'more'}
    with pytest.warns(
        UserWarning,
        match=r"Expected `int` - serialized value may not be as expected \[field_name='foo', input_value='lorem', input_type=str\]",
    ):
        assert s.to_python(BasicModel(foo='lorem')) == {'foo': 'lorem'}
def test_exclude_none():
    # exclude_none=True drops fields whose value is None, in both python
    # and json modes.
    s = SchemaSerializer(
        core_schema.model_schema(
            BasicModel,
            core_schema.model_fields_schema(
                {
                    'foo': core_schema.model_field(core_schema.nullable_schema(core_schema.int_schema())),
                    'bar': core_schema.model_field(core_schema.bytes_schema()),
                },
                extra_behavior='ignore',  # this is the default
            ),
        )
    )
    assert s.to_python(BasicModel(foo=1, bar=b'more')) == {'foo': 1, 'bar': b'more'}
    assert s.to_python(BasicModel(foo=None, bar=b'more')) == {'foo': None, 'bar': b'more'}
    assert s.to_python(BasicModel(foo=None, bar=b'more'), exclude_none=True) == {'bar': b'more'}
    assert s.to_python(BasicModel(foo=None, bar=b'more'), mode='json') == {'foo': None, 'bar': 'more'}
    assert s.to_python(BasicModel(foo=None, bar=b'more'), mode='json', exclude_none=True) == {'bar': 'more'}
    assert s.to_json(BasicModel(foo=1, bar=b'more')) == b'{"foo":1,"bar":"more"}'
    assert s.to_json(BasicModel(foo=None, bar=b'more')) == b'{"foo":null,"bar":"more"}'
    assert s.to_json(BasicModel(foo=None, bar=b'more'), exclude_none=True) == b'{"bar":"more"}'
|
DataClass
|
python
|
django__django
|
django/db/models/fetch_modes.py
|
{
"start": 235,
"end": 439
}
|
class ____(FetchMode):
    # No instance state: all instances are interchangeable.
    __slots__ = ()
    def fetch(self, fetcher, instance):
        # Fetch the value for this single instance only (no batching).
        fetcher.fetch_one(instance)
    def __reduce__(self):
        # Pickle to the module-level name so unpickling yields the
        # FETCH_ONE singleton rather than a new instance.
        return "FETCH_ONE"
# Module-level singleton instance of this fetch mode.
FETCH_ONE = FetchOne()
|
FetchOne
|
python
|
django__django
|
django/contrib/humanize/apps.py
|
{
"start": 91,
"end": 194
}
|
class ____(AppConfig):
    # App configuration for django.contrib.humanize; the verbose name is
    # wrapped for translation.
    name = "django.contrib.humanize"
    verbose_name = _("Humanize")
|
HumanizeConfig
|
python
|
getsentry__sentry
|
tests/snuba/api/serializers/test_group.py
|
{
"start": 20650,
"end": 22271
}
|
class ____(
    APITestCase,
    SnubaTestCase,
    SearchIssueTestMixin,
):
    def test_profiling_seen_stats(self) -> None:
        # Store 6 events (users 0-5) for one profiling issue group, then
        # check the serialized user count, first/last seen and event count.
        proj = self.create_project()
        environment = self.create_environment(project=proj)
        first_group_fingerprint = f"{ProfileFileIOGroupType.type_id}-group1"
        timestamp = (timezone.now() - timedelta(days=5)).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        times = 5
        for incr in range(0, times):
            # for user_0 - user_4, first_group
            self.store_search_issue(
                proj.id,
                incr,
                [first_group_fingerprint],
                environment.name,
                timestamp + timedelta(minutes=incr),
            )
        # user_5: a sixth event for the same group (same fingerprint).
        event, issue_occurrence, group_info = self.store_search_issue(
            proj.id,
            5,
            [first_group_fingerprint],
            environment.name,
            timestamp + timedelta(minutes=5),
        )
        assert group_info is not None
        first_group = group_info.group
        result = serialize(
            first_group,
            serializer=GroupSerializerSnuba(
                environment_ids=[environment.id],
                start=timestamp - timedelta(days=1),
                end=timestamp + timedelta(days=1),
            ),
        )
        # 6 distinct users, last event at +5 minutes, 6 events in total.
        assert result["userCount"] == 6
        assert result["lastSeen"] == (timestamp + timedelta(minutes=5))
        assert result["firstSeen"] == timestamp
        assert result["count"] == str(times + 1)
|
ProfilingGroupSerializerSnubaTest
|
python
|
spyder-ide__spyder
|
external-deps/spyder-remote-services/spyder_remote_services/app.py
|
{
"start": 2233,
"end": 3186
}
|
class ____(JupyterApp):
    # Sub-application that prints the running Spyder server's connection
    # info file as JSON on stdout, cleaning up stale files as a side effect.
    description: str = "Show information about the currently running Spyder server."
    def start(self):
        """Start the server list application."""
        runtime_dir = Path(jupyter_runtime_dir())
        # The runtime dir might not exist
        if not runtime_dir.is_dir():
            return
        conf_file = runtime_dir / SpyderServerApp.spyder_server_info_file
        if not conf_file.exists():
            return
        with conf_file.open(mode="rb") as f:
            info = json.load(f)
        # Simple check whether that process is really still running
        # Also remove leftover files from IPython 2.x without a pid field
        if ("pid" in info) and check_pid(info["pid"]):
            print(json.dumps(info, indent=None))
        else:
            # If the process has died, try to delete its info file
            with suppress(OSError):
                conf_file.unlink()
|
SpyderServerInfoApp
|
python
|
sympy__sympy
|
sympy/printing/lambdarepr.py
|
{
"start": 7451,
"end": 8307
}
|
class ____(MpmathPrinter, LambdaPrinter):
    """Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
    def _print_Integer(self, expr):
        # super(PythonCodePrinter, self) starts the lookup *past*
        # PythonCodePrinter in the MRO, yielding the plain numeric text,
        # which is then wrapped in an mpmath interval constructor.
        return "mpi('%s')" % super(PythonCodePrinter, self)._print_Integer(expr)
    def _print_Rational(self, expr):
        return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr)
    def _print_Half(self, expr):
        # Half (1/2) is routed through the Rational printer on purpose.
        return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr)
    def _print_Pow(self, expr):
        # rational=True keeps exponents exact (e.g. sqrt as a power of 1/2).
        return super(MpmathPrinter, self)._print_Pow(expr, rational=True)
# Install a _print_<name> method on NumExprPrinter for every function
# numexpr supports, all dispatching through _print_Function.
for k in NumExprPrinter._numexpr_functions:
    setattr(NumExprPrinter, '_print_%s' % k, NumExprPrinter._print_Function)
def lambdarepr(expr, **settings):
    """
    Returns a string usable for lambdifying.
    """
    return LambdaPrinter(settings).doprint(expr)
|
IntervalPrinter
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol46.py
|
{
"start": 337,
"end": 504
}
|
class ____(Protocol[T_contra, T]):
    # Structural protocol generic over a contravariant input type variable
    # (T_contra) and a second variable (T) used only by the classmethod.
    def method1(self, value: T_contra) -> "ProtoA[T_contra, T]": ...
    @classmethod
    def method2(cls, value: T) -> T: ...
|
ProtoA
|
python
|
Pylons__pyramid
|
src/pyramid/predicates.py
|
{
"start": 561,
"end": 1071
}
|
class ____:
def __init__(self, val, config):
request_method = as_sorted_tuple(val)
if 'GET' in request_method and 'HEAD' not in request_method:
# GET implies HEAD too
request_method = as_sorted_tuple(request_method + ('HEAD',))
self.val = request_method
def text(self):
return 'request_method = %s' % (','.join(self.val))
phash = text
def __call__(self, context, request):
return request.method in self.val
|
RequestMethodPredicate
|
python
|
TheAlgorithms__Python
|
data_structures/heap/binomial_heap.py
|
{
"start": 1268,
"end": 12247
}
|
class ____:
r"""
Min-oriented priority queue implemented with the Binomial Heap data
structure implemented with the BinomialHeap class. It supports:
- Insert element in a heap with n elements: Guaranteed logn, amoratized 1
- Merge (meld) heaps of size m and n: O(logn + logm)
- Delete Min: O(logn)
- Peek (return min without deleting it): O(1)
Example:
Create a random permutation of 30 integers to be inserted and 19 of them deleted
>>> import numpy as np
>>> permutation = np.random.permutation(list(range(30)))
Create a Heap and insert the 30 integers
__init__() test
>>> first_heap = BinomialHeap()
30 inserts - insert() test
>>> for number in permutation:
... first_heap.insert(number)
Size test
>>> first_heap.size
30
Deleting - delete() test
>>> [int(first_heap.delete_min()) for _ in range(20)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
Create a new Heap
>>> second_heap = BinomialHeap()
>>> vals = [17, 20, 31, 34]
>>> for value in vals:
... second_heap.insert(value)
The heap should have the following structure:
17
/ \
# 31
/ \
20 34
/ \ / \
# # # #
preOrder() test
>>> " ".join(str(x) for x in second_heap.pre_order())
"(17, 0) ('#', 1) (31, 1) (20, 2) ('#', 3) ('#', 3) (34, 2) ('#', 3) ('#', 3)"
printing Heap - __str__() test
>>> print(second_heap)
17
-#
-31
--20
---#
---#
--34
---#
---#
mergeHeaps() test
>>>
>>> merged = second_heap.merge_heaps(first_heap)
>>> merged.peek()
17
values in merged heap; (merge is inplace)
>>> results = []
>>> while not first_heap.is_empty():
... results.append(int(first_heap.delete_min()))
>>> results
[17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34]
"""
def __init__(self, bottom_root=None, min_node=None, heap_size=0):
self.size = heap_size
self.bottom_root = bottom_root
self.min_node = min_node
def merge_heaps(self, other):
"""
In-place merge of two binomial heaps.
Both of them become the resulting merged heap
"""
# Empty heaps corner cases
if other.size == 0:
return None
if self.size == 0:
self.size = other.size
self.bottom_root = other.bottom_root
self.min_node = other.min_node
return None
# Update size
self.size = self.size + other.size
# Update min.node
if self.min_node.val > other.min_node.val:
self.min_node = other.min_node
# Merge
# Order roots by left_subtree_size
combined_roots_list = []
i, j = self.bottom_root, other.bottom_root
while i or j:
if i and ((not j) or i.left_tree_size < j.left_tree_size):
combined_roots_list.append((i, True))
i = i.parent
else:
combined_roots_list.append((j, False))
j = j.parent
# Insert links between them
for i in range(len(combined_roots_list) - 1):
if combined_roots_list[i][1] != combined_roots_list[i + 1][1]:
combined_roots_list[i][0].parent = combined_roots_list[i + 1][0]
combined_roots_list[i + 1][0].left = combined_roots_list[i][0]
# Consecutively merge roots with same left_tree_size
i = combined_roots_list[0][0]
while i.parent:
if (
(i.left_tree_size == i.parent.left_tree_size) and (not i.parent.parent)
) or (
i.left_tree_size == i.parent.left_tree_size
and i.left_tree_size != i.parent.parent.left_tree_size
):
# Neighbouring Nodes
previous_node = i.left
next_node = i.parent.parent
# Merging trees
i = i.merge_trees(i.parent)
# Updating links
i.left = previous_node
i.parent = next_node
if previous_node:
previous_node.parent = i
if next_node:
next_node.left = i
else:
i = i.parent
# Updating self.bottom_root
while i.left:
i = i.left
self.bottom_root = i
# Update other
other.size = self.size
other.bottom_root = self.bottom_root
other.min_node = self.min_node
# Return the merged heap
return self
def insert(self, val):
"""
insert a value in the heap
"""
if self.size == 0:
self.bottom_root = Node(val)
self.size = 1
self.min_node = self.bottom_root
else:
# Create new node
new_node = Node(val)
# Update size
self.size += 1
# update min_node
if val < self.min_node.val:
self.min_node = new_node
# Put new_node as a bottom_root in heap
self.bottom_root.left = new_node
new_node.parent = self.bottom_root
self.bottom_root = new_node
# Consecutively merge roots with same left_tree_size
while (
self.bottom_root.parent
and self.bottom_root.left_tree_size
== self.bottom_root.parent.left_tree_size
):
# Next node
next_node = self.bottom_root.parent.parent
# Merge
self.bottom_root = self.bottom_root.merge_trees(self.bottom_root.parent)
# Update Links
self.bottom_root.parent = next_node
self.bottom_root.left = None
if next_node:
next_node.left = self.bottom_root
def peek(self):
"""
return min element without deleting it
"""
return self.min_node.val
def is_empty(self):
return self.size == 0
def delete_min(self):
"""
delete min element and return it
"""
# assert not self.isEmpty(), "Empty Heap"
# Save minimal value
min_value = self.min_node.val
# Last element in heap corner case
if self.size == 1:
# Update size
self.size = 0
# Update bottom root
self.bottom_root = None
# Update min_node
self.min_node = None
return min_value
# No right subtree corner case
# The structure of the tree implies that this should be the bottom root
# and there is at least one other root
if self.min_node.right is None:
# Update size
self.size -= 1
# Update bottom root
self.bottom_root = self.bottom_root.parent
self.bottom_root.left = None
# Update min_node
self.min_node = self.bottom_root
i = self.bottom_root.parent
while i:
if i.val < self.min_node.val:
self.min_node = i
i = i.parent
return min_value
# General case
# Find the BinomialHeap of the right subtree of min_node
bottom_of_new = self.min_node.right
bottom_of_new.parent = None
min_of_new = bottom_of_new
size_of_new = 1
# Size, min_node and bottom_root
while bottom_of_new.left:
size_of_new = size_of_new * 2 + 1
bottom_of_new = bottom_of_new.left
if bottom_of_new.val < min_of_new.val:
min_of_new = bottom_of_new
# Corner case of single root on top left path
if (not self.min_node.left) and (not self.min_node.parent):
self.size = size_of_new
self.bottom_root = bottom_of_new
self.min_node = min_of_new
# print("Single root, multiple nodes case")
return min_value
# Remaining cases
# Construct heap of right subtree
new_heap = BinomialHeap(
bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new
)
# Update size
self.size = self.size - 1 - size_of_new
# Neighbour nodes
previous_node = self.min_node.left
next_node = self.min_node.parent
# Initialize new bottom_root and min_node
self.min_node = previous_node or next_node
self.bottom_root = next_node
# Update links of previous_node and search below for new min_node and
# bottom_root
if previous_node:
previous_node.parent = next_node
# Update bottom_root and search for min_node below
self.bottom_root = previous_node
self.min_node = previous_node
while self.bottom_root.left:
self.bottom_root = self.bottom_root.left
if self.bottom_root.val < self.min_node.val:
self.min_node = self.bottom_root
if next_node:
next_node.left = previous_node
# Search for new min_node above min_node
i = next_node
while i:
if i.val < self.min_node.val:
self.min_node = i
i = i.parent
# Merge heaps
self.merge_heaps(new_heap)
return int(min_value)
def pre_order(self):
"""
Returns the Pre-order representation of the heap including
values of nodes plus their level distance from the root;
Empty nodes appear as #
"""
# Find top root
top_root = self.bottom_root
while top_root.parent:
top_root = top_root.parent
# preorder
heap_pre_order = []
self.__traversal(top_root, heap_pre_order)
return heap_pre_order
def __traversal(self, curr_node, preorder, level=0):
    """
    Pre-order traversal helper: append (value, level) pairs to
    ``preorder``, recording "#" for empty child slots.
    """
    # Guard clause: an empty slot is recorded as a "#" marker.
    if not curr_node:
        preorder.append(("#", level))
        return
    preorder.append((curr_node.val, level))
    self.__traversal(curr_node.left, preorder, level + 1)
    self.__traversal(curr_node.right, preorder, level + 1)
def __str__(self):
    """
    Render the heap as one line per pre-order node, indented with
    dashes by level. Poor performance; intended for small examples.
    Returns "" for an empty heap.
    """
    if self.is_empty():
        return ""
    rendered = ("-" * level + str(value) for value, level in self.pre_order())
    return "\n".join(rendered)
# Unit Tests
# When executed as a script (not imported), run any doctests embedded in
# this module's docstrings.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
BinomialHeap
|
python
|
doocs__leetcode
|
solution/0900-0999/0989.Add to Array-Form of Integer/Solution.py
|
{
"start": 0,
"end": 295
}
|
class ____:
def addToArrayForm(self, num: List[int], k: int) -> List[int]:
ans = []
i = len(num) - 1
while i >= 0 or k:
k += 0 if i < 0 else num[i]
k, x = divmod(k, 10)
ans.append(x)
i -= 1
return ans[::-1]
|
Solution
|
python
|
pytorch__pytorch
|
torch/ao/nn/quantized/modules/activation.py
|
{
"start": 9365,
"end": 11891
}
|
class ____(torch.nn.Module):
r"""This is the quantized equivalent of :class:`~torch.nn.PReLU`.
Args:
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
num_parameters: number of parameters: 1, or the number of channels at input. Default: 1
"""
def __init__(
self, output_scale: float, output_zero_point: int, num_parameters: int = 1
) -> None:
super().__init__()
self.num_parameters = num_parameters
self.scale = output_scale
self.zero_point = output_zero_point
w = torch.randn(num_parameters, dtype=torch.float)
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.quint8)
self.set_weight(qw)
def set_weight(self, w: torch.Tensor) -> None:
self.weight = w
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.prelu(
input, self.weight, self.scale, self.zero_point
)
def _get_name(self):
return "QuantizedPReLU"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
scale, zero_point = mod.activation_post_process.calculate_qparams()
qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
float_wt = mod.weight.float()
observer = mod.qconfig.weight()
observer(float_wt)
if observer.dtype != torch.quint8:
warn(
f"PReLU's weight observer should have dtype quint8 but got {observer.dtype}",
stacklevel=2,
)
wt_scale, wt_zp = observer.calculate_qparams()
qweight = torch.quantize_per_tensor(
float_wt, float(wt_scale), int(wt_zp), torch.quint8
)
qprelu.set_weight(qweight)
return qprelu
@classmethod
def from_reference(cls, mod, scale, zero_point):
qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
float_wt = mod.weight.float()
observer = mod.qconfig.weight()
observer(float_wt)
if observer.dtype != torch.quint8:
warn(
f"PReLU's weight observer should have dtype quint8 but got {observer.dtype}",
stacklevel=2,
)
wt_scale, wt_zp = observer.calculate_qparams()
qweight = torch.quantize_per_tensor(
float_wt, float(wt_scale), int(wt_zp), torch.quint8
)
qprelu.set_weight(qweight)
return qprelu
|
PReLU
|
python
|
kamyu104__LeetCode-Solutions
|
Python/max-consecutive-ones-ii.py
|
{
"start": 29,
"end": 442
}
|
class ____(object):
def findMaxConsecutiveOnes(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result, prev, curr = 0, 0, 0
for n in nums:
if n == 0:
result = max(result, prev+curr+1)
prev, curr = curr, 0
else:
curr += 1
return min(max(result, prev+curr+1), len(nums))
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/3093. Longest Common Suffix Queries/3093.py
|
{
"start": 0,
"end": 155
}
|
class ____:
def __init__(self):
self.children: dict[str, TrieNode] = {}
self.isWord = False
self.length = math.inf
self.index = -1
|
TrieNode
|
python
|
donnemartin__system-design-primer
|
solutions/object_oriented_design/online_chat/online_chat.py
|
{
"start": 1788,
"end": 1904
}
|
class ____(Chat):
    def add_user(self, user):
        """Intentionally a no-op in this design sketch."""

    def remove_user(self, user):
        """Intentionally a no-op in this design sketch."""
|
GroupChat
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/cli_tests/workspace_tests/pending_repo/pending_repo.py
|
{
"start": 214,
"end": 927
}
|
class ____(CacheableAssetsDefinition):
    """Cacheable assets definition whose single output asset key is the
    definition's ``unique_id``."""

    def compute_cacheable_data(self):
        # One cacheable entry: no inputs, one output keyed by unique_id.
        entry = AssetsDefinitionCacheableData(
            keys_by_input_name={},
            keys_by_output_name={"result": dg.AssetKey(self.unique_id)},
        )
        return [entry]

    def build_definitions(self, data):
        @dg.op
        def my_op():
            return 1

        # Materialize one AssetsDefinition per cached entry.
        definitions = []
        for entry in data:
            definitions.append(
                AssetsDefinition.from_op(
                    my_op,
                    keys_by_input_name=entry.keys_by_input_name,
                    keys_by_output_name=entry.keys_by_output_name,
                )
            )
        return definitions
@dg.repository
def pending_repo():
    """Repository containing a single cacheable assets definition."""
    definition = MyCacheableAssetsDefinition("abc")
    return [definition]
|
MyCacheableAssetsDefinition
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.