language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | marshmallow-code__apispec | tests/schemas.py | {
"start": 872,
"end": 1041
} | class ____(Schema):
count = fields.Int(dump_only=True, metadata={"x-count": 1})
count2 = fields.Int(dump_only=True, metadata={"x_count2": 2})
| PatternedObjectSchema |
python | ray-project__ray | python/ray/tune/analysis/experiment_analysis.py | {
"start": 1032,
"end": 27759
} | class ____:
"""Analyze results from a Ray Train/Tune experiment.
To use this class, the run must store the history of reported metrics
in log files (e.g., `result.json` and `progress.csv`).
This is the default behavior, unless default loggers are explicitly excluded
with the `TUNE_DISABLE_AUTO_CALLBACK_LOGGERS=1` environment variable.
Parameters:
experiment_checkpoint_path: Path to an `experiment_state.json` file,
or a directory that contains an `experiment_state.json` file.
default_metric: Default metric for comparing results. Can be
overwritten with the ``metric`` parameter in the respective
functions.
default_mode: Default mode for comparing results. Has to be one
of [min, max]. Can be overwritten with the ``mode`` parameter
in the respective functions.
trials: List of trials that can be accessed via `analysis.trials`.
"""
def __init__(
self,
experiment_checkpoint_path: Union[str, os.PathLike],
*,
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
trials: Optional[List[Trial]] = None,
default_metric: Optional[str] = None,
default_mode: Optional[str] = None,
):
self.default_metric = default_metric
if default_mode and default_mode not in ["min", "max"]:
raise ValueError("`default_mode` has to be None or one of [min, max]")
self.default_mode = default_mode
if self.default_metric is None and self.default_mode is not None:
# If only a mode was passed, use anonymous metric
self.default_metric = DEFAULT_METRIC
# Resolve the filesystem if not specified.
if storage_filesystem:
self._fs = storage_filesystem
else:
self._fs, experiment_checkpoint_path = get_fs_and_path(
experiment_checkpoint_path
)
# Find the json state file.
experiment_checkpoint_path = str(experiment_checkpoint_path)
if experiment_checkpoint_path.endswith(".json"):
self._experiment_fs_path = os.path.dirname(experiment_checkpoint_path)
self._experiment_json_fs_path = experiment_checkpoint_path
else:
self._experiment_fs_path = experiment_checkpoint_path
experiment_json_fs_path = _find_newest_experiment_checkpoint(
experiment_path=self._experiment_fs_path, fs=self._fs
)
if experiment_json_fs_path is None:
pattern = TuneController.CKPT_FILE_TMPL.format("*")
raise ValueError(
f"No experiment snapshot file of form '{pattern}' was found at: "
f"({self._fs.type_name}, {self._experiment_fs_path})\n"
"Please check if you specified the correct experiment path, "
"which should be a combination of the `storage_path` and `name` "
"specified in your run."
)
self._experiment_json_fs_path = experiment_json_fs_path
self.trials = trials or self._load_trials()
self._trial_dataframes = self._fetch_trial_dataframes()
self._configs = self.get_all_configs()
def _load_trials(self) -> List[Trial]:
with self._fs.open_input_stream(self._experiment_json_fs_path) as f:
experiment_state = json.loads(f.readall(), cls=TuneFunctionDecoder)
experiment_fs_path = Path(self._experiment_fs_path)
trials = []
trial_states = experiment_state["trial_data"]
for trial_json_state, trial_runtime_metadata in trial_states:
trial = Trial.from_json_state(trial_json_state, stub=True)
trial.restore_run_metadata(trial_runtime_metadata)
new_storage = copy.copy(trial.storage)
new_storage.storage_fs_path = experiment_fs_path.parent.as_posix()
new_storage.storage_filesystem = self._fs
new_storage.experiment_dir_name = experiment_fs_path.name
trial.set_storage(new_storage)
trials.append(trial)
return trials
def _fetch_trial_dataframe(self, trial: Trial) -> DataFrame:
force_dtype = {"trial_id": str} # Never convert trial_id to float.
# If there were no reported results, there will be no files into a DataFrame
if trial.last_result is None:
return DataFrame()
json_fs_path = Path(trial.storage.trial_fs_path, EXPR_RESULT_FILE).as_posix()
csv_fs_path = Path(trial.storage.trial_fs_path, EXPR_PROGRESS_FILE).as_posix()
# Prefer reading the JSON if it exists.
if _exists_at_fs_path(trial.storage.storage_filesystem, json_fs_path):
with trial.storage.storage_filesystem.open_input_stream(json_fs_path) as f:
content = f.readall().decode("utf-8").rstrip("\n")
if not content:
return DataFrame()
json_list = [json.loads(row) for row in content.split("\n")]
df = pd.json_normalize(json_list, sep="/")
# Fallback to reading the CSV.
elif _exists_at_fs_path(trial.storage.storage_filesystem, csv_fs_path):
with trial.storage.storage_filesystem.open_input_stream(csv_fs_path) as f:
csv_str = f.readall().decode("utf-8")
df = pd.read_csv(io.StringIO(csv_str), dtype=force_dtype)
else:
raise FileNotFoundError(
f"Could not fetch metrics for {trial}: both {EXPR_RESULT_FILE} and "
f"{EXPR_PROGRESS_FILE} were not found at {trial.storage.trial_fs_path}"
)
return df
def _fetch_trial_dataframes(self) -> Dict[str, DataFrame]:
"""Fetches trial dataframes from files.
Returns:
A dictionary mapping trial_id -> pd.DataFrame
"""
failures = []
trial_dfs = {}
for trial in self.trials:
try:
trial_dfs[trial.trial_id] = self._fetch_trial_dataframe(trial)
except Exception as e:
failures.append((trial, e))
trial_dfs[trial.trial_id] = DataFrame()
continue
if failures:
fail_str = "\n".join(
[f"- {trial}: {repr(error)}" for trial, error in failures]
)
logger.warning(
f"Failed to fetch metrics for {len(failures)} trial(s):\n{fail_str}"
)
return trial_dfs
def get_all_configs(self, prefix: bool = False) -> Dict[str, Dict]:
"""Returns all trial hyperparameter configurations.
Args:
prefix: If True, flattens the config dict
and prepends `config/`.
Returns:
Dict[str, Dict]: Mapping trial_id -> config dict
"""
return {
trial.trial_id: (
flatten_dict({CONFIG_PREFIX: trial.config}) if prefix else trial.config
)
for trial in self.trials
}
@property
def experiment_path(self) -> str:
"""Path pointing to the experiment directory on persistent storage.
This can point to a remote storage location (e.g. S3) or to a local
location (path on the head node)."""
return self._experiment_fs_path
@property
def best_trial(self) -> Trial:
"""Get the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_trial(metric, mode, scope)` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_trial`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use the "
"`get_best_trial(metric, mode)` method to set the metric "
"and mode explicitly."
)
return self.get_best_trial(self.default_metric, self.default_mode)
@property
def best_config(self) -> Dict:
"""Get the config of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_config(metric, mode, scope)` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_config`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use the "
"`get_best_config(metric, mode)` method to set the metric "
"and mode explicitly."
)
return self.get_best_config(self.default_metric, self.default_mode)
@property
def best_checkpoint(self) -> Checkpoint:
"""Get the checkpoint path of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_checkpoint(trial, metric, mode)` instead.
Returns:
:class:`Checkpoint <ray.tune.Checkpoint>` object.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_checkpoint`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use the "
"`get_best_checkpoint(trial, metric, mode)` method to set the "
"metric and mode explicitly."
)
best_trial = self.best_trial
if not best_trial:
raise ValueError(
f"No best trial found. Please check if you specified the "
f"correct default metric ({self.default_metric}) and mode "
f"({self.default_mode})."
)
return self.get_best_checkpoint(
best_trial, self.default_metric, self.default_mode
)
@property
def best_dataframe(self) -> DataFrame:
"""Get the full result dataframe of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_trial(metric, mode)` and use it to look for the dataframe
in the `self.trial_dataframes` dict.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_result`, pass a `metric` and `mode` "
"parameter to `tune.run()`."
)
return self.trial_dataframes[self.best_trial.trial_id]
@property
def best_result(self) -> Dict:
"""Get the last result of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_trial(metric, mode, scope).last_result` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_result`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use "
"`get_best_trial(metric, mode).last_result` to set "
"the metric and mode explicitly and fetch the last result."
)
return self.best_trial.last_result
def _delimiter(self):
return os.environ.get("TUNE_RESULT_DELIM", "/")
@property
def best_result_df(self) -> DataFrame:
"""Get the best result of the experiment as a pandas dataframe.
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_trial(metric, mode, scope).last_result` instead.
"""
if not pd:
raise ValueError(
"`best_result_df` requires pandas. Install with "
"`pip install pandas`."
)
best_result = flatten_dict(self.best_result, delimiter=self._delimiter())
return pd.DataFrame.from_records([best_result], index="trial_id")
@property
def results(self) -> Dict[str, Dict]:
"""Get the last result of the all trials of the experiment"""
return {trial.trial_id: trial.last_result for trial in self.trials}
@property
def results_df(self) -> DataFrame:
"""Get all the last results as a pandas dataframe."""
if not pd:
raise ValueError(
"`results_df` requires pandas. Install with `pip install pandas`."
)
return pd.DataFrame.from_records(
[
flatten_dict(trial.last_result, delimiter=self._delimiter())
for trial in self.trials
],
index="trial_id",
)
@property
def trial_dataframes(self) -> Dict[str, DataFrame]:
"""List of all dataframes of the trials.
Each dataframe is indexed by iterations and contains reported
metrics.
"""
return self._trial_dataframes
def dataframe(
self, metric: Optional[str] = None, mode: Optional[str] = None
) -> DataFrame:
"""Returns a pandas.DataFrame object constructed from the trials.
This function will look through all observed results of each trial
and return the one corresponding to the passed ``metric`` and
``mode``: If ``mode=min``, it returns the result with the lowest
*ever* observed ``metric`` for this trial (this is not necessarily
the last)! For ``mode=max``, it's the highest, respectively. If
``metric=None`` or ``mode=None``, the last result will be returned.
Args:
metric: Key for trial info to order on. If None, uses last result.
mode: One of [None, "min", "max"].
Returns:
pd.DataFrame: Constructed from a result dict of each trial.
"""
# Do not validate metric/mode here or set from default metric/mode!
# Otherwise we will get confusing results as the lowest ever observed
# result may not be the last result.
if mode and mode not in ["min", "max"]:
raise ValueError("If set, `mode` has to be one of [min, max]")
if mode and not metric:
raise ValueError(
"If a `mode` is passed to `ExperimentAnalysis.dataframe(),"
" you'll also have to pass a `metric`!"
)
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs(prefix=True)
for path, config in all_configs.items():
if path in rows:
rows[path].update(config)
rows[path].update(logdir=path)
return pd.DataFrame(list(rows.values()))
def _get_trial_checkpoints_with_metric(
self, trial: Trial, metric: Optional[str] = None
) -> List[Tuple[Checkpoint, Number]]:
"""Get all checkpoints and a specified metric of a trial.
Args:
trial: The log directory of a trial, or a trial instance.
metric: key for trial info to return, e.g. "mean_accuracy".
"training_iteration" is used by default if no value was
passed to ``self.default_metric``.
Returns:
List of [Checkpoint, metric] for all checkpoints of the trial.
"""
metric = metric or self.default_metric or TRAINING_ITERATION
best_checkpoint_results = (
trial.run_metadata.checkpoint_manager.best_checkpoint_results
)
best_checkpoints = [
(checkpoint_result.checkpoint, checkpoint_result.metrics)
for checkpoint_result in best_checkpoint_results
]
# Support nested metrics given as flattened strings, e.g.
# "info/learner/default_policy/policy_loss".
return [
(checkpoint, unflattened_lookup(metric, metrics))
for checkpoint, metrics in best_checkpoints
]
def get_best_checkpoint(
self,
trial: Trial,
metric: Optional[str] = None,
mode: Optional[str] = None,
) -> Optional[Checkpoint]:
"""Gets best persistent checkpoint path of provided trial.
Any checkpoints with an associated metric value of ``nan`` will be filtered out.
Args:
trial: The log directory of a trial, or a trial instance.
metric: key of trial info to return, e.g. "mean_accuracy".
"training_iteration" is used by default if no value was
passed to ``self.default_metric``.
mode: One of [min, max]. Defaults to ``self.default_mode``.
Returns:
A :class:`Checkpoint <ray.tune.Checkpoint>` object
"""
metric = metric or self.default_metric or TRAINING_ITERATION
mode = self._validate_mode(mode)
checkpoints_and_metrics = self._get_trial_checkpoints_with_metric(trial, metric)
# Filter out nan. Sorting nan values leads to undefined behavior.
checkpoints_and_metrics = list(
filter(lambda x: not is_nan(x[1]), checkpoints_and_metrics)
)
if not checkpoints_and_metrics:
logger.error(f"No checkpoints have been found for trial {trial}.")
return None
score_order_factor = -1 if mode == "min" else 1
best_checkpoint, _ = max(
checkpoints_and_metrics, key=lambda x: score_order_factor * x[1]
)
return best_checkpoint
def get_best_trial(
self,
metric: Optional[str] = None,
mode: Optional[str] = None,
scope: str = "last",
filter_nan_and_inf: bool = True,
) -> Optional[Trial]:
"""Retrieve the best trial object.
Compares all trials' scores on ``metric``.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If `mode` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.
Args:
metric: Key for trial info to order on. Defaults to
``self.default_metric``.
mode: One of [min, max]. Defaults to ``self.default_mode``.
scope: One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
If `scope=avg`, consider the simple average over all steps
for `metric` and compare across trials based on
`mode=[min,max]`. If `scope=last-5-avg` or `scope=last-10-avg`,
consider the simple average over the last 5 or 10 steps for
`metric` and compare across trials based on `mode=[min,max]`.
If `scope=all`, find each trial's min/max score for `metric`
based on `mode`, and compare trials based on `mode=[min,max]`.
filter_nan_and_inf: If True (default), NaN or infinite
values are disregarded and these trials are never selected as
the best trial.
Returns:
The best trial for the provided metric. If no trials contain the provided
metric, or if the value for the metric is NaN for all trials,
then returns None.
"""
if len(self.trials) == 1:
return self.trials[0]
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)
if scope not in ["all", "last", "avg", "last-5-avg", "last-10-avg"]:
raise ValueError(
"ExperimentAnalysis: attempting to get best trial for "
'metric {} for scope {} not in ["all", "last", "avg", '
'"last-5-avg", "last-10-avg"]. '
"If you didn't pass a `metric` parameter to `tune.run()`, "
"you have to pass one when fetching the best trial.".format(
metric, scope
)
)
best_trial = None
best_metric_score = None
for trial in self.trials:
if metric not in trial.metric_analysis:
continue
if scope in ["last", "avg", "last-5-avg", "last-10-avg"]:
metric_score = trial.metric_analysis[metric][scope]
else:
metric_score = trial.metric_analysis[metric][mode]
if filter_nan_and_inf and is_nan_or_inf(metric_score):
continue
if best_metric_score is None:
best_metric_score = metric_score
best_trial = trial
continue
if (mode == "max") and (best_metric_score < metric_score):
best_metric_score = metric_score
best_trial = trial
elif (mode == "min") and (best_metric_score > metric_score):
best_metric_score = metric_score
best_trial = trial
if not best_trial:
logger.warning(
"Could not find best trial. Did you pass the correct `metric` "
"parameter?"
)
return best_trial
def get_best_config(
self,
metric: Optional[str] = None,
mode: Optional[str] = None,
scope: str = "last",
) -> Optional[Dict]:
"""Retrieve the best config corresponding to the trial.
Compares all trials' scores on `metric`.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If `mode` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.
Args:
metric: Key for trial info to order on. Defaults to
``self.default_metric``.
mode: One of [min, max]. Defaults to ``self.default_mode``.
scope: One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
If `scope=avg`, consider the simple average over all steps
for `metric` and compare across trials based on
`mode=[min,max]`. If `scope=last-5-avg` or `scope=last-10-avg`,
consider the simple average over the last 5 or 10 steps for
`metric` and compare across trials based on `mode=[min,max]`.
If `scope=all`, find each trial's min/max score for `metric`
based on `mode`, and compare trials based on `mode=[min,max]`.
"""
best_trial = self.get_best_trial(metric, mode, scope)
return best_trial.config if best_trial else None
def get_last_checkpoint(
self, trial=None, metric="training_iteration", mode="max"
) -> Optional[Checkpoint]:
"""Gets the last checkpoint of the provided trial,
i.e., with the highest "training_iteration".
If no trial is specified, it loads the best trial according to the
provided metric and mode (defaults to max. training iteration).
Args:
trial: If None, load the best trial automatically.
metric: If no trial is specified, use this metric to identify
the best trial and load the last checkpoint from this trial.
mode: If no trial is specified, use the metric and this mode
to identify the best trial and load the last checkpoint from it.
Returns:
Path for last checkpoint of trial
"""
trial = trial or self.get_best_trial(metric, mode)
return self.get_best_checkpoint(trial, TRAINING_ITERATION, "max")
def _validate_metric(self, metric: str) -> str:
if not metric and not self.default_metric:
raise ValueError(
"No `metric` has been passed and `default_metric` has "
"not been set. Please specify the `metric` parameter."
)
return metric or self.default_metric
def _validate_mode(self, mode: str) -> str:
if not mode and not self.default_mode:
raise ValueError(
"No `mode` has been passed and `default_mode` has "
"not been set. Please specify the `mode` parameter."
)
if mode and mode not in ["min", "max"]:
raise ValueError("If set, `mode` has to be one of [min, max]")
return mode or self.default_mode
def _retrieve_rows(
self, metric: Optional[str] = None, mode: Optional[str] = None
) -> Dict[str, Any]:
assert mode is None or mode in ["max", "min"]
assert not mode or metric
rows = {}
for path, df in self.trial_dataframes.items():
if df.empty:
continue
if metric not in df:
idx = -1
elif mode == "max":
idx = df[metric].idxmax()
elif mode == "min":
idx = df[metric].idxmin()
else:
idx = -1
try:
rows[path] = df.iloc[idx].to_dict()
except TypeError:
# idx is nan
logger.warning(
"Warning: Non-numerical value(s) encountered for {}".format(path)
)
return rows
def __getstate__(self) -> Dict[str, Any]:
"""Ensure that trials are marked as stubs when pickling,
so that they can be loaded later without the trainable
being registered.
"""
state = self.__dict__.copy()
def make_stub_if_needed(trial: Trial) -> Trial:
if trial.stub:
return trial
trial_copy = Trial(trial.trainable_name, stub=True)
trial_copy.__setstate__(trial.__getstate__())
return trial_copy
state["trials"] = [make_stub_if_needed(t) for t in state["trials"]]
return state
| ExperimentAnalysis |
python | catalyst-team__catalyst | catalyst/metrics/_metric.py | {
"start": 1938,
"end": 3056
} | class ____(IMetric):
"""Interface for all batch-based Metrics."""
def __init__(
self, compute_on_call: bool = True, prefix: str = None, suffix: str = None
):
"""Init"""
super().__init__(compute_on_call=compute_on_call)
self.prefix = prefix or ""
self.suffix = suffix or ""
@abstractmethod
def update_key_value(self, *args, **kwargs) -> Dict[str, float]:
"""Updates the metric based with new input.
By default, this is called at the end of each loader
(`on_loader_end` event).
Args:
*args: some args
**kwargs: some kwargs
Returns:
Dict: computed value in key-value format. # noqa: DAR202
"""
pass
@abstractmethod
def compute_key_value(self) -> Dict[str, float]:
"""Computes the metric based on it's accumulated state.
By default, this is called at the end of each loader
(`on_loader_end` event).
Returns:
Dict: computed value in key-value format. # noqa: DAR202
"""
pass
| ICallbackBatchMetric |
python | huggingface__transformers | src/transformers/models/glm/modeling_glm.py | {
"start": 22179,
"end": 22421
} | class ____(GenericForTokenClassification, GlmPreTrainedModel):
pass
__all__ = [
"GlmPreTrainedModel",
"GlmModel",
"GlmForCausalLM",
"GlmForSequenceClassification",
"GlmForTokenClassification",
]
| GlmForTokenClassification |
python | facebook__pyre-check | client/tests/coverage_data_tests.py | {
"start": 37663,
"end": 41757
} | class ____(testslide.TestCase):
def assert_counts(
self,
source: str,
default_strict: bool,
mode: ModuleMode,
explicit_comment_line: Optional[int],
is_generated: bool = False,
is_test: bool = False,
path: Optional[str] = None,
) -> None:
source_module = parse_code(source)
if path is None:
path = "/a/b/c.py"
result = coverage_data.collect_mode(source_module, default_strict, Path(path))
self.assertEqual(mode, result.mode)
self.assertEqual(explicit_comment_line, result.explicit_comment_line)
self.assertEqual(is_generated, result.is_generated)
self.assertEqual(is_test, result.is_test)
def test_strict_files(self) -> None:
generated_string = "generated"
self.assert_counts(
"""
# pyre-unsafe
def foo():
return 1
""",
default_strict=True,
mode=ModuleMode.UNSAFE,
explicit_comment_line=2,
)
self.assert_counts(
"""
# pyre-strict
def foo():
return 1
""",
default_strict=False,
mode=ModuleMode.STRICT,
explicit_comment_line=2,
)
self.assert_counts(
"""
def foo():
return 1
""",
default_strict=False,
mode=ModuleMode.UNSAFE,
explicit_comment_line=None,
)
self.assert_counts(
"""
def foo():
return 1
""",
default_strict=True,
mode=ModuleMode.STRICT,
explicit_comment_line=None,
)
self.assert_counts(
"""
# pyre-ignore-all-errors
def foo():
return 1
""",
default_strict=True,
mode=ModuleMode.IGNORE_ALL,
explicit_comment_line=2,
)
self.assert_counts(
"""
def foo(x: str) -> int:
return x
""",
default_strict=False,
mode=ModuleMode.UNSAFE,
explicit_comment_line=None,
)
self.assert_counts(
"""
# pyre-strict
def foo(x: str) -> int:
return x
""",
default_strict=False,
mode=ModuleMode.STRICT,
explicit_comment_line=2,
)
self.assert_counts(
"""
# pyre-ignore-all-errors[56]
def foo(x: str) -> int:
return x
""",
default_strict=True,
mode=ModuleMode.STRICT,
explicit_comment_line=None,
)
self.assert_counts(
f"""
# @{generated_string}
def foo(x: str) -> int:
return x
""",
default_strict=True,
mode=ModuleMode.STRICT,
explicit_comment_line=None,
is_generated=True,
)
self.assert_counts(
"""
def foo(x: str) -> int:
return x
""",
default_strict=True,
mode=ModuleMode.STRICT,
explicit_comment_line=None,
is_test=True,
path="path/tests/example_test.py",
)
self.assert_counts(
"""
def foo(x: str) -> int:
return x
""",
default_strict=True,
mode=ModuleMode.STRICT,
explicit_comment_line=None,
is_test=True,
path="path/tests/example_tests.py",
)
self.assert_counts(
"""
def foo(x: str) -> int:
return x
""",
default_strict=True,
mode=ModuleMode.STRICT,
explicit_comment_line=None,
is_test=False,
path="path/test_example.py",
)
| ModuleModecollectorTest |
python | ansible__ansible | test/units/_internal/templating/test_templar.py | {
"start": 7553,
"end": 11487
} | class ____(BaseTemplar, unittest.TestCase):
def test_templar_simple(self):
templar = self.templar
# test some basic templating
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{foo}}")), "bar")
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{foo}}\n")), "bar\n")
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{foo}}\n"), options=TemplateOptions(preserve_trailing_newlines=True)), "bar\n")
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{foo}}\n"), options=TemplateOptions(preserve_trailing_newlines=False)), "bar")
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{bam}}")), "bar")
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{num}}")), 1)
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{var_true}}")), True)
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{var_false}}")), False)
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{var_dict}}")), dict(a="b"))
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{bad_dict}}")), "{a='b'")
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{var_list}}")), [1])
# force errors
self.assertRaises(AnsibleUndefinedVariable, templar.template, TrustedAsTemplate().tag("{{bad_var}}"))
self.assertRaises(AnsibleUndefinedVariable, templar.template, TrustedAsTemplate().tag("{{lookup('file', bad_var)}}"))
self.assertRaises(AnsibleError, templar.template, TrustedAsTemplate().tag("{{lookup('bad_lookup')}}"))
self.assertRaises(AnsibleError, templar.template, TrustedAsTemplate().tag("{{recursive}}"))
self.assertRaises(AnsibleUndefinedVariable, templar.template, TrustedAsTemplate().tag("{{foo-bar}}"))
result = templar.extend(marker_behavior=ReplacingMarkerBehavior()).template(TrustedAsTemplate().tag("{{bad_var}}"))
assert "<< error 1 - 'bad_var' is undefined >>" in result
# test setting available_variables
templar.available_variables = dict(foo="bam")
self.assertEqual(templar.template(TrustedAsTemplate().tag("{{foo}}")), "bam")
def test_templar_escape_backslashes(self):
# Rule of thumb: If escape backslashes is True you should end up with
# the same number of backslashes as when you started.
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\t{{foo}}"), options=TemplateOptions(escape_backslashes=True)), "\tbar")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\t{{foo}}"), options=TemplateOptions(escape_backslashes=False)), "\tbar")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo}}"), options=TemplateOptions(escape_backslashes=True)), "\\bar")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo}}"), options=TemplateOptions(escape_backslashes=False)), "\\bar")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo + '\t' }}"), options=TemplateOptions(escape_backslashes=True)), "\\bar\t")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo + '\t' }}"), options=TemplateOptions(escape_backslashes=False)), "\\bar\t")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo + '\\t' }}"), options=TemplateOptions(escape_backslashes=True)), "\\bar\\t")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo + '\\t' }}"), options=TemplateOptions(escape_backslashes=False)), "\\bar\t")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo + '\\\\t' }}"), options=TemplateOptions(escape_backslashes=True)), "\\bar\\\\t")
self.assertEqual(self.templar.template(TrustedAsTemplate().tag("\\{{foo + '\\\\t' }}"), options=TemplateOptions(escape_backslashes=False)), "\\bar\\t")
| TestTemplarMisc |
python | tiangolo__fastapi | tests/test_pydantic_v1_v2_01.py | {
"start": 375,
"end": 17792
} | class ____(BaseModel):
title: str
size: int
description: Union[str, None] = None
sub: SubItem
multi: List[SubItem] = []
app = FastAPI()
@app.post("/simple-model")
def handle_simple_model(data: SubItem) -> SubItem:
return data
@app.post("/simple-model-filter", response_model=SubItem)
def handle_simple_model_filter(data: SubItem) -> Any:
extended_data = data.dict()
extended_data.update({"secret_price": 42})
return extended_data
@app.post("/item")
def handle_item(data: Item) -> Item:
return data
@app.post("/item-filter", response_model=Item)
def handle_item_filter(data: Item) -> Any:
extended_data = data.dict()
extended_data.update({"secret_data": "classified", "internal_id": 12345})
extended_data["sub"].update({"internal_id": 67890})
return extended_data
client = TestClient(app)
def test_old_simple_model():
response = client.post(
"/simple-model",
json={"name": "Foo"},
)
assert response.status_code == 200, response.text
assert response.json() == {"name": "Foo"}
def test_old_simple_model_validation_error():
response = client.post(
"/simple-model",
json={"wrong_name": "Foo"},
)
assert response.status_code == 422, response.text
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["body", "name"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
)
def test_old_simple_model_filter():
response = client.post(
"/simple-model-filter",
json={"name": "Foo"},
)
assert response.status_code == 200, response.text
assert response.json() == {"name": "Foo"}
def test_item_model():
response = client.post(
"/item",
json={
"title": "Test Item",
"size": 100,
"description": "This is a test item",
"sub": {"name": "SubItem1"},
"multi": [{"name": "Multi1"}, {"name": "Multi2"}],
},
)
assert response.status_code == 200, response.text
assert response.json() == {
"title": "Test Item",
"size": 100,
"description": "This is a test item",
"sub": {"name": "SubItem1"},
"multi": [{"name": "Multi1"}, {"name": "Multi2"}],
}
def test_item_model_minimal():
response = client.post(
"/item",
json={"title": "Minimal Item", "size": 50, "sub": {"name": "SubMin"}},
)
assert response.status_code == 200, response.text
assert response.json() == {
"title": "Minimal Item",
"size": 50,
"description": None,
"sub": {"name": "SubMin"},
"multi": [],
}
def test_item_model_validation_errors():
response = client.post(
"/item",
json={"title": "Missing fields"},
)
assert response.status_code == 422, response.text
error_detail = response.json()["detail"]
assert len(error_detail) == 2
assert {
"loc": ["body", "size"],
"msg": "field required",
"type": "value_error.missing",
} in error_detail
assert {
"loc": ["body", "sub"],
"msg": "field required",
"type": "value_error.missing",
} in error_detail
def test_item_model_nested_validation_error():
response = client.post(
"/item",
json={"title": "Test Item", "size": 100, "sub": {"wrong_field": "test"}},
)
assert response.status_code == 422, response.text
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["body", "sub", "name"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
)
def test_item_model_invalid_type():
response = client.post(
"/item",
json={"title": "Test Item", "size": "not_a_number", "sub": {"name": "SubItem"}},
)
assert response.status_code == 422, response.text
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["body", "size"],
"msg": "value is not a valid integer",
"type": "type_error.integer",
}
]
}
)
def test_item_filter():
response = client.post(
"/item-filter",
json={
"title": "Filtered Item",
"size": 200,
"description": "Test filtering",
"sub": {"name": "SubFiltered"},
"multi": [],
},
)
assert response.status_code == 200, response.text
result = response.json()
assert result == {
"title": "Filtered Item",
"size": 200,
"description": "Test filtering",
"sub": {"name": "SubFiltered"},
"multi": [],
}
assert "secret_data" not in result
assert "internal_id" not in result
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/simple-model": {
"post": {
"summary": "Handle Simple Model",
"operationId": "handle_simple_model_simple_model_post",
"requestBody": {
"content": {
"application/json": {
"schema": pydantic_snapshot(
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/SubItem"
}
],
"title": "Data",
}
),
v1=snapshot(
{"$ref": "#/components/schemas/SubItem"}
),
)
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SubItem"
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/simple-model-filter": {
"post": {
"summary": "Handle Simple Model Filter",
"operationId": "handle_simple_model_filter_simple_model_filter_post",
"requestBody": {
"content": {
"application/json": {
"schema": pydantic_snapshot(
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/SubItem"
}
],
"title": "Data",
}
),
v1=snapshot(
{"$ref": "#/components/schemas/SubItem"}
),
)
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SubItem"
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/item": {
"post": {
"summary": "Handle Item",
"operationId": "handle_item_item_post",
"requestBody": {
"content": {
"application/json": {
"schema": pydantic_snapshot(
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/Item"
}
],
"title": "Data",
}
),
v1=snapshot(
{"$ref": "#/components/schemas/Item"}
),
)
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/item-filter": {
"post": {
"summary": "Handle Item Filter",
"operationId": "handle_item_filter_item_filter_post",
"requestBody": {
"content": {
"application/json": {
"schema": pydantic_snapshot(
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/Item"
}
],
"title": "Data",
}
),
v1=snapshot(
{"$ref": "#/components/schemas/Item"}
),
)
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {
"$ref": "#/components/schemas/ValidationError"
},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Item": {
"properties": {
"title": {"type": "string", "title": "Title"},
"size": {"type": "integer", "title": "Size"},
"description": {"type": "string", "title": "Description"},
"sub": {"$ref": "#/components/schemas/SubItem"},
"multi": {
"items": {"$ref": "#/components/schemas/SubItem"},
"type": "array",
"title": "Multi",
"default": [],
},
},
"type": "object",
"required": ["title", "size", "sub"],
"title": "Item",
},
"SubItem": {
"properties": {"name": {"type": "string", "title": "Name"}},
"type": "object",
"required": ["name"],
"title": "SubItem",
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
)
| Item |
python | sqlalchemy__sqlalchemy | test/engine/test_parseconnect.py | {
"start": 37690,
"end": 39513
} | class ____(fixtures.TestBase):
@testing.requires.sqlite
@testing.combinations(True, False, None)
def test_is_async_to_create_engine(self, is_async):
def get_dialect_cls(url):
url = url.set(drivername="sqlite")
return url.get_dialect()
global MockDialectGetDialect
MockDialectGetDialect = Mock()
MockDialectGetDialect.get_dialect_cls.side_effect = get_dialect_cls
MockDialectGetDialect.get_async_dialect_cls.side_effect = (
get_dialect_cls
)
registry.register("mockdialect", __name__, "MockDialectGetDialect")
from sqlalchemy.dialects import sqlite
kw = {}
if is_async is not None:
kw["_is_async"] = is_async
e = create_engine("mockdialect://", **kw)
eq_(e.dialect.name, "sqlite")
assert isinstance(e.dialect, sqlite.dialect)
if is_async:
eq_(
MockDialectGetDialect.mock_calls,
[
call.get_async_dialect_cls(url.make_url("mockdialect://")),
call.engine_created(e),
],
)
else:
eq_(
MockDialectGetDialect.mock_calls,
[
call.get_dialect_cls(url.make_url("mockdialect://")),
call.engine_created(e),
],
)
MockDialectGetDialect.reset_mock()
u = url.make_url("mockdialect://")
u.get_dialect(**kw)
if is_async:
eq_(
MockDialectGetDialect.mock_calls,
[call.get_async_dialect_cls(u)],
)
else:
eq_(
MockDialectGetDialect.mock_calls,
[call.get_dialect_cls(u)],
)
| TestGetDialect |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/candidate_sampler_ops_test.py | {
"start": 1132,
"end": 6236
} | class ____(test.TestCase):
BATCH_SIZE = 3
NUM_TRUE = 2
RANGE = 5
NUM_SAMPLED = RANGE
TRUE_LABELS = [[1, 2], [0, 4], [3, 3]]
@test_util.run_deprecated_v1
def testTrueCandidates(self):
with self.cached_session() as sess:
indices = constant_op.constant([0, 0, 1, 1, 2, 2])
true_candidates_vec = constant_op.constant([1, 2, 0, 4, 3, 3])
true_candidates_matrix = array_ops.reshape(
true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])
indices_val, true_candidates_val = sess.run(
[indices, true_candidates_matrix])
self.assertAllEqual(indices_val, [0, 0, 1, 1, 2, 2])
self.assertAllEqual(true_candidates_val, self.TRUE_LABELS)
def testSampledCandidates(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
result = self.evaluate(sampled_candidates)
expected_ids = [0, 1, 2, 3, 4]
self.assertAllEqual(result, expected_ids)
self.assertEqual(sampled_candidates.get_shape(), [self.NUM_SAMPLED])
def testTrueLogExpectedCount(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
true_log_expected_count = math_ops.log(true_expected_count)
result = self.evaluate(true_log_expected_count)
self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
self.assertEqual(true_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])
self.assertEqual(true_log_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])
def testSampledLogExpectedCount(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler( # pylint: disable=line-too-long
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
sampled_log_expected_count = math_ops.log(sampled_expected_count)
result = self.evaluate(sampled_log_expected_count)
self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
self.assertEqual(sampled_expected_count.get_shape(), [self.NUM_SAMPLED])
self.assertEqual(sampled_log_expected_count.get_shape(), [self.NUM_SAMPLED])
def testAccidentalHits(self):
with self.cached_session() as sess:
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
accidental_hits = candidate_sampling_ops.compute_accidental_hits(
true_classes, sampled_candidates, self.NUM_TRUE)
indices, ids, weights = self.evaluate(accidental_hits)
self.assertEqual(1, accidental_hits[0].get_shape().ndims)
self.assertEqual(1, accidental_hits[1].get_shape().ndims)
self.assertEqual(1, accidental_hits[2].get_shape().ndims)
for index, id_, weight in zip(indices, ids, weights):
self.assertTrue(id_ in self.TRUE_LABELS[index])
self.assertLess(weight, -1.0e37)
@test_util.run_deprecated_v1
def testSeed(self):
def draw(seed):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)
return self.evaluate(sampled)
# Non-zero seed. Repeatable.
for seed in [1, 12, 123, 1234]:
self.assertAllEqual(draw(seed), draw(seed))
# Seed=0 means random seeds.
num_same = 0
for _ in range(10):
if np.allclose(draw(None), draw(None)):
num_same += 1
# Accounts for the fact that the same random seed may be picked
# twice very rarely.
self.assertLessEqual(num_same, 2)
def testCandidateOutOfRange(self):
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"out of range"):
self.evaluate(
candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes=[[0, 10]],
num_true=2,
num_sampled=1000,
unique=False,
range_max=2))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"out of range"):
self.evaluate(
candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes=[[0, -10]],
num_true=2,
num_sampled=1000,
unique=False,
range_max=2))
if __name__ == "__main__":
test.main()
| RangeSamplerOpsTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_excel2003_style02.py | {
"start": 315,
"end": 1287
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("excel2003_style02.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename, {"excel2003_style": True})
worksheet = workbook.add_worksheet()
worksheet.set_paper(9)
bold = workbook.add_format({"bold": 1})
worksheet.write("A1", "Foo")
worksheet.write("A2", "Bar", bold)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | scipy__scipy | scipy/integrate/_ivp/ivp.py | {
"start": 594,
"end": 31824
} | class ____(OptimizeResult):
pass
def prepare_events(events):
"""Standardize event functions and extract attributes."""
if callable(events):
events = (events,)
max_events = np.empty(len(events))
direction = np.empty(len(events))
for i, event in enumerate(events):
terminal = getattr(event, 'terminal', None)
direction[i] = getattr(event, 'direction', 0)
message = ('The `terminal` attribute of each event '
'must be a boolean or positive integer.')
if terminal is None or terminal == 0:
max_events[i] = np.inf
elif int(terminal) == terminal and terminal > 0:
max_events[i] = terminal
else:
raise ValueError(message)
return events, max_events, direction
def solve_event_equation(event, sol, t_old, t):
"""Solve an equation corresponding to an ODE event.
The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an
ODE solver using some sort of interpolation. It is solved by
`scipy.optimize.brentq` with xtol=atol=4*EPS.
Parameters
----------
event : callable
Function ``event(t, y)``.
sol : callable
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
t_old, t : float
Previous and new values of time. They will be used as a bracketing
interval.
Returns
-------
root : float
Found solution.
"""
from scipy.optimize import brentq
return brentq(lambda t: event(t, sol(t)), t_old, t,
xtol=4 * EPS, rtol=4 * EPS)
def handle_events(sol, events, active_events, event_count, max_events,
t_old, t):
"""Helper function to handle events.
Parameters
----------
sol : DenseOutput
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
events : list of callables, length n_events
Event functions with signatures ``event(t, y)``.
active_events : ndarray
Indices of events which occurred.
event_count : ndarray
Current number of occurrences for each event.
max_events : ndarray, shape (n_events,)
Number of occurrences allowed for each event before integration
termination is issued.
t_old, t : float
Previous and new values of time.
Returns
-------
root_indices : ndarray
Indices of events which take zero between `t_old` and `t` and before
a possible termination.
roots : ndarray
Values of t at which events occurred.
terminate : bool
Whether a terminal event occurred.
"""
roots = [solve_event_equation(events[event_index], sol, t_old, t)
for event_index in active_events]
roots = np.asarray(roots)
if np.any(event_count[active_events] >= max_events[active_events]):
if t > t_old:
order = np.argsort(roots)
else:
order = np.argsort(-roots)
active_events = active_events[order]
roots = roots[order]
t = np.nonzero(event_count[active_events]
>= max_events[active_events])[0][0]
active_events = active_events[:t + 1]
roots = roots[:t + 1]
terminate = True
else:
terminate = False
return active_events, roots, terminate
def find_active_events(g, g_new, direction):
"""Find which event occurred during an integration step.
Parameters
----------
g, g_new : array_like, shape (n_events,)
Values of event functions at a current and next points.
direction : ndarray, shape (n_events,)
Event "direction" according to the definition in `solve_ivp`.
Returns
-------
active_events : ndarray
Indices of events which occurred during the step.
"""
g, g_new = np.asarray(g), np.asarray(g_new)
up = (g <= 0) & (g_new >= 0)
down = (g >= 0) & (g_new <= 0)
either = up | down
mask = (up & (direction > 0) |
down & (direction < 0) |
either & (direction == 0))
return np.nonzero(mask)[0]
@xp_capabilities(np_only=True)
def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
events=None, vectorized=False, args=None, **options):
"""Solve an initial value problem for a system of ODEs.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a 1-D independent variable (time), y(t) is an
N-D vector-valued function (state), and an N-D
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
Some of the solvers support integration in the complex domain, but note
that for stiff ODE solvers, the right-hand side must be
complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
To solve a problem in the complex domain, pass y0 with a complex data type.
Another option always available is to rewrite your problem for real and
imaginary parts separately.
Parameters
----------
fun : callable
Right-hand side of the system: the time derivative of the state ``y``
at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. Additional
arguments need to be passed if ``args`` is used (see documentation of
``args`` argument). ``fun`` must return an array of the same shape as
``y``. See `vectorized` for more information.
t_span : 2-member sequence
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf. Both t0 and tf must be floats
or values interpretable by the float conversion function.
y0 : array_like, shape (n,)
Initial state. For problems in the complex domain, pass `y0` with a
complex data type (even if the initial value is purely real).
method : string or `OdeSolver`, optional
Integration method to use:
* 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
The error is controlled assuming accuracy of the fourth-order
method, but steps are taken using the fifth-order accurate
formula (local extrapolation is done). A quartic interpolation
polynomial is used for the dense output [2]_. Can be applied in
the complex domain.
* 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
is controlled assuming accuracy of the second-order method, but
steps are taken using the third-order accurate formula (local
extrapolation is done). A cubic Hermite polynomial is used for the
dense output. Can be applied in the complex domain.
* 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
Python implementation of the "DOP853" algorithm originally
written in Fortran [14]_. A 7-th order interpolation polynomial
accurate to 7-th order is used for the dense output.
Can be applied in the complex domain.
* 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
order 5 [4]_. The error is controlled with a third-order accurate
embedded formula. A cubic polynomial which satisfies the
collocation conditions is used for the dense output.
* 'BDF': Implicit multi-step variable-order (1 to 5) method based
on a backward differentiation formula for the derivative
approximation [5]_. The implementation follows the one described
in [6]_. A quasi-constant step scheme is used and accuracy is
enhanced using the NDF modification. Can be applied in the
complex domain.
* 'LSODA': Adams/BDF method with automatic stiffness detection and
switching [7]_, [8]_. This is a wrapper of the Fortran solver
from ODEPACK.
Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
for non-stiff problems and implicit methods ('Radau', 'BDF') for
stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
for solving with high precision (low values of `rtol` and `atol`).
If not sure, first try to run 'RK45'. If it makes unusually many
iterations, diverges, or fails, your problem is likely to be stiff and
you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
choice, but it might be somewhat less convenient to work with as it
wraps old Fortran code.
You can also pass an arbitrary class derived from `OdeSolver` which
implements the solver.
t_eval : array_like or None, optional
Times at which to store the computed solution, must be sorted and lie
within `t_span`. If None (default), use points selected by the solver.
dense_output : bool, optional
Whether to compute a continuous solution. Default is False.
events : callable, or list of callables, optional
Events to track. If None (default), no events will be tracked.
Each event occurs at the zeros of a continuous function of time and
state. Each function must have the signature ``event(t, y)`` where
additional argument have to be passed if ``args`` is used (see
documentation of ``args`` argument). Each function must return a
float. The solver will find an accurate value of `t` at which
``event(t, y(t)) = 0`` using a root-finding algorithm. By default,
all zeros will be found. The solver looks for a sign change over
each step, so if multiple zero crossings occur within one step,
events may be missed. Additionally each `event` function might
have the following attributes:
terminal: bool or int, optional
When boolean, whether to terminate integration if this event occurs.
When integral, termination occurs after the specified the number of
occurrences of this event.
Implicitly False if not assigned.
direction: float, optional
Direction of a zero crossing. If `direction` is positive,
`event` will only trigger when going from negative to positive,
and vice versa if `direction` is negative. If 0, then either
direction will trigger event. Implicitly 0 if not assigned.
You can assign attributes like ``event.terminal = True`` to any
function in Python.
vectorized : bool, optional
Whether `fun` can be called in a vectorized fashion. Default is False.
If ``vectorized`` is False, `fun` will always be called with ``y`` of
shape ``(n,)``, where ``n = len(y0)``.
If ``vectorized`` is True, `fun` may be called with ``y`` of shape
``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
the returned array is the time derivative of the state corresponding
with a column of ``y``).
Setting ``vectorized=True`` allows for faster finite difference
approximation of the Jacobian by methods 'Radau' and 'BDF', but
will result in slower execution for other methods and for 'Radau' and
'BDF' in some circumstances (e.g. small ``len(y0)``).
args : tuple, optional
Additional arguments to pass to the user-defined functions. If given,
the additional arguments are passed to all user-defined functions.
So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
then `jac` (if given) and any event functions must have the same
signature, and `args` must be a tuple of length 3.
**options
Options passed to a chosen solver. All options available for already
implemented solvers are listed below.
first_step : float or None, optional
Initial step size. Default is `None` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float or array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits), while `atol` controls
absolute accuracy (number of correct decimal places). To achieve the
desired `rtol`, set `atol` to be smaller than the smallest value that
can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
allowable error. If `atol` is larger than ``rtol * abs(y)`` the
number of correct digits is not guaranteed. Conversely, to achieve the
desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
than `atol`. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : array_like, sparse_matrix, callable or None, optional
Jacobian matrix of the right-hand side of the system with respect
to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
Jacobian matrix has shape (n, n) and its element (i, j) is equal to
``d f_i / d y_j``. There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant. Not supported by 'LSODA'.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)``, as necessary.
Additional arguments have to be passed if ``args`` is
used (see documentation of ``args`` argument).
For 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : array_like, sparse matrix or None, optional
Defines a sparsity structure of the Jacobian matrix for a finite-
difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few
non-zero elements in *each* row, providing the sparsity structure
will greatly speed up the computations [10]_. A zero entry means that
a corresponding element in the Jacobian is always zero. If None
(default), the Jacobian is assumed to be dense.
Not supported by 'LSODA', see `lband` and `uband` instead.
lband, uband : int or None, optional
Parameters defining the bandwidth of the Jacobian for the 'LSODA'
method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
Default is None. Setting these requires your jac routine to return the
Jacobian in the packed format: the returned array must have ``n``
columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
The same format is used in `scipy.linalg.solve_banded` (check for an
illustration). These parameters can be also used with ``jac=None`` to
reduce the number of Jacobian elements estimated by finite differences.
min_step : float, optional
The minimum allowed step size for 'LSODA' method.
By default `min_step` is zero.
Returns
-------
Bunch object with the following fields defined:
t : ndarray, shape (n_points,)
Time points.
y : ndarray, shape (n, n_points)
Values of the solution at `t`.
sol : `OdeSolution` or None
Found solution as `OdeSolution` instance; None if `dense_output` was
set to False.
t_events : list of ndarray or None
Contains for each event type a list of arrays at which an event of
that type event was detected. None if `events` was None.
y_events : list of ndarray or None
For each value of `t_events`, the corresponding value of the solution.
None if `events` was None.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
status : int
Reason for algorithm termination:
* -1: Integration step failed.
* 0: The solver successfully reached the end of `tspan`.
* 1: A termination event occurred.
message : string
Human-readable description of the termination reason.
success : bool
True if the solver reached the interval end or a termination event
occurred (``status >= 0``).
References
----------
.. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
formulae", Journal of Computational and Applied Mathematics, Vol. 6,
No. 1, pp. 19-26, 1980.
.. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
of Computation,, Vol. 46, No. 173, pp. 135-150, 1986.
.. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
.. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
Stiff and Differential-Algebraic Problems", Sec. IV.8.
.. [5] `Backward Differentiation Formula
<https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
on Wikipedia.
.. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
.. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
pp. 55-64, 1983.
.. [8] L. Petzold, "Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations", SIAM Journal
on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
1983.
.. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
Wikipedia.
.. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
.. [11] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
.. [12] `Lotka-Volterra equations
<https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations>`_
on Wikipedia.
.. [13] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.
.. [14] `Page with original Fortran code of DOP853
<http://www.unige.ch/~hairer/software.html>`_.
Examples
--------
Basic exponential decay showing automatically chosen time points.
>>> import numpy as np
>>> from scipy.integrate import solve_ivp
>>> def exponential_decay(t, y): return -0.5 * y
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
>>> print(sol.t)
[ 0. 0.11487653 1.26364188 3.06061781 4.81611105 6.57445806
8.33328988 10. ]
>>> print(sol.y)
[[2. 1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
0.03107158 0.01350781]
[4. 3.7767207 2.12654355 0.86638624 0.36034507 0.14966091
0.06214316 0.02701561]
[8. 7.5534414 4.25308709 1.73277247 0.72069014 0.29932181
0.12428631 0.05403123]]
Specifying points where the solution is desired.
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
... t_eval=[0, 1, 2, 4, 10])
>>> print(sol.t)
[ 0 1 2 4 10]
>>> print(sol.y)
[[2. 1.21305369 0.73534021 0.27066736 0.01350938]
[4. 2.42610739 1.47068043 0.54133472 0.02701876]
[8. 4.85221478 2.94136085 1.08266944 0.05403753]]
Cannon fired upward with terminal event upon impact. The ``terminal`` and
``direction`` fields of an event are applied by monkey patching a function.
Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
at position 0 with velocity +10. Note that the integration never reaches
t=100 because the event is terminal.
>>> def upward_cannon(t, y): return [y[1], -0.5]
>>> def hit_ground(t, y): return y[0]
>>> hit_ground.terminal = True
>>> hit_ground.direction = -1
>>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
>>> print(sol.t_events)
[array([40.])]
>>> print(sol.t)
[0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
Use `dense_output` and `events` to find position, which is 100, at the apex
of the cannonball's trajectory. Apex is not defined as terminal, so both
apex and hit_ground are found. There is no information at t=20, so the sol
attribute is used to evaluate the solution. The sol attribute is returned
by setting ``dense_output=True``. Alternatively, the `y_events` attribute
can be used to access the solution at the time of the event.
>>> def apex(t, y): return y[1]
>>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
... events=(hit_ground, apex), dense_output=True)
>>> print(sol.t_events)
[array([40.]), array([20.])]
>>> print(sol.t)
[0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
>>> print(sol.sol(sol.t_events[1][0]))
[100. 0.]
>>> print(sol.y_events)
[array([[-5.68434189e-14, -1.00000000e+01]]),
array([[1.00000000e+02, 1.77635684e-15]])]
As an example of a system with additional parameters, we'll implement
the Lotka-Volterra equations [12]_.
>>> def lotkavolterra(t, z, a, b, c, d):
... x, y = z
... return [a*x - b*x*y, -c*y + d*x*y]
...
We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
argument.
>>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
... dense_output=True)
Compute a dense solution and plot it.
>>> t = np.linspace(0, 15, 300)
>>> z = sol.sol(t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, z.T)
>>> plt.xlabel('t')
>>> plt.legend(['x', 'y'], shadow=True)
>>> plt.title('Lotka-Volterra System')
>>> plt.show()
A couple examples of using solve_ivp to solve the differential
equation ``y' = Ay`` with complex matrix ``A``.
>>> A = np.array([[-0.25 + 0.14j, 0, 0.33 + 0.44j],
... [0.25 + 0.58j, -0.2 + 0.14j, 0],
... [0, 0.2 + 0.4j, -0.1 + 0.97j]])
Solving an IVP with ``A`` from above and ``y`` as 3x1 vector:
>>> def deriv_vec(t, y):
... return A @ y
>>> result = solve_ivp(deriv_vec, [0, 25],
... np.array([10 + 0j, 20 + 0j, 30 + 0j]),
... t_eval=np.linspace(0, 25, 101))
>>> print(result.y[:, 0])
[10.+0.j 20.+0.j 30.+0.j]
>>> print(result.y[:, -1])
[18.46291039+45.25653651j 10.01569306+36.23293216j
-4.98662741+80.07360388j]
Solving an IVP with ``A`` from above with ``y`` as 3x3 matrix :
>>> def deriv_mat(t, y):
... return (A @ y.reshape(3, 3)).flatten()
>>> y0 = np.array([[2 + 0j, 3 + 0j, 4 + 0j],
... [5 + 0j, 6 + 0j, 7 + 0j],
... [9 + 0j, 34 + 0j, 78 + 0j]])
>>> result = solve_ivp(deriv_mat, [0, 25], y0.flatten(),
... t_eval=np.linspace(0, 25, 101))
>>> print(result.y[:, 0].reshape(3, 3))
[[ 2.+0.j 3.+0.j 4.+0.j]
[ 5.+0.j 6.+0.j 7.+0.j]
[ 9.+0.j 34.+0.j 78.+0.j]]
>>> print(result.y[:, -1].reshape(3, 3))
[[ 5.67451179 +12.07938445j 17.2888073 +31.03278837j
37.83405768 +63.25138759j]
[ 3.39949503 +11.82123994j 21.32530996 +44.88668871j
53.17531184+103.80400411j]
[ -2.26105874 +22.19277664j -15.1255713 +70.19616341j
-38.34616845+153.29039931j]]
"""
if method not in METHODS and not (
inspect.isclass(method) and issubclass(method, OdeSolver)):
raise ValueError(f"`method` must be one of {METHODS} or OdeSolver class.")
t0, tf = map(float, t_span)
if args is not None:
# Wrap the user's fun (and jac, if given) in lambdas to hide the
# additional parameters. Pass in the original fun as a keyword
# argument to keep it in the scope of the lambda.
try:
_ = [*(args)]
except TypeError as exp:
suggestion_tuple = (
"Supplied 'args' cannot be unpacked. Please supply `args`"
f" as a tuple (e.g. `args=({args},)`)"
)
raise TypeError(suggestion_tuple) from exp
def fun(t, x, fun=fun):
return fun(t, x, *args)
jac = options.get('jac')
if callable(jac):
options['jac'] = lambda t, x: jac(t, x, *args)
if t_eval is not None:
t_eval = np.asarray(t_eval)
if t_eval.ndim != 1:
raise ValueError("`t_eval` must be 1-dimensional.")
if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
raise ValueError("Values in `t_eval` are not within `t_span`.")
d = np.diff(t_eval)
if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
raise ValueError("Values in `t_eval` are not properly sorted.")
if tf > t0:
t_eval_i = 0
else:
# Make order of t_eval decreasing to use np.searchsorted.
t_eval = t_eval[::-1]
# This will be an upper bound for slices.
t_eval_i = t_eval.shape[0]
if method in METHODS:
method = METHODS[method]
solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
if t_eval is None:
ts = [t0]
ys = [y0]
elif t_eval is not None and dense_output:
ts = []
ti = [t0]
ys = []
else:
ts = []
ys = []
interpolants = []
if events is not None:
events, max_events, event_dir = prepare_events(events)
event_count = np.zeros(len(events))
if args is not None:
# Wrap user functions in lambdas to hide the additional parameters.
# The original event function is passed as a keyword argument to the
# lambda to keep the original function in scope (i.e., avoid the
# late binding closure "gotcha").
events = [lambda t, x, event=event: event(t, x, *args)
for event in events]
g = [event(t0, y0) for event in events]
t_events = [[] for _ in range(len(events))]
y_events = [[] for _ in range(len(events))]
else:
t_events = None
y_events = None
status = None
while status is None:
message = solver.step()
if solver.status == 'finished':
status = 0
elif solver.status == 'failed':
status = -1
break
t_old = solver.t_old
t = solver.t
y = solver.y
if dense_output:
sol = solver.dense_output()
interpolants.append(sol)
else:
sol = None
if events is not None:
g_new = [event(t, y) for event in events]
active_events = find_active_events(g, g_new, event_dir)
if active_events.size > 0:
if sol is None:
sol = solver.dense_output()
event_count[active_events] += 1
root_indices, roots, terminate = handle_events(
sol, events, active_events, event_count, max_events,
t_old, t)
for e, te in zip(root_indices, roots):
t_events[e].append(te)
y_events[e].append(sol(te))
if terminate:
status = 1
t = roots[-1]
y = sol(t)
g = g_new
if t_eval is None:
donot_append = (len(ts) > 1 and
ts[-1] == t and
dense_output)
if not donot_append:
ts.append(t)
ys.append(y)
else:
if len(interpolants) > 0:
interpolants.pop()
else:
# The value in t_eval equal to t will be included.
if solver.direction > 0:
t_eval_i_new = np.searchsorted(t_eval, t, side='right')
t_eval_step = t_eval[t_eval_i:t_eval_i_new]
else:
t_eval_i_new = np.searchsorted(t_eval, t, side='left')
# It has to be done with two slice operations, because
# you can't slice to 0th element inclusive using backward
# slicing.
t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
if t_eval_step.size > 0:
if sol is None:
sol = solver.dense_output()
ts.append(t_eval_step)
ys.append(sol(t_eval_step))
t_eval_i = t_eval_i_new
if t_eval is not None and dense_output:
ti.append(t)
message = MESSAGES.get(status, message)
if t_events is not None:
t_events = [np.asarray(te) for te in t_events]
y_events = [np.asarray(ye) for ye in y_events]
if t_eval is None:
ts = np.array(ts)
ys = np.vstack(ys).T
elif ts:
ts = np.hstack(ts)
ys = np.hstack(ys)
if dense_output:
if t_eval is None:
sol = OdeSolution(
ts, interpolants, alt_segment=True if method in [BDF, LSODA] else False
)
else:
sol = OdeSolution(
ti, interpolants, alt_segment=True if method in [BDF, LSODA] else False
)
else:
sol = None
return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events,
nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,
status=status, message=message, success=status >= 0)
| OdeResult |
python | davidhalter__jedi | jedi/inference/gradual/typing.py | {
"start": 7203,
"end": 7524
} | class ____(ClassMixin):
def py__bases__(self):
return [LazyKnownValues(
self.inference_state.builtins_module.py__getattribute__('object')
)]
def get_metaclasses(self):
return []
@property
def name(self):
return ValueName(self, self._tree_name)
| _TypingClassMixin |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/external_team_index.py | {
"start": 987,
"end": 2396
} | class ____(TeamEndpoint, ExternalActorEndpointMixin):
publish_status = {
"POST": ApiPublishStatus.PUBLIC,
}
owner = ApiOwner.ENTERPRISE
@extend_schema(
operation_id="Create an External Team",
parameters=[GlobalParams.ORG_ID_OR_SLUG, GlobalParams.TEAM_ID_OR_SLUG],
request=ExternalTeamSerializer,
responses={
200: ExternalActorSerializer,
201: ExternalActorSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
examples=IntegrationExamples.EXTERNAL_TEAM_CREATE,
)
def post(self, request: Request, team: Team) -> Response:
"""
Link a team from an external provider to a Sentry team.
"""
self.assert_has_feature(request, team.organization)
if "teamId" in request.data:
del request.data["teamId"]
serializer = ExternalTeamSerializer(
data={**request.data, "team_id": team.id}, context={"organization": team.organization}
)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
external_team, created = serializer.save()
status_code = status.HTTP_201_CREATED if created else status.HTTP_200_OK
return Response(serialize(external_team, request.user, key="team"), status=status_code)
| ExternalTeamEndpoint |
python | redis__redis-py | redis/asyncio/client.py | {
"start": 2377,
"end": 29265
} | class ____(
AbstractRedis, AsyncRedisModuleCommands, AsyncCoreCommands, AsyncSentinelCommands
):
"""
Implementation of the Redis protocol.
This abstract class provides a Python interface to all Redis commands
and an implementation of the Redis protocol.
Pipelines derive from this, implementing how
the commands are sent and received to the Redis server. Based on
configuration, an instance will either use a ConnectionPool, or
Connection object to talk to redis.
"""
response_callbacks: MutableMapping[Union[str, bytes], ResponseCallbackT]
@classmethod
def from_url(
cls,
url: str,
single_connection_client: bool = False,
auto_close_connection_pool: Optional[bool] = None,
**kwargs,
):
"""
Return a Redis client object configured from the given URL
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
unix://[username@]/path/to/socket.sock?db=0[&password=password]
Three URL schemes are supported:
- `redis://` creates a TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/redis>
- `rediss://` creates a SSL wrapped TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/rediss>
- ``unix://``: creates a Unix Domain Socket connection.
The username, password, hostname, path and all querystring values
are passed through urllib.parse.unquote in order to replace any
percent-encoded values with their corresponding characters.
There are several ways to specify a database number. The first value
found will be used:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// or rediss:// schemes, the path argument
of the url, e.g. redis://localhost/0
3. A ``db`` keyword argument to this function.
If none of these options are specified, the default db=0 is used.
All querystring options are cast to their appropriate Python types.
Boolean arguments can be specified with string values "True"/"False"
or "Yes"/"No". Values that cannot be properly cast cause a
``ValueError`` to be raised. Once parsed, the querystring arguments
and keyword arguments are passed to the ``ConnectionPool``'s
class initializer. In the case of conflicting arguments, querystring
arguments always win.
"""
connection_pool = ConnectionPool.from_url(url, **kwargs)
client = cls(
connection_pool=connection_pool,
single_connection_client=single_connection_client,
)
if auto_close_connection_pool is not None:
warnings.warn(
DeprecationWarning(
'"auto_close_connection_pool" is deprecated '
"since version 5.0.1. "
"Please create a ConnectionPool explicitly and "
"provide to the Redis() constructor instead."
)
)
else:
auto_close_connection_pool = True
client.auto_close_connection_pool = auto_close_connection_pool
return client
@classmethod
def from_pool(
cls: Type["Redis"],
connection_pool: ConnectionPool,
) -> "Redis":
"""
Return a Redis client from the given connection pool.
The Redis client will take ownership of the connection pool and
close it when the Redis client is closed.
"""
client = cls(
connection_pool=connection_pool,
)
client.auto_close_connection_pool = True
return client
@deprecated_args(
args_to_warn=["retry_on_timeout"],
reason="TimeoutError is included by default.",
version="6.0.0",
)
def __init__(
self,
*,
host: str = "localhost",
port: int = 6379,
db: Union[str, int] = 0,
password: Optional[str] = None,
socket_timeout: Optional[float] = None,
socket_connect_timeout: Optional[float] = None,
socket_keepalive: Optional[bool] = None,
socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
connection_pool: Optional[ConnectionPool] = None,
unix_socket_path: Optional[str] = None,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
retry_on_timeout: bool = False,
retry: Retry = Retry(
backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
),
retry_on_error: Optional[list] = None,
ssl: bool = False,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
ssl_cert_reqs: Union[str, VerifyMode] = "required",
ssl_include_verify_flags: Optional[List[VerifyFlags]] = None,
ssl_exclude_verify_flags: Optional[List[VerifyFlags]] = None,
ssl_ca_certs: Optional[str] = None,
ssl_ca_data: Optional[str] = None,
ssl_check_hostname: bool = True,
ssl_min_version: Optional[TLSVersion] = None,
ssl_ciphers: Optional[str] = None,
max_connections: Optional[int] = None,
single_connection_client: bool = False,
health_check_interval: int = 0,
client_name: Optional[str] = None,
lib_name: Optional[str] = "redis-py",
lib_version: Optional[str] = get_lib_version(),
username: Optional[str] = None,
auto_close_connection_pool: Optional[bool] = None,
redis_connect_func=None,
credential_provider: Optional[CredentialProvider] = None,
protocol: Optional[int] = 2,
event_dispatcher: Optional[EventDispatcher] = None,
):
"""
Initialize a new Redis client.
To specify a retry policy for specific errors, you have two options:
1. Set the `retry_on_error` to a list of the error/s to retry on, and
you can also set `retry` to a valid `Retry` object(in case the default
one is not appropriate) - with this approach the retries will be triggered
on the default errors specified in the Retry object enriched with the
errors specified in `retry_on_error`.
2. Define a `Retry` object with configured 'supported_errors' and set
it to the `retry` parameter - with this approach you completely redefine
the errors on which retries will happen.
`retry_on_timeout` is deprecated - please include the TimeoutError
either in the Retry object or in the `retry_on_error` list.
When 'connection_pool' is provided - the retry configuration of the
provided pool will be used.
"""
kwargs: Dict[str, Any]
if event_dispatcher is None:
self._event_dispatcher = EventDispatcher()
else:
self._event_dispatcher = event_dispatcher
# auto_close_connection_pool only has an effect if connection_pool is
# None. It is assumed that if connection_pool is not None, the user
# wants to manage the connection pool themselves.
if auto_close_connection_pool is not None:
warnings.warn(
DeprecationWarning(
'"auto_close_connection_pool" is deprecated '
"since version 5.0.1. "
"Please create a ConnectionPool explicitly and "
"provide to the Redis() constructor instead."
)
)
else:
auto_close_connection_pool = True
if not connection_pool:
# Create internal connection pool, expected to be closed by Redis instance
if not retry_on_error:
retry_on_error = []
kwargs = {
"db": db,
"username": username,
"password": password,
"credential_provider": credential_provider,
"socket_timeout": socket_timeout,
"encoding": encoding,
"encoding_errors": encoding_errors,
"decode_responses": decode_responses,
"retry_on_error": retry_on_error,
"retry": copy.deepcopy(retry),
"max_connections": max_connections,
"health_check_interval": health_check_interval,
"client_name": client_name,
"lib_name": lib_name,
"lib_version": lib_version,
"redis_connect_func": redis_connect_func,
"protocol": protocol,
}
# based on input, setup appropriate connection args
if unix_socket_path is not None:
kwargs.update(
{
"path": unix_socket_path,
"connection_class": UnixDomainSocketConnection,
}
)
else:
# TCP specific options
kwargs.update(
{
"host": host,
"port": port,
"socket_connect_timeout": socket_connect_timeout,
"socket_keepalive": socket_keepalive,
"socket_keepalive_options": socket_keepalive_options,
}
)
if ssl:
kwargs.update(
{
"connection_class": SSLConnection,
"ssl_keyfile": ssl_keyfile,
"ssl_certfile": ssl_certfile,
"ssl_cert_reqs": ssl_cert_reqs,
"ssl_include_verify_flags": ssl_include_verify_flags,
"ssl_exclude_verify_flags": ssl_exclude_verify_flags,
"ssl_ca_certs": ssl_ca_certs,
"ssl_ca_data": ssl_ca_data,
"ssl_check_hostname": ssl_check_hostname,
"ssl_min_version": ssl_min_version,
"ssl_ciphers": ssl_ciphers,
}
)
# This arg only used if no pool is passed in
self.auto_close_connection_pool = auto_close_connection_pool
connection_pool = ConnectionPool(**kwargs)
self._event_dispatcher.dispatch(
AfterPooledConnectionsInstantiationEvent(
[connection_pool], ClientType.ASYNC, credential_provider
)
)
else:
# If a pool is passed in, do not close it
self.auto_close_connection_pool = False
self._event_dispatcher.dispatch(
AfterPooledConnectionsInstantiationEvent(
[connection_pool], ClientType.ASYNC, credential_provider
)
)
self.connection_pool = connection_pool
self.single_connection_client = single_connection_client
self.connection: Optional[Connection] = None
self.response_callbacks = CaseInsensitiveDict(_RedisCallbacks)
if self.connection_pool.connection_kwargs.get("protocol") in ["3", 3]:
self.response_callbacks.update(_RedisCallbacksRESP3)
else:
self.response_callbacks.update(_RedisCallbacksRESP2)
# If using a single connection client, we need to lock creation-of and use-of
# the client in order to avoid race conditions such as using asyncio.gather
# on a set of redis commands
self._single_conn_lock = asyncio.Lock()
# When used as an async context manager, we need to increment and decrement
# a usage counter so that we can close the connection pool when no one is
# using the client.
self._usage_counter = 0
self._usage_lock = asyncio.Lock()
def __repr__(self):
return (
f"<{self.__class__.__module__}.{self.__class__.__name__}"
f"({self.connection_pool!r})>"
)
def __await__(self):
return self.initialize().__await__()
async def initialize(self: _RedisT) -> _RedisT:
if self.single_connection_client:
async with self._single_conn_lock:
if self.connection is None:
self.connection = await self.connection_pool.get_connection()
self._event_dispatcher.dispatch(
AfterSingleConnectionInstantiationEvent(
self.connection, ClientType.ASYNC, self._single_conn_lock
)
)
return self
def set_response_callback(self, command: str, callback: ResponseCallbackT):
"""Set a custom Response Callback"""
self.response_callbacks[command] = callback
def get_encoder(self):
"""Get the connection pool's encoder"""
return self.connection_pool.get_encoder()
def get_connection_kwargs(self):
"""Get the connection's key-word arguments"""
return self.connection_pool.connection_kwargs
def get_retry(self) -> Optional[Retry]:
return self.get_connection_kwargs().get("retry")
def set_retry(self, retry: Retry) -> None:
self.get_connection_kwargs().update({"retry": retry})
self.connection_pool.set_retry(retry)
def load_external_module(self, funcname, func):
"""
This function can be used to add externally defined redis modules,
and their namespaces to the redis client.
funcname - A string containing the name of the function to create
func - The function, being added to this class.
ex: Assume that one has a custom redis module named foomod that
creates command named 'foo.dothing' and 'foo.anotherthing' in redis.
To load function functions into this namespace:
from redis import Redis
from foomodule import F
r = Redis()
r.load_external_module("foo", F)
r.foo().dothing('your', 'arguments')
For a concrete example see the reimport of the redisjson module in
tests/test_connection.py::test_loading_external_modules
"""
setattr(self, funcname, func)
def pipeline(
self, transaction: bool = True, shard_hint: Optional[str] = None
) -> "Pipeline":
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return Pipeline(
self.connection_pool, self.response_callbacks, transaction, shard_hint
)
async def transaction(
self,
func: Callable[["Pipeline"], Union[Any, Awaitable[Any]]],
*watches: KeyT,
shard_hint: Optional[str] = None,
value_from_callable: bool = False,
watch_delay: Optional[float] = None,
):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
pipe: Pipeline
async with self.pipeline(True, shard_hint) as pipe:
while True:
try:
if watches:
await pipe.watch(*watches)
func_value = func(pipe)
if inspect.isawaitable(func_value):
func_value = await func_value
exec_value = await pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
await asyncio.sleep(watch_delay)
continue
def lock(
self,
name: KeyT,
timeout: Optional[float] = None,
sleep: float = 0.1,
blocking: bool = True,
blocking_timeout: Optional[float] = None,
lock_class: Optional[Type[Lock]] = None,
thread_local: bool = True,
raise_on_release_error: bool = True,
) -> Lock:
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking`` indicates whether calling ``acquire`` should block until
the lock has been acquired or to fail immediately, causing ``acquire``
to return False and the lock not being acquired. Defaults to True.
Note this value can be overridden by passing a ``blocking``
argument to ``acquire``.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation. Note that as
of redis-py 3.0, the only lock class we implement is ``Lock`` (which is
a Lua-based lock). So, it's unlikely you'll need this parameter, unless
you have created your own custom lock class.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release the thread-2's lock.
``raise_on_release_error`` indicates whether to raise an exception when
the lock is no longer owned when exiting the context manager. By default,
this is True, meaning an exception will be raised. If False, the warning
will be logged and the exception will be suppressed.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage."""
if lock_class is None:
lock_class = Lock
return lock_class(
self,
name,
timeout=timeout,
sleep=sleep,
blocking=blocking,
blocking_timeout=blocking_timeout,
thread_local=thread_local,
raise_on_release_error=raise_on_release_error,
)
def pubsub(self, **kwargs) -> "PubSub":
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(
self.connection_pool, event_dispatcher=self._event_dispatcher, **kwargs
)
def monitor(self) -> "Monitor":
return Monitor(self.connection_pool)
def client(self) -> "Redis":
return self.__class__(
connection_pool=self.connection_pool, single_connection_client=True
)
async def __aenter__(self: _RedisT) -> _RedisT:
"""
Async context manager entry. Increments a usage counter so that the
connection pool is only closed (via aclose()) when no context is using
the client.
"""
await self._increment_usage()
try:
# Initialize the client (i.e. establish connection, etc.)
return await self.initialize()
except Exception:
# If initialization fails, decrement the counter to keep it in sync
await self._decrement_usage()
raise
async def _increment_usage(self) -> int:
"""
Helper coroutine to increment the usage counter while holding the lock.
Returns the new value of the usage counter.
"""
async with self._usage_lock:
self._usage_counter += 1
return self._usage_counter
async def _decrement_usage(self) -> int:
"""
Helper coroutine to decrement the usage counter while holding the lock.
Returns the new value of the usage counter.
"""
async with self._usage_lock:
self._usage_counter -= 1
return self._usage_counter
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Async context manager exit. Decrements a usage counter. If this is the
last exit (counter becomes zero), the client closes its connection pool.
"""
current_usage = await asyncio.shield(self._decrement_usage())
if current_usage == 0:
# This was the last active context, so disconnect the pool.
await asyncio.shield(self.aclose())
_DEL_MESSAGE = "Unclosed Redis client"
# passing _warnings and _grl as argument default since they may be gone
# by the time __del__ is called at shutdown
def __del__(
self,
_warn: Any = warnings.warn,
_grl: Any = asyncio.get_running_loop,
) -> None:
if hasattr(self, "connection") and (self.connection is not None):
_warn(f"Unclosed client session {self!r}", ResourceWarning, source=self)
try:
context = {"client": self, "message": self._DEL_MESSAGE}
_grl().call_exception_handler(context)
except RuntimeError:
pass
self.connection._close()
async def aclose(self, close_connection_pool: Optional[bool] = None) -> None:
"""
Closes Redis client connection
Args:
close_connection_pool:
decides whether to close the connection pool used by this Redis client,
overriding Redis.auto_close_connection_pool.
By default, let Redis.auto_close_connection_pool decide
whether to close the connection pool.
"""
conn = self.connection
if conn:
self.connection = None
await self.connection_pool.release(conn)
if close_connection_pool or (
close_connection_pool is None and self.auto_close_connection_pool
):
await self.connection_pool.disconnect()
@deprecated_function(version="5.0.1", reason="Use aclose() instead", name="close")
async def close(self, close_connection_pool: Optional[bool] = None) -> None:
"""
Alias for aclose(), for backwards compatibility
"""
await self.aclose(close_connection_pool)
async def _send_command_parse_response(self, conn, command_name, *args, **options):
"""
Send a command and parse the response
"""
await conn.send_command(*args)
return await self.parse_response(conn, command_name, **options)
async def _close_connection(self, conn: Connection):
"""
Close the connection before retrying.
The supported exceptions are already checked in the
retry object so we don't need to do it here.
After we disconnect the connection, it will try to reconnect and
do a health check as part of the send_command logic(on connection level).
"""
await conn.disconnect()
# COMMAND EXECUTION AND PROTOCOL PARSING
async def execute_command(self, *args, **options):
"""Execute a command and return a parsed response"""
await self.initialize()
pool = self.connection_pool
command_name = args[0]
conn = self.connection or await pool.get_connection()
if self.single_connection_client:
await self._single_conn_lock.acquire()
try:
return await conn.retry.call_with_retry(
lambda: self._send_command_parse_response(
conn, command_name, *args, **options
),
lambda _: self._close_connection(conn),
)
finally:
if self.single_connection_client:
self._single_conn_lock.release()
if not self.connection:
await pool.release(conn)
async def parse_response(
self, connection: Connection, command_name: Union[str, bytes], **options
):
"""Parses a response from the Redis server"""
try:
if NEVER_DECODE in options:
response = await connection.read_response(disable_decoding=True)
options.pop(NEVER_DECODE)
else:
response = await connection.read_response()
except ResponseError:
if EMPTY_RESPONSE in options:
return options[EMPTY_RESPONSE]
raise
if EMPTY_RESPONSE in options:
options.pop(EMPTY_RESPONSE)
# Remove keys entry, it needs only for cache.
options.pop("keys", None)
if command_name in self.response_callbacks:
# Mypy bug: https://github.com/python/mypy/issues/10977
command_name = cast(str, command_name)
retval = self.response_callbacks[command_name](response, **options)
return await retval if inspect.isawaitable(retval) else retval
return response
StrictRedis = Redis
| Redis |
python | ray-project__ray | python/ray/_private/profiling.py | {
"start": 2077,
"end": 8786
} | class ____:
# https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#bookmark=id.iycbnb4z7i9g # noqa
name: str
# Metadata arguments. E.g., name: <metadata_name>
args: Dict[str, str]
# The process id of this event. In Ray, pid indicates the node.
pid: int
# The thread id of this event. In Ray, tid indicates each worker.
tid: int = None
# M means the metadata event.
ph: str = "M"
def profile(event_type, extra_data=None):
"""Profile a span of time so that it appears in the timeline visualization.
Note that this only works in the raylet code path.
This function can be used as follows (both on the driver or within a task).
.. testcode::
import ray._private.profiling as profiling
with profiling.profile("custom event", extra_data={'key': 'val'}):
# Do some computation here.
x = 1 * 2
Optionally, a dictionary can be passed as the "extra_data" argument, and
it can have keys "name" and "cname" if you want to override the default
timeline display text and box color. Other values will appear at the bottom
of the chrome tracing GUI when you click on the box corresponding to this
profile span.
Args:
event_type: A string describing the type of the event.
extra_data: This must be a dictionary mapping strings to strings. This
data will be added to the json objects that are used to populate
the timeline, so if you want to set a particular color, you can
simply set the "cname" attribute to an appropriate color.
Similarly, if you set the "name" attribute, then that will set the
text displayed on the box in the timeline.
Returns:
An object that can profile a span of time via a "with" statement.
"""
if not PROFILING_ENABLED:
return NULL_LOG_SPAN
worker = ray._private.worker.global_worker
if worker.mode == ray._private.worker.LOCAL_MODE:
return NULL_LOG_SPAN
return worker.core_worker.profile_event(event_type.encode("ascii"), extra_data)
def chrome_tracing_dump(
tasks: List[dict],
) -> str:
"""Generate a chrome/perfetto tracing dump using task events.
Args:
tasks: List of tasks generated by a state API list_tasks(detail=True).
Returns:
Json serialized dump to create a chrome/perfetto tracing.
"""
# All events from given tasks.
all_events = []
# Chrome tracing doesn't have a concept of "node". Instead, we use
# chrome tracing's pid == ray's node.
# chrome tracing's tid == ray's process.
# Note that pid or tid is usually integer, but ray's node/process has
# ids in string.
# Unfortunately, perfetto doesn't allow to have string as a value of pid/tid.
# To workaround it, we use Metadata event from chrome tracing schema
# (https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#heading=h.xqopa5m0e28f) # noqa
# which allows pid/tid -> name mapping. In order to use this schema
# we build node_ip/(node_ip, worker_id) -> arbitrary index mapping.
# node ip address -> node idx.
node_to_index = {}
# Arbitrary index mapped to the ip address.
node_idx = 0
# (node index, worker id) -> worker idx
worker_to_index = {}
# Arbitrary index mapped to the (node index, worker id).
worker_idx = 0
for task in tasks:
profiling_data = task.get("profiling_data", [])
if profiling_data:
node_ip_address = profiling_data["node_ip_address"]
component_events = profiling_data["events"]
component_type = profiling_data["component_type"]
component_id = component_type + ":" + profiling_data["component_id"]
if component_type not in ["worker", "driver"]:
continue
for event in component_events:
extra_data = event["extra_data"]
# Propagate extra data.
extra_data["task_id"] = task["task_id"]
extra_data["job_id"] = task["job_id"]
extra_data["attempt_number"] = task["attempt_number"]
extra_data["func_or_class_name"] = task["func_or_class_name"]
extra_data["actor_id"] = task["actor_id"]
event_name = event["event_name"]
# build a id -> arbitrary index mapping
if node_ip_address not in node_to_index:
node_to_index[node_ip_address] = node_idx
# Whenever new node ip is introduced, we increment the index.
node_idx += 1
if (
node_to_index[node_ip_address],
component_id,
) not in worker_to_index: # noqa
worker_to_index[
(node_to_index[node_ip_address], component_id)
] = worker_idx # noqa
worker_idx += 1
# Modify the name with the additional user-defined extra data.
cname = _default_color_mapping[event["event_name"]]
name = event_name
if "cname" in extra_data:
cname = _default_color_mapping[event["extra_data"]["cname"]]
if "name" in extra_data:
name = extra_data["name"]
new_event = ChromeTracingCompleteEvent(
cat=event_name,
name=name,
pid=node_to_index[node_ip_address],
tid=worker_to_index[(node_to_index[node_ip_address], component_id)],
ts=event["start_time"] * 1e3,
dur=(event["end_time"] * 1e3) - (event["start_time"] * 1e3),
cname=cname,
args=extra_data,
)
all_events.append(asdict(new_event))
for node, i in node_to_index.items():
all_events.append(
asdict(
ChromeTracingMetadataEvent(
name="process_name",
pid=i,
args={"name": f"Node {node}"},
)
)
)
for worker, i in worker_to_index.items():
all_events.append(
asdict(
ChromeTracingMetadataEvent(
name="thread_name",
ph="M",
tid=i,
pid=worker[0],
args={"name": worker[1]},
)
)
)
# Handle task event disabled.
return json.dumps(all_events)
| ChromeTracingMetadataEvent |
python | dagster-io__dagster | python_modules/libraries/dagster-ssh/dagster_ssh/resources.py | {
"start": 1246,
"end": 14344
} | class ____(ConfigurableResource):
"""A Dagster resource for establishing SSH connections and performing remote file operations.
This resource leverages the Paramiko library to provide robust SSH connectivity,
including support for key-based and password authentication, tunneling, and SFTP transfers.
Args:
remote_host (str): The hostname or IP address of the remote server to connect to.
remote_port (Optional[int]): The SSH port on the remote host. Defaults to standard SSH port 22.
username (Optional[str]): The username for SSH authentication. If not provided, defaults to the current system user.
password (Optional[str]): The password for SSH authentication. Not recommended for production use; prefer key-based authentication.
key_file (Optional[str]): Path to the SSH private key file for authentication.
key_string (Optional[str]): SSH private key as a string for authentication.
timeout (int, optional): Connection timeout in seconds. Defaults to 10.
keepalive_interval (int, optional): Interval for sending SSH keepalive packets. (Defaults to 30 seconds.)
compress (bool, optional): Whether to compress the SSH transport stream. Defaults to True.
no_host_key_check (bool, optional): Disable host key verification.
allow_host_key_change (bool, optional): Allow connections to hosts with changed host keys. (Defaults to False.)
Example:
Creating an SSH resource with key-based authentication:
.. code-block:: python
ssh_resource = SSHResource(
remote_host="example.com",
username="myuser", key_file="/path/to/private/key"
)
Creating an SSH resource with password authentication:
.. code-block:: python
ssh_resource = SSHResource(
remote_host="example.com",
username="myuser",
password="my_secure_password"
)
Using the resource to transfer a file:
.. code-block:: python
local_file = ssh_resource.sftp_get("/remote/path/file.txt", "/local/path/file.txt")
"""
remote_host: str = Field(description="Remote host to connect to")
remote_port: Optional[int] = Field(default=None, description="Port of remote host to connect")
username: Optional[str] = Field(default=None, description="Username to connect to remote host")
password: Optional[str] = Field(
default=None, description="Password of the username to connect to remote host"
)
key_file: Optional[str] = Field(
default=None, description="Key file to use to connect to remote host"
)
key_string: Optional[str] = Field(
default=None, description="Key string to use to connect to remote host"
)
timeout: int = Field(
default=10, description="Timeout for the attempt to connect to remote host"
)
keepalive_interval: int = Field(
default=30,
description="Send a keepalive packet to remote host every keepalive_interval seconds",
)
compress: bool = Field(default=True, description="Compress the transport stream")
no_host_key_check: bool = Field(
default=True,
description=(
"If True, the host key will not be verified. This is unsafe and not recommended"
),
)
allow_host_key_change: bool = Field(
default=False,
description="If True, allow connecting to hosts whose host key has changed",
)
_logger: Optional[logging.Logger] = PrivateAttr(default=None)
_host_proxy: Optional[paramiko.ProxyCommand] = PrivateAttr(default=None)
_key_obj: Optional[paramiko.RSAKey] = PrivateAttr(default=None)
def set_logger(self, logger: logging.Logger) -> None:
self._logger = logger
def setup_for_execution(self, context: InitResourceContext) -> None:
self._logger = context.log
self._host_proxy = None
# Create RSAKey object from private key string
self._key_obj = key_from_str(self.key_string) if self.key_string is not None else None
# Auto detecting username values from system
if not self.username:
if self._logger:
self._logger.debug(
f"username to ssh to host: {self.remote_host} is not specified. Using system's default provided"
" by getpass.getuser()"
)
self.username = getpass.getuser()
user_ssh_config_filename = os.path.expanduser("~/.ssh/config")
if os.path.isfile(user_ssh_config_filename):
ssh_conf = paramiko.SSHConfig()
ssh_conf.parse(open(user_ssh_config_filename, encoding="utf8"))
host_info = ssh_conf.lookup(self.remote_host)
proxy_command = host_info.get("proxycommand")
if host_info and proxy_command:
self._host_proxy = paramiko.ProxyCommand(proxy_command)
if not (self.password or self.key_file):
identify_file = host_info.get("identityfile")
if host_info and identify_file:
self.key_file = identify_file[0]
@property
def log(self) -> logging.Logger:
return check.not_none(self._logger)
def get_connection(self) -> SSHClient:
"""Opens a SSH connection to the remote host.
:rtype: paramiko.client.SSHClient
"""
client = paramiko.SSHClient()
client.load_system_host_keys()
if not self.allow_host_key_change:
self.log.warning(
"Remote Identification Change is not verified. This won't protect against "
"Man-In-The-Middle attacks"
)
client.load_system_host_keys()
if self.no_host_key_check:
self.log.warning(
"No Host Key Verification. This won't protect against Man-In-The-Middle attacks"
)
# Default is RejectPolicy
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.password and self.password.strip():
client.connect(
hostname=self.remote_host,
username=self.username,
password=self.password,
key_filename=self.key_file,
pkey=self._key_obj,
timeout=self.timeout,
compress=self.compress,
port=self.remote_port, # type: ignore
sock=self._host_proxy,
look_for_keys=False,
)
else:
client.connect(
hostname=self.remote_host,
username=self.username,
key_filename=self.key_file,
pkey=self._key_obj,
timeout=self.timeout,
compress=self.compress,
port=self.remote_port, # type: ignore
sock=self._host_proxy,
)
if self.keepalive_interval:
client.get_transport().set_keepalive(self.keepalive_interval) # type: ignore
return client
def get_tunnel(
self, remote_port, remote_host="localhost", local_port=None
) -> SSHTunnelForwarder:
check.int_param(remote_port, "remote_port")
check.str_param(remote_host, "remote_host")
check.opt_int_param(local_port, "local_port")
if local_port is not None:
local_bind_address = ("localhost", local_port)
else:
local_bind_address = ("localhost",)
# Will prefer key string if specified, otherwise use the key file
if self._key_obj and self.key_file:
self.log.warning(
"SSHResource: key_string and key_file both specified as config. Using key_string."
)
pkey = self._key_obj if self._key_obj else self.key_file
if self.password and self.password.strip():
client = SSHTunnelForwarder(
self.remote_host,
ssh_port=self.remote_port,
ssh_username=self.username,
ssh_password=self.password,
ssh_pkey=pkey,
ssh_proxy=self._host_proxy,
local_bind_address=local_bind_address,
remote_bind_address=(remote_host, remote_port),
logger=self._logger,
)
else:
client = SSHTunnelForwarder(
self.remote_host,
ssh_port=self.remote_port,
ssh_username=self.username,
ssh_pkey=pkey,
ssh_proxy=self._host_proxy,
local_bind_address=local_bind_address,
remote_bind_address=(remote_host, remote_port),
host_pkey_directories=[],
logger=self._logger,
)
return client
def sftp_get(self, remote_filepath, local_filepath):
check.str_param(remote_filepath, "remote_filepath")
check.str_param(local_filepath, "local_filepath")
conn = self.get_connection()
with conn.open_sftp() as sftp_client:
local_folder = os.path.dirname(local_filepath)
# Create intermediate directories if they don't exist
mkdir_p(local_folder)
self.log.info(f"Starting to transfer from {remote_filepath} to {local_filepath}")
sftp_client.get(remote_filepath, local_filepath)
conn.close()
return local_filepath
def sftp_put(self, remote_filepath, local_filepath, confirm=True):
check.str_param(remote_filepath, "remote_filepath")
check.str_param(local_filepath, "local_filepath")
conn = self.get_connection()
with conn.open_sftp() as sftp_client:
self.log.info(f"Starting to transfer file from {local_filepath} to {remote_filepath}")
sftp_client.put(local_filepath, remote_filepath, confirm=confirm)
conn.close()
return local_filepath
@beta
@dagster_maintained_resource
@resource(
config_schema={
"remote_host": DagsterField(
StringSource, description="remote host to connect to", is_required=True
),
"remote_port": DagsterField(
IntSource,
description="port of remote host to connect (Default is paramiko SSH_PORT)",
is_required=False,
default_value=SSH_PORT,
),
"username": DagsterField(
StringSource, description="username to connect to the remote_host", is_required=False
),
"password": DagsterField(
StringSource,
description="password of the username to connect to the remote_host",
is_required=False,
),
"key_file": DagsterField(
StringSource,
description="key file to use to connect to the remote_host.",
is_required=False,
),
"key_string": DagsterField(
StringSource,
description="key string to use to connect to remote_host",
is_required=False,
),
"timeout": DagsterField(
IntSource,
description="timeout for the attempt to connect to the remote_host.",
is_required=False,
default_value=10,
),
"keepalive_interval": DagsterField(
IntSource,
description="send a keepalive packet to remote host every keepalive_interval seconds",
is_required=False,
default_value=30,
),
"compress": DagsterField(BoolSource, is_required=False, default_value=True),
"no_host_key_check": DagsterField(BoolSource, is_required=False, default_value=True),
"allow_host_key_change": DagsterField(
BoolSource, description="[Deprecated]", is_required=False, default_value=False
),
}
)
def ssh_resource(init_context):
"""A Dagster resource factory for creating SSHResource instances.
This function converts Dagster resource context configuration into an SSHResource
that can be used for remote SSH connections and file operations.
Args:
init_context (InitResourceContext): The Dagster resource initialization context containing configuration parameters.
Returns:
SSHResource: A configured SSH resource ready for use in Dagster pipelines.
Example:
Configuring the SSH resource in a Dagster pipeline:
.. code-block:: python
from dagster import Definitions, job, op
from dagster_ssh import ssh_resource
@op
def transfer_files(ssh):
ssh.sftp_get("/remote/file", "/local/file")
@job
def my_ssh_job():
transfer_files(ssh=ssh_resource.configured({
"remote_host": "example.com",
"username": "myuser",
"key_file": "/path/to/private/key"
}))
Definitions(jobs=[my_ssh_job])
"""
return SSHResource.from_resource_context(init_context)
| SSHResource |
python | sympy__sympy | sympy/sets/sets.py | {
"start": 56587,
"end": 64236
} | class ____(Set):
"""
Represents a finite set of Sympy expressions.
Examples
========
>>> from sympy import FiniteSet, Symbol, Interval, Naturals0
>>> FiniteSet(1, 2, 3, 4)
{1, 2, 3, 4}
>>> 3 in FiniteSet(1, 2, 3, 4)
True
>>> FiniteSet(1, (1, 2), Symbol('x'))
{1, x, (1, 2)}
>>> FiniteSet(Interval(1, 2), Naturals0, {1, 2})
FiniteSet({1, 2}, Interval(1, 2), Naturals0)
>>> members = [1, 2, 3, 4]
>>> f = FiniteSet(*members)
>>> f
{1, 2, 3, 4}
>>> f - FiniteSet(2)
{1, 3, 4}
>>> f + FiniteSet(2, 5)
{1, 2, 3, 4, 5}
References
==========
.. [1] https://en.wikipedia.org/wiki/Finite_set
"""
is_FiniteSet = True
is_iterable = True
is_empty = False
is_finite_set = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
if evaluate:
args = list(map(sympify, args))
if len(args) == 0:
return S.EmptySet
else:
args = list(map(sympify, args))
# keep the form of the first canonical arg
dargs = {}
for i in reversed(list(ordered(args))):
if i.is_Symbol:
dargs[i] = i
else:
try:
dargs[i.as_dummy()] = i
except TypeError:
# e.g. i = class without args like `Interval`
dargs[i] = i
_args_set = set(dargs.values())
args = list(ordered(_args_set, Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._args_set = _args_set
return obj
def __iter__(self):
return iter(self.args)
def _complement(self, other):
if isinstance(other, Interval):
# Splitting in sub-intervals is only done for S.Reals;
# other cases that need splitting will first pass through
# Set._complement().
nums, syms = [], []
for m in self.args:
if m.is_number and m.is_real:
nums.append(m)
elif m.is_real == False:
pass # drop non-reals
else:
syms.append(m) # various symbolic expressions
if other == S.Reals and nums != []:
nums.sort()
intervals = [] # Build up a list of intervals between the elements
intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
for a, b in zip(nums[:-1], nums[1:]):
intervals.append(Interval(a, b, True, True)) # both open
intervals.append(Interval(nums[-1], S.Infinity, True, True))
if syms != []:
return Complement(Union(*intervals, evaluate=False),
FiniteSet(*syms), evaluate=False)
else:
return Union(*intervals, evaluate=False)
elif nums == []: # no splitting necessary or possible:
if syms:
return Complement(other, FiniteSet(*syms), evaluate=False)
else:
return other
elif isinstance(other, FiniteSet):
unk = []
for i in self:
c = sympify(other.contains(i))
if c is not S.true and c is not S.false:
unk.append(i)
unk = FiniteSet(*unk)
if unk == self:
return
not_true = []
for i in other:
c = sympify(self.contains(i))
if c is not S.true:
not_true.append(i)
return Complement(FiniteSet(*not_true), unk)
return Set._complement(self, other)
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
Explanation
===========
The actual test is for mathematical equality (as opposed to
syntactical equality). In the worst case all elements of the
set must be checked.
Examples
========
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
if other in self._args_set:
return S.true
else:
# evaluate=True is needed to override evaluate=False context;
# we need Eq to do the evaluation
return Or(*[Eq(e, other, evaluate=True) for e in self.args])
def _eval_is_subset(self, other):
return fuzzy_and(other._contains(e) for e in self.args)
@property
def _boundary(self):
return self
@property
def _inf(self):
return Min(*self)
@property
def _sup(self):
return Max(*self)
@property
def measure(self):
return 0
def _kind(self):
if not self.args:
return SetKind()
elif all(i.kind == self.args[0].kind for i in self.args):
return SetKind(self.args[0].kind)
else:
return SetKind(UndefinedKind)
def __len__(self):
return len(self.args)
def as_relational(self, symbol):
"""Rewrite a FiniteSet in terms of equalities and logic operators. """
return Or(*[Eq(symbol, elem) for elem in self])
def compare(self, other):
return (hash(self) - hash(other))
def _eval_evalf(self, prec):
dps = prec_to_dps(prec)
return FiniteSet(*[elem.evalf(n=dps) for elem in self])
def _eval_simplify(self, **kwargs):
from sympy.simplify import simplify
return FiniteSet(*[simplify(elem, **kwargs) for elem in self])
@property
def _sorted_args(self):
return self.args
def _eval_powerset(self):
return self.func(*[self.func(*s) for s in subsets(self.args)])
def _eval_rewrite_as_PowerSet(self, *args, **kwargs):
"""Rewriting method for a finite set to a power set."""
from .powerset import PowerSet
is2pow = lambda n: bool(n and not n & (n - 1))
if not is2pow(len(self)):
return None
fs_test = lambda arg: isinstance(arg, Set) and arg.is_FiniteSet
if not all(fs_test(arg) for arg in args):
return None
biggest = max(args, key=len)
for arg in subsets(biggest.args):
arg_set = FiniteSet(*arg)
if arg_set not in args:
return None
return PowerSet(biggest)
def __ge__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return other.is_subset(self)
def __gt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_superset(other)
def __le__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_subset(other)
def __lt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_subset(other)
def __eq__(self, other):
if isinstance(other, (set, frozenset)):
return self._args_set == other
return super().__eq__(other)
def __hash__(self):
return Basic.__hash__(self)
_sympy_converter[set] = lambda x: FiniteSet(*x)
_sympy_converter[frozenset] = lambda x: FiniteSet(*x)
| FiniteSet |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 225484,
"end": 227364
} | class ____(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return {}
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config).to(torch_device)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.params)
def compute_objective(metrics: dict[str, float]) -> list[float]:
return metrics["eval_loss"], metrics["eval_accuracy"]
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
eval_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=10,
disable_tqdm=True,
load_best_model_at_end=True,
run_name="test",
model_init=model_init,
compute_metrics=AlmostAccuracy(),
)
trainer.hyperparameter_search(
direction=["minimize", "maximize"],
hp_space=hp_space,
hp_name=hp_name,
n_trials=4,
compute_objective=compute_objective,
)
@require_torch
@require_optuna
| TrainerHyperParameterMultiObjectOptunaIntegrationTest |
python | spyder-ide__spyder | spyder/widgets/tabs.py | {
"start": 22798,
"end": 25842
} | class ____(BaseTabs, SpyderShortcutsMixin):
"""BaseTabs widget with movable tabs and tab navigation shortcuts."""
# Dummy CONF_SECTION to avoid a warning
CONF_SECTION = ""
# Signals
move_data = Signal(int, int)
move_tab_finished = Signal()
sig_move_tab = Signal(str, str, int, int)
def __init__(self, parent, actions=None, menu=None,
corner_widgets=None, menu_use_tooltips=False,
rename_tabs=False, split_char='',
split_index=0):
BaseTabs.__init__(self, parent, actions, menu,
corner_widgets, menu_use_tooltips)
SpyderShortcutsMixin.__init__(self)
tab_bar = TabBar(self, parent,
rename_tabs=rename_tabs,
split_char=split_char,
split_index=split_index)
tab_bar.sig_move_tab.connect(self.move_tab)
tab_bar.sig_move_tab[(str, int, int)].connect(
self.move_tab_from_another_tabwidget)
self.setTabBar(tab_bar)
self.register_shortcuts(parent)
@Slot(int, int)
def move_tab(self, index_from, index_to):
"""Move tab inside a tabwidget"""
self.move_data.emit(index_from, index_to)
tip, text = self.tabToolTip(index_from), self.tabText(index_from)
icon, widget = self.tabIcon(index_from), self.widget(index_from)
current_widget = self.currentWidget()
self.removeTab(index_from)
self.insertTab(index_to, widget, icon, text)
self.setTabToolTip(index_to, tip)
self.setCurrentWidget(current_widget)
self.move_tab_finished.emit()
@Slot(str, int, int)
def move_tab_from_another_tabwidget(self, tabwidget_from,
index_from, index_to):
"""Move tab from a tabwidget to another"""
# We pass self object IDs as QString objs, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms.
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
self.sig_move_tab.emit(tabwidget_from, str(id(self)),
index_from, index_to)
def register_shortcuts(self, parent):
"""Register shortcuts for this widget."""
shortcuts = (
("go to next file", lambda: self.tab_navigate(1), "editor"),
("go to previous file", lambda: self.tab_navigate(-1), "editor"),
(
"close file 1",
lambda: self.sig_close_tab.emit(self.currentIndex()),
"main",
),
(
"close file 2",
lambda: self.sig_close_tab.emit(self.currentIndex()),
"main",
),
)
for name, callback, context in shortcuts:
self.register_shortcut_for_widget(
name=name, triggered=callback, widget=parent, context=context
)
| Tabs |
python | pytorch__pytorch | torch/_inductor/codegen/python_wrapper_mtia.py | {
"start": 144,
"end": 1025
} | class ____(PythonWrapperCodegen):
"""
A thin wrapper of PythonWrapperCodegen with MTIA specific logic
"""
@override
def write_header(self) -> None:
super().write_header()
# MITA specific imports
self.imports.splice("import mtia.host_runtime.torch_mtia.dynamic_library")
@override
@staticmethod
def create(
is_subgraph: bool,
subgraph_name: Optional[str],
parent_wrapper: Optional[PythonWrapperCodegen],
partition_signatures: Optional[ir.GraphPartitionSignature] = None,
) -> PythonWrapperCodegen:
if is_subgraph:
# Delegate to the parent class to handle the case of subgraph
return PythonWrapperCodegen.create(
is_subgraph, subgraph_name, parent_wrapper, partition_signatures
)
return PythonWrapperMtia()
| PythonWrapperMtia |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 12433,
"end": 14083
} | class ____:
_prefixes: Tuple[Tuple[DQLDMLClauseElement, str], ...] = ()
_has_prefixes_traverse_internals: _TraverseInternalsType = [
("_prefixes", InternalTraversal.dp_prefix_sequence)
]
@_generative
@_document_text_coercion(
"prefixes",
":meth:`_expression.HasPrefixes.prefix_with`",
":paramref:`.HasPrefixes.prefix_with.*prefixes`",
)
def prefix_with(
self,
*prefixes: _TextCoercedExpressionArgument[Any],
dialect: str = "*",
) -> Self:
r"""Add one or more expressions following the statement keyword, i.e.
SELECT, INSERT, UPDATE, or DELETE. Generative.
This is used to support backend-specific prefix keywords such as those
provided by MySQL.
E.g.::
stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")
# MySQL 5.7 optimizer hints
stmt = select(table).prefix_with("/*+ BKA(t1) */", dialect="mysql")
Multiple prefixes can be specified by multiple calls
to :meth:`_expression.HasPrefixes.prefix_with`.
:param \*prefixes: textual or :class:`_expression.ClauseElement`
construct which
will be rendered following the INSERT, UPDATE, or DELETE
keyword.
:param dialect: optional string dialect name which will
limit rendering of this prefix to only that dialect.
"""
self._prefixes = self._prefixes + tuple(
[
(coercions.expect(roles.StatementOptionRole, p), dialect)
for p in prefixes
]
)
return self
| HasPrefixes |
python | sympy__sympy | sympy/physics/quantum/pauli.py | {
"start": 12966,
"end": 17307
} | class ____(Bra):
"""Bra for a two-level quantum system.
Parameters
==========
n : Number
The state number (0 or 1).
"""
def __new__(cls, n):
if n not in (0, 1):
raise ValueError("n must be 0 or 1")
return Bra.__new__(cls, n)
@property
def n(self):
return self.label[0]
@classmethod
def dual_class(self):
return SigmaZKet
def _qsimplify_pauli_product(a, b):
"""
Internal helper function for simplifying products of Pauli operators.
"""
if not (isinstance(a, SigmaOpBase) and isinstance(b, SigmaOpBase)):
return Mul(a, b)
if a.name != b.name:
# Pauli matrices with different labels commute; sort by name
if a.name < b.name:
return Mul(a, b)
else:
return Mul(b, a)
elif isinstance(a, SigmaX):
if isinstance(b, SigmaX):
return S.One
if isinstance(b, SigmaY):
return I * SigmaZ(a.name)
if isinstance(b, SigmaZ):
return - I * SigmaY(a.name)
if isinstance(b, SigmaMinus):
return (S.Half + SigmaZ(a.name)/2)
if isinstance(b, SigmaPlus):
return (S.Half - SigmaZ(a.name)/2)
elif isinstance(a, SigmaY):
if isinstance(b, SigmaX):
return - I * SigmaZ(a.name)
if isinstance(b, SigmaY):
return S.One
if isinstance(b, SigmaZ):
return I * SigmaX(a.name)
if isinstance(b, SigmaMinus):
return -I * (S.One + SigmaZ(a.name))/2
if isinstance(b, SigmaPlus):
return I * (S.One - SigmaZ(a.name))/2
elif isinstance(a, SigmaZ):
if isinstance(b, SigmaX):
return I * SigmaY(a.name)
if isinstance(b, SigmaY):
return - I * SigmaX(a.name)
if isinstance(b, SigmaZ):
return S.One
if isinstance(b, SigmaMinus):
return - SigmaMinus(a.name)
if isinstance(b, SigmaPlus):
return SigmaPlus(a.name)
elif isinstance(a, SigmaMinus):
if isinstance(b, SigmaX):
return (S.One - SigmaZ(a.name))/2
if isinstance(b, SigmaY):
return - I * (S.One - SigmaZ(a.name))/2
if isinstance(b, SigmaZ):
# (SigmaX(a.name) - I * SigmaY(a.name))/2
return SigmaMinus(b.name)
if isinstance(b, SigmaMinus):
return S.Zero
if isinstance(b, SigmaPlus):
return S.Half - SigmaZ(a.name)/2
elif isinstance(a, SigmaPlus):
if isinstance(b, SigmaX):
return (S.One + SigmaZ(a.name))/2
if isinstance(b, SigmaY):
return I * (S.One + SigmaZ(a.name))/2
if isinstance(b, SigmaZ):
#-(SigmaX(a.name) + I * SigmaY(a.name))/2
return -SigmaPlus(a.name)
if isinstance(b, SigmaMinus):
return (S.One + SigmaZ(a.name))/2
if isinstance(b, SigmaPlus):
return S.Zero
else:
return a * b
def qsimplify_pauli(e):
"""
Simplify an expression that includes products of pauli operators.
Parameters
==========
e : expression
An expression that contains products of Pauli operators that is
to be simplified.
Examples
========
>>> from sympy.physics.quantum.pauli import SigmaX, SigmaY
>>> from sympy.physics.quantum.pauli import qsimplify_pauli
>>> sx, sy = SigmaX(), SigmaY()
>>> sx * sy
SigmaX()*SigmaY()
>>> qsimplify_pauli(sx * sy)
I*SigmaZ()
"""
if isinstance(e, Operator):
return e
if isinstance(e, (Add, Pow, exp)):
t = type(e)
return t(*(qsimplify_pauli(arg) for arg in e.args))
if isinstance(e, Mul):
c, nc = e.args_cnc()
nc_s = []
while nc:
curr = nc.pop(0)
while (len(nc) and
isinstance(curr, SigmaOpBase) and
isinstance(nc[0], SigmaOpBase) and
curr.name == nc[0].name):
x = nc.pop(0)
y = _qsimplify_pauli_product(curr, x)
c1, nc1 = y.args_cnc()
curr = Mul(*nc1)
c = c + c1
nc_s.append(curr)
return Mul(*c) * Mul(*nc_s)
return e
| SigmaZBra |
python | huggingface__transformers | src/transformers/models/granitemoehybrid/modeling_granitemoehybrid.py | {
"start": 40740,
"end": 41439
} | class ____(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states, gate=None):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
if gate is not None:
hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
| GraniteMoeHybridRMSNormGated |
python | getsentry__sentry | tests/sentry/incidents/models/test_incidents.py | {
"start": 10350,
"end": 10673
} | class ____(unittest.TestCase):
def test(self) -> None:
incident = Incident()
assert incident.current_end_date == timezone.now()
incident.date_closed = timezone.now() - timedelta(minutes=10)
assert incident.current_end_date == timezone.now() - timedelta(minutes=10)
| IncidentCurrentEndDateTest |
python | apache__airflow | task-sdk/tests/task_sdk/definitions/test_asset_decorators.py | {
"start": 2372,
"end": 7133
} | class ____:
def test_without_uri(self, example_asset_func):
asset_definition = asset(schedule=None)(example_asset_func)
assert asset_definition.name == "example_asset_func"
assert asset_definition.uri == "example_asset_func"
assert asset_definition.group == "asset"
assert asset_definition.extra == {}
assert asset_definition._function == example_asset_func
assert asset_definition._source.schedule is None
def test_with_uri(self, example_asset_func):
asset_definition = asset(schedule=None, uri="s3://bucket/object")(example_asset_func)
assert asset_definition.name == "example_asset_func"
assert asset_definition.uri == "s3://bucket/object"
assert asset_definition.group == "asset"
assert asset_definition.extra == {}
assert asset_definition._function == example_asset_func
assert asset_definition._source.schedule is None
def test_with_group_and_extra(self, example_asset_func):
asset_definition = asset(schedule=None, uri="s3://bucket/object", group="MLModel", extra={"k": "v"})(
example_asset_func
)
assert asset_definition.name == "example_asset_func"
assert asset_definition.uri == "s3://bucket/object"
assert asset_definition.group == "MLModel"
assert asset_definition.extra == {"k": "v"}
assert asset_definition._function == example_asset_func
assert asset_definition._source.schedule is None
def test_nested_function(self):
def root_func():
@asset(schedule=None)
def asset_func():
pass
with pytest.raises(ValueError, match="nested function not supported"):
root_func()
@pytest.mark.parametrize("func_fixer", ("self", "context"), indirect=True)
def test_with_invalid_asset_name(self, func_fixer):
@func_fixer
def example_asset_func():
pass
with pytest.raises(ValueError, match=f"prohibited name for asset: {func_fixer.fixed_name}"):
asset(schedule=None)(example_asset_func)
def test_with_star(self, func_fixer):
@func_fixer
def example_asset_func(*args):
pass
with pytest.raises(TypeError) as err:
asset(schedule=None)(example_asset_func)
assert err.value.args[0] == "wildcard '*args' is not supported in @asset"
def test_with_starstar(self, func_fixer):
@func_fixer
def example_asset_func(**kwargs):
pass
with pytest.raises(TypeError) as err:
asset(schedule=None)(example_asset_func)
assert err.value.args[0] == "wildcard '**kwargs' is not supported in @asset"
def test_with_posonly(self, func_fixer):
@func_fixer
def example_asset_func(self, /):
pass
with pytest.raises(TypeError) as err:
asset(schedule=None)(example_asset_func)
assert (
err.value.args[0]
== "positional-only argument 'self' without a default is not supported in @asset"
)
def test_with_task_decorator(self, func_fixer):
@task(retries=3)
@func_fixer
def _example_task_func():
return "This is example_task"
asset_definition = asset(name="asset", dag_id="dag", schedule=None)(_example_task_func)
assert asset_definition.name == "asset"
assert asset_definition._source.dag_id == "dag"
assert asset_definition._function == _example_task_func
def test_with_task_decorator_and_outlets(self, func_fixer):
@task(retries=3, outlets=Asset(name="a"))
@func_fixer
def _example_task_func():
return "This is example_task"
with pytest.raises(TypeError) as err:
asset(schedule=None)(_example_task_func)
assert err.value.args[0] == "@task decorator with 'outlets' argument is not supported in @asset"
@pytest.mark.parametrize(
("provided_uri", "expected_uri"),
[
pytest.param(None, "custom", id="default-uri"),
pytest.param("s3://bucket/object", "s3://bucket/object", id="custom-uri"),
],
)
def test_custom_name(self, example_asset_func, provided_uri, expected_uri):
asset_definition = asset(name="custom", uri=provided_uri, schedule=None)(example_asset_func)
assert asset_definition.name == "custom"
assert asset_definition.uri == expected_uri
def test_custom_dag_id(self, example_asset_func):
asset_definition = asset(name="asset", dag_id="dag", schedule=None)(example_asset_func)
assert asset_definition.name == "asset"
assert asset_definition._source.dag_id == "dag"
| TestAssetDecorator |
python | openai__gym | gym/envs/mujoco/humanoid.py | {
"start": 268,
"end": 2800
} | class ____(MuJocoPyEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 67,
}
def __init__(self, **kwargs):
observation_space = Box(
low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64
)
MuJocoPyEnv.__init__(
self, "humanoid.xml", 5, observation_space=observation_space, **kwargs
)
utils.EzPickle.__init__(self, **kwargs)
def _get_obs(self):
data = self.sim.data
return np.concatenate(
[
data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
]
)
def step(self, a):
pos_before = mass_center(self.model, self.sim)
self.do_simulation(a, self.frame_skip)
pos_after = mass_center(self.model, self.sim)
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
qpos = self.sim.data.qpos
terminated = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))
if self.render_mode == "human":
self.render()
return (
self._get_obs(),
reward,
terminated,
False,
dict(
reward_linvel=lin_vel_cost,
reward_quadctrl=-quad_ctrl_cost,
reward_alive=alive_bonus,
reward_impact=-quad_impact_cost,
),
)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(
low=-c,
high=c,
size=self.model.nv,
),
)
return self._get_obs()
def viewer_setup(self):
assert self.viewer is not None
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] = 2.0
self.viewer.cam.elevation = -20
| HumanoidEnv |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 45422,
"end": 46102
} | class ____(TestCase):
def test_valid_rounding(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2, rounding=ROUND_UP)
assert field.to_representation(Decimal('1.234')) == '1.24'
field = serializers.DecimalField(max_digits=4, decimal_places=2, rounding=ROUND_DOWN)
assert field.to_representation(Decimal('1.234')) == '1.23'
def test_invalid_rounding(self):
with pytest.raises(AssertionError) as excinfo:
serializers.DecimalField(max_digits=1, decimal_places=1, rounding='ROUND_UNKNOWN')
assert 'Invalid rounding option' in str(excinfo.value)
# Date & time serializers...
| TestRoundingDecimalField |
python | kamyu104__LeetCode-Solutions | Python/single-number-ii.py | {
"start": 858,
"end": 1106
} | class ____(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return (sum(set(nums)) * 3 - sum(nums)) / 2
# every element appears 4 times except for one with 2 times
| Solution4 |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/networks.py | {
"start": 1268,
"end": 7157
} | class ____(nn.Module):
ATTENTION_EMBEDDING_SIZE = 128 # The embedding size of attention is fixed
def __init__(
self,
observation_specs: List[ObservationSpec],
h_size: int,
vis_encode_type: EncoderType,
normalize: bool = False,
):
"""
Returns an ObservationEncoder that can process and encode a set of observations.
Will use an RSA if needed for variable length observations.
"""
super().__init__()
self.processors, self.embedding_sizes = ModelUtils.create_input_processors(
observation_specs,
h_size,
vis_encode_type,
self.ATTENTION_EMBEDDING_SIZE,
normalize=normalize,
)
self.rsa, self.x_self_encoder = ModelUtils.create_residual_self_attention(
self.processors, self.embedding_sizes, self.ATTENTION_EMBEDDING_SIZE
)
if self.rsa is not None:
total_enc_size = sum(self.embedding_sizes) + self.ATTENTION_EMBEDDING_SIZE
else:
total_enc_size = sum(self.embedding_sizes)
self.normalize = normalize
self._total_enc_size = total_enc_size
self._total_goal_enc_size = 0
self._goal_processor_indices: List[int] = []
for i in range(len(observation_specs)):
if observation_specs[i].observation_type == ObservationType.GOAL_SIGNAL:
self._total_goal_enc_size += self.embedding_sizes[i]
self._goal_processor_indices.append(i)
@property
def total_enc_size(self) -> int:
"""
Returns the total encoding size for this ObservationEncoder.
"""
return self._total_enc_size
@property
def total_goal_enc_size(self) -> int:
"""
Returns the total goal encoding size for this ObservationEncoder.
"""
return self._total_goal_enc_size
def update_normalization(self, buffer: AgentBuffer) -> None:
obs = ObsUtil.from_buffer(buffer, len(self.processors))
for vec_input, enc in zip(obs, self.processors):
if isinstance(enc, VectorInput):
enc.update_normalization(
torch.as_tensor(vec_input.to_ndarray(), device=default_device())
)
def copy_normalization(self, other_encoder: "ObservationEncoder") -> None:
if self.normalize:
for n1, n2 in zip(self.processors, other_encoder.processors):
if isinstance(n1, VectorInput) and isinstance(n2, VectorInput):
n1.copy_normalization(n2)
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
"""
Encode observations using a list of processors and an RSA.
:param inputs: List of Tensors corresponding to a set of obs.
"""
encodes = []
var_len_processor_inputs: List[Tuple[nn.Module, torch.Tensor]] = []
for idx, processor in enumerate(self.processors):
if not isinstance(processor, EntityEmbedding):
# The input can be encoded without having to process other inputs
obs_input = inputs[idx]
processed_obs = processor(obs_input)
encodes.append(processed_obs)
else:
var_len_processor_inputs.append((processor, inputs[idx]))
if len(encodes) != 0:
encoded_self = torch.cat(encodes, dim=1)
input_exist = True
else:
input_exist = False
if len(var_len_processor_inputs) > 0 and self.rsa is not None:
# Some inputs need to be processed with a variable length encoder
masks = get_zero_entities_mask([p_i[1] for p_i in var_len_processor_inputs])
embeddings: List[torch.Tensor] = []
processed_self = (
self.x_self_encoder(encoded_self)
if input_exist and self.x_self_encoder is not None
else None
)
for processor, var_len_input in var_len_processor_inputs:
embeddings.append(processor(processed_self, var_len_input))
qkv = torch.cat(embeddings, dim=1)
attention_embedding = self.rsa(qkv, masks)
if not input_exist:
encoded_self = torch.cat([attention_embedding], dim=1)
input_exist = True
else:
encoded_self = torch.cat([encoded_self, attention_embedding], dim=1)
if not input_exist:
raise UnityTrainerException(
"The trainer was unable to process any of the provided inputs. "
"Make sure the trained agents has at least one sensor attached to them."
)
return encoded_self
def get_goal_encoding(self, inputs: List[torch.Tensor]) -> torch.Tensor:
"""
Encode observations corresponding to goals using a list of processors.
:param inputs: List of Tensors corresponding to a set of obs.
"""
encodes = []
for idx in self._goal_processor_indices:
processor = self.processors[idx]
if not isinstance(processor, EntityEmbedding):
# The input can be encoded without having to process other inputs
obs_input = inputs[idx]
processed_obs = processor(obs_input)
encodes.append(processed_obs)
else:
raise UnityTrainerException(
"The one of the goals uses variable length observations. This use "
"case is not supported."
)
if len(encodes) != 0:
encoded = torch.cat(encodes, dim=1)
else:
raise UnityTrainerException(
"Trainer was unable to process any of the goals provided as input."
)
return encoded
| ObservationEncoder |
python | kamyu104__LeetCode-Solutions | Python/path-with-minimum-effort.py | {
"start": 95,
"end": 1303
} | class ____(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
dst = (len(heights)-1, len(heights[0])-1)
dist = [[float("inf")]*len(heights[0]) for _ in xrange(len(heights))]
dist[0][0] = 0
min_heap = [(0, 0, 0)]
lookup = [[False]*len(heights[0]) for _ in xrange(len(heights))]
while min_heap:
d, r, c = heapq.heappop(min_heap)
if lookup[r][c]:
continue
lookup[r][c] = True
if (r, c) == dst:
return d
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and 0 <= nc < len(heights[0]) and not lookup[nr][nc]):
continue
nd = max(d, abs(heights[nr][nc]-heights[r][c]))
if nd < dist[nr][nc]:
dist[nr][nc] = nd
heapq.heappush(min_heap, (nd, nr, nc))
return -1
# Time: O(m * n * log(m * n) + m * n * α(m * n)) = O(m * n * log(m * n))
# Space: O(m * n)
import collections
| Solution |
python | astropy__astropy | astropy/modeling/tests/test_fitting_parallel.py | {
"start": 12282,
"end": 20506
} | class ____:
def setup_method(self, method):
self.data = gaussian(np.arange(20), 2, 10, 1)
self.data = np.broadcast_to(self.data.reshape((20, 1)), (20, 3)).copy()
self.data_original = self.data.copy()
self.data[0, 0] = np.nan
self.model = Gaussian1D(amplitude=1.5, mean=12, stddev=1.5)
self.fitter = LevMarLSQFitter()
def test_error(self, tmp_path):
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=self.fitter,
fitting_axes=0,
diagnostics="error",
diagnostics_path=tmp_path / "diag1",
scheduler="synchronous",
)
assert os.listdir(tmp_path / "diag1") == ["0"]
assert sorted(os.listdir(tmp_path / "diag1" / "0")) == ["error.log"]
def test_all(self, tmp_path):
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=self.fitter,
fitting_axes=0,
diagnostics="all",
diagnostics_path=tmp_path / "diag2",
scheduler="synchronous",
)
assert sorted(os.listdir(tmp_path / "diag2")) == ["0", "1", "2"]
def test_all_world_wcs(self, tmp_path):
# Make sure things world also with world=wcs
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=self.fitter,
fitting_axes=0,
world=WCS(naxis=2),
diagnostics="error",
diagnostics_path=tmp_path / "diag3",
scheduler="synchronous",
)
assert os.listdir(tmp_path / "diag3") == ["0"]
assert sorted(os.listdir(tmp_path / "diag3" / "0")) == ["error.log"]
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_callable(self, tmp_path):
# And check that we can pass in a callable
def custom_callable(path, world, data, weights, model, fitting_kwargs):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(world[0], data, "k.")
ax.text(0.1, 0.9, "Fit failed!", color="r", transform=ax.transAxes)
fig.savefig(os.path.join(path, "fit.png"))
plt.close(fig)
# Note: here we keep the default scheduler ('processes') to make sure
# that callables are passed correctly to other processes. This test is
# not as fast as other ones anyway due to the plotting so this doesn't
# have a big performance impact.
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=self.fitter,
fitting_axes=0,
world=WCS(naxis=2),
diagnostics="error",
diagnostics_path=tmp_path / "diag4",
diagnostics_callable=custom_callable,
)
assert os.listdir(tmp_path / "diag4") == ["0"]
assert sorted(os.listdir(tmp_path / "diag4" / "0")) == ["error.log", "fit.png"]
def test_warnings(self, tmp_path):
# Check that catching warnings works
parallel_fit_dask(
data=self.data_original,
model=self.model,
fitter=self.fitter,
fitting_axes=0,
diagnostics="error+warn",
diagnostics_path=tmp_path / "diag5",
fitter_kwargs={"maxiter": 2},
scheduler="synchronous",
)
assert sorted(os.listdir(tmp_path / "diag5")) == ["0", "1", "2"]
assert sorted(os.listdir(tmp_path / "diag5" / "0")) == ["warn.log"]
def test_missing_path(self):
with pytest.raises(ValueError, match="diagnostics_path should be set"):
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=self.fitter,
fitting_axes=0,
diagnostics="error",
)
def test_invalid(self):
with pytest.raises(
ValueError,
match=re.escape(
"diagnostics should be None, 'error', 'error+warn', or 'all'"
),
):
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=self.fitter,
fitting_axes=0,
diagnostics="spam",
)
@pytest.mark.parametrize(
"scheduler", (None, "synchronous", "processes", "threads", "default")
)
def test_dask_scheduler(scheduler):
N = 120
P = 20
rng = np.random.default_rng(12345)
x = np.linspace(-5, 30, P)
amplitude = rng.uniform(1, 10, N)
mean = rng.uniform(0, 25, N)
stddev = rng.uniform(1, 4, N)
data = gaussian(x[:, None], amplitude, mean, stddev)
# At this point, the data has shape (P, N)
# Set initial parameters to be close to but not exactly equal to true parameters
model = Gaussian1D(
amplitude=amplitude * rng.random(N),
mean=mean + rng.random(N),
stddev=stddev + rng.random(N),
)
fitter = LevMarLSQFitter()
model_fit = parallel_fit_dask(
data=data,
model=model,
fitter=fitter,
fitting_axes=0,
world=(x,),
scheduler=scheduler,
)
# Check that shape and values match
assert_allclose(model_fit.amplitude.value, amplitude)
assert_allclose(model_fit.mean.value, mean)
assert_allclose(model_fit.stddev.value, stddev)
def test_compound_model():
# Compound models have to be treated a little differently so check they
# work fine.
data = gaussian(
np.arange(20)[:, None],
np.array([2, 1.8]),
np.array([10, 11]),
np.array([1, 1.1]),
)
data[:, 0] += 2
data[:, 1] += 3
model1 = Gaussian1D(amplitude=1.5, mean=1.2, stddev=0.15)
model2 = Const1D(1)
model = model1 + model2
fitter = LevMarLSQFitter()
wcs = WCS(naxis=2)
wcs.wcs.ctype = "OFFSET", "WAVE"
wcs.wcs.crval = 10, 0.1
wcs.wcs.crpix = 1, 1
wcs.wcs.cdelt = 10, 0.1
model_fit = parallel_fit_dask(
data=data,
model=model,
fitter=fitter,
fitting_axes=0,
world=wcs,
scheduler="synchronous",
)
assert_allclose(model_fit.amplitude_0.value, [2, 1.8])
assert_allclose(model_fit.mean_0.value, [1.1, 1.2])
assert_allclose(model_fit.stddev_0.value, [0.1, 0.11])
assert_allclose(model_fit.amplitude_1.value, [2, 3])
# Check that constraints work
model.amplitude_1 = 2
model.amplitude_1.fixed = True
model_fit = parallel_fit_dask(
data=data,
model=model,
fitter=fitter,
fitting_axes=0,
world=wcs,
scheduler="synchronous",
)
assert_allclose(model_fit.amplitude_0.value, [2, 1.633], atol=0.001)
assert_allclose(model_fit.mean_0.value, [1.1, 1.145], atol=0.001)
assert_allclose(model_fit.stddev_0.value, [0.1, 0.736], atol=0.001)
assert_allclose(model_fit.amplitude_1.value, [2, 2], atol=0.001)
def test_model_dimension_mismatch():
model = Planar2D()
data = np.empty((20, 10, 5))
fitter = LevMarLSQFitter()
with pytest.raises(
ValueError,
match=re.escape("Model is 2-dimensional, but got 1 value(s) in fitting_axes="),
):
parallel_fit_dask(
data=data,
model=model,
fitter=fitter,
fitting_axes=0,
)
model = Linear1D()
data = np.empty((20, 10, 5))
fitter = LevMarLSQFitter()
with pytest.raises(
ValueError,
match=re.escape("Model is 1-dimensional, but got 2 value(s) in fitting_axes="),
):
parallel_fit_dask(
data=data,
model=model,
fitter=fitter,
fitting_axes=(1, 2),
)
def test_data_dimension_mismatch():
model = Planar2D()
data = np.empty((20, 10, 5))
fitter = LevMarLSQFitter()
with pytest.raises(
ValueError,
match=re.escape("Fitting index 4 out of range for 3-dimensional data"),
):
parallel_fit_dask(
data=data,
model=model,
fitter=fitter,
fitting_axes=(1, 4),
)
| TestDiagnostics |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 22417,
"end": 22690
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"BLUE_MINT",
"BLUE_PURPLE",
"PINK_BLUE",
"PURPLE_CORAL",
"RED_ORANGE",
)
| PinnedDiscussionGradient |
python | huggingface__transformers | tests/models/owlv2/test_modeling_owlv2.py | {
"start": 12771,
"end": 15464
} | class ____:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = Owlv2TextModelTester(parent, **text_kwargs)
self.vision_model_tester = Owlv2VisionModelTester(parent, **vision_kwargs)
self.is_training = is_training
self.text_config = self.text_model_tester.get_config().to_dict()
self.vision_config = self.vision_model_tester.get_config().to_dict()
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, pixel_values
def get_config(self):
return Owlv2Config(
text_config=self.text_config,
vision_config=self.vision_config,
projection_dim=64,
)
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
model = Owlv2Model(config).to(torch_device).eval()
with torch.no_grad():
result = model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
)
image_logits_size = (
self.vision_model_tester.batch_size,
self.text_model_tester.batch_size * self.text_model_tester.num_queries,
)
text_logits_size = (
self.text_model_tester.batch_size * self.text_model_tester.num_queries,
self.vision_model_tester.batch_size,
)
self.parent.assertEqual(result.logits_per_image.shape, image_logits_size)
self.parent.assertEqual(result.logits_per_text.shape, text_logits_size)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"return_loss": False,
}
return config, inputs_dict
@require_torch
# Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2, owlvit-base-patch32->owlv2-base-patch16-ensemble
| Owlv2ModelTester |
python | walkccc__LeetCode | solutions/2645. Minimum Additions to Make Valid String/2645.py | {
"start": 0,
"end": 269
} | class ____:
def addMinimum(self, word: str) -> int:
letters = ['a', 'b', 'c']
ans = 0
i = 0
while i < len(word):
for c in letters:
if i < len(word) and word[i] == c:
i += 1
else:
ans += 1
return ans
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 31266,
"end": 32125
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, token: Optional[str] = None, start_date: Optional[str] = None):
"""Airbyte Source for Insightly.
Documentation can be found at https://docs.airbyte.com/integrations/sources/insightly
Args:
name (str): The name of the destination.
token (Optional[str]): Your Insightly API token.
start_date (Optional[str]): The date from which you'd like to replicate data for Insightly in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only for incremental streams.
"""
self.token = check.opt_str_param(token, "token")
self.start_date = check.opt_str_param(start_date, "start_date")
super().__init__("Insightly", name)
| InsightlySource |
python | readthedocs__readthedocs.org | readthedocs/config/models.py | {
"start": 864,
"end": 1099
} | class ____(ConfigBaseModel):
"""Object used for `build.jobs.build` key."""
html: list[str] | None = None
pdf: list[str] | None = None
epub: list[str] | None = None
htmlzip: list[str] | None = None
| BuildJobsBuildTypes |
python | kamyu104__LeetCode-Solutions | Python/last-substring-in-lexicographical-order.py | {
"start": 29,
"end": 557
} | class ____(object):
def lastSubstring(self, s):
"""
:type s: str
:rtype: str
"""
left, right, l = 0, 1, 0
while right+l < len(s):
if s[left+l] == s[right+l]:
l += 1
continue
if s[left+l] > s[right+l]:
right += l+1
else:
left = max(right, left+l+1)
right = left+1
l = 0
return s[left:]
# Time: O(n)
# Space: O(n)
import collections
| Solution |
python | run-llama__llama_index | llama-index-core/tests/conftest.py | {
"start": 3151,
"end": 5557
} | class ____:
"""
Saves the users' OpenAI API key and OpenAI API type either in
the environment variable or set to the library itself.
This allows us to run tests by setting it without plowing over
the local environment.
"""
def __init__(
self,
set_env_key_to: Optional[str] = "",
set_library_key_to: Optional[str] = None,
set_fake_key: bool = False,
set_env_type_to: Optional[str] = "",
set_library_type_to: str = "open_ai", # default value in openai package
):
self.set_env_key_to = set_env_key_to
self.set_library_key_to = set_library_key_to
self.set_fake_key = set_fake_key
self.set_env_type_to = set_env_type_to
self.set_library_type_to = set_library_type_to
def __enter__(self) -> None:
self.api_env_variable_was = os.environ.get("OPENAI_API_KEY", "")
self.api_env_type_was = os.environ.get("OPENAI_API_TYPE", "")
self.openai_api_key_was = openai.api_key
self.openai_api_type_was = openai.api_type
os.environ["OPENAI_API_KEY"] = str(self.set_env_key_to)
os.environ["OPENAI_API_TYPE"] = str(self.set_env_type_to)
if self.set_fake_key:
os.environ["OPENAI_API_KEY"] = "sk-" + "a" * 48
# No matter what, set the environment variable back to what it was
def __exit__(self, *exc: object) -> None:
os.environ["OPENAI_API_KEY"] = str(self.api_env_variable_was)
os.environ["OPENAI_API_TYPE"] = str(self.api_env_type_was)
openai.api_key = self.openai_api_key_was
openai.api_type = self.openai_api_type_was
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--integration",
action="store_true",
default=False,
help="run integration tests",
)
def pytest_configure(config: pytest.Config) -> None:
config.addinivalue_line("markers", "integration: mark test as integration")
def pytest_collection_modifyitems(
config: pytest.Config, items: List[pytest.Item]
) -> None:
if config.getoption("--integration"):
# --integration given in cli: do not skip integration tests
return
skip_integration = pytest.mark.skip(reason="need --integration option to run")
for item in items:
if "integration" in item.keywords:
item.add_marker(skip_integration)
| CachedOpenAIApiKeys |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 5626,
"end": 7181
} | class ____(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
| SocketCANTest |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/components.py | {
"start": 1217,
"end": 1558
} | class ____(UserComponent):
def __init__(self, component_id):
self._non_existing_comp_id = component_id
def update(self, *args, **kwargs):
msg = "Component with id %s doesn't exist. No updates will be made at anytime during runtime."
_warning_with_component(self, msg % self._non_existing_comp_id)
| StubComponent |
python | django__django | tests/forms_tests/tests/test_formsets.py | {
"start": 1581,
"end": 1750
} | class ____(Form):
def __init__(self, *args, custom_kwarg, **kwargs):
self.custom_kwarg = custom_kwarg
super().__init__(*args, **kwargs)
| CustomKwargForm |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 5587,
"end": 5698
} | class ____(InvalidRequestFatalError):
description = 'Invalid client_id parameter value.'
| InvalidClientIdError |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 13098,
"end": 13285
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("COMPLETED", "NOT_PLANNED", "REOPENED")
| IssueStateReason |
python | tornadoweb__tornado | tornado/test/tcpclient_test.py | {
"start": 5792,
"end": 6387
} | class ____(unittest.TestCase):
def test_one_family(self):
# These addresses aren't in the right format, but split doesn't care.
primary, secondary = _Connector.split([(AF1, "a"), (AF1, "b")])
self.assertEqual(primary, [(AF1, "a"), (AF1, "b")])
self.assertEqual(secondary, [])
def test_mixed(self):
primary, secondary = _Connector.split(
[(AF1, "a"), (AF2, "b"), (AF1, "c"), (AF2, "d")]
)
self.assertEqual(primary, [(AF1, "a"), (AF1, "c")])
self.assertEqual(secondary, [(AF2, "b"), (AF2, "d")])
| TestConnectorSplit |
python | pypa__pip | tests/unit/test_utils.py | {
"start": 13885,
"end": 17223
} | class ____:
"""Tests for pip._internal.utils.hashes"""
@pytest.mark.parametrize(
"hash_name, hex_digest, expected",
[
# Test a value that matches but with the wrong hash_name.
("sha384", 128 * "a", False),
# Test matching values, including values other than the first.
("sha512", 128 * "a", True),
("sha512", 128 * "b", True),
# Test a matching hash_name with a value that doesn't match.
("sha512", 128 * "c", False),
],
)
def test_is_hash_allowed(
self, hash_name: str, hex_digest: str, expected: bool
) -> None:
hashes_data = {
"sha512": [128 * "a", 128 * "b"],
}
hashes = Hashes(hashes_data)
assert hashes.is_hash_allowed(hash_name, hex_digest) == expected
def test_success(self, tmpdir: Path) -> None:
"""Make sure no error is raised when at least one hash matches.
Test check_against_path because it calls everything else.
"""
file = tmpdir / "to_hash"
file.write_text("hello")
hashes = Hashes(
{
"sha256": [
"2cf24dba5fb0a30e26e83b2ac5b9e29e"
"1b161e5c1fa7425e73043362938b9824"
],
"sha224": ["wrongwrong"],
"md5": ["5d41402abc4b2a76b9719d911017c592"],
}
)
hashes.check_against_path(os.fspath(file))
def test_failure(self) -> None:
"""Hashes should raise HashMismatch when no hashes match."""
hashes = Hashes({"sha256": ["wrongwrong"]})
with pytest.raises(HashMismatch):
hashes.check_against_file(BytesIO(b"hello"))
def test_missing_hashes(self) -> None:
"""MissingHashes should raise HashMissing when any check is done."""
with pytest.raises(HashMissing):
MissingHashes().check_against_file(BytesIO(b"hello"))
def test_unknown_hash(self) -> None:
"""Hashes should raise InstallationError when it encounters an unknown
hash."""
hashes = Hashes({"badbad": ["dummy"]})
with pytest.raises(InstallationError):
hashes.check_against_file(BytesIO(b"hello"))
def test_non_zero(self) -> None:
"""Test that truthiness tests tell whether any known-good hashes
exist."""
assert Hashes({"sha256": ["dummy"]})
assert not Hashes()
assert not Hashes({})
def test_equality(self) -> None:
assert Hashes() == Hashes()
assert Hashes({"sha256": ["abcd"]}) == Hashes({"sha256": ["abcd"]})
assert Hashes({"sha256": ["ab", "cd"]}) == Hashes({"sha256": ["cd", "ab"]})
def test_hash(self) -> None:
cache = {}
cache[Hashes({"sha256": ["ab", "cd"]})] = 42
assert cache[Hashes({"sha256": ["ab", "cd"]})] == 42
def test_has_one_of(self) -> None:
hashes = Hashes({"sha256": ["abcd", "efgh"], "sha384": ["ijkl"]})
assert hashes.has_one_of({"sha256": "abcd"})
assert hashes.has_one_of({"sha256": "efgh"})
assert not hashes.has_one_of({"sha256": "xyzt"})
empty_hashes = Hashes()
assert not empty_hashes.has_one_of({"sha256": "xyzt"})
def raises(error: type[Exception]) -> NoReturn:
raise error
| TestHashes |
python | pypa__warehouse | tests/unit/integration/test_package.py | {
"start": 879,
"end": 1432
} | class ____:
def test_unimplemented(self, metrics):
cache = integrations.PublicKeysCache(cache_time=10)
payload_verifier = integrations.PayloadVerifier(
metrics=metrics, public_keys_cache=cache
)
with pytest.raises(NotImplementedError):
payload_verifier.metric_name
with pytest.raises(NotImplementedError):
payload_verifier.retrieve_public_key_payload()
with pytest.raises(NotImplementedError):
payload_verifier.extract_public_keys({})
| TestPayloadVerifier |
python | huggingface__transformers | src/transformers/models/openai/modeling_openai.py | {
"start": 26565,
"end": 31573
} | class ____(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = OpenAIGPTModel(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
# Ensure the batch size is > 1 if there is no padding.
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=pooled_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
__all__ = [
"OpenAIGPTDoubleHeadsModel",
"OpenAIGPTForSequenceClassification",
"OpenAIGPTLMHeadModel",
"OpenAIGPTModel",
"OpenAIGPTPreTrainedModel",
]
| OpenAIGPTForSequenceClassification |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/presto_to_gcs.py | {
"start": 1128,
"end": 4957
} | class ____:
"""
An adapter that adds additional feature to the Presto cursor.
The implementation of cursor in the prestodb library is not sufficient.
The following changes have been made:
* The poke mechanism for row. You can look at the next row without consuming it.
* The description attribute is available before reading the first row. Thanks to the poke mechanism.
* the iterator interface has been implemented.
A detailed description of the class methods is available in
`PEP-249 <https://www.python.org/dev/peps/pep-0249/>`__.
"""
def __init__(self, cursor: PrestoCursor):
self.cursor: PrestoCursor = cursor
self.rows: list[Any] = []
self.initialized: bool = False
@property
def description(self) -> list[tuple]:
"""
This read-only attribute is a sequence of 7-item sequences.
Each of these sequences contains information describing one result column:
* ``name``
* ``type_code``
* ``display_size``
* ``internal_size``
* ``precision``
* ``scale``
* ``null_ok``
The first two items (``name`` and ``type_code``) are mandatory, the other
five are optional and are set to None if no meaningful values can be provided.
"""
if not self.initialized:
# Peek for first row to load description.
self.peekone()
return self.cursor.description
@property
def rowcount(self) -> int:
"""The read-only attribute specifies the number of rows."""
return self.cursor.rowcount
def close(self) -> None:
"""Close the cursor now."""
self.cursor.close()
def execute(self, *args, **kwargs) -> PrestoResult:
"""Prepare and execute a database operation (query or command)."""
self.initialized = False
self.rows = []
return self.cursor.execute(*args, **kwargs)
def executemany(self, *args, **kwargs):
"""
Prepare and execute a database operation.
Prepare a database operation (query or command) and then execute it against
all parameter sequences or mappings found in the sequence seq_of_parameters.
"""
self.initialized = False
self.rows = []
return self.cursor.executemany(*args, **kwargs)
def peekone(self) -> Any:
"""Return the next row without consuming it."""
self.initialized = True
element = self.cursor.fetchone()
self.rows.insert(0, element)
return element
def fetchone(self) -> Any:
"""Fetch the next row of a query result set, returning a single sequence, or ``None``."""
if self.rows:
return self.rows.pop(0)
return self.cursor.fetchone()
def fetchmany(self, size=None) -> list:
"""
Fetch the next set of rows of a query result, returning a sequence of sequences.
An empty sequence is returned when no more rows are available.
"""
if size is None:
size = self.cursor.arraysize
result = []
for _ in range(size):
row = self.fetchone()
if row is None:
break
result.append(row)
return result
def __next__(self) -> Any:
"""
Return the next row from the current SQL statement using the same semantics as ``.fetchone()``.
A ``StopIteration`` exception is raised when the result set is exhausted.
"""
result = self.fetchone()
if result is None:
raise StopIteration()
return result
def __iter__(self) -> _PrestoToGCSPrestoCursorAdapter:
"""Return self to make cursors compatible to the iteration protocol."""
return self
| _PrestoToGCSPrestoCursorAdapter |
python | walkccc__LeetCode | solutions/2586. Count the Number of Vowel Strings in Range/2586.py | {
"start": 0,
"end": 217
} | class ____:
def vowelStrings(self, words: list[str], left: int, right: int) -> int:
VOWELS = 'aeiou'
return sum(word[0] in VOWELS and word[-1] in VOWELS
for word in words[left:right + 1])
| Solution |
python | django__django | tests/forms_tests/tests/test_forms.py | {
"start": 1228,
"end": 1369
} | class ____(Form):
name = CharField()
language = ChoiceField(choices=[("P", "Python"), ("J", "Java")], widget=RadioSelect)
| FrameworkForm |
python | ansible__ansible | lib/ansible/module_utils/facts/collector.py | {
"start": 2249,
"end": 14687
} | class ____:
_fact_ids = set() # type: t.Set[str]
_platform = 'Generic'
name = None # type: str | None
required_facts = set() # type: t.Set[str]
def __init__(self, collectors=None, namespace=None):
"""Base class for things that collect facts.
'collectors' is an optional list of other FactCollectors for composing."""
self.collectors = collectors or []
# self.namespace is a object with a 'transform' method that transforms
# the name to indicate the namespace (ie, adds a prefix or suffix).
self.namespace = namespace
self.fact_ids = set([self.name])
self.fact_ids.update(self._fact_ids)
@classmethod
def platform_match(cls, platform_info):
if platform_info.get('system', None) == cls._platform:
return cls
return None
def _transform_name(self, key_name):
if self.namespace:
return self.namespace.transform(key_name)
return key_name
def _transform_dict_keys(self, fact_dict):
"""update a dicts keys to use new names as transformed by self._transform_name"""
if fact_dict is None:
return {}
for old_key in list(fact_dict.keys()):
new_key = self._transform_name(old_key)
# pop the item by old_key and replace it using new_key
fact_dict[new_key] = fact_dict.pop(old_key)
return fact_dict
# TODO/MAYBE: rename to 'collect' and add 'collect_without_namespace'
def collect_with_namespace(self, module=None, collected_facts=None):
# collect, then transform the key names if needed
facts_dict = self.collect(module=module, collected_facts=collected_facts)
if self.namespace:
facts_dict = self._transform_dict_keys(facts_dict)
return facts_dict
def collect(self, module=None, collected_facts=None):
"""do the fact collection
'collected_facts' is a object (a dict, likely) that holds all previously
facts. This is intended to be used if a FactCollector needs to reference
another fact (for ex, the system arch) and should not be modified (usually).
Returns a dict of facts.
"""
facts_dict = {}
return facts_dict
def get_collector_names(valid_subsets=None,
minimal_gather_subset=None,
gather_subset=None,
aliases_map=None,
platform_info=None):
"""return a set of FactCollector names based on gather_subset spec.
gather_subset is a spec describing which facts to gather.
valid_subsets is a frozenset of potential matches for gather_subset ('all', 'network') etc
minimal_gather_subsets is a frozenset of matches to always use, even for gather_subset='!all'
"""
# Retrieve module parameters
gather_subset = gather_subset or ['all']
# the list of everything that 'all' expands to
valid_subsets = valid_subsets or frozenset()
# if provided, minimal_gather_subset is always added, even after all negations
minimal_gather_subset = minimal_gather_subset or frozenset()
aliases_map = aliases_map or defaultdict(set)
# Retrieve all facts elements
additional_subsets = set()
exclude_subsets = set()
# total always starts with the min set, then
# adds of the additions in gather_subset, then
# excludes all of the excludes, then add any explicitly
# requested subsets.
gather_subset_with_min = ['min']
gather_subset_with_min.extend(gather_subset)
# subsets we mention in gather_subset explicitly, except for 'all'/'min'
explicitly_added = set()
for subset in gather_subset_with_min:
subset_id = subset
if subset_id == 'min':
additional_subsets.update(minimal_gather_subset)
continue
if subset_id == 'all':
additional_subsets.update(valid_subsets)
continue
if subset_id.startswith('!'):
subset = subset[1:]
if subset == 'min':
exclude_subsets.update(minimal_gather_subset)
continue
if subset == 'all':
exclude_subsets.update(valid_subsets - minimal_gather_subset)
continue
exclude = True
else:
exclude = False
if exclude:
# include 'devices', 'dmi' etc for '!hardware'
exclude_subsets.update(aliases_map.get(subset, set()))
exclude_subsets.add(subset)
else:
# NOTE: this only considers adding an unknown gather subsetup an error. Asking to
# exclude an unknown gather subset is ignored.
if subset_id not in valid_subsets:
raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" %
(subset, ", ".join(sorted(valid_subsets))))
explicitly_added.add(subset)
additional_subsets.add(subset)
if not additional_subsets:
additional_subsets.update(valid_subsets)
additional_subsets.difference_update(exclude_subsets - explicitly_added)
return additional_subsets
def find_collectors_for_platform(all_collector_classes, compat_platforms):
found_collectors = set()
found_collectors_names = set()
# start from specific platform, then try generic
for compat_platform in compat_platforms:
platform_match = None
for all_collector_class in all_collector_classes:
# ask the class if it is compatible with the platform info
platform_match = all_collector_class.platform_match(compat_platform)
if not platform_match:
continue
primary_name = all_collector_class.name
if primary_name not in found_collectors_names:
found_collectors.add(all_collector_class)
found_collectors_names.add(all_collector_class.name)
return found_collectors
def build_fact_id_to_collector_map(collectors_for_platform):
fact_id_to_collector_map = defaultdict(list)
aliases_map = defaultdict(set)
for collector_class in collectors_for_platform:
primary_name = collector_class.name
fact_id_to_collector_map[primary_name].append(collector_class)
for fact_id in collector_class._fact_ids:
fact_id_to_collector_map[fact_id].append(collector_class)
aliases_map[primary_name].add(fact_id)
return fact_id_to_collector_map, aliases_map
def select_collector_classes(collector_names, all_fact_subsets):
seen_collector_classes = set()
selected_collector_classes = []
for collector_name in collector_names:
collector_classes = all_fact_subsets.get(collector_name, [])
for collector_class in collector_classes:
if collector_class not in seen_collector_classes:
selected_collector_classes.append(collector_class)
seen_collector_classes.add(collector_class)
return selected_collector_classes
def _get_requires_by_collector_name(collector_name, all_fact_subsets):
required_facts = set()
try:
collector_classes = all_fact_subsets[collector_name]
except KeyError:
raise CollectorNotFoundError('Fact collector "%s" not found' % collector_name)
for collector_class in collector_classes:
required_facts.update(collector_class.required_facts)
return required_facts
def find_unresolved_requires(collector_names, all_fact_subsets):
"""Find any collector names that have unresolved requires
Returns a list of collector names that correspond to collector
classes whose .requires_facts() are not in collector_names.
"""
unresolved = set()
for collector_name in collector_names:
required_facts = _get_requires_by_collector_name(collector_name, all_fact_subsets)
for required_fact in required_facts:
if required_fact not in collector_names:
unresolved.add(required_fact)
return unresolved
def resolve_requires(unresolved_requires, all_fact_subsets):
new_names = set()
failed = []
for unresolved in unresolved_requires:
if unresolved in all_fact_subsets:
new_names.add(unresolved)
else:
failed.append(unresolved)
if failed:
raise UnresolvedFactDep('unresolved fact dep %s' % ','.join(failed))
return new_names
def build_dep_data(collector_names, all_fact_subsets):
dep_map = defaultdict(set)
for collector_name in collector_names:
collector_deps = set()
for collector in all_fact_subsets[collector_name]:
for dep in collector.required_facts:
collector_deps.add(dep)
dep_map[collector_name] = collector_deps
return dep_map
def tsort(dep_map):
sorted_list = []
unsorted_map = dep_map.copy()
while unsorted_map:
acyclic = False
for node, edges in list(unsorted_map.items()):
for edge in edges:
if edge in unsorted_map:
break
else:
acyclic = True
del unsorted_map[node]
sorted_list.append((node, edges))
if not acyclic:
raise CycleFoundInFactDeps('Unable to tsort deps, there was a cycle in the graph. sorted=%s' % sorted_list)
return sorted_list
def _solve_deps(collector_names, all_fact_subsets):
unresolved = collector_names.copy()
solutions = collector_names.copy()
while True:
unresolved = find_unresolved_requires(solutions, all_fact_subsets)
if unresolved == set():
break
new_names = resolve_requires(unresolved, all_fact_subsets)
solutions.update(new_names)
return solutions
def collector_classes_from_gather_subset(all_collector_classes=None,
valid_subsets=None,
minimal_gather_subset=None,
gather_subset=None,
gather_timeout=None,
platform_info=None):
"""return a list of collector classes that match the args"""
# use gather_name etc to get the list of collectors
all_collector_classes = all_collector_classes or []
minimal_gather_subset = minimal_gather_subset or frozenset()
platform_info = platform_info or {'system': platform.system()}
gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT
# tweak the modules GATHER_TIMEOUT
timeout.GATHER_TIMEOUT = gather_timeout
valid_subsets = valid_subsets or frozenset()
# maps alias names like 'hardware' to the list of names that are part of hardware
# like 'devices' and 'dmi'
aliases_map = defaultdict(set)
compat_platforms = [platform_info, {'system': 'Generic'}]
collectors_for_platform = find_collectors_for_platform(all_collector_classes, compat_platforms)
# all_facts_subsets maps the subset name ('hardware') to the class that provides it.
# TODO: name collisions here? are there facts with the same name as a gather_subset (all, network, hardware, virtual, ohai, facter)
all_fact_subsets, aliases_map = build_fact_id_to_collector_map(collectors_for_platform)
all_valid_subsets = frozenset(all_fact_subsets.keys())
# expand any fact_id/collectorname/gather_subset term ('all', 'env', etc) to the list of names that represents
collector_names = get_collector_names(valid_subsets=all_valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=gather_subset,
aliases_map=aliases_map,
platform_info=platform_info)
complete_collector_names = _solve_deps(collector_names, all_fact_subsets)
dep_map = build_dep_data(complete_collector_names, all_fact_subsets)
ordered_deps = tsort(dep_map)
ordered_collector_names = [x[0] for x in ordered_deps]
selected_collector_classes = select_collector_classes(ordered_collector_names,
all_fact_subsets)
return selected_collector_classes
| BaseFactCollector |
python | facebook__pyre-check | pyre_extensions/tests/safe_json_test.py | {
"start": 806,
"end": 8519
} | class ____(unittest.TestCase):
def _assert_loads(self, input: str, target_type: Type[T], output: T) -> None:
self.assertEqual(
safe_json.loads(
input,
target_type,
),
output,
)
def _assert_loads_fails(self, input: str, target_type: Type[T]) -> None:
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads(
input,
target_type,
)
def test_loads(self) -> None:
# Primitives.
self.assertEqual(safe_json.loads("1", int), 1)
self.assertEqual(safe_json.loads("true", bool), True)
self.assertEqual(safe_json.loads("1.1", float), 1.1)
self.assertEqual(safe_json.loads('"string"', str), "string")
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("1", bool)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("1", float)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("1", str)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("true", float)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("true", str)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("1.1", int)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("1.1", bool)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("1.1", str)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("hello", int)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("hello", bool)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("hello", float)
# Lists.
self.assertEqual(safe_json.loads("[]", List[int]), [])
self.assertEqual(safe_json.loads("[1]", List[int]), [1])
self.assertEqual(safe_json.loads("[1, 2]", List[int]), [1, 2])
self.assertEqual(
safe_json.loads('[{"1": 1}]', List[Dict[str, int]]), [{"1": 1}]
)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads("[1, 'string']", List[int])
# Dictionaries.
self.assertEqual(safe_json.loads("{}", Dict[int, str]), {})
self.assertEqual(safe_json.loads('{"1": 1}', Dict[str, int]), {"1": 1})
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads('{"1": "string"}', Dict[str, int])
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads('{"1": 1, "2": "2"}', Dict[str, int])
self.assertEqual(
safe_json.loads('{"1": {"2": 3}}', Dict[str, Dict[str, int]]),
{"1": {"2": 3}},
)
# Typed dictionaries.
self.assertEqual(
safe_json.loads('{"name": "The Matrix", "year": 1999}', Movie),
{"name": "The Matrix", "year": 1999},
)
with self.assertRaises(safe_json.InvalidJson):
safe_json.loads('{"name": "The Matrix", "year": ""}', Movie)
# Any.
self.assertEqual(safe_json.loads("[1]", List[Any]), [1])
self.assertEqual(safe_json.loads('[{"1": 1}]', List[Any]), [{"1": 1}])
# Optionals.
self.assertEqual(safe_json.loads("[1]", List[Optional[int]]), [1])
self.assertEqual(safe_json.loads("[null, 2]", List[Optional[int]]), [None, 2])
# Validation can be turned off.
self.assertEqual(safe_json.loads("[1]", List[str], validate=False), [1])
def test_validate(self) -> None:
# Lists.
parsedListStr = ["1", "2"]
self.assertEqual(safe_json.validate(parsedListStr, List[str]), parsedListStr)
with self.assertRaises(safe_json.InvalidJson):
safe_json.validate(parsedListStr, List[int])
# Dictionaries.
parsedDictBasic = {"1": 1}
self.assertEqual(
safe_json.validate(parsedDictBasic, Dict[str, int]), parsedDictBasic
)
with self.assertRaises(safe_json.InvalidJson):
safe_json.validate(parsedDictBasic, List[Any])
parsedDictNested = {"1": {"2": 3}}
self.assertEqual(
safe_json.validate(parsedDictNested, Dict[str, Dict[str, int]]),
parsedDictNested,
)
with self.assertRaises(safe_json.InvalidJson):
safe_json.validate(parsedDictNested, Dict[str, int])
# Typed dictionaries.
parsedDictTyped = {"name": "The Matrix", "year": 1999}
parsedDictTypedFailing = {"name": "The Matrix", "year": ""}
self.assertEqual(safe_json.validate(parsedDictTyped, Movie), parsedDictTyped)
with self.assertRaises(safe_json.InvalidJson):
safe_json.validate(parsedDictTypedFailing, Movie)
# Any.
parsedAny = [{"1": 1}]
self.assertEqual(safe_json.validate(parsedAny, List[Any]), parsedAny)
# Optionals.
parsedOptionals = [2, None, 4]
self.assertEqual(
safe_json.validate(parsedOptionals, List[Optional[int]]), parsedOptionals
)
def test_load(self) -> None:
f = StringIO('{"1": {"2": 3}}')
self.assertEqual(safe_json.load(f, Dict[str, Dict[str, int]]), {"1": {"2": 3}})
with self.assertRaises(safe_json.InvalidJson):
safe_json.load(f, Dict[int, Dict[int, int]])
def test_loads_typed_dictionary(self) -> None:
# Field that is not present in the TypedDict.
self._assert_loads(
'{"name": "The Matrix Reloaded", "year": 1999, "extra_field": "hello"}',
Movie,
{"name": "The Matrix Reloaded", "year": 1999, "extra_field": "hello"},
)
# TypedDict inheriting from another.
self._assert_loads(
'{"name": "The Matrix", "year": 1999, "rating": 9.0}',
MovieWithRating,
{"name": "The Matrix", "year": 1999, "rating": 9.0},
)
self._assert_loads_fails(
'{"name": "The Matrix", "year": 1999, "rating": "not a float"}',
MovieWithRating,
)
# TypedDict with a field accepting an arbitrary dictionary.
self._assert_loads(
'{"name": "The Matrix", "year": 1999,'
+ ' "dictionary": {"foo": "bar", "baz": {}}}',
MovieWithArbitraryDictionary,
{
"name": "The Matrix",
"year": 1999,
"dictionary": {"foo": "bar", "baz": {}},
},
)
self._assert_loads_fails(
'{"name": "The Matrix", "year": 1999, "dictionary": [1, 2]}',
MovieWithArbitraryDictionary,
)
# TODO(T92804673): Unions are not supported.
self._assert_loads_fails(
'{"name": "The Matrix", "year": 1999, "int_or_str": 1}',
MovieWithUnion,
)
self._assert_loads(
'{"name": "The Matrix", "year": 1999, "not_required": "hello"}',
MovieWithNonRequiredField,
{"name": "The Matrix", "year": 1999, "not_required": "hello"},
)
# TODO(T92805077): Missing non-required field should not be an error.
self._assert_loads_fails(
'{"name": "The Matrix", "year": 1999}',
MovieWithNonRequiredField,
)
# `typing_extensions.TypedDict` should also work
self._assert_loads(
'{"name": "The Matrix", "year": 1999}',
MovieAlternative,
{"name": "The Matrix", "year": 1999},
)
if __name__ == "__main__":
unittest.main()
| BasicTestCase |
python | google__jax | jax/_src/stages.py | {
"start": 35651,
"end": 36741
} | class ____:
da: Sequence[xc.Device]
m_type: MismatchType
source_info: SourceInfo | None
@property
def device_ids(self) -> Sequence[int]:
return [d.id for d in self.da]
@property
def platform(self) -> str:
return self.da[0].platform.upper()
def _maybe_api_name(self, api_name) -> str:
return f" {api_name}'s" if self.m_type == MismatchType.CONTEXT_DEVICES else ""
@property
def source_info_str(self):
return (
"" if self.source_info is None
else f" at {source_info_util.summarize(self.source_info.source_info)}"
)
@property
def _dev_ids_plat_str(self):
return f"device ids {self.device_ids} on platform {self.platform}"
def m_type_str(self, api_name):
return (f'{self.source_info and self.source_info.eqn_name} inside {api_name}'
if self.m_type == MismatchType.SHARDING_INSIDE_COMPUTATION else self.m_type)
def _str(self, api_name):
return (f"{self._maybe_api_name(api_name)} {self.m_type_str(api_name)} with "
f"{self._dev_ids_plat_str}{self.source_info_str}")
| DeviceAssignmentMismatch |
python | kamyu104__LeetCode-Solutions | Python/maximum-binary-string-after-change.py | {
"start": 29,
"end": 497
} | class ____(object):
def maximumBinaryString(self, binary):
"""
:type binary: str
:rtype: str
"""
result = list(binary)
zeros = ones = 0
for i, c in enumerate(result):
if c == '0':
zeros += 1
elif zeros == 0:
ones += 1
result[i] = '1'
if ones != len(result):
result[zeros+ones-1] = '0'
return "".join(result)
| Solution |
python | getsentry__sentry | tests/sentry/web/frontend/test_auth_organization_login.py | {
"start": 51957,
"end": 57667
} | class ____(AuthProviderTestCase):
def setUp(self) -> None:
self.demo_user = self.create_user()
self.demo_org = self.create_organization(owner=self.demo_user)
self.normal_user = self.create_user()
self.normal_org = self.create_organization(owner=self.normal_user)
def is_logged_in_to_org(self, response, org):
return response.status_code == 200 and response.redirect_chain == [
(reverse("sentry-login"), 302),
(f"/organizations/{org.slug}/issues/", 302),
]
def fetch_org_login_page(self, org):
return self.client.get(
reverse("sentry-auth-organization", args=[org.slug]),
follow=True,
)
def test_auto_login_demo_mode_disabled(self) -> None:
with override_options(
{
"demo-mode.enabled": False,
"demo-mode.users": [self.demo_user.id],
"demo-mode.orgs": [self.demo_org.id],
}
):
resp = self.fetch_org_login_page(self.demo_org)
assert not self.is_logged_in_to_org(resp, self.demo_org)
resp = self.fetch_org_login_page(self.normal_org)
assert not self.is_logged_in_to_org(resp, self.normal_org)
def test_auto_login_demo_mode(self) -> None:
with override_options(
{
"demo-mode.enabled": True,
"demo-mode.users": [self.demo_user.id],
"demo-mode.orgs": [self.demo_org.id],
}
):
resp = self.fetch_org_login_page(self.demo_org)
assert self.is_logged_in_to_org(resp, self.demo_org)
resp = self.fetch_org_login_page(self.normal_org)
assert not self.is_logged_in_to_org(resp, self.normal_org)
def test_auto_login_not_demo_org(self) -> None:
with override_options(
{"demo-mode.enabled": True, "demo-mode.users": [], "demo-mode.orgs": []}
):
resp = self.fetch_org_login_page(self.demo_org)
assert not self.is_logged_in_to_org(resp, self.demo_org)
resp = self.fetch_org_login_page(self.normal_org)
assert not self.is_logged_in_to_org(resp, self.normal_org)
def test_demo_user_joins_existing_sso_organization(self) -> None:
"""
Test that when a demo user is logged in and tries to join an existing SSO organization,
they are logged in as a new user with a proper auth identity linked.
This can happen in production when a user visits the sandbox, gets logged in as a demo user,
and then attempts to join an organization that has SSO configured (e.g., via invite link).
The demo user's session is treated as unauthenticated for the SSO pipeline
but the user property still resolves to the demo user as a fallback.
When the user chooses "newuser", a completely new account is created with SSO identity.
"""
with override_options(
{
"demo-mode.enabled": True,
"demo-mode.users": [self.demo_user.id],
"demo-mode.orgs": [self.demo_org.id],
}
):
sso_org = self.create_organization(name="sso-org", owner=self.normal_user)
# Create an organization with SSO (not the demo org)
auth_provider = AuthProvider.objects.create(
organization_id=sso_org.id, provider="dummy"
)
# Log in as demo user
self.login_as(self.demo_user)
# Initiate SSO flow
path = reverse("sentry-auth-organization", args=[sso_org.slug])
resp = self.client.post(path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
# Complete SSO with a new email
sso_path = reverse("sentry-auth-sso")
new_email = "newuser@example.com"
resp = self.client.post(sso_path, {"email": new_email})
# Even though demo user is logged in, they're treated as unauthenticated in pipeline
# So we get auth-confirm-identity screen, but existing_user still shows the demo user
# as a fallback (because user property returns request.user when email doesn't match)
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
# Create new user account
resp = self.client.post(sso_path, {"op": "newuser"}, follow=True)
# Should redirect and log in as the new user
assert resp.status_code == 200
# Verify a new user was created (not the demo user)
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
new_user = auth_identity.user
# Verify it's not the demo user
assert new_user.id != self.demo_user.id
assert new_user.email == new_email
logged_in_user = get_user(self.client)
assert logged_in_user.id == new_user.id
# Verify demo user has NO auth identity linked to this provider
assert not AuthIdentity.objects.filter(
auth_provider=auth_provider, user_id=self.demo_user.id
).exists()
# Verify the new user has organization membership with SSO linked
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=sso_org, user_id=new_user.id)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
| OrganizationAuthLoginDemoModeTest |
python | pypa__pip | src/pip/_vendor/rich/screen.py | {
"start": 288,
"end": 1591
} | class ____:
"""A renderable that fills the terminal screen and crops excess.
Args:
renderable (RenderableType): Child renderable.
style (StyleType, optional): Optional background style. Defaults to None.
"""
renderable: "RenderableType"
def __init__(
self,
*renderables: "RenderableType",
style: Optional[StyleType] = None,
application_mode: bool = False,
) -> None:
from pip._vendor.rich.console import Group
self.renderable = Group(*renderables)
self.style = style
self.application_mode = application_mode
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
width, height = options.size
style = console.get_style(self.style) if self.style else None
render_options = options.update(width=width, height=height)
lines = console.render_lines(
self.renderable or "", render_options, style=style, pad=True
)
lines = Segment.set_shape(lines, width, height, style=style)
new_line = Segment("\n\r") if self.application_mode else Segment.line()
for last, line in loop_last(lines):
yield from line
if not last:
yield new_line
| Screen |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_retry_execution.py | {
"start": 3389,
"end": 17994
} | class ____(ExecutingGraphQLContextTestMatrix):
def test_retry_pipeline_execution(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "eventually_successful")
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": retry_config(0),
}
},
)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_did_succeed(logs, "spawn")
assert step_did_fail(logs, "fail")
assert step_did_not_run(logs, "fail_2")
assert step_did_not_run(logs, "fail_3")
assert step_did_not_run(logs, "reset")
assert step_did_not_run(logs, "collect")
retry_one = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": retry_config(1),
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
run_id = retry_one.data["launchPipelineReexecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_did_not_run(logs, "spawn")
assert step_did_succeed(logs, "fail")
assert step_did_fail(logs, "fail_2")
assert step_did_not_run(logs, "fail_3")
assert step_did_not_run(logs, "reset")
assert step_did_not_run(logs, "collect")
retry_two = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": retry_config(2),
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
run_id = retry_two.data["launchPipelineReexecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_did_not_run(logs, "spawn")
assert step_did_not_run(logs, "fail")
assert step_did_succeed(logs, "fail_2")
assert step_did_fail(logs, "fail_3")
assert step_did_not_run(logs, "reset")
assert step_did_not_run(logs, "collect")
retry_three = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": retry_config(3),
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
run_id = retry_three.data["launchPipelineReexecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_did_not_run(logs, "spawn")
assert step_did_not_run(logs, "fail")
assert step_did_not_run(logs, "fail_2")
assert step_did_succeed(logs, "fail_3")
assert step_did_succeed(logs, "reset")
assert step_did_succeed(logs, "collect")
def test_retry_resource_pipeline(self, graphql_context: WorkspaceRequestContext):
context = graphql_context
selector = infer_job_selector(graphql_context, "retry_resource_job")
result = execute_dagster_graphql_and_finish_runs(
context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
}
},
)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
"messages"
]
assert step_did_succeed(logs, "start")
assert step_did_fail(logs, "will_fail")
retry_one = execute_dagster_graphql_and_finish_runs(
context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
run_id = retry_one.data["launchPipelineReexecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
"messages"
]
assert step_did_not_run(logs, "start")
assert step_did_fail(logs, "will_fail")
def test_retry_multi_output(self, graphql_context: WorkspaceRequestContext):
context = graphql_context
result = execute_dagster_graphql_and_finish_runs(
context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": get_retry_multi_execution_params(context, should_fail=True)
},
)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
"messages"
]
assert step_did_succeed(logs, "multi")
assert step_did_skip(logs, "child_multi_skip")
assert step_did_fail(logs, "can_fail")
assert step_did_not_run(logs, "child_fail")
assert step_did_not_run(logs, "child_skip")
assert step_did_not_run(logs, "grandchild_fail")
retry_one = execute_dagster_graphql_and_finish_runs(
context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": get_retry_multi_execution_params(
context, should_fail=True, retry_id=run_id
)
},
)
run_id = retry_one.data["launchPipelineReexecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
"messages"
]
assert step_did_not_run(logs, "multi")
assert step_did_not_run(logs, "child_multi_skip")
assert step_did_fail(logs, "can_fail")
assert step_did_not_run(logs, "child_fail")
assert step_did_not_run(logs, "child_skip")
assert step_did_not_run(logs, "grandchild_fail")
retry_two = execute_dagster_graphql_and_finish_runs(
context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": get_retry_multi_execution_params(
context, should_fail=False, retry_id=run_id
)
},
)
run_id = retry_two.data["launchPipelineReexecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
"messages"
]
assert step_did_not_run(logs, "multi")
assert step_did_not_run(logs, "child_multi_skip")
assert step_did_succeed(logs, "can_fail")
assert step_did_succeed(logs, "child_fail")
assert step_did_skip(logs, "child_skip")
assert step_did_succeed(logs, "grandchild_fail")
def test_successful_pipeline_reexecution(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "csv_hello_world")
result_one = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": csv_hello_world_ops_config(),
}
},
)
run_id = result_one.data["launchPipelineExecution"]["run"]["runId"]
assert result_one.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
result = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)
logs = result["pipelineRunLogs"]["messages"]
assert get_step_output_event(logs, "sum_op")
assert get_step_output_event(logs, "sum_sq_op")
# retry
result_two = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": csv_hello_world_ops_config(),
"stepKeys": ["sum_sq_op"],
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
query_result = result_two.data["launchPipelineReexecution"]
assert query_result["__typename"] == "LaunchRunSuccess"
new_run_id = query_result["run"]["runId"]
result = get_all_logs_for_finished_run_via_subscription(graphql_context, new_run_id)
logs = result["pipelineRunLogs"]["messages"]
assert isinstance(logs, list)
assert has_event_of_type(logs, "RunStartEvent")
assert has_event_of_type(logs, "RunSuccessEvent")
assert not has_event_of_type(logs, "RunFailureEvent")
assert not get_step_output_event(logs, "sum_op")
assert get_step_output_event(logs, "sum_sq_op")
def test_pipeline_reexecution_info_query(
self, graphql_context: WorkspaceRequestContext, snapshot
):
context = graphql_context
selector = infer_job_selector(graphql_context, "csv_hello_world")
result = execute_dagster_graphql_and_finish_runs(
context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": csv_hello_world_ops_config(),
}
},
)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
# retry
retry_result = execute_dagster_graphql_and_finish_runs(
context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": csv_hello_world_ops_config(),
"stepKeys": ["sum_sq_op"],
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
new_run_id = retry_result.data["launchPipelineReexecution"]["run"]["runId"]
info_result_one = execute_dagster_graphql_and_finish_runs(
context, PIPELINE_REEXECUTION_INFO_QUERY, variables={"runId": run_id}
)
query_result_one = info_result_one.data["pipelineRunOrError"]
assert query_result_one["__typename"] == "Run"
assert query_result_one["stepKeysToExecute"] == ["sum_op", "sum_sq_op"] # full execution
info_result_two = execute_dagster_graphql_and_finish_runs(
context, PIPELINE_REEXECUTION_INFO_QUERY, variables={"runId": new_run_id}
)
query_result_two = info_result_two.data["pipelineRunOrError"]
assert query_result_two["__typename"] == "Run"
stepKeysToExecute = query_result_two["stepKeysToExecute"]
assert stepKeysToExecute == ["sum_sq_op"] # selected key
def test_pipeline_reexecution_invalid_step_in_subset(
self, graphql_context: WorkspaceRequestContext
):
selector = infer_job_selector(graphql_context, "csv_hello_world")
result_one = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": csv_hello_world_ops_config(),
}
},
)
assert result_one.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result_one.data["launchPipelineExecution"]["run"]["runId"]
# retry
result_two = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": csv_hello_world_ops_config(),
"stepKeys": ["nope"],
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
query_result = result_two.data["launchPipelineReexecution"]
assert query_result["__typename"] == "PythonError"
assert query_result["className"] == "DagsterExecutionStepNotFoundError"
assert "Can not build subset plan from unknown step: nope" in query_result["message"]
| TestRetryExecution |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 43258,
"end": 44169
} | class ____(TestCase):
def test_int_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value(12).as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
assert value == expected_digit_tuple
def test_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
assert value == expected_digit_tuple
def test_part_precision_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12.0').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
assert value == expected_digit_tuple
| TestQuantizedValueForDecimal |
python | allegroai__clearml | clearml/utilities/parallel.py | {
"start": 482,
"end": 3665
} | class ____(object):
__slots__ = ("__queue", "__future_caller", "__future_func")
def __init__(self, a_future_caller: Any, future_func: str) -> None:
self.__queue = Queue()
self.__future_caller = a_future_caller
self.__future_func = future_func
def __nested_caller(self, item: str, args: tuple, kwargs: dict) -> Any:
# wait until object is constructed
getattr(self.__future_caller, "id") # noqa
future_func = getattr(self.__future_caller, self.__future_func)
the_object = future_func()
the_object_func = getattr(the_object, item)
return the_object_func(*args, **kwargs)
def _flush_into_logger(
self,
a_future_object: Optional[Any] = None,
a_future_func: Optional[Any] = None,
) -> None:
self.__close_queue(a_future_object=a_future_object, a_future_func=a_future_func)
def __close_queue(
self,
a_future_object: Optional[Any] = None,
a_future_func: Optional[Any] = None,
) -> None:
# call this function when we Know the object is initialization is completed
if self.__queue is None:
return
_queue = self.__queue
self.__queue = None
while True:
# noinspection PyBroadException
try:
item, args, kwargs = _queue.get(block=False)
if a_future_object:
future_func = getattr(a_future_object, self.__future_func)
the_object = future_func()
the_object_func = getattr(the_object, item)
the_object_func(*args, **kwargs)
elif a_future_func:
the_object_func = getattr(a_future_func, item)
the_object_func(*args, **kwargs)
else:
self.__nested_caller(item, args, kwargs)
except Empty:
break
except Exception:
# import traceback
# stdout_print(''.join(traceback.format_exc()))
pass
def __getattr__(self, item: str) -> Callable:
def _caller(*args: Any, **kwargs: Any) -> Union[bool, Any]:
# if we already completed the background initialization, call functions immediately
# noinspection PyProtectedMember
if not self.__queue or self.__future_caller._FutureTaskCaller__executor is None:
return self.__nested_caller(item, args, kwargs)
# noinspection PyBroadException
try:
# if pool is still active call async
self.__queue.put(
(
item,
deepcopy(args) if args else args,
deepcopy(kwargs) if kwargs else kwargs,
)
)
except Exception:
# assume we wait only if self.__pool was nulled between the if and now, so just call directly
return self.__nested_caller(item, args, kwargs)
# let's hope it is the right one
return True
return _caller
| _DeferredClass |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 1982,
"end": 2655
} | class ____(MimetypeRenderer):
"""
Renderer to display figures using the plotly mime type. This renderer is
compatible with VSCode and nteract.
mime type: 'application/vnd.plotly.v1+json'
"""
def __init__(self, config=None):
self.config = dict(config) if config else {}
def to_mimebundle(self, fig_dict):
config = _get_jconfig(self.config)
if config:
fig_dict["config"] = config
json_compatible_fig_dict = json.loads(
to_json(fig_dict, validate=False, remove_uids=False)
)
return {"application/vnd.plotly.v1+json": json_compatible_fig_dict}
# Static Image
| PlotlyRenderer |
python | falconry__falcon | falcon/typing.py | {
"start": 1202,
"end": 1646
} | class ____(Protocol):
"""Async file-like protocol that defines only a read method, and is iterable.
.. versionadded:: 4.0
"""
async def read(self, n: int | None = ..., /) -> bytes: ...
def __aiter__(self) -> AsyncIterator[bytes]: ...
SSEEmitter = AsyncIterator[Optional['SSEvent']]
"""Async generator or iterator over Server-Sent Events
(instances of :class:`falcon.asgi.SSEvent`).
.. versionadded:: 4.0
"""
| AsyncReadableIO |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_kubernetes_engine.py | {
"start": 20707,
"end": 21354
} | class ____:
@pytest.mark.asyncio
@mock.patch(f"{ASYNC_HOOK_STRING}._get_client")
async def test_get_operation(self, mock_get_client, async_gke_hook, mock_async_gke_cluster_client):
mock_get_client.return_value = mock_async_gke_cluster_client
await async_gke_hook.get_operation(
operation_name=OPERATION_NAME,
project_id=TEST_GCP_PROJECT_ID,
)
operation_path = f"projects/{TEST_GCP_PROJECT_ID}/locations/{GKE_ZONE}/operations/{OPERATION_NAME}"
mock_async_gke_cluster_client.get_operation.assert_called_once_with(
name=operation_path,
)
| TestGKEAsyncHook |
python | gevent__gevent | src/greentest/3.10/test_ssl.py | {
"start": 113174,
"end": 122671
} | class ____(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
| AsyncoreEchoServer |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/rds.py | {
"start": 38353,
"end": 43848
} | class ____(RdsBaseOperator):
"""
Stops an RDS DB instance / cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsStopDbOperator`
:param db_identifier: The AWS identifier of the DB to stop
:param db_type: Type of the DB - either "instance" or "cluster" (default: "instance")
:param db_snapshot_identifier: The instance identifier of the DB Snapshot to create before
stopping the DB instance. The default value (None) skips snapshot creation. This
parameter is ignored when ``db_type`` is "cluster"
:param wait_for_completion: If True, waits for DB to stop. (default: True)
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check DB instance state
:param waiter_max_attempts: The maximum number of attempts to check DB instance state
:param deferrable: If True, the operator will wait asynchronously for the DB instance to be created.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
template_fields = aws_template_fields("db_identifier", "db_snapshot_identifier", "db_type")
def __init__(
self,
*,
db_identifier: str,
db_type: RdsDbType | str = RdsDbType.INSTANCE,
db_snapshot_identifier: str | None = None,
wait_for_completion: bool = True,
waiter_delay: int = 30,
waiter_max_attempts: int = 40,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.db_identifier = db_identifier
self.db_type = db_type
self.db_snapshot_identifier = db_snapshot_identifier
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> str:
self.db_type = RdsDbType(self.db_type)
stop_db_response: dict[str, Any] = self._stop_db()
if self.deferrable:
self.defer(
trigger=RdsDbStoppedTrigger(
db_identifier=self.db_identifier,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
response=stop_db_response,
db_type=self.db_type,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
waiter = self.hook.get_waiter(f"db_{self.db_type.value}_stopped")
waiter_key = (
"DBInstanceIdentifier" if self.db_type == RdsDbType.INSTANCE else "DBClusterIdentifier"
)
kwargs = {waiter_key: self.db_identifier}
waiter.wait(
WaiterConfig=prune_dict(
{
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
}
),
**kwargs,
)
return json.dumps(stop_db_response, default=str)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Failed to start DB: {validated_event}")
return json.dumps(validated_event["response"], default=str)
def _stop_db(self):
self.log.info("Stopping DB %s '%s'", self.db_type.value, self.db_identifier)
if self.db_type == RdsDbType.INSTANCE:
conn_params = {"DBInstanceIdentifier": self.db_identifier}
# The db snapshot parameter is optional, but the AWS SDK raises an exception
# if passed a null value. Only set snapshot id if value is present.
if self.db_snapshot_identifier:
conn_params["DBSnapshotIdentifier"] = self.db_snapshot_identifier
response = self.hook.conn.stop_db_instance(**conn_params)
else:
if self.db_snapshot_identifier:
self.log.warning(
"'db_snapshot_identifier' does not apply to db clusters. "
"Remove it to silence this warning."
)
response = self.hook.conn.stop_db_cluster(DBClusterIdentifier=self.db_identifier)
return response
__all__ = [
"RdsCreateDbSnapshotOperator",
"RdsCopyDbSnapshotOperator",
"RdsDeleteDbSnapshotOperator",
"RdsCreateEventSubscriptionOperator",
"RdsDeleteEventSubscriptionOperator",
"RdsStartExportTaskOperator",
"RdsCancelExportTaskOperator",
"RdsCreateDbInstanceOperator",
"RdsDeleteDbInstanceOperator",
"RdsStartDbOperator",
"RdsStopDbOperator",
]
| RdsStopDbOperator |
python | keras-team__keras | keras/src/initializers/constant_initializers.py | {
"start": 2476,
"end": 3599
} | class ____(Initializer):
"""Initializer that generates tensors initialized to 1.
Also available via the shortcut function `ones`.
Examples:
>>> # Standalone usage:
>>> initializer = Ones()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = Ones()
>>> layer = Dense(3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
are supported. If not specified, `keras.backend.floatx()`
is used, which default to `float32` unless you configured it
otherwise (via `keras.backend.set_floatx(float_dtype)`).
"""
dtype = standardize_dtype(dtype)
return ops.ones(shape, dtype=dtype)
@keras_export(
[
"keras.initializers.Identity",
"keras.initializers.identity",
"keras.initializers.IdentityInitializer",
]
)
| Ones |
python | ray-project__ray | python/ray/experimental/collective/collective_tensor_transport.py | {
"start": 320,
"end": 7237
} | class ____(TensorTransportManager):
def __init__(self, tensor_transport_backend: Backend):
self._tensor_transport_backend = tensor_transport_backend
@property
def tensor_transport_backend(self) -> Backend:
return self._tensor_transport_backend
@staticmethod
def is_one_sided() -> bool:
return False
@staticmethod
def can_abort_transport() -> bool:
return False
def actor_has_tensor_transport(self, actor: "ray.actor.ActorHandle") -> bool:
from ray.experimental.collective import get_collective_groups
communicators = get_collective_groups(
[actor], backend=self.tensor_transport_backend
)
return len(communicators) > 0
@staticmethod
def extract_tensor_transport_metadata(
obj_id: str,
gpu_object: List["torch.Tensor"],
) -> CollectiveTransportMetadata:
tensor_meta = []
device = None
if gpu_object:
device = gpu_object[0].device
for t in gpu_object:
if t.device.type != device.type:
raise ValueError(
"All tensors in an RDT object must have the same device type."
)
tensor_meta.append((t.shape, t.dtype))
return CollectiveTransportMetadata(
tensor_meta=tensor_meta,
tensor_device=device,
)
@staticmethod
def get_tensor_transport_metadata(
src_actor: "ray.actor.ActorHandle",
obj_id: str,
) -> CollectiveTransportMetadata:
def __ray_get_tensor_transport_metadata__(
self: "ray.actor.ActorHandle",
obj_id: str,
) -> CollectiveTransportMetadata:
from ray._private.worker import global_worker
gpu_object_store = global_worker.gpu_object_manager.gpu_object_store
# NOTE: We do not specify a timeout here because the user task that returns
# it could take arbitrarily long and we don't want to trigger a spurious
# timeout.
gpu_object = gpu_object_store.wait_and_get_object(obj_id)
return CollectiveTensorTransport.extract_tensor_transport_metadata(
obj_id, gpu_object
)
# Submit a Ray actor task to the source actor to get the tensor metadata.
# The metadata is a list of tuples, where each tuple contains the shape and dtype
# of a tensor in the GPU object store. This function returns an ObjectRef that
# points to the tensor metadata.
# NOTE(swang): We put this task on the background thread to avoid tasks
# executing on the main thread blocking this task.
return src_actor.__ray_call__.options(concurrency_group="_ray_system").remote(
__ray_get_tensor_transport_metadata__, obj_id
)
@staticmethod
def get_communicator_metadata(
src_actor: "ray.actor.ActorHandle",
dst_actor: "ray.actor.ActorHandle",
backend: Optional[str] = None,
) -> CollectiveCommunicatorMetadata:
from ray.experimental.collective import get_collective_groups
communicators = get_collective_groups(
[src_actor, dst_actor],
backend=backend,
)
# TODO(kevin85421): Support multiple communicators.
if len(communicators) == 0:
raise ValueError(
f"No communicators found for actors {src_actor} and {dst_actor}. "
"Create a communicator with "
"`ray.experimental.collective.create_collective_group` "
"before calling actor tasks. with non-default tensor_transport."
)
elif len(communicators) > 1:
raise ValueError(
f"There are {len(communicators)} possible communicators that contain actors {src_actor} and {dst_actor}. "
"Currently, RDT objects only support one communicator. Please make sure only "
"one communicator exists."
)
communicator = communicators[0]
src_rank = communicator.get_rank(src_actor)
if src_rank == -1:
raise ValueError(
f"Sender actor {src_actor} not found in communicator. "
"Please make sure the sender and receiver are in the same communicator."
)
dst_rank = communicator.get_rank(dst_actor)
if dst_rank == -1:
raise ValueError(
f"Receiver actor {dst_actor} not found in communicator. "
"Please make sure the sender and receiver are in the same communicator."
)
communicator_metadata = CollectiveCommunicatorMetadata(
communicator_name=communicator.name,
src_rank=src_rank,
dst_rank=dst_rank,
)
return communicator_metadata
@staticmethod
def recv_multiple_tensors(
tensors,
obj_id: str,
tensor_transport_metadata: CollectiveTransportMetadata,
communicator_metadata: CollectiveCommunicatorMetadata,
):
from ray.util.collective import types
from ray.util.collective.collective import recv
assert isinstance(
tensor_transport_metadata, types.CollectiveTransportMetadata
), "metadata must be a CollectiveTransportMetadata object for non-NIXL transport"
assert isinstance(
communicator_metadata, types.CollectiveCommunicatorMetadata
), "metadata must be a CollectiveCommunicatorMetadata object for non-NIXL transport"
for tensor in tensors:
recv(
tensor,
communicator_metadata.src_rank,
communicator_metadata.communicator_name,
)
@staticmethod
def send_multiple_tensors(
tensors: List["torch.Tensor"],
tensor_transport_metadata: CollectiveTransportMetadata,
communicator_metadata: CollectiveCommunicatorMetadata,
):
import ray.util.collective as collective
device = tensors[0].device if tensors else None
for tensor in tensors:
if tensor.device.type != device.type:
raise ValueError(
f"tensor device {tensor.device} does not match device {device}"
)
collective.send(
tensor,
communicator_metadata.dst_rank,
communicator_metadata.communicator_name,
)
@staticmethod
def garbage_collect(
obj_id: str, tensor_transport_meta: CollectiveTransportMetadata
):
pass
@staticmethod
def abort_transport(
obj_id: str,
communicator_metadata: CollectiveCommunicatorMetadata,
):
raise NotImplementedError(
"Collective transport does not support abort_transport for now."
)
| CollectiveTensorTransport |
python | automl__auto-sklearn | examples/80_extending/example_extending_preprocessor.py | {
"start": 1247,
"end": 4807
} | class ____(AutoSklearnPreprocessingAlgorithm):
def __init__(self, solver, tol, shrinkage=None, random_state=None):
self.solver = solver
self.shrinkage = shrinkage
self.tol = tol
self.random_state = random_state
self.preprocessor = None
def fit(self, X, y=None):
if check_none(self.shrinkage):
self.shrinkage = None
else:
self.shrinkage = float(self.shrinkage)
self.tol = float(self.tol)
import sklearn.discriminant_analysis
self.preprocessor = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(
shrinkage=self.shrinkage,
solver=self.solver,
tol=self.tol,
)
self.preprocessor.fit(X, y)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "LDA",
"name": "Linear Discriminant Analysis",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA, SIGNED_DATA),
"output": (DENSE, UNSIGNED_DATA, SIGNED_DATA),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
solver = CategoricalHyperparameter(
name="solver", choices=["svd", "lsqr", "eigen"], default_value="svd"
)
shrinkage = UniformFloatHyperparameter(
name="shrinkage", lower=0.0, upper=1.0, default_value=0.5
)
tol = UniformFloatHyperparameter(
name="tol", lower=0.0001, upper=1, default_value=0.0001
)
cs.add_hyperparameters([solver, shrinkage, tol])
shrinkage_condition = InCondition(shrinkage, solver, ["lsqr", "eigen"])
cs.add_condition(shrinkage_condition)
return cs
# Add LDA component to auto-sklearn.
autosklearn.pipeline.components.feature_preprocessing.add_preprocessor(LDA)
############################################################################
# Create dataset
# ==============
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
############################################################################
# Configuration space
# ===================
cs = LDA.get_hyperparameter_search_space()
print(cs)
############################################################################
# Fit the model using LDA as preprocessor
# =======================================
clf = autosklearn.classification.AutoSklearnClassifier(
time_left_for_this_task=30,
include={"feature_preprocessor": ["LDA"]},
# Bellow two flags are provided to speed up calculations
# Not recommended for a real implementation
initial_configurations_via_metalearning=0,
smac_scenario_args={"runcount_limit": 5},
)
clf.fit(X_train, y_train)
############################################################################
# Print prediction score and statistics
# =====================================
y_pred = clf.predict(X_test)
print("accuracy: ", sklearn.metrics.accuracy_score(y_pred, y_test))
pprint(clf.show_models(), indent=4)
| LDA |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/mapped_covariant.py | {
"start": 1467,
"end": 2230
} | class ____(Base):
__tablename__ = "child"
name: Mapped[str] = mapped_column(primary_key=True)
parent_name: Mapped[str] = mapped_column(ForeignKey(Parent.name))
parent: Mapped[Parent] = relationship()
assert get_parent_name(Child(parent=Parent(name="foo"))) == "foo"
# Make sure that relationships are covariant as well
_BaseT = TypeVar("_BaseT", bound=Base, covariant=True)
RelationshipType = Union[
InstrumentedAttribute[_BaseT],
InstrumentedAttribute[Sequence[_BaseT]],
InstrumentedAttribute[Union[_BaseT, None]],
]
def operate_on_relationships(
relationships: List[RelationshipType[_BaseT]],
) -> int:
return len(relationships)
assert operate_on_relationships([Parent.children, Child.parent]) == 2
# other test
| Child |
python | pytorch__pytorch | tools/testing/target_determination/heuristics/correlated_with_historical_failures.py | {
"start": 450,
"end": 975
} | class ____(HeuristicInterface):
def __init__(self, **kwargs: dict[str, Any]) -> None:
super().__init__(**kwargs)
def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
test_ratings = get_ratings_for_tests(
ADDITIONAL_CI_FILES_FOLDER / TEST_FILE_RATINGS_FILE
)
test_ratings = {TestRun(k): v for (k, v) in test_ratings.items() if k in tests}
return TestPrioritizations(tests, normalize_ratings(test_ratings, 0.25))
| CorrelatedWithHistoricalFailures |
python | ray-project__ray | python/ray/tests/test_namespace.py | {
"start": 4606,
"end": 5660
} | class ____:
def ping(self):
return "pong from other job"
actor = DetachedActor.options(name="Pinger", lifetime="detached").remote()
ray.get(actor.ping.remote())
"""
run_string_as_driver(driver_template.format(address))
act = ray.get_actor("Pinger")
assert ray.get(act.ping.remote()) == "pong from other job"
def test_detached_warning(shutdown_only):
ray.init()
@ray.remote
class DetachedActor:
def ping(self):
return "pong"
error_pubsub = init_error_pubsub()
actor = DetachedActor.options( # noqa: F841
name="Pinger", lifetime="detached"
).remote()
errors = get_error_message(error_pubsub, 1, None)
error = errors.pop()
assert error["type"] == ray_constants.DETACHED_ACTOR_ANONYMOUS_NAMESPACE_ERROR
def test_namespace_client():
cluster = Cluster()
cluster.add_node(num_cpus=4, ray_client_server_port=8080)
cluster.wait_for_nodes(1)
template = """
import ray
ray.util.connect("{address}", namespace="{namespace}")
@ray.remote
| DetachedActor |
python | django__django | django/db/backends/sqlite3/_functions.py | {
"start": 14418,
"end": 14485
} | class ____(ListAggregate):
finalize = statistics.stdev
| StdDevSamp |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 37357,
"end": 42814
} | class ____:
xl3DArea = -4098 # from enum XlChartType
xl3DAreaStacked = 78 # from enum XlChartType
xl3DAreaStacked100 = 79 # from enum XlChartType
xl3DBarClustered = 60 # from enum XlChartType
xl3DBarStacked = 61 # from enum XlChartType
xl3DBarStacked100 = 62 # from enum XlChartType
xl3DColumn = -4100 # from enum XlChartType
xl3DColumnClustered = 54 # from enum XlChartType
xl3DColumnStacked = 55 # from enum XlChartType
xl3DColumnStacked100 = 56 # from enum XlChartType
xl3DLine = -4101 # from enum XlChartType
xl3DPie = -4102 # from enum XlChartType
xl3DPieExploded = 70 # from enum XlChartType
xlArea = 1 # from enum XlChartType
xlAreaStacked = 76 # from enum XlChartType
xlAreaStacked100 = 77 # from enum XlChartType
xlBarClustered = 57 # from enum XlChartType
xlBarOfPie = 71 # from enum XlChartType
xlBarStacked = 58 # from enum XlChartType
xlBarStacked100 = 59 # from enum XlChartType
xlBubble = 15 # from enum XlChartType
xlBubble3DEffect = 87 # from enum XlChartType
xlColumnClustered = 51 # from enum XlChartType
xlColumnStacked = 52 # from enum XlChartType
xlColumnStacked100 = 53 # from enum XlChartType
xlConeBarClustered = 102 # from enum XlChartType
xlConeBarStacked = 103 # from enum XlChartType
xlConeBarStacked100 = 104 # from enum XlChartType
xlConeCol = 105 # from enum XlChartType
xlConeColClustered = 99 # from enum XlChartType
xlConeColStacked = 100 # from enum XlChartType
xlConeColStacked100 = 101 # from enum XlChartType
xlCylinderBarClustered = 95 # from enum XlChartType
xlCylinderBarStacked = 96 # from enum XlChartType
xlCylinderBarStacked100 = 97 # from enum XlChartType
xlCylinderCol = 98 # from enum XlChartType
xlCylinderColClustered = 92 # from enum XlChartType
xlCylinderColStacked = 93 # from enum XlChartType
xlCylinderColStacked100 = 94 # from enum XlChartType
xlDoughnut = -4120 # from enum XlChartType
xlDoughnutExploded = 80 # from enum XlChartType
xlLine = 4 # from enum XlChartType
xlLineMarkers = 65 # from enum XlChartType
xlLineMarkersStacked = 66 # from enum XlChartType
xlLineMarkersStacked100 = 67 # from enum XlChartType
xlLineStacked = 63 # from enum XlChartType
xlLineStacked100 = 64 # from enum XlChartType
xlPie = 5 # from enum XlChartType
xlPieExploded = 69 # from enum XlChartType
xlPieOfPie = 68 # from enum XlChartType
xlPyramidBarClustered = 109 # from enum XlChartType
xlPyramidBarStacked = 110 # from enum XlChartType
xlPyramidBarStacked100 = 111 # from enum XlChartType
xlPyramidCol = 112 # from enum XlChartType
xlPyramidColClustered = 106 # from enum XlChartType
xlPyramidColStacked = 107 # from enum XlChartType
xlPyramidColStacked100 = 108 # from enum XlChartType
xlRadar = -4151 # from enum XlChartType
xlRadarFilled = 82 # from enum XlChartType
xlRadarMarkers = 81 # from enum XlChartType
xlStockHLC = 88 # from enum XlChartType
xlStockOHLC = 89 # from enum XlChartType
xlStockVHLC = 90 # from enum XlChartType
xlStockVOHLC = 91 # from enum XlChartType
xlSurface = 83 # from enum XlChartType
xlSurfaceTopView = 85 # from enum XlChartType
xlSurfaceTopViewWireframe = 86 # from enum XlChartType
xlSurfaceWireframe = 84 # from enum XlChartType
xlXYScatter = -4169 # from enum XlChartType
xlXYScatterLines = 74 # from enum XlChartType
xlXYScatterLinesNoMarkers = 75 # from enum XlChartType
xlXYScatterSmooth = 72 # from enum XlChartType
xlXYScatterSmoothNoMarkers = 73 # from enum XlChartType
chart_types = (
"3d_area",
"3d_area_stacked",
"3d_area_stacked_100",
"3d_bar_clustered",
"3d_bar_stacked",
"3d_bar_stacked_100",
"3d_column",
"3d_column_clustered",
"3d_column_stacked",
"3d_column_stacked_100",
"3d_line",
"3d_pie",
"3d_pie_exploded",
"area",
"area_stacked",
"area_stacked_100",
"bar_clustered",
"bar_of_pie",
"bar_stacked",
"bar_stacked_100",
"bubble",
"bubble_3d_effect",
"column_clustered",
"column_stacked",
"column_stacked_100",
"combination",
"cone_bar_clustered",
"cone_bar_stacked",
"cone_bar_stacked_100",
"cone_col",
"cone_col_clustered",
"cone_col_stacked",
"cone_col_stacked_100",
"cylinder_bar_clustered",
"cylinder_bar_stacked",
"cylinder_bar_stacked_100",
"cylinder_col",
"cylinder_col_clustered",
"cylinder_col_stacked",
"cylinder_col_stacked_100",
"doughnut",
"doughnut_exploded",
"line",
"line_markers",
"line_markers_stacked",
"line_markers_stacked_100",
"line_stacked",
"line_stacked_100",
"pie",
"pie_exploded",
"pie_of_pie",
"pyramid_bar_clustered",
"pyramid_bar_stacked",
"pyramid_bar_stacked_100",
"pyramid_col",
"pyramid_col_clustered",
"pyramid_col_stacked",
"pyramid_col_stacked_100",
"radar",
"radar_filled",
"radar_markers",
"stock_hlc",
"stock_ohlc",
"stock_vhlc",
"stock_vohlc",
"surface",
"surface_top_view",
"surface_top_view_wireframe",
"surface_wireframe",
"xy_scatter",
"xy_scatter_lines",
"xy_scatter_lines_no_markers",
"xy_scatter_smooth",
"xy_scatter_smooth_no_markers",
)
| ChartType |
python | google__python-fire | fire/test_components_py3.py | {
"start": 1483,
"end": 1977
} | class ____:
"""Class with functions that have default arguments and types."""
def double(self, count: float) -> float:
"""Returns the input multiplied by 2.
Args:
count: Input number that you want to double.
Returns:
A number that is the double of count.
"""
return 2 * count
def long_type(
self,
long_obj: (Tuple[Tuple[Tuple[Tuple[Tuple[Tuple[Tuple[
Tuple[Tuple[Tuple[Tuple[Tuple[int]]]]]]]]]]]])
):
return long_obj
| WithTypes |
python | mlflow__mlflow | mlflow/models/resources.py | {
"start": 7805,
"end": 8666
} | class ____(DatabricksResource):
"""
Defines a Databricks Unity Catalog (UC) Table, which establishes table dependencies
for Model Serving. This table will be referenced in Agent Model Serving endpoints,
where an agent queries a SQL table via either Genie or UC Functions.
Args:
table_name (str): The name of the table used by the model
on_behalf_of_user (Optional[bool]): If True, the resource is accessed with
with the permission of the invoker of the model in the serving endpoint. If set to
None or False, the resource is accessed with the permissions of the creator
"""
@property
def type(self) -> ResourceType:
return ResourceType.APP
def __init__(self, app_name: str, on_behalf_of_user: bool | None = None):
super().__init__(app_name, on_behalf_of_user)
| DatabricksApp |
python | google__jax | jax/_src/scipy/spatial/transform.py | {
"start": 880,
"end": 7462
} | class ____(typing.NamedTuple):
"""Rotation in 3 dimensions.
JAX implementation of :class:`scipy.spatial.transform.Rotation`.
Examples:
Construct an object describing a 90 degree rotation about the z-axis:
>>> from jax.scipy.spatial.transform import Rotation
>>> r = Rotation.from_euler('z', 90, degrees=True)
Convert to a rotation vector:
>>> r.as_rotvec()
Array([0. , 0. , 1.5707964], dtype=float32)
Convert to rotation matrix:
>>> r.as_matrix()
Array([[ 0. , -0.99999994, 0. ],
[ 0.99999994, 0. , 0. ],
[ 0. , 0. , 0.99999994]], dtype=float32)
Compose with another rotation:
>>> r2 = Rotation.from_euler('x', 90, degrees=True)
>>> r3 = r * r2
>>> r3.as_matrix()
Array([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]], dtype=float32)
See the scipy :class:`~scipy.spatial.transform.Rotation` documentation for
further examples of manipulating Rotation objects.
"""
quat: Array
@classmethod
def concatenate(cls, rotations: typing.Sequence):
"""Concatenate a sequence of `Rotation` objects."""
return cls(jnp.concatenate([rotation.quat for rotation in rotations]))
@classmethod
def from_euler(cls, seq: str, angles: Array, degrees: bool = False):
"""Initialize from Euler angles."""
num_axes = len(seq)
if num_axes < 1 or num_axes > 3:
raise ValueError("Expected axis specification to be a non-empty "
"string of upto 3 characters, got {}".format(seq))
intrinsic = (re.match(r'^[XYZ]{1,3}$', seq) is not None)
extrinsic = (re.match(r'^[xyz]{1,3}$', seq) is not None)
if not (intrinsic or extrinsic):
raise ValueError("Expected axes from `seq` to be from ['x', 'y', "
"'z'] or ['X', 'Y', 'Z'], got {}".format(seq))
if any(seq[i] == seq[i+1] for i in range(num_axes - 1)):
raise ValueError("Expected consecutive axes to be different, "
"got {}".format(seq))
angles = jnp.atleast_1d(angles)
axes = jnp.array([_elementary_basis_index(x) for x in seq.lower()])
return cls(_elementary_quat_compose(angles, axes, intrinsic, degrees))
@classmethod
def from_matrix(cls, matrix: Array):
"""Initialize from rotation matrix."""
return cls(_from_matrix(matrix))
@classmethod
def from_mrp(cls, mrp: Array):
"""Initialize from Modified Rodrigues Parameters (MRPs)."""
return cls(_from_mrp(mrp))
@classmethod
def from_quat(cls, quat: Array):
"""Initialize from quaternions."""
return cls(_normalize_quaternion(quat))
@classmethod
def from_rotvec(cls, rotvec: Array, degrees: bool = False):
"""Initialize from rotation vectors."""
return cls(_from_rotvec(rotvec, degrees))
@classmethod
def identity(cls, num: int | None = None, dtype=float):
"""Get identity rotation(s)."""
assert num is None
quat = jnp.array([0., 0., 0., 1.], dtype=dtype)
return cls(quat)
@classmethod
def random(cls, random_key: Array, num: int | None = None):
"""Generate uniformly distributed rotations."""
# Need to implement scipy.stats.special_ortho_group for this to work...
raise NotImplementedError()
def __getitem__(self, indexer):
"""Extract rotation(s) at given index(es) from object."""
if self.single:
raise TypeError("Single rotation is not subscriptable.")
return Rotation(self.quat[indexer])
def __len__(self):
"""Number of rotations contained in this object."""
if self.single:
raise TypeError('Single rotation has no len().')
else:
return self.quat.shape[0]
def __mul__(self, other) -> Rotation:
"""Compose this rotation with the other."""
return Rotation.from_quat(_compose_quat(self.quat, other.quat))
def apply(self, vectors: Array, inverse: bool = False) -> Array:
"""Apply this rotation to one or more vectors."""
return _apply(self.as_matrix(), vectors, inverse)
def as_euler(self, seq: str, degrees: bool = False):
"""Represent as Euler angles."""
if len(seq) != 3:
raise ValueError(f"Expected 3 axes, got {seq}.")
intrinsic = (re.match(r'^[XYZ]{1,3}$', seq) is not None)
extrinsic = (re.match(r'^[xyz]{1,3}$', seq) is not None)
if not (intrinsic or extrinsic):
raise ValueError("Expected axes from `seq` to be from "
"['x', 'y', 'z'] or ['X', 'Y', 'Z'], "
"got {}".format(seq))
if any(seq[i] == seq[i+1] for i in range(2)):
raise ValueError("Expected consecutive axes to be different, "
"got {}".format(seq))
axes = jnp.array([_elementary_basis_index(x) for x in seq.lower()])
with config.numpy_rank_promotion('allow'):
return _compute_euler_from_quat(self.quat, axes, extrinsic, degrees)
def as_matrix(self) -> Array:
"""Represent as rotation matrix."""
return _as_matrix(self.quat)
def as_mrp(self) -> Array:
"""Represent as Modified Rodrigues Parameters (MRPs)."""
return _as_mrp(self.quat)
def as_rotvec(self, degrees: bool = False) -> Array:
"""Represent as rotation vectors."""
return _as_rotvec(self.quat, degrees)
def as_quat(self, canonical: bool=False, scalar_first: bool=False) -> Array:
"""Represent as quaternions."""
quat = _make_canonical(self.quat) if canonical else self.quat
if scalar_first:
return jnp.roll(quat, shift=1, axis=-1)
return quat
def inv(self):
"""Invert this rotation."""
return Rotation(_inv(self.quat))
def magnitude(self) -> Array:
"""Get the magnitude(s) of the rotation(s)."""
return _magnitude(self.quat)
def mean(self, weights: Array | None = None):
"""Get the mean of the rotations."""
w = jnp.ones(self.quat.shape[0], dtype=self.quat.dtype) if weights is None else jnp.asarray(weights, dtype=self.quat.dtype)
if w.ndim != 1:
raise ValueError("Expected `weights` to be 1 dimensional, got "
"shape {}.".format(w.shape))
if w.shape[0] != len(self):
raise ValueError("Expected `weights` to have number of values "
"equal to number of rotations, got "
"{} values and {} rotations.".format(w.shape[0], len(self)))
K = jnp.dot(w[np.newaxis, :] * self.quat.T, self.quat)
_, v = jnp_linalg.eigh(K)
return Rotation(v[:, -1])
@property
def single(self) -> bool:
"""Whether this instance represents a single rotation."""
return self.quat.ndim == 1
| Rotation |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/unit_tests/integrations/monday_requests/base_requests_builder.py | {
"start": 233,
"end": 839
} | class ____(abc.ABC):
@property
@abc.abstractmethod
def url(self) -> str:
"""A url"""
@property
@abc.abstractmethod
def query_params(self) -> Dict[str, Any]:
"""Query params"""
@property
@abc.abstractmethod
def headers(self) -> Dict[str, Any]:
"""Headers"""
@property
@abc.abstractmethod
def request_body(self) -> Optional[str]:
"""A request body"""
def build(self) -> HttpRequest:
return HttpRequest(url=self.url, query_params=self.query_params, headers=self.headers, body=self.request_body)
| MondayRequestBuilder |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/conflicting_dependent/package.py | {
"start": 217,
"end": 643
} | class ____(Package):
"""By itself this package does not have conflicts, but it is used to
ensure that if a user tries to build with an installed instance
of dependency-install@2 that there is a failure."""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("dependency-install@:1.0")
| ConflictingDependent |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 34929,
"end": 35125
} | class ____(PrefectBaseModel):
"""Filter by `Variable.id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of variable ids to include"
)
| VariableFilterId |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 4691,
"end": 5027
} | class ____(Generic[_TModel, _TType]):
@classmethod
def from_orm(cls, model: _TModel) -> _TType:
raise NotImplementedError
@classmethod
def from_orm_optional(cls, model: Optional[_TModel]) -> Optional[_TType]:
if model is None:
return None
return cls.from_orm(model)
@dataclass
| OrmMixin |
python | sympy__sympy | sympy/matrices/common.py | {
"start": 73319,
"end": 87792
} | class ____(MatrixRequired):
"""Provides basic matrix arithmetic operations.
Should not be instantiated directly."""
_op_priority = 10.01
def _eval_Abs(self):
return self._new(self.rows, self.cols, lambda i, j: Abs(self[i, j]))
def _eval_add(self, other):
return self._new(self.rows, self.cols,
lambda i, j: self[i, j] + other[i, j])
def _eval_matrix_mul(self, other):
def entry(i, j):
vec = [self[i,k]*other[k,j] for k in range(self.cols)]
try:
return Add(*vec)
except (TypeError, SympifyError):
# Some matrices don't work with `sum` or `Add`
# They don't work with `sum` because `sum` tries to add `0`
# Fall back to a safe way to multiply if the `Add` fails.
return reduce(lambda a, b: a + b, vec)
return self._new(self.rows, other.cols, entry)
def _eval_matrix_mul_elementwise(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other[i,j])
def _eval_matrix_rmul(self, other):
def entry(i, j):
return sum(other[i,k]*self[k,j] for k in range(other.cols))
return self._new(other.rows, self.cols, entry)
def _eval_pow_by_recursion(self, num):
if num == 1:
return self
if num % 2 == 1:
a, b = self, self._eval_pow_by_recursion(num - 1)
else:
a = b = self._eval_pow_by_recursion(num // 2)
return a.multiply(b)
def _eval_pow_by_cayley(self, exp):
from sympy.discrete.recurrences import linrec_coeffs
row = self.shape[0]
p = self.charpoly()
coeffs = (-p).all_coeffs()[1:]
coeffs = linrec_coeffs(coeffs, exp)
new_mat = self.eye(row)
ans = self.zeros(row)
for i in range(row):
ans += coeffs[i]*new_mat
new_mat *= self
return ans
def _eval_pow_by_recursion_dotprodsimp(self, num, prevsimp=None):
if prevsimp is None:
prevsimp = [True]*len(self)
if num == 1:
return self
if num % 2 == 1:
a, b = self, self._eval_pow_by_recursion_dotprodsimp(num - 1,
prevsimp=prevsimp)
else:
a = b = self._eval_pow_by_recursion_dotprodsimp(num // 2,
prevsimp=prevsimp)
m = a.multiply(b, dotprodsimp=False)
lenm = len(m)
elems = [None]*lenm
for i in range(lenm):
if prevsimp[i]:
elems[i], prevsimp[i] = _dotprodsimp(m[i], withsimp=True)
else:
elems[i] = m[i]
return m._new(m.rows, m.cols, elems)
def _eval_scalar_mul(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other)
def _eval_scalar_rmul(self, other):
return self._new(self.rows, self.cols, lambda i, j: other*self[i,j])
def _eval_Mod(self, other):
return self._new(self.rows, self.cols, lambda i, j: Mod(self[i, j], other))
# Python arithmetic functions
def __abs__(self):
"""Returns a new matrix with entry-wise absolute values."""
return self._eval_Abs()
@call_highest_priority('__radd__')
def __add__(self, other):
"""Return self + other, raising ShapeError if shapes do not match."""
if isinstance(other, NDimArray): # Matrix and array addition is currently not implemented
return NotImplemented
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape'):
if self.shape != other.shape:
raise ShapeError("Matrix size mismatch: %s + %s" % (
self.shape, other.shape))
# honest SymPy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
# call the highest-priority class's _eval_add
a, b = self, other
if a.__class__ != classof(a, b):
b, a = a, b
return a._eval_add(b)
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_add(self, other)
raise TypeError('cannot add %s and %s' % (type(self), type(other)))
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self * (self.one / other)
@call_highest_priority('__rmatmul__')
def __matmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__mul__(other)
def __mod__(self, other):
return self.applyfunc(lambda x: x % other)
@call_highest_priority('__rmul__')
def __mul__(self, other):
"""Return self*other where other is either a scalar or a matrix
of compatible dimensions.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.
>>>
See Also
========
matrix_multiply_elementwise
"""
return self.multiply(other)
def multiply(self, other, dotprodsimp=None):
"""Same as __mul__() but with optional simplification.
Parameters
==========
dotprodsimp : bool, optional
Specifies whether intermediate term algebraic simplification is used
during matrix multiplications to control expression blowup and thus
speed up calculation. Default is off.
"""
isimpbool = _get_intermediate_simp_bool(False, dotprodsimp)
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check. Double check other is not explicitly not a Matrix.
if (hasattr(other, 'shape') and len(other.shape) == 2 and
(getattr(other, 'is_Matrix', True) or
getattr(other, 'is_MatrixLike', True))):
if self.shape[1] != other.shape[0]:
raise ShapeError("Matrix size mismatch: %s * %s." % (
self.shape, other.shape))
# honest SymPy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
m = self._eval_matrix_mul(other)
if isimpbool:
return m._new(m.rows, m.cols, [_dotprodsimp(e) for e in m])
return m
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_mul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_mul(other)
except TypeError:
pass
return NotImplemented
def multiply_elementwise(self, other):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
sympy.matrices.matrixbase.MatrixBase.cross
sympy.matrices.matrixbase.MatrixBase.dot
multiply
"""
if self.shape != other.shape:
raise ShapeError("Matrix shapes must agree {} != {}".format(self.shape, other.shape))
return self._eval_matrix_mul_elementwise(other)
def __neg__(self):
return self._eval_scalar_mul(-1)
@call_highest_priority('__rpow__')
def __pow__(self, exp):
"""Return self**exp a scalar or symbol."""
return self.pow(exp)
def pow(self, exp, method=None):
r"""Return self**exp a scalar or symbol.
Parameters
==========
method : multiply, mulsimp, jordan, cayley
If multiply then it returns exponentiation using recursion.
If jordan then Jordan form exponentiation will be used.
If cayley then the exponentiation is done using Cayley-Hamilton
theorem.
If mulsimp then the exponentiation is done using recursion
with dotprodsimp. This specifies whether intermediate term
algebraic simplification is used during naive matrix power to
control expression blowup and thus speed up calculation.
If None, then it heuristically decides which method to use.
"""
if method is not None and method not in ['multiply', 'mulsimp', 'jordan', 'cayley']:
raise TypeError('No such method')
if self.rows != self.cols:
raise NonSquareMatrixError()
a = self
jordan_pow = getattr(a, '_matrix_pow_by_jordan_blocks', None)
exp = sympify(exp)
if exp.is_zero:
return a._new(a.rows, a.cols, lambda i, j: int(i == j))
if exp == 1:
return a
diagonal = getattr(a, 'is_diagonal', None)
if diagonal is not None and diagonal():
return a._new(a.rows, a.cols, lambda i, j: a[i,j]**exp if i == j else 0)
if exp.is_Number and exp % 1 == 0:
if a.rows == 1:
return a._new([[a[0]**exp]])
if exp < 0:
exp = -exp
a = a.inv()
# When certain conditions are met,
# Jordan block algorithm is faster than
# computation by recursion.
if method == 'jordan':
try:
return jordan_pow(exp)
except MatrixError:
if method == 'jordan':
raise
elif method == 'cayley':
if not exp.is_Number or exp % 1 != 0:
raise ValueError("cayley method is only valid for integer powers")
return a._eval_pow_by_cayley(exp)
elif method == "mulsimp":
if not exp.is_Number or exp % 1 != 0:
raise ValueError("mulsimp method is only valid for integer powers")
return a._eval_pow_by_recursion_dotprodsimp(exp)
elif method == "multiply":
if not exp.is_Number or exp % 1 != 0:
raise ValueError("multiply method is only valid for integer powers")
return a._eval_pow_by_recursion(exp)
elif method is None and exp.is_Number and exp % 1 == 0:
if exp.is_Float:
exp = Integer(exp)
# Decide heuristically which method to apply
if a.rows == 2 and exp > 100000:
return jordan_pow(exp)
elif _get_intermediate_simp_bool(True, None):
return a._eval_pow_by_recursion_dotprodsimp(exp)
elif exp > 10000:
return a._eval_pow_by_cayley(exp)
else:
return a._eval_pow_by_recursion(exp)
if jordan_pow:
try:
return jordan_pow(exp)
except NonInvertibleMatrixError:
# Raised by jordan_pow on zero determinant matrix unless exp is
# definitely known to be a non-negative integer.
# Here we raise if n is definitely not a non-negative integer
# but otherwise we can leave this as an unevaluated MatPow.
if exp.is_integer is False or exp.is_nonnegative is False:
raise
from sympy.matrices.expressions import MatPow
return MatPow(a, exp)
@call_highest_priority('__add__')
def __radd__(self, other):
return self + other
@call_highest_priority('__matmul__')
def __rmatmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__rmul__(other)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return self.rmultiply(other)
def rmultiply(self, other, dotprodsimp=None):
"""Same as __rmul__() but with optional simplification.
Parameters
==========
dotprodsimp : bool, optional
Specifies whether intermediate term algebraic simplification is used
during matrix multiplications to control expression blowup and thus
speed up calculation. Default is off.
"""
isimpbool = _get_intermediate_simp_bool(False, dotprodsimp)
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check. Double check other is not explicitly not a Matrix.
if (hasattr(other, 'shape') and len(other.shape) == 2 and
(getattr(other, 'is_Matrix', True) or
getattr(other, 'is_MatrixLike', True))):
if self.shape[0] != other.shape[1]:
raise ShapeError("Matrix size mismatch.")
# honest SymPy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
m = self._eval_matrix_rmul(other)
if isimpbool:
return m._new(m.rows, m.cols, [_dotprodsimp(e) for e in m])
return m
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_rmul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_rmul(other)
except TypeError:
pass
return NotImplemented
@call_highest_priority('__sub__')
def __rsub__(self, a):
return (-self) + a
@call_highest_priority('__rsub__')
def __sub__(self, a):
return self + (-a)
| MatrixArithmetic |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/annotation.py | {
"start": 4504,
"end": 6911
} | class ____(SupportsWrappingAnnotations):
# SupportsCloneAnnotations extends from SupportsWrappingAnnotations
# to support the structure of having the base ClauseElement
# be a subclass of SupportsWrappingAnnotations. Any ClauseElement
# subclass that wants to extend from SupportsCloneAnnotations
# will inherently also be subclassing SupportsWrappingAnnotations, so
# make that specific here.
if not typing.TYPE_CHECKING:
__slots__ = ()
_clone_annotations_traverse_internals: _TraverseInternalsType = [
("_annotations", InternalTraversal.dp_annotations_key)
]
def _annotate(self, values: _AnnotationDict) -> Self:
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
new = self._clone()
new._annotations = new._annotations.union(values)
new.__dict__.pop("_annotations_cache_key", None)
new.__dict__.pop("_generate_cache_key", None)
return new
def _with_annotations(self, values: _AnnotationDict) -> Self:
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
new = self._clone()
new._annotations = util.immutabledict(values)
new.__dict__.pop("_annotations_cache_key", None)
new.__dict__.pop("_generate_cache_key", None)
return new
@overload
def _deannotate(
self,
values: Literal[None] = ...,
clone: bool = ...,
) -> Self: ...
@overload
def _deannotate(
self,
values: Sequence[str] = ...,
clone: bool = ...,
) -> SupportsAnnotations: ...
def _deannotate(
self,
values: Optional[Sequence[str]] = None,
clone: bool = False,
) -> SupportsAnnotations:
"""return a copy of this :class:`_expression.ClauseElement`
with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone or self._annotations:
# clone is used when we are also copying
# the expression for a deep deannotation
new = self._clone()
new._annotations = util.immutabledict()
new.__dict__.pop("_annotations_cache_key", None)
return new
else:
return self
| SupportsCloneAnnotations |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 2327,
"end": 2714
} | class ____(MetaflowException):
headline = "Argo Workflows scheduling error"
# List of future enhancements -
# 1. Configure Argo metrics.
# 2. Support resuming failed workflows within Argo Workflows.
# 3. Add Metaflow tags to labels/annotations.
# 4. Support R lang.
# 5. Ping @savin at slack.outerbounds.co for any feature request
| ArgoWorkflowsSchedulingException |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 36774,
"end": 36869
} | class ____(BaseModel):
hits: List["FacetValueHit"] = Field(..., description="")
| FacetResponse |
python | django__django | django/contrib/gis/measure.py | {
"start": 8434,
"end": 11726
} | class ____(MeasureBase):
STANDARD_UNIT = "m"
UNITS = {
"chain": 20.1168,
"chain_benoit": 20.116782,
"chain_sears": 20.1167645,
"british_chain_benoit": 20.1167824944,
"british_chain_sears": 20.1167651216,
"british_chain_sears_truncated": 20.116756,
"cm": 0.01,
"british_ft": 0.304799471539,
"british_yd": 0.914398414616,
"clarke_ft": 0.3047972654,
"clarke_link": 0.201166195164,
"fathom": 1.8288,
"ft": 0.3048,
"furlong": 201.168,
"german_m": 1.0000135965,
"gold_coast_ft": 0.304799710181508,
"indian_yd": 0.914398530744,
"inch": 0.0254,
"km": 1000.0,
"link": 0.201168,
"link_benoit": 0.20116782,
"link_sears": 0.20116765,
"m": 1.0,
"mi": 1609.344,
"mm": 0.001,
"nm": 1852.0,
"nm_uk": 1853.184,
"rod": 5.0292,
"sears_yd": 0.91439841,
"survey_ft": 0.304800609601,
"um": 0.000001,
"yd": 0.9144,
}
# Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
ALIAS = {
"centimeter": "cm",
"foot": "ft",
"inches": "inch",
"kilometer": "km",
"kilometre": "km",
"meter": "m",
"metre": "m",
"micrometer": "um",
"micrometre": "um",
"millimeter": "mm",
"millimetre": "mm",
"mile": "mi",
"yard": "yd",
"British chain (Benoit 1895 B)": "british_chain_benoit",
"British chain (Sears 1922)": "british_chain_sears",
"British chain (Sears 1922 truncated)": "british_chain_sears_truncated",
"British foot (Sears 1922)": "british_ft",
"British foot": "british_ft",
"British yard (Sears 1922)": "british_yd",
"British yard": "british_yd",
"Clarke's Foot": "clarke_ft",
"Clarke's link": "clarke_link",
"Chain (Benoit)": "chain_benoit",
"Chain (Sears)": "chain_sears",
"Foot (International)": "ft",
"Furrow Long": "furlong",
"German legal metre": "german_m",
"Gold Coast foot": "gold_coast_ft",
"Indian yard": "indian_yd",
"Link (Benoit)": "link_benoit",
"Link (Sears)": "link_sears",
"Nautical Mile": "nm",
"Nautical Mile (UK)": "nm_uk",
"US survey foot": "survey_ft",
"U.S. Foot": "survey_ft",
"Yard (Indian)": "indian_yd",
"Yard (Sears)": "sears_yd",
}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __mul__(self, other):
if isinstance(other, self.__class__):
return Area(
default_unit=AREA_PREFIX + self._default_unit,
**{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)},
)
elif isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)},
)
else:
raise TypeError(
"%(distance)s must be multiplied with number or %(distance)s"
% {
"distance": pretty_name(self.__class__),
}
)
| Distance |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 18020,
"end": 18292
} | class ____(PrefectBaseModel):
"""Filter by `Log.name`."""
any_: Optional[List[str]] = Field(
default=None,
description="A list of log names to include",
examples=[["prefect.logger.flow_runs", "prefect.logger.task_runs"]],
)
| LogFilterName |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_detector_index.py | {
"start": 46023,
"end": 55958
} | class ____(OrganizationDetectorIndexBaseTest):
method = "PUT"
def setUp(self) -> None:
super().setUp()
self.detector = self.create_detector(
project=self.project, name="Test Detector", type=MetricIssue.slug, enabled=True
)
self.detector_two = self.create_detector(
project=self.project, name="Another Detector", type=MetricIssue.slug, enabled=True
)
self.detector_three = self.create_detector(
project=self.project, name="Third Detector", type=MetricIssue.slug, enabled=True
)
self.error_detector = self.create_detector(
project=self.project,
name="Error Detector",
type=ErrorGroupType.slug,
enabled=True,
created_by_id=None,
)
self.user_detector = self.create_detector(
project=self.project,
name="User Created Detector",
type=MetricIssue.slug,
enabled=True,
created_by_id=self.user.id,
)
self.member_user = self.create_user()
self.create_member(
team_roles=[(self.team, "contributor")],
user=self.member_user,
role="member",
organization=self.organization,
)
self.team_admin_user = self.create_user()
self.create_member(
team_roles=[(self.team, "admin")],
user=self.team_admin_user,
role="member",
organization=self.organization,
)
self.org_manager_user = self.create_user()
self.create_member(
user=self.org_manager_user,
role="manager",
organization=self.organization,
)
def test_update_detectors_by_ids_success(self) -> None:
response = self.get_success_response(
self.organization.slug,
qs_params=[("id", str(self.detector.id)), ("id", str(self.detector_two.id))],
enabled=False,
status_code=200,
)
# Verify detectors were updated
self.detector.refresh_from_db()
self.detector_two.refresh_from_db()
assert self.detector.enabled is False
assert self.detector_two.enabled is False
# Verify third detector was not affected
self.detector_three.refresh_from_db()
assert self.detector_three.enabled is True
# Verify response data
assert len(response.data) == 2
detector_ids = {d["id"] for d in response.data}
assert detector_ids == {str(self.detector.id), str(self.detector_two.id)}
assert all(d["enabled"] is False for d in response.data)
def test_update_detectors_by_query_success(self) -> None:
response = self.get_success_response(
self.organization.slug,
qs_params={"query": "test", "project": self.project.id},
enabled=False,
status_code=200,
)
# Verify detector matching query was updated
self.detector.refresh_from_db()
assert self.detector.enabled is False
# Verify other detectors were not affected
self.detector_two.refresh_from_db()
self.detector_three.refresh_from_db()
assert self.detector_two.enabled is True
assert self.detector_three.enabled is True
# Verify response
assert len(response.data) == 1
assert response.data[0]["id"] == str(self.detector.id)
assert response.data[0]["enabled"] is False
def test_update_detectors_enable_success(self) -> None:
self.detector.update(enabled=False)
self.detector_two.update(enabled=False)
self.detector_three.update(enabled=False)
response = self.get_success_response(
self.organization.slug,
qs_params={"id": str(self.detector_three.id)},
enabled=True,
status_code=200,
)
# Verify detector was enabled
self.detector_three.refresh_from_db()
assert self.detector_three.enabled is True
# Verify response
assert len(response.data) == 1
assert response.data[0]["id"] == str(self.detector_three.id)
assert response.data[0]["enabled"] is True
def test_update_detectors_no_parameters_error(self) -> None:
response = self.get_error_response(
self.organization.slug,
enabled=False,
status_code=400,
)
assert "At least one of 'id', 'query', 'project', or 'projectSlug' must be provided" in str(
response.data["detail"]
)
def test_update_detectors_missing_enabled_field(self) -> None:
response = self.get_error_response(
self.organization.slug,
qs_params={"id": str(self.detector.id)},
status_code=400,
)
assert "This field is required." in str(response.data["enabled"])
def test_update_detectors_invalid_id_format(self) -> None:
response = self.get_error_response(
self.organization.slug,
qs_params={"id": "not-a-number"},
enabled=False,
status_code=400,
)
assert "Invalid ID format" in str(response.data["id"])
def test_update_detectors_no_matching_detectors(self) -> None:
response = self.get_error_response(
self.organization.slug,
qs_params={"id": "999999"},
enabled=False,
status_code=400,
)
assert (
response.data["detail"]
== "Some detectors were not found or you do not have permission to update them."
)
def test_update_detectors_permission_denied_for_member_without_alerts_write(self) -> None:
self.organization.flags.allow_joinleave = False
self.organization.update_option("sentry:alerts_member_write", False)
self.organization.save()
self.login_as(user=self.member_user)
self.get_error_response(
self.organization.slug,
qs_params={"id": str(self.detector.id)},
enabled=False,
status_code=403,
)
# Verify detector was not modified
self.detector.refresh_from_db()
assert self.detector.enabled is True
def test_update_detectors_permission_allowed_for_team_admin(self) -> None:
self.organization.update_option("sentry:alerts_member_write", False)
self.login_as(user=self.team_admin_user)
self.get_success_response(
self.organization.slug,
qs_params={"id": str(self.detector.id)},
enabled=False,
status_code=200,
)
# Verify detector was updated
self.detector.refresh_from_db()
assert self.detector.enabled is False
def test_update_detectors_member_permission_allowed_for_user_created_detector(self) -> None:
self.login_as(user=self.member_user)
self.get_success_response(
self.organization.slug,
qs_params={"id": str(self.user_detector.id)},
enabled=False,
status_code=200,
)
# Verify detector was updated
self.user_detector.refresh_from_db()
assert self.user_detector.enabled is False
def test_update_detectors_member_permission_denied_for_non_user_created_detector(self) -> None:
self.login_as(user=self.member_user)
# Try to update a detector not created by a user
self.get_error_response(
self.organization.slug,
qs_params={"id": str(self.error_detector.id)},
enabled=False,
status_code=403,
)
# Verify detector was not modified
self.error_detector.refresh_from_db()
assert self.error_detector.enabled is True
def test_update_detectors_org_manager_permission(self) -> None:
"""
Test that an organization manager can update any type of detector, including error detectors.
"""
self.login_as(user=self.org_manager_user)
self.get_success_response(
self.organization.slug,
qs_params=[("id", str(self.detector.id)), ("id", str(self.error_detector.id))],
enabled=False,
status_code=200,
)
self.detector.refresh_from_db()
self.error_detector.refresh_from_db()
assert self.detector.enabled is False
assert self.error_detector.enabled is False
def test_update_owner_query_by_project(self) -> None:
new_project = self.create_project(organization=self.organization)
detector = self.create_detector(
project=new_project, name="Test Detector", type=MetricIssue.slug, enabled=True
)
owner = self.create_user()
self.create_member(
user=owner,
role="owner",
organization=self.organization,
)
self.login_as(user=owner)
self.get_success_response(
self.organization.slug,
qs_params={"project": new_project.id},
enabled=False,
status_code=200,
)
detector.refresh_from_db()
assert detector.enabled is False
def test_update_detectors_mixed_permissions(self) -> None:
self.login_as(user=self.member_user)
# Try to update both detectors - should fail because of mixed permissions
self.get_error_response(
self.organization.slug,
qs_params=[("id", str(self.user_detector.id)), ("id", str(self.error_detector.id))],
enabled=False,
status_code=403,
)
# Verify neither detector was modified
self.user_detector.refresh_from_db()
self.error_detector.refresh_from_db()
assert self.user_detector.enabled is True
assert self.error_detector.enabled is True
@region_silo_test
| OrganizationDetectorIndexPutTest |
python | prabhupant__python-ds | data_structures/stack/stack_using_linked_list.py | {
"start": 1074,
"end": 1780
} | class ____(object):
def __init__(self,top=None):
self.ll = LinkedList(top)
def push(self, new_element):
"Push (add) a new element onto the top of the stack"
self.ll.insert_first(new_element)
def pop(self):
"Pop (remove) the first element off the top of the stack and return it"
return self.ll.delete_first()
# Test cases
# Set up some Elements
e1 = Element(1)
e2 = Element(2)
e3 = Element(3)
e4 = Element(4)
# Start setting up a Stack
stack = Stack(e1)
# Test stack functionality
stack.push(e2)
stack.push(e3)
print(stack.pop().value)
print(stack.pop().value)
print(stack.pop().value)
print(stack.pop())
stack.push(e4)
print(stack.pop().value)
| Stack |
python | rapidsai__cudf | python/dask_cudf/dask_cudf/backends.py | {
"start": 17045,
"end": 19328
} | class ____(DataFrameBackendEntrypoint):
"""Backend-entrypoint class for Dask-Expressions
This class is registered under the name "cudf" for the
``dask_expr.dataframe.backends`` entrypoint in ``pyproject.toml``.
Dask-DataFrame will use the methods defined in this class
in place of ``dask_expr.<creation-method>`` when the
"dataframe.backend" configuration is set to "cudf":
Examples
--------
>>> import dask
>>> import dask.dataframe as dd
>>> with dask.config.set({"dataframe.backend": "cudf"}):
... ddf = dd.from_dict({"a": range(10)})
>>> type(ddf._meta)
<class 'cudf.core.dataframe.DataFrame'>
"""
@staticmethod
def to_backend(data, **kwargs):
from dask_cudf._expr import new_collection
from dask_cudf._expr.expr import ToCudfBackend
return new_collection(ToCudfBackend(data, kwargs))
@staticmethod
def from_dict(
data,
npartitions,
orient="columns",
dtype=None,
columns=None,
constructor=cudf.DataFrame,
):
from dask_cudf._expr import from_dict
return _default_backend(
from_dict,
data,
npartitions=npartitions,
orient=orient,
dtype=dtype,
columns=columns,
constructor=constructor,
)
@staticmethod
def read_parquet(*args, **kwargs):
from dask_cudf.io.parquet import read_parquet as read_parquet_expr
return read_parquet_expr(*args, **kwargs)
@staticmethod
def read_csv(
path,
*args,
header="infer",
dtype_backend=None,
storage_options=None,
**kwargs,
):
from dask_cudf.io.csv import read_csv
return read_csv(
path,
*args,
header=header,
storage_options=storage_options,
**kwargs,
)
@staticmethod
def read_json(*args, **kwargs):
from dask_cudf.io.json import read_json as read_json_impl
return read_json_impl(*args, **kwargs)
@staticmethod
def read_orc(*args, **kwargs):
from dask_cudf.io.orc import read_orc as legacy_read_orc
return legacy_read_orc(*args, **kwargs)
| CudfBackendEntrypoint |
python | redis__redis-py | redis/commands/cluster.py | {
"start": 31663,
"end": 32604
} | class ____(
AsyncClusterMultiKeyCommands,
AsyncClusterManagementCommands,
AsyncACLCommands,
AsyncClusterDataAccessCommands,
AsyncScriptCommands,
AsyncFunctionCommands,
AsyncModuleCommands,
AsyncRedisModuleCommands,
):
"""
A class for all Redis Cluster commands
For key-based commands, the target node(s) will be internally determined
by the keys' hash slot.
Non-key-based commands can be executed with the 'target_nodes' argument to
target specific nodes. By default, if target_nodes is not specified, the
command will be executed on the default cluster node.
:param :target_nodes: type can be one of the followings:
- nodes flag: ALL_NODES, PRIMARIES, REPLICAS, RANDOM
- 'ClusterNode'
- 'list(ClusterNodes)'
- 'dict(any:clusterNodes)'
for example:
r.cluster_info(target_nodes=RedisCluster.ALL_NODES)
"""
| AsyncRedisClusterCommands |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/flickr/tests.py | {
"start": 2052,
"end": 3858
} | class ____(OAuthTestsMixin, TestCase):
"""Separate test for Flickr accounts without real names"""
provider_id = FlickrProvider.id
def get_mocked_response(self):
#
return [
MockedResponse(
HTTPStatus.OK,
r"""
{"stat": "ok",
"user": {
"username": {
"_content": "pennersr"},
"id": "12345678@N00"}}
""",
), # noqa
MockedResponse(
HTTPStatus.OK,
r"""
{"person": {"username": {"_content": "pennersr"}, "photosurl": {"_content":
"http://www.flickr.com/photos/12345678@N00/"},
"nsid": "12345678@N00",
"path_alias": null, "photos": {"count": {"_content": 0},
"firstdatetaken": {"_content": null}, "views": {"_content": "28"},
"firstdate": {"_content": null}}, "iconserver": "0",
"description": {"_content": ""}, "mobileurl": {"_content":
"http://m.flickr.com/photostream.gne?id=6294613"},
"profileurl": {
"_content": "http://www.flickr.com/people/12345678@N00/"},
"mbox_sha1sum": {"_content":
"5e5b359c123e54f95236209c8808d607a5cdd21e"},
"ispro": 0, "location": {"_content": ""},
"id": "12345678@N00",
"realname": {"_content": ""},
"iconfarm": 0}, "stat": "ok"}
""",
),
] # noqa
def get_expected_to_str(self):
return "pennersr"
def test_login(self):
super().test_login()
account = SocialAccount.objects.get(uid="12345678@N00")
f_account = account.get_provider_account()
self.assertEqual(account.user.first_name, "")
self.assertEqual(account.user.last_name, "")
self.assertEqual(
f_account.get_profile_url(),
"http://www.flickr.com/people/12345678@N00/",
)
self.assertEqual(f_account.to_str(), "pennersr")
| FlickrWithoutRealNameTests |
python | pytest-dev__pytest | testing/test_terminal.py | {
"start": 112171,
"end": 117590
} | class ____:
"""Tests for the TerminalProgressPlugin."""
@pytest.fixture
def mock_file(self) -> StringIO:
return StringIO()
@pytest.fixture
def mock_tr(self, mock_file: StringIO) -> pytest.TerminalReporter:
tr: pytest.TerminalReporter = mock.create_autospec(pytest.TerminalReporter)
def write_raw(content: str, *, flush: bool = False) -> None:
mock_file.write(content)
tr.write_raw = write_raw # type: ignore[method-assign]
tr._progress_nodeids_reported = set()
return tr
@pytest.mark.skipif(sys.platform != "win32", reason="#13896")
def test_plugin_registration_enabled_by_default(
self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Test that the plugin registration is enabled by default.
Currently only on Windows (#13896).
"""
monkeypatch.setattr(sys.stdout, "isatty", lambda: True)
# The plugin module should be registered as a default plugin.
config = pytester.parseconfigure()
plugin = config.pluginmanager.get_plugin("terminalprogress")
assert plugin is not None
def test_plugin_registred_on_all_platforms_when_explicitly_requested(
self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Test that the plugin is registered on any platform if explicitly requested."""
monkeypatch.setattr(sys.stdout, "isatty", lambda: True)
# The plugin module should be registered as a default plugin.
config = pytester.parseconfigure("-p", "terminalprogress")
plugin = config.pluginmanager.get_plugin("terminalprogress")
assert plugin is not None
def test_disabled_for_non_tty(
self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Test that plugin is disabled for non-TTY output."""
monkeypatch.setattr(sys.stdout, "isatty", lambda: False)
config = pytester.parseconfigure("-p", "terminalprogress")
plugin = config.pluginmanager.get_plugin("terminalprogress-plugin")
assert plugin is None
def test_disabled_for_dumb_terminal(
self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Test that plugin is disabled when TERM=dumb."""
monkeypatch.setenv("TERM", "dumb")
monkeypatch.setattr(sys.stdout, "isatty", lambda: True)
config = pytester.parseconfigure("-p", "terminalprogress")
plugin = config.pluginmanager.get_plugin("terminalprogress-plugin")
assert plugin is None
@pytest.mark.parametrize(
["state", "progress", "expected"],
[
("indeterminate", None, "\x1b]9;4;3;\x1b\\"),
("normal", 50, "\x1b]9;4;1;50\x1b\\"),
("error", 75, "\x1b]9;4;2;75\x1b\\"),
("paused", None, "\x1b]9;4;4;\x1b\\"),
("paused", 80, "\x1b]9;4;4;80\x1b\\"),
("remove", None, "\x1b]9;4;0;\x1b\\"),
],
)
def test_emit_progress_sequences(
self,
mock_file: StringIO,
mock_tr: pytest.TerminalReporter,
state: Literal["remove", "normal", "error", "indeterminate", "paused"],
progress: int | None,
expected: str,
) -> None:
"""Test that progress sequences are emitted correctly."""
plugin = TerminalProgressPlugin(mock_tr)
plugin._emit_progress(state, progress)
assert expected in mock_file.getvalue()
def test_session_lifecycle(
self, mock_file: StringIO, mock_tr: pytest.TerminalReporter
) -> None:
"""Test progress updates during session lifecycle."""
plugin = TerminalProgressPlugin(mock_tr)
session = mock.create_autospec(pytest.Session)
session.testscollected = 3
# Session start - should emit indeterminate progress.
plugin.pytest_sessionstart(session)
assert "\x1b]9;4;3;\x1b\\" in mock_file.getvalue()
mock_file.truncate(0)
mock_file.seek(0)
# Collection finish - should emit 0% progress.
plugin.pytest_collection_finish()
assert "\x1b]9;4;1;0\x1b\\" in mock_file.getvalue()
mock_file.truncate(0)
mock_file.seek(0)
# First test - 33% progress.
report1 = pytest.TestReport(
nodeid="test_1",
location=("test.py", 0, "test_1"),
when="call",
outcome="passed",
keywords={},
longrepr=None,
)
mock_tr.reported_progress = 1 # type: ignore[misc]
plugin.pytest_runtest_logreport(report1)
assert "\x1b]9;4;1;33\x1b\\" in mock_file.getvalue()
mock_file.truncate(0)
mock_file.seek(0)
# Second test with failure - 66% in error state.
report2 = pytest.TestReport(
nodeid="test_2",
location=("test.py", 1, "test_2"),
when="call",
outcome="failed",
keywords={},
longrepr=None,
)
mock_tr.reported_progress = 2 # type: ignore[misc]
plugin.pytest_runtest_logreport(report2)
assert "\x1b]9;4;2;66\x1b\\" in mock_file.getvalue()
mock_file.truncate(0)
mock_file.seek(0)
# Session finish - should remove progress.
plugin.pytest_sessionfinish()
assert "\x1b]9;4;0;\x1b\\" in mock_file.getvalue()
| TestTerminalProgressPlugin |
python | pytorch__pytorch | torch/autograd/profiler_util.py | {
"start": 20389,
"end": 32763
} | class ____(FormattedTimesMixin):
"""Profiling information about a single function.
FunctionEvent records the execution of a single operation during profiling.
These events are obtained from the profiler/kineto and contain detailed
timing and memory usage information.
.. note::
FunctionEvent objects are typically created by the profiler/kineto and should not
be instantiated directly by users. Access them through the profiler's output.
Attributes:
id (int): Unique identifier for this event.
node_id (int): Node identifier for distributed profiling (-1 if not applicable).
name (str): Name of the profiled function/operator.
overload_name (str): Overload name for the operator (requires _ExperimentalConfig(capture_overload_names=True) set).
trace_name (str): Same as name, just changes ProfilerStep* to ProfilerStep#
time_range (Interval): Time interval containing start and end timestamps in microseconds.
thread (int): Thread ID where the operation started.
fwd_thread (int): Thread ID of the corresponding forward operation.
kernels (List[Kernel]): List of device kernels launched by this operation.
count (int): Number of times this event was called (usually 1).
cpu_children (List[FunctionEvent]): Direct CPU child operations.
cpu_parent (FunctionEvent): Direct CPU parent operation.
input_shapes (Tuple[int, ...]): Shapes of input tensors (requires record_shapes=true).
concrete_inputs (List[Any]): Concrete input values (requires record_shapes=true).
kwinputs (Dict[str, Any]): Keyword arguments (requires record_shapes=true).
stack (List[str]): Python stack trace where the operation was called (requires with_stack=true).
scope (int): at::RecordScope identifier (0=forward, 1=backward, etc.).
use_device (str): Device type being profiled ("cuda", "xpu", etc.).
cpu_memory_usage (int): CPU memory allocated in bytes.
device_memory_usage (int): Device memory allocated in bytes.
is_async (bool): Whether this is an asynchronous operation.
is_remote (bool): Whether this operation occurred on a remote node.
sequence_nr (int): Sequence number for autograd operations.
device_type (DeviceType): Type of device (CPU, CUDA, XPU, PrivateUse1, etc.).
device_index (int): Index of the device (e.g., GPU 0, 1, 2).
device_resource_id (int): Resource ID on the device (ie. stream ID).
is_legacy (bool): Whether this is from the legacy profiler.
flops (int): Estimated floating point operations.
is_user_annotation (bool): Whether this is a user-annotated region.
metadata_json (str): Additional metadata in JSON format.
Properties:
cpu_time_total (float): Total CPU time in microseconds.
device_time_total (float): Total device (CUDA/XPU/etc) time in microseconds.
self_cpu_time_total (float): CPU time excluding child operations.
self_device_time_total (float): Device time excluding child operations.
self_cpu_memory_usage (int): CPU memory usage excluding child operations.
self_device_memory_usage (int): Device memory usage excluding child operations.
cpu_time (float): Average CPU time per call.
device_time (float): Average device time per call.
key (str): Key used for grouping events (usually same as name).
See Also:
- :class:`torch.profiler.profile`: Context manager for profiling
- :class:`EventList`: List container for FunctionEvent objects with helper methods
- :class:`FunctionEventAvg`: Averaged statistics over multiple FunctionEvent objects
"""
def __init__(
self,
id,
name,
thread,
start_us,
end_us,
overload_name=None,
fwd_thread=None,
input_shapes=None,
stack=None,
scope=0,
use_device=None,
cpu_memory_usage=0,
device_memory_usage=0,
is_async=False,
is_remote=False,
sequence_nr=-1,
node_id=-1,
device_type=DeviceType.CPU,
device_index=0,
device_resource_id=None,
is_legacy=False,
flops=None,
trace_name=None,
concrete_inputs=None,
kwinputs=None,
is_user_annotation=False,
metadata_json=None,
):
self.id: int = id
self.node_id: int = node_id
self.name: str = name
# pyrefly: ignore [bad-assignment]
self.overload_name: str = overload_name
# pyrefly: ignore [bad-assignment]
self.trace_name: str = trace_name
self.time_range: Interval = Interval(start_us, end_us)
self.thread: int = thread
self.fwd_thread: Optional[int] = fwd_thread
self.kernels: list[Kernel] = []
self.count: int = 1
self.cpu_children: list[FunctionEvent] = []
self.cpu_parent: Optional[FunctionEvent] = None
# pyrefly: ignore [bad-assignment]
self.input_shapes: tuple[int, ...] = input_shapes
# pyrefly: ignore [bad-assignment]
self.concrete_inputs: list[Any] = concrete_inputs
# pyrefly: ignore [bad-assignment]
self.kwinputs: dict[str, Any] = kwinputs
# pyrefly: ignore [bad-assignment]
self.stack: list = stack
self.scope: int = scope
self.use_device: Optional[str] = use_device
self.cpu_memory_usage: int = cpu_memory_usage
self.device_memory_usage: int = device_memory_usage
self.is_async: bool = is_async
self.is_remote: bool = is_remote
self.sequence_nr: int = sequence_nr
self.device_type: DeviceType = device_type
self.device_index: int = device_index
self.device_resource_id: int = (
thread if device_resource_id is None else device_resource_id
)
self.is_legacy: bool = is_legacy
self.flops: Optional[int] = flops
self.is_user_annotation: Optional[bool] = is_user_annotation
self.self_cpu_percent = -1
self.total_cpu_percent = -1
self.total_device_percent = -1
self.metadata_json = metadata_json
def append_kernel(self, name, device, duration):
if self.device_type != DeviceType.CPU:
raise AssertionError("Expected device_type to be CPU")
self.kernels.append(Kernel(name, device, duration))
def append_cpu_child(self, child):
"""Append a CPU child of type FunctionEvent.
One is supposed to append only direct children to the event to have
correct self cpu time being reported.
"""
if self.device_type != DeviceType.CPU:
raise AssertionError("Expected device_type to be CPU")
if not isinstance(child, FunctionEvent):
raise AssertionError("Expected child to be a FunctionEvent")
if child.device_type != DeviceType.CPU:
raise AssertionError("Expected child device_type to be CPU")
self.cpu_children.append(child)
def set_cpu_parent(self, parent):
"""Set the immediate CPU parent of type FunctionEvent.
One profiling FunctionEvent should have only one CPU parent such that
the child's range interval is completely inside the parent's. We use
this connection to determine the event is from top-level op or not.
"""
if self.device_type != DeviceType.CPU:
raise AssertionError("Expected device_type to be CPU")
if not isinstance(parent, FunctionEvent):
raise AssertionError("Expected parent to be a FunctionEvent")
if parent.device_type != DeviceType.CPU:
raise AssertionError("Expected parent device_type to be CPU")
self.cpu_parent = parent
# Note: async events don't have children, are not used when computing 'self'
# metrics of other events, have only total cpu time
@property
def self_cpu_memory_usage(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_memory_usage - sum(
child.cpu_memory_usage for child in self.cpu_children
)
@property
def self_device_memory_usage(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.device_memory_usage - sum(
child.device_memory_usage for child in self.cpu_children
)
@property
@deprecated(
"`self_cuda_memory_usage` is deprecated. Use `self_device_memory_usage` instead.",
category=FutureWarning,
)
def self_cuda_memory_usage(self): # To be deprecated
return self.self_device_memory_usage
@property
def cpu_time_total(self):
if self.device_type == DeviceType.CPU:
return self.time_range.elapsed_us()
else:
return 0
@property
def self_cpu_time_total(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_time_total - sum(
child.cpu_time_total for child in self.cpu_children
)
@property
def device_time_total(self):
if self.is_async or not self.use_device:
return 0
if self.device_type == DeviceType.CPU:
if not self.is_legacy:
# account for the kernels in the children ops
return sum(kinfo.duration for kinfo in self.kernels) + sum(
ch.device_time_total for ch in self.cpu_children
)
else:
# each legacy cpu events has a single (fake) kernel
return sum(kinfo.duration for kinfo in self.kernels)
else:
if self.device_type not in [
DeviceType.CUDA,
DeviceType.PrivateUse1,
DeviceType.MTIA,
DeviceType.HPU,
]:
raise AssertionError(
f"Expected device_type to be CUDA, PrivateUse1, MTIA, or HPU, but got {self.device_type}"
)
return self.time_range.elapsed_us()
@property
@deprecated(
"`cuda_time_total` is deprecated. Use `device_time_total` instead.",
category=FutureWarning,
)
def cuda_time_total(self): # To be deprecated
return self.device_time_total
@property
def self_device_time_total(self):
if self.is_async or not self.use_device:
return 0
if self.device_type == DeviceType.CPU:
return self.device_time_total - sum(
child.device_time_total for child in self.cpu_children
)
else:
if self.device_type not in [
DeviceType.CUDA,
DeviceType.PrivateUse1,
DeviceType.MTIA,
DeviceType.HPU,
]:
raise AssertionError(
f"Expected device_type to be CUDA, PrivateUse1, MTIA, or HPU, but got {self.device_type}"
)
return self.device_time_total
@property
@deprecated(
"`self_cuda_time_total` is deprecated. Use `self_device_time_total` instead.",
category=FutureWarning,
)
def self_cuda_time_total(self): # To be deprecated
return self.self_device_time_total
@property
def key(self):
return self.name
def __repr__(self):
device_name = self.use_device
device_time = self.device_time_str
device_memory_usage = self.device_memory_usage
return (
f"<FunctionEvent id={self.id} name={self.name} overload_name={self.overload_name} "
f"device_type={self.device_type} node_id={self.node_id} cpu_time={self.cpu_time_str} "
f"start_us={self.time_range.start} end_us={self.time_range.end} "
f"cpu_children={str([child.id for child in self.cpu_children])} {device_name}_time={device_time} "
f"name={self.name} thread={self.thread} input_shapes={str(self.input_shapes)} "
f"cpu_memory_usage={self.cpu_memory_usage} {device_name}_memory_usage={device_memory_usage} "
f"is_async={self.is_async} is_remote={self.is_remote} seq_nr={self.sequence_nr} is_legacy={self.is_legacy}>"
)
| FunctionEvent |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/api.py | {
"start": 1316,
"end": 1423
} | class ____(RendezvousError):
"""Raised when the state of a rendezvous is corrupt."""
| RendezvousStateError |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/rank_two_test.py | {
"start": 1115,
"end": 3480
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Test for rank 2 input in TF-TRT."""
def GraphFn(self, x1, x2):
# Two paths: first with rank 2 input, second with rank 4 input.
outputs = []
xs = [x1, x2]
for i in range(2):
x = xs[i]
c = constant_op.constant(1.0, name="c%d_1" % i)
q = math_ops.add(x, c, name="add%d_1" % i)
q = math_ops.abs(q, name="abs%d_1" % i)
c = constant_op.constant(2.2, name="c%d_2" % i)
q = math_ops.add(q, c, name="add%d_2" % i)
q = math_ops.abs(q, name="abs%d_2" % i)
c = constant_op.constant(3.0, name="c%d_3" % i)
q = math_ops.add(q, c, name="add%d_3" % i)
if i == 0:
axis = constant_op.constant(-1, dtype=dtypes.int32, name="axis")
for j in range(2):
q = array_ops.expand_dims(q, axis, name="expand%d_%d" % (i, j))
q = self.trt_incompatible_op(q)
q = gen_math_ops.reciprocal(q, name="reciprocal%d" % i)
outputs.append(q)
# Combine both paths
q = math_ops.add(outputs[0], outputs[1], name="add")
return array_ops.squeeze(q, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32,
[[12, 5], [12, 5, 2, 2]], [[12, 5, 2, 2]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
expected_engines = {
"TRTEngineOp_000": [
"add0_1", "add0_2", "add0_3", "c0_1", "c0_2", "c0_3", "abs0_1",
"abs0_2", "expand0_0", "expand0_1", "axis"
],
"TRTEngineOp_001": [
"add1_1", "add1_2", "add1_3", "c1_1", "c1_2", "c1_3", "abs1_1",
"abs1_2", "reciprocal1"
]
}
if not run_params.dynamic_shape:
# The two ops can't be in the same cluster as the ops in TRTEngineOp_000
# due to trt_incompatible_op. They can't be in the same cluster as the
# ops in TRTEngineOP_1 because their batch size belongs to a different
# equivalent class.
expected_engines["TRTEngineOp_002"] = ["add", "reciprocal0"]
else:
# In dynamic shape mode the batch size of the ops can differ,
# therefore the final ops will be merged to TRTEngineOP_1.
expected_engines["TRTEngineOp_001"] += ["add", "reciprocal0"]
return expected_engines
if __name__ == "__main__":
test.main()
| RankTwoTest |
python | ray-project__ray | python/ray/_private/state_api_test_utils.py | {
"start": 13596,
"end": 19557
} | class ____:
def __init__(self):
self.name_to_pid = {}
def get_pids(self):
return self.name_to_pid
def report_pid(self, name, pid, state=None):
self.name_to_pid[name] = (pid, state)
def _is_actor_task_running(actor_pid: int, task_name: str):
"""
Check whether the actor task `task_name` is running on the actor process
with pid `actor_pid`.
Args:
actor_pid: The pid of the actor process.
task_name: The name of the actor task.
Returns:
True if the actor task is running, False otherwise.
Limitation:
If the actor task name is set using options.name and is a substring of
the actor name, this function may return true even if the task is not
running on the actor process. To resolve this issue, we can possibly
pass in the actor name.
"""
if not psutil.pid_exists(actor_pid):
return False
"""
Why use both `psutil.Process.name()` and `psutil.Process.cmdline()`?
1. Core worker processes call `setproctitle` to set the process title before
and after executing tasks. However, the definition of "title" is a bit
complex.
[ref]: https://github.com/dvarrazzo/py-setproctitle
> The process title is usually visible in files such as /proc/PID/cmdline,
/proc/PID/status, /proc/PID/comm, depending on the operating system and
kernel version. This information is used by user-space tools such as ps
and top.
Ideally, we would only need to check `psutil.Process.cmdline()`, but I decided
to check both `psutil.Process.name()` and `psutil.Process.cmdline()` based on
the definition of "title" stated above.
2. Additionally, the definition of `psutil.Process.name()` is not consistent
with the definition of "title" in `setproctitle`. The length of `/proc/PID/comm` and
the prefix of `/proc/PID/cmdline` affect the return value of
`psutil.Process.name()`.
In addition, executing `setproctitle` in different threads within the same
process may result in different outcomes.
To learn more details, please refer to the source code of `psutil`:
[ref]:
https://github.com/giampaolo/psutil/blob/a17550784b0d3175da01cdb02cee1bc6b61637dc/psutil/__init__.py#L664-L693
3. `/proc/PID/comm` will be truncated to TASK_COMM_LEN (16) characters
(including the terminating null byte).
[ref]:
https://man7.org/linux/man-pages/man5/proc_pid_comm.5.html
"""
name = psutil.Process(actor_pid).name()
if task_name in name and name.startswith("ray::"):
return True
cmdline = psutil.Process(actor_pid).cmdline()
# If `options.name` is set, the format is `ray::<task_name>`. If not,
# the format is `ray::<actor_name>.<task_name>`.
if cmdline and task_name in cmdline[0] and cmdline[0].startswith("ray::"):
return True
return False
def verify_tasks_running_or_terminated(
    task_pids: Dict[str, Tuple[int, Optional[str]]], expect_num_tasks: int
):
    """Assert each tracked task is actively running or properly terminal.

    For every entry, look the task up by name through the state API. When
    the recorded pid is still executing the task, the task must be reported
    as RUNNING; otherwise it must have reached the expected terminal state
    (or FAILED/FINISHED when no explicit state was given).

    Args:
        task_pids: Mapping of task name to (pid, expected terminal state).
            An expected state of ``None`` means any terminal state is fine.
        expect_num_tasks: Exact number of entries ``task_pids`` must hold.
    """
    assert len(task_pids) == expect_num_tasks, task_pids
    for task_name, (pid, expected_state) in task_pids.items():
        matches = list_tasks(detail=True, filters=[("name", "=", task_name)])
        assert len(matches) == 1, (
            f"One unique task with {task_name} should be found. "
            "Use `options(name=<task_name>)` when creating the task."
        )
        task = matches[0]
        # On Windows/macOS the worker process title is plain "python", so we
        # cannot tell whether the pid is still executing this particular
        # task; only verify the reported state when one was requested.
        if sys.platform in ("win32", "darwin"):
            if expected_state is not None:
                assert task["state"] == expected_state, task
            continue
        if _is_actor_task_running(pid, task_name):
            assert (
                "ray::IDLE" not in task["name"]
            ), "One should not name it 'IDLE' since it's reserved in Ray"
            assert task["state"] == "RUNNING", task
            if expected_state is not None:
                assert task["state"] == expected_state, task
        elif expected_state is None:
            # The task is no longer running and there is no explicit
            # expectation: any terminal state is acceptable.
            assert task["state"] in (
                "FAILED",
                "FINISHED",
            ), f"{task_name}: {task['task_id']} = {task['state']}"
        else:
            assert (
                task["state"] == expected_state
            ), f"expect {expected_state} but {task['state']} for {task}"
    return True
def verify_schema(state, result_dict: dict, detail: bool = False):
    """Assert that ``result_dict`` has exactly the fields declared by ``state``.

    Args:
        state: A state schema type exposing ``columns()`` and
            ``base_columns()`` (callable on the type itself) and accepting
            the fields as keyword arguments on construction.
        result_dict: The API result payload to validate.
        detail: If True, validate against the full (detailed) column set;
            otherwise only the base columns.
    """
    # NOTE: the previous version dead-initialized this to set() before
    # unconditionally overwriting it in both branches.
    expected_fields = set(state.columns() if detail else state.base_columns())
    # Check containment in both directions (rather than a single set
    # equality) so a failure pinpoints the first offending key.
    for k in expected_fields:
        assert k in result_dict
    for k in result_dict:
        assert k in expected_fields
    # Make sure the field values can be converted without error as well.
    state(**result_dict)
def create_api_options(
    timeout: int = DEFAULT_RPC_TIMEOUT,
    limit: int = DEFAULT_LIMIT,
    # PEP 484 disallows implicit Optional: a None default requires an
    # explicit Optional[...] annotation (previously `List[...] = None`).
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    detail: bool = False,
    exclude_driver: bool = True,
):
    """Build a ``ListApiOptions`` with test-friendly defaults.

    Args:
        timeout: RPC timeout to use for the list call.
        limit: Maximum number of entries to return.
        filters: Optional list of (field, predicate, value) filter tuples;
            ``None`` (or any falsy value) means no filtering.
        detail: Whether to request the detailed column set.
        exclude_driver: Whether to exclude the driver from the results.
    """
    return ListApiOptions(
        limit=limit,
        timeout=timeout,
        # Normalize a missing/empty filter argument to an empty list so
        # callers can simply omit it.
        filters=filters or [],
        server_timeout_multiplier=1.0,
        detail=detail,
        exclude_driver=exclude_driver,
    )
| PidActor |
python | marshmallow-code__apispec | tests/test_ext_marshmallow.py | {
"start": 11242,
"end": 11965
} | class ____:
@pytest.mark.parametrize("spec", ("3.0.0",), indirect=True)
@pytest.mark.parametrize("schema", [PetSchema, PetSchema()])
def test_can_use_schema_in_header(self, spec, schema):
param = {"schema": schema}
spec.components.header("Pet", param)
header = get_headers(spec)["Pet"]
reference = header["schema"]
assert reference == build_ref(spec, "schema", "Pet")
resolved_schema = spec.components.schemas["Pet"]
assert resolved_schema["properties"]["name"]["type"] == "string"
assert resolved_schema["properties"]["password"]["type"] == "string"
assert resolved_schema["properties"]["id"]["type"] == "integer"
| TestComponentHeaderHelper |
python | numba__numba | numba/tests/test_practical_lowering_issues.py | {
"start": 458,
"end": 6953
} | class ____(MemoryLeakMixin, TestCase):
def test_issue4156_loop_vars_leak(self):
"""Test issues with zero-filling of refct'ed variables inside loops.
Before the fix, the in-loop variables are always zero-filled at their
definition location. As a result, their state from the previous
iteration is erased. No decref is applied. To fix this, the
zero-filling must only happen once after the alloca at the function
entry block. The loop variables are technically defined once per
function (one alloca per definition per function), but semantically
defined once per assignment. Semantically, their lifetime stop only
when the variable is re-assigned or when the function ends.
"""
@njit
def udt(N):
sum_vec = np.zeros(3)
for n in range(N):
if n >= 0:
# `vec` would leak without the fix.
vec = np.ones(1)
if n >= 0:
sum_vec += vec[0]
return sum_vec
got = udt(4)
expect = udt.py_func(4)
self.assertPreciseEqual(got, expect)
def test_issue4156_loop_vars_leak_variant1(self):
"""Variant of test_issue4156_loop_vars_leak.
Adding an outer loop.
"""
@njit
def udt(N):
sum_vec = np.zeros(3)
for x in range(N):
for y in range(N):
n = x + y
if n >= 0:
# `vec` would leak without the fix.
vec = np.ones(1)
if n >= 0:
sum_vec += vec[0]
return sum_vec
got = udt(4)
expect = udt.py_func(4)
self.assertPreciseEqual(got, expect)
def test_issue4156_loop_vars_leak_variant2(self):
"""Variant of test_issue4156_loop_vars_leak.
Adding deeper outer loop.
"""
@njit
def udt(N):
sum_vec = np.zeros(3)
for z in range(N):
for x in range(N):
for y in range(N):
n = x + y + z
if n >= 0:
# `vec` would leak without the fix.
vec = np.ones(1)
if n >= 0:
sum_vec += vec[0]
return sum_vec
got = udt(4)
expect = udt.py_func(4)
self.assertPreciseEqual(got, expect)
def test_issue4156_loop_vars_leak_variant3(self):
"""Variant of test_issue4156_loop_vars_leak.
Adding inner loop around allocation
"""
@njit
def udt(N):
sum_vec = np.zeros(3)
for z in range(N):
for x in range(N):
n = x + z
if n >= 0:
for y in range(N):
# `vec` would leak without the fix.
vec = np.ones(y)
if n >= 0:
sum_vec += vec[0]
return sum_vec
got = udt(4)
expect = udt.py_func(4)
self.assertPreciseEqual(got, expect)
def test_issue4156_loop_vars_leak_variant4(self):
"""Variant of test_issue4156_loop_vars_leak.
Interleaves loops and allocations
"""
@njit
def udt(N):
sum_vec = 0
for n in range(N):
vec = np.zeros(7)
for n in range(N):
z = np.zeros(7)
sum_vec += vec[0] + z[0]
return sum_vec
got = udt(4)
expect = udt.py_func(4)
self.assertPreciseEqual(got, expect)
def test_issue_with_literal_in_static_getitem(self):
"""Test an issue with literal type used as index of static_getitem
"""
@register_pass(mutates_CFG=False, analysis_only=False)
class ForceStaticGetitemLiteral(FunctionPass):
_name = "force_static_getitem_literal"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
repl = {}
# Force the static_getitem to have a literal type as
# index to replicate the problem.
for inst, sig in state.calltypes.items():
if (isinstance(inst, ir.Expr) and
inst.op == 'static_getitem'):
[obj, idx] = sig.args
new_sig = sig.replace(args=(obj,
types.literal(inst.index)))
repl[inst] = new_sig
state.calltypes.update(repl)
return True
class CustomPipeline(CompilerBase):
def define_pipelines(self):
pm = DefaultPassBuilder.define_nopython_pipeline(self.state)
pm.add_pass_after(ForceStaticGetitemLiteral,
NopythonTypeInference)
pm.finalize()
return [pm]
@njit(pipeline_class=CustomPipeline)
def foo(arr):
return arr[4] # force static_getitem
arr = np.arange(10)
got = foo(arr)
expect = foo.py_func(arr)
self.assertEqual(got, expect)
def test_issue7507(self):
"""
Test a problem with BaseContext.get_function() because of changes
related to the new style error handling.
"""
from numba.core.typing.templates import AbstractTemplate, infer_global
from numba.core.imputils import lower_builtin
@infer_global(issue7507_lround)
class lroundTemplate(AbstractTemplate):
key = issue7507_lround
def generic(self, args, kws):
signature = types.int64(types.float64)
# insert a new builtin during the compilation process
@lower_builtin(issue7507_lround, types.float64)
def codegen(context, builder, sig, args):
# Simply truncate with the cast to integer.
return context.cast(builder, args[0], sig.args[0],
sig.return_type)
return signature
@njit('int64(float64)')
def foo(a):
return issue7507_lround(a)
self.assertEqual(foo(3.4), 3)
| TestLowering |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 93452,
"end": 95848
} | class ____(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self) -> None:
a = easy_array((10, 15, 3, 2))
darray = DataArray(a, dims=["y", "x", "col", "row"])
darray.coords["col"] = np.array(
["col" + str(x) for x in darray.coords["col"].values]
)
darray.coords["row"] = np.array(
["row" + str(x) for x in darray.coords["row"].values]
)
self.darray = darray
def test_title_kwargs(self) -> None:
g = xplt.FacetGrid(self.darray, col="col", row="row")
g.set_titles(template="{value}", weight="bold")
# Rightmost column titles should be bold
for label, ax in zip(
self.darray.coords["row"].values, g.axs[:, -1], strict=True
):
assert property_in_axes_text("weight", "bold", label, ax)
# Top row titles should be bold
for label, ax in zip(
self.darray.coords["col"].values, g.axs[0, :], strict=True
):
assert property_in_axes_text("weight", "bold", label, ax)
@pytest.mark.slow
def test_default_labels(self) -> None:
g = xplt.FacetGrid(self.darray, col="col", row="row")
assert (2, 3) == g.axs.shape
g.map_dataarray(xplt.imshow, "x", "y")
# Rightmost column should be labeled
for label, ax in zip(
self.darray.coords["row"].values, g.axs[:, -1], strict=True
):
assert substring_in_axes(label, ax)
# Top row should be labeled
for label, ax in zip(
self.darray.coords["col"].values, g.axs[0, :], strict=True
):
assert substring_in_axes(label, ax)
# ensure that row & col labels can be changed
g.set_titles("abc={value}")
for label, ax in zip(
self.darray.coords["row"].values, g.axs[:, -1], strict=True
):
assert substring_in_axes(f"abc={label}", ax)
# previous labels were "row=row0" etc.
assert substring_not_in_axes("row=", ax)
for label, ax in zip(
self.darray.coords["col"].values, g.axs[0, :], strict=True
):
assert substring_in_axes(f"abc={label}", ax)
# previous labels were "col=row0" etc.
assert substring_not_in_axes("col=", ax)
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
| TestFacetGrid4d |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_keys.py | {
"start": 129,
"end": 388
} | class ____(graphene.ObjectType):
results = non_null_list(graphene.String)
cursor = graphene.NonNull(graphene.String)
hasMore = graphene.NonNull(graphene.Boolean)
class Meta:
name = "PartitionKeyConnection"
| GraphenePartitionKeyConnection |
python | sympy__sympy | sympy/physics/units/quantities.py | {
"start": 4517,
"end": 4671
} | class ____(Quantity):
"""Represents a physical constant, eg. `speed_of_light` or `avogadro_constant`."""
is_physical_constant = True
| PhysicalConstant |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.