language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/fivetran_event_iterator.py | {
"start": 2352,
"end": 5652
} | class ____(Iterator[T]):
"""A wrapper around an iterator of Fivetran events which contains additional methods for
post-processing the events, such as fetching column metadata.
"""
def __init__(
self,
events: Iterator[T],
fivetran_workspace: "FivetranWorkspace",
context: Union[OpExecutionContext, AssetExecutionContext],
) -> None:
self._inner_iterator = events
self._fivetran_workspace = fivetran_workspace
self._context = context
def __next__(self) -> T:
return next(self._inner_iterator)
def __iter__(self) -> "FivetranEventIterator[T]":
return self
@public
def fetch_column_metadata(self) -> "FivetranEventIterator":
"""Fetches column metadata for each table synced with the Fivetran API.
Retrieves the column schema for each destination table.
Returns:
FivetranEventIterator: An iterator of Dagster events with column metadata attached.
"""
fetch_metadata_fn: Callable[
[FivetranEventType],
dict[str, Any],
] = lambda materialization: _fetch_column_metadata(
materialization=materialization,
fivetran_workspace=self._fivetran_workspace,
)
return self._attach_metadata(fetch_metadata_fn)
def _attach_metadata(
self,
fn: Callable[[FivetranEventType], dict[str, Any]],
) -> "FivetranEventIterator":
"""Runs a threaded task to attach metadata to each event in the iterator.
Args:
fn (Callable[[Union[AssetMaterialization, MaterializeResult]], Dict[str, Any]]):
A function which takes a FivetranEventType and returns
a dictionary of metadata to attach to the event.
Returns:
Iterator[Union[AssetMaterialization, MaterializeResult]]:
A set of corresponding Dagster events for Fivetran tables, with any metadata output
by the function attached, yielded in the order they are emitted by the Fivetran API.
"""
def _map_fn(event: FivetranEventType) -> FivetranEventType:
return event._replace(metadata={**check.is_dict(event.metadata), **fn(event)})
def _threadpool_wrap_map_fn() -> Iterator[FivetranEventType]:
assets_def = self._context.assets_def
connector_id = next(
check.not_none(FivetranMetadataSet.extract(spec.metadata).connector_id)
for spec in assets_def.specs
)
with ThreadPoolExecutor(
max_workers=int(
os.getenv(
"FIVETRAN_POSTPROCESSING_THREADPOOL_WORKERS",
default=DEFAULT_MAX_THREADPOOL_WORKERS,
)
),
thread_name_prefix=f"fivetran_{connector_id}",
) as executor:
yield from imap(
executor=executor,
iterable=self._inner_iterator,
func=_map_fn,
)
return FivetranEventIterator(
events=_threadpool_wrap_map_fn(),
fivetran_workspace=self._fivetran_workspace,
context=self._context,
)
| FivetranEventIterator |
python | chroma-core__chroma | chromadb/test/property/test_embeddings.py | {
"start": 1808,
"end": 2124
} | class ____:
initialize = "initialize"
add_embeddings = "add_embeddings"
delete_by_ids = "delete_by_ids"
update_embeddings = "update_embeddings"
upsert_embeddings = "upsert_embeddings"
collection_st = st.shared(strategies.collections(with_hnsw_params=True), key="coll")
| EmbeddingStateMachineStates |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/source_salesforce/exceptions.py | {
"start": 483,
"end": 613
} | class ____(Error):
def __init__(self, msg: str, err: str = None):
self.logger.fatal(f"{msg}. Error: {err}")
| TmpFileIOError |
python | plotly__plotly.py | plotly/graph_objs/contourcarpet/_line.py | {
"start": 233,
"end": 5616
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contourcarpet"
_path_str = "contourcarpet.line"
_valid_props = {"color", "dash", "smoothing", "width"}
@property
def color(self):
"""
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def smoothing(self):
"""
Sets the amount of smoothing for the contour lines, where 0
corresponds to no smoothing.
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
"""
return self["smoothing"]
@smoothing.setter
def smoothing(self, val):
self["smoothing"] = val
@property
def width(self):
"""
Sets the contour line width in (in px) Defaults to 0.5 when
`contours.type` is "levels". Defaults to 2 when `contour.type`
is "constraint".
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour lines,
where 0 corresponds to no smoothing.
width
Sets the contour line width in (in px) Defaults to 0.5
when `contours.type` is "levels". Defaults to 2 when
`contour.type` is "constraint".
"""
def __init__(
self, arg=None, color=None, dash=None, smoothing=None, width=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contourcarpet.Line`
color
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour lines,
where 0 corresponds to no smoothing.
width
Sets the contour line width in (in px) Defaults to 0.5
when `contours.type` is "levels". Defaults to 2 when
`contour.type` is "constraint".
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contourcarpet.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contourcarpet.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("smoothing", arg, smoothing)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | ray-project__ray | python/ray/tune/tests/test_trial_scheduler.py | {
"start": 30692,
"end": 38282
} | class ____(unittest.TestCase):
def setUp(self):
ray.init(object_store_memory=int(1e8))
register_mock_trainable()
def tearDown(self):
ray.shutdown()
def testLargestBracketFirst(self):
sched = HyperBandForBOHB(
metric="episode_reward_mean", mode="max", max_t=3, reduction_factor=3
)
runner = _MockTrialRunner(sched)
for i in range(3):
t = Trial(MOCK_TRAINABLE_NAME)
sched.on_trial_add(runner, t)
runner._launch_trial(t)
self.assertEqual(sched.state()["num_brackets"], 1)
sched.on_trial_add(runner, Trial(MOCK_TRAINABLE_NAME))
self.assertEqual(sched.state()["num_brackets"], 2)
def testCheckTrialInfoUpdate(self):
def result(score, ts):
return {"episode_reward_mean": score, TRAINING_ITERATION: ts}
sched = HyperBandForBOHB(
metric="episode_reward_mean", mode="max", max_t=3, reduction_factor=3
)
runner = _MockTrialRunner(sched)
runner.search_alg = MagicMock()
runner.search_alg.searcher = MagicMock()
trials = [Trial(MOCK_TRAINABLE_NAME) for i in range(3)]
for t in trials:
runner.add_trial(t)
runner._launch_trial(t)
for trial, trial_result in zip(trials, [result(1, 1), result(2, 1)]):
decision = sched.on_trial_result(runner, trial, trial_result)
self.assertEqual(decision, TrialScheduler.PAUSE)
runner.pause_trial(trial)
spy_result = result(0, 1)
decision = sched.on_trial_result(runner, trials[-1], spy_result)
self.assertEqual(decision, TrialScheduler.STOP)
sched.choose_trial_to_run(runner)
self.assertEqual(runner.search_alg.searcher.on_pause.call_count, 2)
self.assertEqual(runner.search_alg.searcher.on_unpause.call_count, 1)
self.assertTrue("hyperband_info" in spy_result)
self.assertEqual(spy_result["hyperband_info"]["budget"], 1)
def testCheckTrialInfoUpdateMin(self):
def result(score, ts):
return {"episode_reward_mean": score, TRAINING_ITERATION: ts}
sched = HyperBandForBOHB(
metric="episode_reward_mean", mode="min", max_t=3, reduction_factor=3
)
runner = _MockTrialRunner(sched)
runner.search_alg = MagicMock()
runner.search_alg.searcher = MagicMock()
trials = [Trial(MOCK_TRAINABLE_NAME) for i in range(3)]
for t in trials:
runner.add_trial(t)
runner._launch_trial(t)
for trial, trial_result in zip(trials, [result(1, 1), result(2, 1)]):
decision = sched.on_trial_result(runner, trial, trial_result)
self.assertEqual(decision, TrialScheduler.PAUSE)
runner.pause_trial(trial)
spy_result = result(0, 1)
decision = sched.on_trial_result(runner, trials[-1], spy_result)
self.assertEqual(decision, TrialScheduler.CONTINUE)
sched.choose_trial_to_run(runner)
self.assertEqual(runner.search_alg.searcher.on_pause.call_count, 2)
self.assertTrue("hyperband_info" in spy_result)
self.assertEqual(spy_result["hyperband_info"]["budget"], 1)
def testPauseResumeChooseTrial(self):
def result(score, ts):
return {"episode_reward_mean": score, TRAINING_ITERATION: ts}
sched = HyperBandForBOHB(
metric="episode_reward_mean", mode="min", max_t=10, reduction_factor=3
)
runner = _MockTrialRunner(sched)
runner.search_alg = MagicMock()
runner.search_alg.searcher = MagicMock()
trials = [Trial(MOCK_TRAINABLE_NAME) for i in range(3)]
for t in trials:
runner.add_trial(t)
runner._launch_trial(t)
all_results = [result(1, 5), result(2, 1), result(3, 5)]
for trial, trial_result in zip(trials, all_results):
decision = sched.on_trial_result(runner, trial, trial_result)
self.assertEqual(decision, TrialScheduler.PAUSE)
runner.pause_trial(trial)
run_trial = sched.choose_trial_to_run(runner)
self.assertEqual(run_trial, trials[1])
self.assertSequenceEqual(
[t.status for t in trials], [Trial.PAUSED, Trial.PAUSED, Trial.PAUSED]
)
@pytest.mark.skipif(
sys.version_info >= (3, 12), reason="BOHB doesn't support py312"
)
def testNonstopBOHB(self):
from ray.tune.search.bohb import TuneBOHB
def train_fn(cfg):
start = 0
if tune.get_checkpoint():
with tune.get_checkpoint().as_directory() as checkpoint_dir:
with open(os.path.join(checkpoint_dir, "checkpoint")) as f:
start = int(f.read())
for i in range(start, 200):
time.sleep(0.1)
with tempfile.TemporaryDirectory() as checkpoint_dir:
with open(os.path.join(checkpoint_dir, "checkpoint"), "w") as f:
f.write(str(i))
tune.report(
dict(episode_reward_mean=i),
checkpoint=Checkpoint.from_directory(checkpoint_dir),
)
config = {"test_variable": tune.uniform(0, 20)}
sched = HyperBandForBOHB(max_t=10, reduction_factor=3, stop_last_trials=False)
alg = ConcurrencyLimiter(TuneBOHB(), 4)
analysis = tune.run(
train_fn,
scheduler=sched,
search_alg=alg,
stop={"training_iteration": 32},
num_samples=20,
config=config,
metric="episode_reward_mean",
mode="min",
verbose=1,
fail_fast="raise",
)
counter = Counter(
t.run_metadata.last_result.get("training_iteration")
for t in analysis.trials
)
assert 32 in counter
assert counter[32] > 1
def testBOHBProcessing(self):
trials = [Trial("foo", stub=True) for i in range(5)]
bohb = HyperBandForBOHB(max_t=10, metric="metric", mode="max")
for trial in trials:
bohb.on_trial_add(None, trial)
trial.status = Trial.RUNNING
mock = MagicMock()
bohb.on_trial_result(mock, trials[0], {"training_iteration": 10, "metric": 40})
trials[0].status = Trial.PAUSED
bohb.on_trial_result(mock, trials[1], {"training_iteration": 10, "metric": 30})
trials[1].status = Trial.PAUSED
bohb.on_trial_result(mock, trials[2], {"training_iteration": 10, "metric": 20})
trials[2].status = Trial.PAUSED
bohb.on_trial_result(mock, trials[3], {"training_iteration": 10, "metric": 10})
trials[3].status = Trial.PAUSED
bohb.on_trial_result(mock, trials[4], {"training_iteration": 10, "metric": 0})
trials[4].status = Trial.PAUSED
def set_status(trial, status):
trial.status = status
return None
def stop_trial(trial):
# See TrialRunner.stop_trial()
if trial.status in [Trial.PENDING, Trial.PAUSED]:
bohb.on_trial_remove(mock, trial)
trial.status = Trial.TERMINATED
return None
mock._set_trial_status.side_effect = set_status
mock.stop_trial.side_effect = stop_trial
assert not bohb._hyperbands[0][0].is_being_processed
bohb.choose_trial_to_run(mock, allow_recurse=False)
assert bohb._hyperbands[0][0].is_being_processed
| BOHBSuite |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py | {
"start": 18528,
"end": 23657
} | class ____(GoogleCloudBaseOperator):
"""
Creates a tag template.
The newly created tag template are saved under the ``tag_template_id`` key in XCOM.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogCreateTagTemplateOperator`
:param location: Required. The location of the tag template to create.
:param tag_template_id: Required. The id of the tag template to create.
:param tag_template: Required. The tag template to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplate`
:param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"tag_template_id",
"tag_template",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
location: str,
tag_template_id: str,
tag_template: dict | TagTemplate,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template_id = tag_template_id
self.tag_template = tag_template
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
result = hook.create_tag_template(
location=self.location,
tag_template_id=self.tag_template_id,
tag_template=self.tag_template,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Tag Template already exists. Skipping create operation.")
result = hook.get_tag_template(
location=self.location,
tag_template=self.tag_template_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
_, _, tag_template = result.name.rpartition("/")
self.log.info("Current Tag ID: %s", tag_template)
context["ti"].xcom_push(key="tag_template_id", value=tag_template)
DataCatalogTagTemplateLink.persist(
context=context,
tag_template_id=self.tag_template_id,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return TagTemplate.to_dict(result)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogUpdateAspectTypeOperator, "
"airflow.providers.google.cloud.operators.dataplex.DataplexCatalogCreateAspectTypeOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
| CloudDataCatalogCreateTagTemplateOperator |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 7077,
"end": 7467
} | class ____(graphene.ObjectType):
success = graphene.NonNull(graphene.Boolean)
class Meta:
interfaces = (GrapheneDisplayableEvent,)
name = "TypeCheck"
def resolve_metadataEntries(self, _graphene_info: ResolveInfo):
from dagster_graphql.implementation.events import _to_metadata_entries
return _to_metadata_entries(self.metadata)
| GrapheneTypeCheck |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 4570,
"end": 5864
} | class ____(_RemoveTypeParametersFromGenericAny):
"""Replace all types not in a symbol table with AnythingType."""
def __init__(self, lookup_list, do_not_log_prefix=None):
"""Create this visitor.
Args:
lookup_list: An iterable of symbol tables (i.e., objects that have a
"lookup" function)
do_not_log_prefix: If given, don't log error messages for classes with
this prefix.
"""
super().__init__()
self._lookup_list = lookup_list
self._do_not_log_prefix = do_not_log_prefix
def VisitNamedType(self, node):
"""Do replacement on a pytd.NamedType."""
name = node.name
for lookup in self._lookup_list:
try:
cls = lookup.Lookup(name)
if isinstance(cls, pytd.Class):
return node
except KeyError:
pass
if "." in node.name:
return node
else:
if self._do_not_log_prefix is None or not name.startswith(
self._do_not_log_prefix
):
logging.warning("Setting %s to Any", name)
return pytd.AnythingType()
def VisitCallableType(self, node):
return self.VisitGenericType(node)
def VisitTupleType(self, node):
return self.VisitGenericType(node)
def VisitClassType(self, node):
return self.VisitNamedType(node)
| DefaceUnresolved |
python | django__django | tests/forms_tests/field_tests/test_filefield.py | {
"start": 4681,
"end": 5144
} | class ____(FileField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("widget", MultipleFileInput())
super().__init__(*args, **kwargs)
def clean(self, data, initial=None):
single_file_clean = super().clean
if isinstance(data, (list, tuple)):
result = [single_file_clean(d, initial) for d in data]
else:
result = single_file_clean(data, initial)
return result
| MultipleFileField |
python | kamyu104__LeetCode-Solutions | Python/make-two-arrays-equal-by-reversing-sub-arrays.py | {
"start": 50,
"end": 324
} | class ____(object):
def canBeEqual(self, target, arr):
"""
:type target: List[int]
:type arr: List[int]
:rtype: bool
"""
return collections.Counter(target) == collections.Counter(arr)
# Time: O(nlogn)
# Space: O(1)
| Solution |
python | scikit-learn__scikit-learn | sklearn/gaussian_process/kernels.py | {
"start": 5988,
"end": 16518
} | class ____(metaclass=ABCMeta):
"""Base class for all kernels.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.gaussian_process.kernels import Kernel, RBF
>>> import numpy as np
>>> class CustomKernel(Kernel):
... def __init__(self, length_scale=1.0):
... self.length_scale = length_scale
... def __call__(self, X, Y=None):
... if Y is None:
... Y = X
... return np.inner(X, X if Y is None else Y) ** 2
... def diag(self, X):
... return np.ones(X.shape[0])
... def is_stationary(self):
... return True
>>> kernel = CustomKernel(length_scale=2.0)
>>> X = np.array([[1, 2], [3, 4]])
>>> print(kernel(X))
[[ 25 121]
[121 625]]
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self":
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError(
"scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention." % (cls,)
)
for arg in args:
params[arg] = getattr(self, arg)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split("__", 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError(
"Invalid parameter %s for kernel %s. "
"Check the list of available parameters "
"with `kernel.get_params().keys()`." % (name, self)
)
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError(
"Invalid parameter %s for kernel %s. "
"Check the list of available parameters "
"with `kernel.get_params().keys()`."
% (key, self.__class__.__name__)
)
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta.
Parameters
----------
theta : ndarray of shape (n_dims,)
The hyperparameters
"""
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = [
getattr(self, attr)
for attr in dir(self)
if attr.startswith("hyperparameter_")
]
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i : i + hyperparameter.n_elements]
)
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError(
"theta has not the correct number of entries."
" Should be %d; given are %d" % (i, len(theta))
)
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = [
hyperparameter.bounds
for hyperparameter in self.hyperparameters
if not hyperparameter.fixed
]
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta))
)
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples,)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary."""
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on fixed-length feature
vectors or generic objects. Defaults to True for backward
compatibility."""
return True
def _check_bounds_params(self):
"""Called after fitting to warn if bounds may have been too tight."""
list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
idx = 0
for hyp in self.hyperparameters:
if hyp.fixed:
continue
for dim in range(hyp.n_elements):
if list_close[idx, 0]:
warnings.warn(
"The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified lower "
"bound %s. Decreasing the bound and"
" calling fit again may find a "
"better value." % (dim, hyp.name, hyp.bounds[dim][0]),
ConvergenceWarning,
)
elif list_close[idx, 1]:
warnings.warn(
"The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified upper "
"bound %s. Increasing the bound and"
" calling fit again may find a "
"better value." % (dim, hyp.name, hyp.bounds[dim][1]),
ConvergenceWarning,
)
idx += 1
| Kernel |
python | cython__cython | runtests.py | {
"start": 86508,
"end": 122828
} | class ____(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
    """Parse the command line and run the test suite, possibly fanned out
    into parallel shard subprocesses.

    Never returns normally: terminates the process via sys.exit(), or via
    os._exit() if left-over test threads would block a clean shutdown.
    """
    global DISTDIR, WITH_CYTHON
    # Set an environment variable to the top directory
    os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))
    DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))

    # Peel off --debug-* options that map to Cython compiler DebugFlags
    # before regular option parsing sees them.
    from Cython.Compiler import DebugFlags
    args = []
    for arg in sys.argv[1:]:
        if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
            setattr(DebugFlags, arg[2:].replace('-', '_'), True)
        else:
            args.append(arg)

    from argparse import ArgumentParser
    parser = ArgumentParser(usage="usage: %(prog)s [options] [selector ...]")
    parser.add_argument(
        "--no-cleanup", dest="cleanup_workdir",
        action="store_false", default=True,
        help="do not delete the generated C files (allows passing --no-cython on next run)")
    parser.add_argument(
        "--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
        action="store_false", default=True,
        help="do not delete the generated shared library files (allows manual module experimentation)")
    parser.add_argument(
        "--no-cleanup-failures", dest="cleanup_failures",
        action="store_false", default=True,
        help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
    parser.add_argument(
        "--no-cython", dest="with_cython",
        action="store_false", default=True,
        help="do not run the Cython compiler, only the C compiler")
    parser.add_argument(
        "--compiler", dest="compiler", default=None,
        help="C compiler type")
    backend_list = ','.join(BACKENDS)
    parser.add_argument(
        "--backends", dest="backends", default=backend_list,
        help="select backends to test (default: %s)" % backend_list)
    parser.add_argument(
        "--no-c", dest="use_c",
        action="store_false", default=True,
        help="do not test C compilation backend")
    parser.add_argument(
        "--no-cpp", dest="use_cpp",
        action="store_false", default=True,
        help="do not test C++ compilation backend")
    parser.add_argument(
        "--no-cpp-locals", dest="use_cpp_locals",
        action="store_false", default=True,
        help="do not rerun select C++ tests with cpp_locals directive")
    parser.add_argument(
        "--no-unit", dest="unittests",
        action="store_false", default=True,
        help="do not run the unit tests")
    parser.add_argument(
        "--no-doctest", dest="doctests",
        action="store_false", default=True,
        help="do not run the doctests")
    parser.add_argument(
        "--no-file", dest="filetests",
        action="store_false", default=True,
        help="do not run the file based tests")
    parser.add_argument(
        "--no-pyregr", dest="pyregr",
        action="store_false", default=True,
        help="do not run the regression tests of CPython in tests/pyregr/")
    parser.add_argument(
        "--no-examples", dest="examples",
        action="store_false", default=True,
        help="Do not run the documentation tests in the examples directory.")
    parser.add_argument(
        "--no-code-style", dest="code_style",
        action="store_false", default=True,
        help="Do not run the code style (PEP8) checks.")
    # FIX: "Do not evaluation" -> "Do not evaluate" (typo in help text).
    parser.add_argument(
        "--no-tree-asserts", dest="evaluate_tree_assertions",
        action="store_false", default=True,
        help="Do not evaluate tree path assertions (which prevents C code generation in tests)")
    parser.add_argument(
        "--cython-only", dest="cython_only",
        action="store_true", default=False,
        help="only compile pyx to c, do not run C compiler or run the tests")
    parser.add_argument(
        "--no-refnanny", dest="with_refnanny",
        action="store_false", default=True,
        help="do not regression test reference counting")
    parser.add_argument(
        "--no-fork", dest="fork",
        action="store_false", default=True,
        help="does nothing, argument kept for compatibility only",
        # 'deprecated' added in Python 3.13
        **({'deprecated': True} if sys.version_info >= (3, 13) else {}))
    parser.add_argument(
        "--sys-pyregr", dest="system_pyregr",
        action="store_true", default=False,
        help="run the regression tests of the CPython installation")
    parser.add_argument(
        "-x", "--exclude", dest="exclude",
        action="append", metavar="PATTERN",
        help="exclude tests matching the PATTERN")
    parser.add_argument(
        "--listfile", dest="listfile",
        action="append",
        help="specify a file containing a list of tests to run")
    # FIX: help text was copy-pasted from --listfile; this option *excludes* tests.
    parser.add_argument(
        "--excludefile", dest="excludefile",
        action="append",
        help="specify a file containing a list of tests to exclude")
    parser.add_argument(
        "-j", "--shard_count", dest="shard_count", metavar="N",
        type=int, default=1,
        help="shard this run into several parallel runs")
    parser.add_argument(
        "--shard_num", dest="shard_num", metavar="K",
        type=int, default=-1,
        help="test only this single shard")
    parser.add_argument(
        "--profile", dest="profile",
        action="store_true", default=False,
        help="enable profiling of the tests")
    parser.add_argument(
        "-C", "--coverage", dest="coverage",
        action="store_true", default=False,
        help="collect source coverage data for the Compiler")
    parser.add_argument(
        "--coverage-xml", dest="coverage_formats",
        action="append_const", const="xml",
        help="report source coverage data for the Compiler in XML format (coverage-report.xml)")
    parser.add_argument(
        "--coverage-html", dest="coverage_formats",
        action="append_const", const="html",
        help="report source coverage data for the Compiler in HTML format (coverage-report-html/)")
    parser.add_argument(
        "--coverage-md", dest="coverage_formats",
        action="append_const", const="markdown",
        help="report source coverage data for the Compiler in Markdown format (coverage-report.md)")
    parser.add_argument(
        "--tracemalloc", dest="tracemalloc",
        action="store_true", default=False,
        help="enable tracemalloc for the tests")
    parser.add_argument(
        "-A", "--annotate", dest="annotate_source",
        action="store_true", default=True,
        help="generate annotated HTML versions of the test source files")
    parser.add_argument(
        "--no-annotate", dest="annotate_source",
        action="store_false",
        help="do not generate annotated HTML versions of the test source files")
    parser.add_argument(
        "-v", "--verbose", dest="verbosity",
        action="count", default=0,
        help="display test progress, pass twice to print test names")
    parser.add_argument(
        "-T", "--ticket", dest="tickets",
        action="append",
        help="a bug ticket number to run the respective test in 'tests/*'")
    parser.add_argument(
        "-k", dest="only_pattern",
        help="a regex pattern for selecting doctests and test functions in the test modules")
    # FIX: removed stray trailing apostrophe from the help string.
    parser.add_argument(
        "-3", dest="language_level",
        action="store_const", const=3, default=2,
        help="set language level to Python 3 (useful for running the CPython regression tests)")
    parser.add_argument(
        "--xml-output", dest="xml_output_dir", metavar="DIR",
        help="write test results in XML to directory DIR")
    parser.add_argument(
        "--exit-ok", dest="exit_ok", default=False,
        action="store_true",
        help="exit without error code even on test failures")
    parser.add_argument(
        "--failfast", dest="failfast", default=False,
        action="store_true",
        help="stop on first failure or error")
    parser.add_argument(
        "--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
        help="Directory to look for the file based tests (the ones which are deactivated with '--no-file'.")
    parser.add_argument(
        "--examples-dir", dest="examples_dir",
        default=os.path.join(DISTDIR, 'docs', 'examples'),
        help="Directory to look for documentation example tests")
    parser.add_argument(
        "--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
        help="working directory")
    parser.add_argument(
        "--cython-dir", dest="cython_dir", default=os.getcwd(),
        help="Cython installation directory (default: use local source version)")
    parser.add_argument(
        "--debug", dest="for_debugging", default=False, action="store_true",
        help="configure for easier use with a debugger (e.g. gdb)")
    parser.add_argument(
        "--pyximport-py", dest="pyximport_py", default=False, action="store_true",
        help="use pyximport to automatically compile imported .pyx and .py files")
    parser.add_argument(
        "--watermark", dest="watermark", default=None,
        help="deterministic generated by string")
    parser.add_argument(
        "--use_common_utility_dir", default=False, action="store_true")
    parser.add_argument(
        "--use_formal_grammar", default=False, action="store_true")
    parser.add_argument(
        "--test_determinism", default=False, action="store_true",
        help="test whether Cython's output is deterministic")
    parser.add_argument(
        "--pythran-dir", dest="pythran_dir", default=None,
        help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
    parser.add_argument(
        "--no-capture", dest="capture", default=True, action="store_false",
        help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
    parser.add_argument(
        "--limited-api", dest="limited_api", nargs='?', default='', const="%d.%d" % sys.version_info[:2], action="store",
        help=("Use CPython's Limited API. "
              "Accepts an optional API version in the form '3.11', otherwise uses current."))
    parser.add_argument(
        "--abi3audit", dest="abi3audit", default=False, action="store_true",
        help="Validate compiled files with ABI3 audit")
    parser.add_argument('cmd_args', nargs='*')

    options = parser.parse_args(args)
    cmd_args = options.cmd_args

    if options.with_cython:
        sys.path.insert(0, options.cython_dir)

    # requires glob with the wildcard.
    if cmd_args:
        options.code_style = False

    WITH_CYTHON = options.with_cython

    # Coverage only makes sense when compiling with Cython; in sharded runs
    # it is coordinated by the parent process (shard_num == -1).
    coverage = None
    if options.coverage or options.coverage_formats:
        if not WITH_CYTHON:
            options.coverage = False
            options.coverage_formats = []
        elif options.shard_num == -1:
            print("Enabling coverage analysis")
            from coverage import coverage as _coverage
            coverage = _coverage(branch=True)
            coverage.erase()
            coverage.start()

    if options.xml_output_dir:
        shutil.rmtree(options.xml_output_dir, ignore_errors=True)

    if options.listfile:
        for listfile in options.listfile:
            cmd_args.extend(load_listfile(listfile).excludes.keys())

    # Periodic keep-alive time stamps in the log, unless output is uncaptured
    # or a debugger is attached.
    if options.capture and not options.for_debugging:
        keep_alive_interval = 10
    else:
        keep_alive_interval = None

    if options.shard_count > 1 and options.shard_num == -1:
        # Parent of a sharded run: fan out one worker process per shard and
        # aggregate their stats / return codes / failure output.
        if "PYTHONIOENCODING" not in os.environ:
            # Make sure subprocesses can print() Unicode text.
            os.environ["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
        from concurrent.futures import ProcessPoolExecutor, as_completed
        pool = ProcessPoolExecutor(options.shard_count)
        tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
        open_shards = list(range(options.shard_count))
        error_shards = []
        failure_outputs = []
        # NOTE: create process pool before time stamper thread to avoid forking issues.
        total_time = time.time()
        stats = Stats()
        merged_pipeline_stats = defaultdict(lambda: (0, 0))
        with time_stamper_thread(interval=keep_alive_interval, open_shards=open_shards):
            futures = [ pool.submit(runtests_callback, task) for task in tasks ]
            for future in as_completed(futures):
                shard_num, shard_stats, pipeline_stats, return_code, failure_output = future.result()
                open_shards.remove(shard_num)
                if return_code != 0:
                    error_shards.append(shard_num)
                    failure_outputs.append(failure_output)
                    sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
                sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
                stats.update(shard_stats)
                for stage_name, (stage_time, stage_count) in pipeline_stats.items():
                    old_time, old_count = merged_pipeline_stats[stage_name]
                    merged_pipeline_stats[stage_name] = (old_time + stage_time, old_count + stage_count)
        pool.shutdown()
        total_time = time.time() - total_time
        sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
        if error_shards:
            sys.stderr.write("Errors found in shards %s\n" % ", ".join([str(e) for e in error_shards]))
            for failure_output in zip(error_shards, failure_outputs):
                sys.stderr.write("\nErrors from shard %s:\n%s" % failure_output)
            return_code = 1
        else:
            return_code = 0
    else:
        # Single-process run (also the path taken by each shard worker).
        with time_stamper_thread(interval=keep_alive_interval):
            _, stats, merged_pipeline_stats, return_code, _ = runtests(options, cmd_args, coverage)

    if coverage:
        if options.shard_count > 1 and options.shard_num == -1:
            coverage.combine()
        coverage.stop()

    def as_msecs(t, unit=1000000):
        # pipeline times are in msecs
        return t // unit + float(t % unit) / unit

    # Summarize the most expensive compiler pipeline stages across all shards.
    pipeline_stats = [
        (as_msecs(stage_time), as_msecs(stage_time) / stage_count, stage_count, stage_name)
        for stage_name, (stage_time, stage_count) in merged_pipeline_stats.items()
    ]
    total_pipeline_time_percent = math.fsum(stats[0] for stats in pipeline_stats) / 100.0
    pipeline_stats.sort(reverse=True)
    sys.stderr.write("Most expensive pipeline stages: %s\n" % ", ".join(
        "%r: %.2f / %d (%.3f / run, %.1f%%)" % (
            stage_name, total_stage_time, stage_count, stage_time, total_stage_time / total_pipeline_time_percent)
        for total_stage_time, stage_time, stage_count, stage_name in pipeline_stats[:10]
    ))

    stats.print_stats(sys.stderr)
    if coverage:
        save_coverage(coverage, options)

    sys.stderr.write("ALL DONE\n")
    sys.stderr.flush()

    try:
        check_thread_termination(ignore_seen=False)
    except PendingThreadsError:
        # normal program exit won't kill the threads, do it the hard way here
        flush_and_terminate(return_code)
    else:
        sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10, open_shards=None):
    """
    Print regular time stamps into the build logs to find slow tests.
    @param interval: time interval in seconds
    """
    if not interval or interval < 0:
        # Keep-alive disabled: act as a no-op context manager.
        yield
        return

    import threading
    import datetime
    from time import sleep

    quarter_ticks = range(interval * 4)
    now = datetime.datetime.now
    stop = False

    # We capture stderr in some places.
    # => make sure we write to the real (original) stderr of the test runner.
    real_stderr = os.dup(2)

    def emit(text):
        os.write(real_stderr, text if type(text) is bytes else text.encode('ascii'))

    def stamp_loop():
        shard_info = ""
        while True:
            if stop:
                return
            # Sleep in quarter-second slices so a stop request is noticed quickly.
            for _ in quarter_ticks:
                sleep(1. / 4)
                if stop:
                    return
            if open_shards is not None:
                shard_info = f" - waiting for {open_shards}"
            emit(f'\n#### {now()}{shard_info}\n')

    # The thread name matters: check_thread_termination() ignores 'time_stamper'.
    stamper = threading.Thread(target=stamp_loop, name='time_stamper')
    stamper.daemon = True
    stamper.start()
    try:
        yield
    finally:
        stop = True
        stamper.join()
        os.close(real_stderr)
def configure_cython(options):
    """Apply the test runner's settings to Cython's global compiler state.

    Mutates module-global compiler options and debug flags, so it must run
    once per process before any test compilation starts.
    """
    # The 'global' statement makes the function-local imports below bind at
    # module level, for use elsewhere in the test runner.
    global CompilationOptions, pyrex_default_options, cython_compile
    from Cython.Compiler.Options import \
        CompilationOptions, \
        default_options as pyrex_default_options
    from Cython.Compiler.Options import _directive_defaults as directive_defaults
    from Cython.Compiler import Errors
    Errors.LEVEL = 0 # show all warnings
    from Cython.Compiler import Options
    Options.generate_cleanup_code = 3 # complete cleanup code
    from Cython.Compiler import DebugFlags
    DebugFlags.debug_temp_code_comments = 1
    DebugFlags.debug_no_exception_intercept = 1 # provide better crash output in CI runs
    pyrex_default_options['formal_grammar'] = options.use_formal_grammar
    if options.profile:
        # Compile all test modules with profiling hooks enabled.
        directive_defaults['profile'] = True
    if options.watermark:
        # Override the "generated by" watermark for deterministic C output.
        import Cython.Compiler.Version
        Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
    """Emit coverage reports in every format requested on the command line.

    Always prints the plain-text report when coverage collection was enabled,
    then writes markdown/XML/HTML files for each requested format.
    """
    if options.coverage:
        coverage.report(show_missing=0)
    formats = options.coverage_formats
    if not formats:
        return
    if 'markdown' in formats:
        with open("coverage-report.md", "w") as report_file:
            coverage.report(
                file=report_file, output_format='markdown',
                show_missing=True, ignore_errors=True, skip_empty=True)
    if 'xml' in formats:
        coverage.xml_report(outfile="coverage-report.xml")
    if 'html' in formats:
        coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
    """Worker entry point for sharded runs: set the shard number and run the tests."""
    shard_options, shard_cmd_args, shard_num = args
    shard_options.shard_num = shard_num
    # Tag the runtests code object with the shard number so that faulthandler
    # stack traces identify which shard crashed.
    try:
        runtests.__code__ = runtests.__code__.replace(co_name=f"runtests_SHARD_{shard_num}")
    except (AttributeError, TypeError):
        # Older Pythons lack CodeType.replace(), and 'co_name' may not be
        # replaceable - this renaming is purely cosmetic anyway.
        pass
    return runtests(shard_options, shard_cmd_args)
def runtests(options, cmd_args, coverage=None):
    """Build the test suite for this process (or shard) and run it.

    Returns a 5-tuple ``(shard_num, stats, pipeline_stats, result_code,
    failure_output)`` which the sharding parent in main() aggregates.
    """
    # faulthandler should be able to provide a limited traceback
    # in the event of a segmentation fault. Only available on Python 3.3+
    try:
        import faulthandler
    except ImportError:
        pass # OK - not essential
    else:
        faulthandler.enable()
    # NOTE: shadows the module-global WITH_CYTHON for the rest of this function.
    WITH_CYTHON = options.with_cython
    ROOTDIR = os.path.abspath(options.root_dir)
    WORKDIR = os.path.abspath(options.work_dir)
    if WITH_CYTHON:
        configure_cython(options)
    xml_output_dir = options.xml_output_dir
    # Each shard works in (and reports into) its own subdirectory.
    if options.shard_num > -1:
        WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
        if xml_output_dir:
            xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
    # RUN ALL TESTS!
    UNITTEST_MODULE = "Cython"
    UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
    # Start from a clean work dir, keeping the refnanny build dir ("support") and "Cy3".
    if WITH_CYTHON:
        if os.path.exists(WORKDIR):
            for path in os.listdir(WORKDIR):
                if path in ("support", "Cy3"): continue
                shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
    if not os.path.exists(WORKDIR):
        os.makedirs(WORKDIR)
    # Only the first shard (or an unsharded run) prints the version banner.
    if options.shard_num <= 0:
        sys.stderr.write("Python %s\n" % sys.version)
        sys.stderr.write("\n")
        if WITH_CYTHON:
            sys.stderr.write("Running tests against Cython %s\n" % get_version())
        else:
            sys.stderr.write("Running tests without Cython.\n")
    if options.for_debugging:
        # Keep all generated files around and emit gdb debug info.
        options.cleanup_workdir = False
        options.cleanup_sharedlibs = False
        if WITH_CYTHON and include_debugger:
            from Cython.Compiler.Options import default_options as compiler_default_options
            compiler_default_options['gdb_debug'] = True
            compiler_default_options['output_dir'] = os.getcwd()
    if IS_PYPY or IS_GRAAL:
        if options.with_refnanny:
            sys.stderr.write("Disabling refnanny in PyPy/GraalPy\n")
            options.with_refnanny = False
    refnanny = None
    if options.with_refnanny:
        # Import the refnanny helper extension, building it with pyximport on first use.
        try:
            refnanny = import_refnanny()
        except ImportError:
            from pyximport.pyxbuild import pyx_to_dll
            libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
                                 build_in_temp=True,
                                 pyxbuild_dir=os.path.join(WORKDIR, "support"))
            sys.path.insert(0, os.path.split(libpath)[0])
            refnanny = import_refnanny()
        CDEFS.append(('CYTHON_REFNANNY', '1'))
    global sys_version_or_limited_version
    sys_version_or_limited_version = sys.version_info
    if options.limited_api:
        # Translate a "3.11"-style version string into a Py_LIMITED_API hex define.
        limited_api_version = re.match(r"^(\d+)[.](\d+)$", options.limited_api)
        if not limited_api_version:
            sys.stderr.write('Limited API version string should be in the form "3.11"\n')
            exit(1)
        limited_api_major_version = int(limited_api_version.group(1))
        limited_api_minor_version = int(limited_api_version.group(2))
        CDEFS.append((
            "Py_LIMITED_API",
            f"0x{limited_api_major_version:02x}{limited_api_minor_version:02x}0000")
        )
        sys_version_or_limited_version = (
            limited_api_major_version,
            limited_api_minor_version,
            0
        )
        CFLAGS.append('-Wno-unused-function')
    if WITH_CYTHON:
        sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
    # Collect test selectors: explicit ticket numbers plus positional patterns.
    test_bugs = False
    if options.tickets:
        for ticket_number in options.tickets:
            test_bugs = True
            cmd_args.append('ticket:%s' % ticket_number)
    if not test_bugs:
        for selector in cmd_args:
            if selector.startswith('bugs'):
                test_bugs = True
    selectors = [ string_selector(r) for r in cmd_args ]
    verbose_excludes = selectors or options.verbosity >= 2
    if not selectors:
        selectors = [ lambda x, tags=None: True ]
    # Check which external modules are not present and exclude tests
    # which depends on them (by prefix)
    missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
    version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
    exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
    try:
        import IPython.core.release
        if list(IPython.core.release._ver) < [1, 0, 0]:
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('IPython'))
    try:
        # Deliberately disabled: the unconditional raise skips the Jedi tests.
        raise ImportError("Jedi typer is currently broken, see GH#1845")
        import jedi
        if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('Jedi'))
    if options.exclude:
        exclude_selectors += [ string_selector(r) for r in options.exclude ]
    if options.excludefile:
        for excludefile in options.excludefile:
            exclude_selectors.append(load_listfile(excludefile))
    if not COMPILER_HAS_INT128:
        exclude_selectors += [RegExSelector('int128')]
    if options.shard_num > -1:
        exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
    if not test_bugs:
        # Exclude known-broken tests, filtered per platform/implementation.
        bug_files = [
            ('bugs.txt', True),
            ('pypy_bugs.txt', IS_PYPY),
            ('pypy_crash_bugs.txt', IS_PYPY),
            ('pypy_implementation_detail_bugs.txt', IS_PYPY),
            ('graal_bugs.txt', IS_GRAAL),
            ('limited_api_bugs.txt', options.limited_api),
            ('windows_bugs.txt', sys.platform == 'win32'),
            ('windows_arm_bugs.txt', sys.platform == 'win32' and platform.machine().lower() == "arm64"),
            ('cygwin_bugs.txt', sys.platform == 'cygwin'),
            ('windows_bugs_39.txt', sys.platform == 'win32' and sys.version_info[:2] == (3, 9)),
        ]
        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
            for bugs_file_name, condition in bug_files if condition
        ]
    if sys_version_or_limited_version < (3, 11) and options.limited_api:
        # exclude everything with memoryviews in since this is a big
        # missing feature from the limited API in these versions
        exclude_selectors += [
            TagsSelector('tag', 'memoryview'),
            FileListExcluder(os.path.join(ROOTDIR, "memoryview_tests.txt")),
        ]
    if not test_bugs and re.match("arm|aarch", platform.machine(), re.IGNORECASE):
        # Pythran is only excluded on arm because it fails to link with blas on the CI.
        # I don't think there's anything fundamentally wrong with it.
        exclude_selectors += [
            TagsSelector('tag', 'pythran')
        ]
    exclude_selectors += [TagsSelector('tag', tag) for tag, exclude in TAG_EXCLUDERS if exclude]
    global COMPILER
    if options.compiler:
        COMPILER = options.compiler
    # Determine which compilation backends ("languages") to test.
    selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
    backends = []
    for backend in selected_backends:
        if backend == 'c' and not options.use_c:
            continue
        elif backend == 'cpp' and not options.use_cpp:
            continue
        elif backend not in BACKENDS:
            sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
                backend, ','.join(BACKENDS)))
            sys.exit(1)
        backends.append(backend)
    if options.shard_num <= 0:
        sys.stderr.write("Backends: %s\n" % ','.join(backends))
    languages = backends
    if 'CI' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
        bugs_file_name = 'macos_cpp_bugs.txt'
        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
        ]
    if options.use_common_utility_dir:
        common_utility_dir = os.path.join(WORKDIR, 'utility_code')
        if not os.path.exists(common_utility_dir):
            os.makedirs(common_utility_dir)
    else:
        common_utility_dir = None
    sys.stderr.write("\n")
    # Assemble the test suite from all enabled test sources.
    test_suite = unittest.TestSuite()
    stats = Stats()
    if options.unittests:
        collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
    if options.doctests:
        collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
    if options.filetests and languages:
        filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                options, options.pyregr, languages, test_bugs,
                                options.language_level, common_utility_dir,
                                options.pythran_dir, add_embedded_test=True, stats=stats,
                                add_cpp_locals_extra_tests=options.use_cpp_locals)
        test_suite.addTest(filetests.build_suite())
    if options.examples and languages:
        examples_workdir = os.path.join(WORKDIR, 'examples')
        language_level = 3
        for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
            filetests = TestBuilder(subdirectory, examples_workdir, selectors, exclude_selectors,
                                    options, options.pyregr, languages, test_bugs,
                                    language_level, common_utility_dir,
                                    options.pythran_dir,
                                    default_mode='compile', stats=stats, add_cython_import=True)
            test_suite.addTest(filetests.build_suite())
    if options.system_pyregr and languages:
        sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
        if not os.path.isdir(sys_pyregr_dir):
            sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
        if os.path.isdir(sys_pyregr_dir):
            filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                    options, True, languages, test_bugs,
                                    sys.version_info[0], common_utility_dir, stats=stats)
            sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
            test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
    if options.code_style and options.shard_num <= 0:
        try:
            import pycodestyle
        except ImportError:
            # Hack to make the exclusion visible.
            missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
        else:
            test_suite.addTest(TestCodeFormat(options.cython_dir))
    # Choose a test runner: XML output (for CI) or plain text.
    if xml_output_dir:
        from Cython.Tests.xmlrunner import XMLTestRunner
        if not os.path.exists(xml_output_dir):
            try:
                os.makedirs(xml_output_dir)
            except OSError:
                pass # concurrency issue?
        test_runner = XMLTestRunner(output=xml_output_dir,
                                    verbose=options.verbosity > 0)
        if options.failfast:
            sys.stderr.write("--failfast not supported with XML runner\n")
    else:
        text_runner_options = {}
        if options.failfast:
            text_runner_options['failfast'] = True
        test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
    if options.pyximport_py:
        from pyximport import pyximport
        pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
                          load_py_module_on_import_failure=True, inplace=True)
    try:
        gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    except AttributeError:
        pass # not available on PyPy
    # Enable faulthandler for this run and for subprocesses (via env var),
    # remembering the previous state so it can be restored afterwards.
    enable_faulthandler = False
    old_faulhandler_envvar = os.environ.get('PYTHONFAULTHANDLER')
    try:
        import faulthandler
    except ImportError:
        pass
    else:
        os.environ['PYTHONFAULTHANDLER'] = "1"
        enable_faulthandler = not faulthandler.is_enabled()
        if enable_faulthandler:
            faulthandler.enable()
    if options.tracemalloc:
        import tracemalloc
        tracemalloc.start()
    # Run the collected tests.
    try:
        if options.shard_num > -1:
            thread_id = f" (Thread ID 0x{threading.get_ident():x})" if threading is not None else ""
            sys.stderr.write(f"Tests in shard ({options.shard_num}/{options.shard_count}) starting{thread_id}\n")
        result = test_runner.run(test_suite)
    except Exception as exc:
        # Make sure we print exceptions also from shards.
        if options.shard_num > -1:
            sys.stderr.write(f"Tests in shard ({options.shard_num}/{options.shard_count}) crashed: {exc}\n")
            import traceback
            traceback.print_exc()
        raise
    finally:
        # Restore the faulthandler state and environment changed above.
        if enable_faulthandler:
            faulthandler.disable()
        if os.environ.get('PYTHONFAULTHANDLER') != old_faulhandler_envvar:
            if old_faulhandler_envvar is None:
                del os.environ['PYTHONFAULTHANDLER']
            else:
                os.environ['PYTHONFAULTHANDLER'] = old_faulhandler_envvar
    if options.tracemalloc:
        import tracemalloc
        snapshot = tracemalloc.take_snapshot()
        run_dir = os.curdir
        mallocs = '\n'.join(f"  {os.path.relpath(str(tm_stat), run_dir)}" for tm_stat in snapshot.statistics('lineno')[:20])
        del snapshot
        tracemalloc.stop()
        sys.stderr.write(f"Memory allocations:\n{mallocs}\n")
    if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
        shutil.rmtree(common_utility_dir)
    from Cython.Compiler.Pipeline import get_timings
    pipeline_stats = get_timings()
    if missing_dep_excluder.tests_missing_deps:
        sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
        for test in missing_dep_excluder.tests_missing_deps:
            sys.stderr.write("  %s\n" % test)
    if options.with_refnanny and refnanny is not None:
        sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
    result_code = 0 if options.exit_ok else not result.wasSuccessful()
    # Only text runs can recover per-test failure output for the parent process.
    if xml_output_dir:
        failure_output = ""
    else:
        failure_output = "".join(collect_failure_output(result))
    return options.shard_num, stats, pipeline_stats, result_code, failure_output
def collect_failure_output(result):
    """Extract test error/failure output from a TextTestResult.

    Returns a list of formatted report sections, one per failed/errored test.
    """
    sections = []
    for label, problems in (("ERROR", result.errors), ("FAIL", result.failures)):
        for test_case, traceback_text in problems:
            sections.append(
                f"{result.separator1}\n{label}: {result.getDescription(test_case)}\n"
                f"{result.separator2}\n{traceback_text}\n")
    return sections
# Script entry point: run the test suite; on an unexpected crash, report the
# traceback and make sure stray non-daemon threads cannot keep the process alive.
if __name__ == '__main__':
    try:
        main()
    except Exception:
        traceback.print_exc()
        try:
            check_thread_termination(ignore_seen=False)
        except PendingThreadsError:
            # normal program exit won't kill the threads, do it the hard way here
            flush_and_terminate(1)
        sys.exit(1)
| PendingThreadsError |
python | pytorch__pytorch | test/distributed/checkpoint/test_tp_checkpoint.py | {
"start": 1125,
"end": 5633
} | class ____(DTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(2)
@with_temp_dir
def test_tp_checkpoint(self):
CHECKPOINT_DIR = self.temp_dir
mesh_shpe = (self.world_size,)
tp_mesh = init_device_mesh(self.device_type, mesh_shpe)
# create model and move it to GPU with id rank
model = MLPModule(self.device_type).to(self.rank)
# Parallelize the module based on the given Parallel Style.
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(model, tp_mesh, parallelize_plan)
optimizer = torch.optim.SGD(model.parameters(), lr=0.25)
original_state_dict = deepcopy(model.state_dict())
dcp.save(
state_dict=original_state_dict,
storage_writer=dcp.FileSystemWriter(CHECKPOINT_DIR),
planner=DefaultSavePlanner(),
)
# Update the parameters so model.state_dict() will be different from original_state_dict.
torch.manual_seed(0)
inp = torch.rand(20, 10).to(self.rank)
output = model(inp)
output.sum().backward()
optimizer.step()
state_dict = model.state_dict()
# ensure the current model parameters are different from original_state_dict before loading from checkpoint
for param1, param2 in zip(original_state_dict.values(), state_dict.values()):
self.assertNotEqual(param1.to_local(), param2.to_local())
dcp.load(
state_dict=state_dict,
storage_reader=dcp.FileSystemReader(CHECKPOINT_DIR),
planner=DefaultLoadPlanner(),
)
# now load from checkpoint to check current model parameters are the same as original_state_dict
for param1, param2 in zip(original_state_dict.values(), state_dict.values()):
self.assertEqual(param1.to_local(), param2.to_local())
@with_comms
@skip_if_lt_x_gpu(2)
@with_temp_dir
def test_tp_checkpoint_load_on_meta_device(self):
CHECKPOINT_DIR = self.temp_dir
mesh_shpe = (self.world_size,)
tp_mesh = init_device_mesh(self.device_type, mesh_shpe)
# create model and move it to GPU with id rank
model = UnevenShardedModel(self.device_type).to(self.rank)
# Parallelize the module based on the given Parallel Style.
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
"net3": ColwiseParallel(),
}
model = parallelize_module(model, tp_mesh, parallelize_plan=parallelize_plan)
original_state_dict = {
"model": model.state_dict(),
}
dcp.save(
state_dict=original_state_dict,
storage_writer=dcp.FileSystemWriter(CHECKPOINT_DIR),
)
model2 = parallelize_module(
UnevenShardedModel("meta"), tp_mesh, parallelize_plan=parallelize_plan
)
model2_sd_before_load = model2.state_dict()
state_dict_to_load = {"model": model2_sd_before_load}
dcp.load(
state_dict=state_dict_to_load,
storage_reader=dcp.FileSystemReader(CHECKPOINT_DIR),
)
# We need to make sure state_dict_to_load["model"] is the same as state_dict_after_load["model"],
# since we are doing in-place loading.
self.assertTrue(state_dict_to_load["model"] is model2_sd_before_load)
model2.load_state_dict(state_dict_to_load["model"], assign=True)
state_dict_after_load = {"model": model2.state_dict()}
self.assertEqual(
len(original_state_dict["model"]), len(state_dict_to_load["model"])
)
self.assertEqual(
len(original_state_dict["model"]), len(state_dict_after_load["model"])
)
for name, param in original_state_dict["model"].items():
param_to_load = state_dict_to_load["model"][name]
param_after_load = state_dict_after_load["model"][name]
# we need to explicitly check the device is not meta as the assertEqual check
# currently doesn't handle DTensor with meta device.
self.assertTrue(not param_to_load.is_meta)
self.assertTrue(not param_after_load.is_meta)
self.assertEqual(param.to_local(), param_to_load.to_local())
self.assertEqual(param.to_local(), param_after_load.to_local())
if __name__ == "__main__":
run_tests()
| TestTpCheckpoint |
python | coleifer__peewee | tests/fields.py | {
"start": 4889,
"end": 5024
} | class ____(TestModel):
date = DateField(null=True)
time = TimeField(null=True)
date_time = DateTimeField(null=True)
| DateModel |
python | Textualize__textual | src/textual/css/tokenize.py | {
"start": 9601,
"end": 11258
} | class ____(TCSSTokenizerState):
EXPECT = (
Expect(
"style token",
key_value=r"[@a-zA-Z_-][a-zA-Z0-9_-]*=.*",
key_value_quote=r"[@a-zA-Z_-][a-zA-Z0-9_-]*='.*'",
key_value_double_quote=r"""[@a-zA-Z_-][a-zA-Z0-9_-]*=".*\"""",
percent=PERCENT,
color=COLOR,
token=TOKEN,
variable_ref=VARIABLE_REF,
whitespace=r"\s+",
)
.expect_eof(True)
.expect_semicolon(False)
)
tokenize = TCSSTokenizerState()
tokenize_declarations = DeclarationTokenizerState()
tokenize_value = ValueTokenizerState()
tokenize_style = StyleTokenizerState()
def tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:
"""Tokenizes the values in a dict of strings.
Args:
values: A mapping of CSS variable name on to a value, to be
added to the CSS context.
Returns:
A mapping of name on to a list of tokens,
"""
value_tokens = {
name: list(tokenize_value(value, ("__name__", "")))
for name, value in values.items()
}
return value_tokens
if __name__ == "__main__":
text = "[@click=app.notify(['foo', 500])] Click me! [/] :-)"
# text = "[@click=hello]Click"
from rich.console import Console
c = Console(markup=False)
from textual._profile import timer
with timer("tokenize"):
list(tokenize_markup(text, read_from=("", "")))
from textual.markup import _parse
with timer("_parse"):
list(_parse(text))
for token in tokenize_markup(text, read_from=("", "")):
c.print(repr(token))
| StyleTokenizerState |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 3519,
"end": 3803
} | class ____(graphene.ObjectType):
numMissingPartitions = graphene.NonNull(graphene.Int)
totalNumPartitions = graphene.NonNull(graphene.Int)
class Meta:
name = "AssetHealthMaterializationHealthyPartitionedMeta"
| GrapheneAssetHealthMaterializationHealthyPartitionedMeta |
python | prabhupant__python-ds | data_structures/bst/reverse_inorder_traversal.py | {
"start": 0,
"end": 681
} | class ____():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def reverse_inorder(root):
if not root:
return None
stack = []
arr = []
while True:
if root:
stack.append(root)
root = root.right
else:
if not stack:
break
root = stack.pop()
arr.append(root.val)
root = root.left
return arr
root = Node(5)
root.left = Node(3)
root.right = Node(7)
root.left.left = Node(2)
root.left.right = Node(4)
root.right.right = Node(8)
root.right.left = Node(6)
lst = reverse_inorder(root)
print(lst)
| Node |
python | ZoranPandovski__al-go-rithms | data_structures/b_tree/Python/b_tree.py | {
"start": 0,
"end": 674
} | class ____(object):
"""A B-Tree Node.
attributes
=====================
leaf : boolean, determines whether this node is a leaf.
keys : list, a list of keys internal to this node
c : list, a list of children of this node
"""
def __init__(self, leaf=False):
self.leaf = leaf
self.keys = []
self.c = []
def __str__(self):
if self.leaf:
return "Leaf BTreeNode with {0} keys\n\tK:{1}\n\tC:{2}\n".format(len(self.keys), self.keys, self.c)
else:
return "Internal BTreeNode with {0} keys, {1} children\n\tK:{2}\n\n".format(len(self.keys), len(self.c), self.keys, self.c)
| BTreeNode |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 124401,
"end": 125087
} | class ____:
xlTotalsCalculationAverage = 2 # from enum XlTotalsCalculation
xlTotalsCalculationCount = 3 # from enum XlTotalsCalculation
xlTotalsCalculationCountNums = 4 # from enum XlTotalsCalculation
xlTotalsCalculationCustom = 9 # from enum XlTotalsCalculation
xlTotalsCalculationMax = 6 # from enum XlTotalsCalculation
xlTotalsCalculationMin = 5 # from enum XlTotalsCalculation
xlTotalsCalculationNone = 0 # from enum XlTotalsCalculation
xlTotalsCalculationStdDev = 7 # from enum XlTotalsCalculation
xlTotalsCalculationSum = 1 # from enum XlTotalsCalculation
xlTotalsCalculationVar = 8 # from enum XlTotalsCalculation
| TotalsCalculation |
python | mahmoud__boltons | boltons/tableutils.py | {
"start": 6000,
"end": 6314
} | class ____(InputType):
def check_type(self, obj):
return isinstance(obj, tuple)
def guess_headers(self, obj):
return None
def get_entry(self, obj, headers):
return list(obj)
def get_entry_seq(self, obj_seq, headers):
return [list(t) for t in obj_seq]
| TupleInputType |
python | aio-libs__aiohttp | tests/test_benchmarks_http_websocket.py | {
"start": 1896,
"end": 2184
} | class ____(asyncio.Transport):
"""Mock transport for testing that do no real I/O."""
def is_closing(self) -> bool:
"""Swallow is_closing."""
return False
def write(self, data: bytes | bytearray | memoryview) -> None:
"""Swallow writes."""
| MockTransport |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0144_addons_blank_field.py | {
"start": 183,
"end": 827
} | class ____(migrations.Migration):
safe = Safe.always()
dependencies = [
("builds", "0059_add_version_date_index"),
("projects", "0143_addons_flyout_position"),
]
operations = [
migrations.AlterField(
model_name="addonsconfig",
name="options_base_version",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="builds.version",
verbose_name="Base version to compare against (eg. DocDiff, File Tree Diff)",
),
),
]
| Migration |
python | numba__numba | numba/tests/npyufunc/test_parallel_ufunc_issues.py | {
"start": 177,
"end": 2556
} | class ____(unittest.TestCase):
_numba_parallel_test_ = False
def test_thread_response(self):
"""
Related to #89.
This does not test #89 but tests the fix for it.
We want to make sure the worker threads can be used multiple times
and with different time gap between each execution.
"""
@vectorize('float64(float64, float64)', target='parallel')
def fnv(a, b):
return a + b
sleep_time = 1 # 1 second
while sleep_time > 0.00001: # 10us
time.sleep(sleep_time)
a = b = np.arange(10**5)
np.testing.assert_equal(a + b, fnv(a, b))
# Reduce sleep time
sleep_time /= 2
@skip_if_freethreading
def test_gil_reacquire_deadlock(self):
"""
Testing issue #1998 due to GIL reacquiring
"""
# make a ctypes callback that requires the GIL
proto = ctypes.CFUNCTYPE(None, ctypes.c_int32)
characters = 'abcdefghij'
def bar(x):
print(characters[x])
cbar = proto(bar)
# our unit under test
@vectorize(['int32(int32)'], target='parallel', nopython=True)
def foo(x):
print(x % 10) # this reacquires the GIL
cbar(x % 10) # this reacquires the GIL
return x * 2
# Numpy ufunc has a heuristic to determine whether to release the GIL
# during execution. Small input size (10) seems to not release the GIL.
# Large input size (1000) seems to release the GIL.
for nelem in [1, 10, 100, 1000]:
# inputs
a = np.arange(nelem, dtype=np.int32)
acopy = a.copy()
# run and capture stdout
with captured_stdout() as buf:
got = foo(a)
stdout = buf.getvalue()
buf.close()
# process outputs from print
got_output = sorted(map(lambda x: x.strip(), stdout.splitlines()))
# build expected output
expected_output = [str(x % 10) for x in range(nelem)]
expected_output += [characters[x % 10] for x in range(nelem)]
expected_output = sorted(expected_output)
# verify
self.assertEqual(got_output, expected_output)
np.testing.assert_equal(got, 2 * acopy)
| TestParUfuncIssues |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_block_documents.py | {
"start": 11069,
"end": 23500
} | class ____:
@pytest.fixture(autouse=True)
async def block_documents(self, session, block_schemas):
block_documents = []
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
block_schema_id=block_schemas[0].id,
name="block-1",
block_type_id=block_schemas[0].block_type_id,
),
)
)
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
block_schema_id=block_schemas[1].id,
name="block-2",
block_type_id=block_schemas[1].block_type_id,
),
)
)
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
block_schema_id=block_schemas[2].id,
name="block-3",
block_type_id=block_schemas[2].block_type_id,
),
)
)
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
block_schema_id=block_schemas[1].id,
name="block-4",
block_type_id=block_schemas[1].block_type_id,
),
)
)
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
block_schema_id=block_schemas[2].id,
name="block-5",
block_type_id=block_schemas[2].block_type_id,
),
)
)
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
block_schema_id=block_schemas[2].id,
block_type_id=block_schemas[2].block_type_id,
is_anonymous=True,
),
)
)
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
name="nested-block-1",
block_schema_id=block_schemas[3].id,
block_type_id=block_schemas[3].block_type_id,
data={
"b": {"$ref": {"block_document_id": block_documents[1].id}},
"z": "index",
},
),
)
)
block_documents.append(
await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
name="nested-block-2",
block_schema_id=block_schemas[4].id,
block_type_id=block_schemas[4].block_type_id,
data={
"c": {"$ref": {"block_document_id": block_documents[2].id}},
"d": {"$ref": {"block_document_id": block_documents[5].id}},
},
),
)
)
await session.commit()
return sorted(block_documents, key=lambda b: b.name)
async def test_read_block_documents(self, client, block_documents):
response = await client.post("/block_documents/filter")
assert response.status_code == status.HTTP_200_OK
read_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
# sorted by block document name
# anonymous blocks excluded by default
assert [b.id for b in read_block_documents] == [
b.id for b in block_documents if not b.is_anonymous
]
# make sure that API results are as expected
required_attrs = [
"id",
"created",
"updated",
"name",
"data",
"block_schema_id",
"block_schema",
"block_type_id",
"block_type",
"block_document_references",
]
for b in read_block_documents:
for attr in required_attrs:
assert getattr(b, attr) is not None
async def test_read_nonsense_block_document(self, client, block_documents):
"""Regression test for an issue we observed in Cloud where a client made
requests for /block_documents/null"""
response = await client.get("/block_documents/not-even")
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.parametrize("is_anonymous", [True, False])
async def test_read_block_documents_with_filter_is_anonymous(
self, client, block_documents, is_anonymous
):
response = await client.post(
"/block_documents/filter",
json=dict(block_documents=dict(is_anonymous=dict(eq_=is_anonymous))),
)
assert response.status_code == status.HTTP_200_OK
read_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
# sorted by block document name
assert [b.id for b in read_block_documents] == [
b.id for b in block_documents if b.is_anonymous is is_anonymous
]
@pytest.mark.parametrize("is_anonymous_filter", [None, dict(eq_=None)])
async def test_read_block_documents_with_both_anonymous_and_non_anonymous(
self, client, block_documents, is_anonymous_filter
):
"""
anonymous blocks are filtered by default, so have to explicitly disable
the filter to get all blocks. This can be done either by disabling the
is_anonymous filter (recommended) OR by setting eq_=None and we test
both to make sure the default value doesn't override
"""
response = await client.post(
"/block_documents/filter",
json=dict(block_documents=dict(is_anonymous=is_anonymous_filter)),
)
assert response.status_code == status.HTTP_200_OK
read_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
# sorted by block document name
assert [b.id for b in read_block_documents] == [b.id for b in block_documents]
async def test_read_block_documents_limit_offset(self, client, block_documents):
# sorted by block document name
response = await client.post("/block_documents/filter", json=dict(limit=2))
read_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
assert [b.id for b in read_block_documents] == [
block_documents[1].id,
block_documents[2].id,
]
response = await client.post(
"/block_documents/filter", json=dict(limit=2, offset=2)
)
read_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
assert [b.id for b in read_block_documents] == [
block_documents[3].id,
block_documents[4].id,
]
async def test_read_block_documents_filter_capabilities(
self, client, block_documents
):
response = await client.post(
"/block_documents/filter",
json=dict(
block_schemas=dict(block_capabilities=dict(all_=["fly", "swim"]))
),
)
assert response.status_code == 200
fly_and_swim_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
assert len(fly_and_swim_block_documents) == 1
assert fly_and_swim_block_documents[0].id == block_documents[6].id
response = await client.post(
"/block_documents/filter",
json=dict(block_schemas=dict(block_capabilities=dict(all_=["fly"]))),
)
assert response.status_code == 200
fly_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
assert len(fly_block_documents) == 3
assert [b.id for b in fly_block_documents] == [
block_documents[2].id,
block_documents[4].id,
block_documents[6].id,
]
response = await client.post(
"/block_documents/filter",
json=dict(block_schemas=dict(block_capabilities=dict(all_=["swim"]))),
)
assert response.status_code == 200
swim_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
assert len(swim_block_documents) == 1
assert swim_block_documents[0].id == block_documents[6].id
async def test_read_block_documents_filter_types(self, client, block_documents):
response = await client.post(
"/block_documents/filter",
json=dict(block_types=dict(slug=dict(any_=["a", "b"]))),
)
assert response.status_code == 200
docs = parse_obj_as(List[schemas.core.BlockDocument], response.json())
assert len(docs) == 3
assert len([d for d in docs if d.block_type.slug == "a"]) == 1
assert len([d for d in docs if d.block_type.slug == "b"]) == 2
assert [b.id for b in docs] == [
block_documents[1].id,
block_documents[2].id,
block_documents[4].id,
]
async def test_read_block_documents_filter_name_like(self, client, block_documents):
response = await client.post(
"/block_documents/filter",
json=dict(block_documents=dict(name=dict(like_="nested"))),
)
assert response.status_code == 200
docs = parse_obj_as(List[schemas.core.BlockDocument], response.json())
assert [b.id for b in docs] == [
block_documents[6].id,
block_documents[7].id,
]
async def test_read_block_documents_filter_multiple(self, client, block_documents):
response = await client.post(
"/block_documents/filter",
json=dict(
block_types=dict(slug=dict(any_=["a", "b"])),
block_schemas=dict(block_capabilities=dict(all_=["fly"])),
),
)
assert response.status_code == 200
docs = parse_obj_as(List[schemas.core.BlockDocument], response.json())
assert [b.id for b in docs] == [block_documents[2].id, block_documents[4].id]
async def test_read_block_documents_sorts_by_block_type_name_name(
self, client, block_documents
):
block_documents_sorted_by_block_type_name_name = sorted(
block_documents,
key=lambda block_document: (
block_document.block_type.name,
block_document.name,
),
)
response = await client.post(
"/block_documents/filter",
json={
"block_documents": {
"name": {
"any_": [
b.name
for b in block_documents_sorted_by_block_type_name_name
]
}
},
"sort": schemas.sorting.BlockDocumentSort.BLOCK_TYPE_AND_NAME_ASC.value,
},
)
assert response.status_code == status.HTTP_200_OK
read_block_documents = parse_obj_as(
List[schemas.core.BlockDocument], response.json()
)
# sorted by block type name, block document name
# anonymous blocks excluded by default
assert [b.id for b in read_block_documents] == [
b.id
for b in block_documents_sorted_by_block_type_name_name
if not b.is_anonymous
]
| TestReadBlockDocuments |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 24409,
"end": 24593
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("CREATED_AT", "NAME", "UPDATED_AT")
| ProjectOrderField |
python | scikit-learn__scikit-learn | sklearn/model_selection/tests/test_validation.py | {
"start": 5292,
"end": 5699
} | class ____(MockEstimatorWithParameter):
"""Dummy classifier that disallows repeated calls of fit method"""
def fit(self, X_subset, y_subset):
assert not hasattr(self, "fit_called_"), "fit is called the second time"
self.fit_called_ = True
return super().fit(X_subset, y_subset)
def predict(self, X):
raise NotImplementedError
| MockEstimatorWithSingleFitCallAllowed |
python | networkx__networkx | networkx/algorithms/assortativity/tests/base_test.py | {
"start": 1898,
"end": 2651
} | class ____:
@classmethod
def setup_class(cls):
cls.P4 = nx.path_graph(4)
cls.D = nx.DiGraph()
cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])
cls.D2 = nx.DiGraph()
cls.D2.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)])
cls.M = nx.MultiGraph()
nx.add_path(cls.M, range(4))
cls.M.add_edge(0, 1)
cls.S = nx.Graph()
cls.S.add_edges_from([(0, 0), (1, 1)])
cls.W = nx.Graph()
cls.W.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5)
cls.W.add_edge(0, 2, weight=1)
S1 = nx.star_graph(4)
S2 = nx.star_graph(4)
cls.DS = nx.disjoint_union(S1, S2)
cls.DS.add_edge(4, 5)
| BaseTestDegreeMixing |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 8723,
"end": 8866
} | class ____(BaseException):
"""
Base type for external signal-like exceptions that should never be caught by users.
"""
| ExternalSignal |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mailchimp/unit_tests/integration/config.py | {
"start": 119,
"end": 549
} | class ____:
def __init__(self) -> None:
self._config: Dict[str, Any] = {"credentials": {"auth_type": "apikey", "apikey": "Mailchimp_token-us10"}, "data_center": "us10"}
def with_start_date(self, start_datetime: datetime) -> "ConfigBuilder":
self._config["start_date"] = start_datetime.isoformat()[:-3] + "Z"
return self
def build(self) -> Dict[str, Any]:
return self._config
| ConfigBuilder |
python | viewflow__viewflow | tests/components/test_utils__viewprop.py | {
"start": 65,
"end": 258
} | class ____(object): # noqa : D101
def __init__(self, view=None):
if view is not None:
self.view = view
@viewprop
def view(self):
return 'default'
| Viewset |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_libsparse.py | {
"start": 14671,
"end": 17476
} | class ____:
def test_check_integrity(self):
# Too many indices than specified in self.length
msg = "Too many indices"
with pytest.raises(ValueError, match=msg):
IntIndex(length=1, indices=[1, 2, 3])
# No index can be negative.
msg = "No index can be less than zero"
with pytest.raises(ValueError, match=msg):
IntIndex(length=5, indices=[1, -2, 3])
# No index can be negative.
msg = "No index can be less than zero"
with pytest.raises(ValueError, match=msg):
IntIndex(length=5, indices=[1, -2, 3])
# All indices must be less than the length.
msg = "All indices must be less than the length"
with pytest.raises(ValueError, match=msg):
IntIndex(length=5, indices=[1, 2, 5])
with pytest.raises(ValueError, match=msg):
IntIndex(length=5, indices=[1, 2, 6])
# Indices must be strictly ascending.
msg = "Indices must be strictly increasing"
with pytest.raises(ValueError, match=msg):
IntIndex(length=5, indices=[1, 3, 2])
with pytest.raises(ValueError, match=msg):
IntIndex(length=5, indices=[1, 3, 3])
def test_int_internal(self):
idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer")
assert isinstance(idx, IntIndex)
assert idx.npoints == 2
tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32))
idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer")
assert isinstance(idx, IntIndex)
assert idx.npoints == 0
tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32))
idx = make_sparse_index(
4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer"
)
assert isinstance(idx, IntIndex)
assert idx.npoints == 4
tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32))
def test_equals(self):
index = IntIndex(10, [0, 1, 2, 3, 4])
assert index.equals(index)
assert not index.equals(IntIndex(10, [0, 1, 2, 3]))
def test_to_block_index(self, cases, test_length):
xloc, xlen, yloc, ylen, _, _ = cases
xindex = BlockIndex(test_length, xloc, xlen)
yindex = BlockIndex(test_length, yloc, ylen)
# see if survive the round trip
xbindex = xindex.to_int_index().to_block_index()
ybindex = yindex.to_int_index().to_block_index()
assert isinstance(xbindex, BlockIndex)
assert xbindex.equals(xindex)
assert ybindex.equals(yindex)
def test_to_int_index(self):
index = IntIndex(10, [2, 3, 4, 5, 6])
assert index.to_int_index() is index
| TestIntIndex |
python | pypa__pipenv | pipenv/vendor/plette/models/sections.py | {
"start": 385,
"end": 1075
} | class ____(DataModel):
"""Representation of the `[requires]` section in a Pipfile."""
__SCHEMA__ = {}
__OPTIONAL__ = {
"python_version": str,
"python_full_version": str,
}
@property
def python_version(self):
try:
return self._data["python_version"]
except KeyError:
raise AttributeError("python_version")
@property
def python_full_version(self):
try:
return self._data["python_full_version"]
except KeyError:
raise AttributeError("python_full_version")
META_SECTIONS = {
"hash": Hash,
"requires": Requires,
"sources": SourceCollection,
}
| Requires |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 227947,
"end": 231536
} | class ____(fixtures.TestBase):
__backend__ = True
__only_on__ = "postgresql"
def _combinations():
return testing.combinations(
(
postgresql.INET,
lambda: [
"1.1.1.1",
"192.168.1.1",
"10.1.2.25",
"192.168.22.5",
],
IPv4Address,
),
(
postgresql.INET,
lambda: [
"2001:db8::1000",
],
IPv6Address,
),
(
postgresql.CIDR,
lambda: [
"10.0.0.0/8",
"192.168.1.0/24",
"192.168.0.0/16",
"192.168.1.25/32",
],
IPv4Network,
),
(
postgresql.CIDR,
lambda: [
"::ffff:1.2.3.0/120",
],
IPv6Network,
),
argnames="datatype,values,pytype",
)
@_combinations()
def test_default_native_inet_types(
self, datatype, values, pytype, connection, metadata
):
t = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("data", datatype),
)
metadata.create_all(connection)
connection.execute(
t.insert(),
[
{"id": i, "data": val}
for i, val in enumerate(values(), start=1)
],
)
if testing.against(["+psycopg", "+asyncpg"]) or (
testing.against("+pg8000")
and issubclass(datatype, postgresql.INET)
):
eq_(
connection.scalars(select(t.c.data).order_by(t.c.id)).all(),
[pytype(val) for val in values()],
)
else:
eq_(
connection.scalars(select(t.c.data).order_by(t.c.id)).all(),
values(),
)
@_combinations()
def test_str_based_inet_handlers(
self, datatype, values, pytype, testing_engine, metadata
):
t = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("data", datatype),
)
e = testing_engine(options={"native_inet_types": False})
with e.begin() as connection:
metadata.create_all(connection)
connection.execute(
t.insert(),
[
{"id": i, "data": val}
for i, val in enumerate(values(), start=1)
],
)
with e.connect() as connection:
eq_(
connection.scalars(select(t.c.data).order_by(t.c.id)).all(),
values(),
)
@testing.only_on("+psycopg2")
def test_not_impl_psycopg2(self, testing_engine):
with expect_raises_message(
NotImplementedError,
"The psycopg2 dialect does not implement ipaddress type handling",
):
testing_engine(options={"native_inet_types": True})
@testing.only_on("+pg8000")
def test_not_impl_pg8000(self, testing_engine):
with expect_raises_message(
NotImplementedError,
"The pg8000 dialect does not fully implement "
"ipaddress type handling",
):
testing_engine(options={"native_inet_types": True})
| InetRoundTripTests |
python | PyCQA__pylint | tests/functional/t/typing_use.py | {
"start": 896,
"end": 1150
} | class ____:
@typing.overload
def method(self, param: int) -> None:
...
@overload
def method(self, param: str) -> None:
...
def method(self, param):
return (self, param)
# pylint: enable=too-few-public-methods
| Cls |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/dags.py | {
"start": 1101,
"end": 1350
} | class ____(DAGResponse):
"""DAG with latest dag runs response serializer."""
asset_expression: dict | None
latest_dag_runs: list[DAGRunLightResponse]
pending_actions: list[HITLDetail]
is_favorite: bool
| DAGWithLatestDagRunsResponse |
python | falconry__falcon | falcon/_typing.py | {
"start": 4298,
"end": 4448
} | class ____(Protocol):
async def __call__(
self, req: AsgiRequest, resp: AsgiResponse, **kwargs: Any
) -> None: ...
| AsgiResponderCallable |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 11884,
"end": 13194
} | class ____(DagsterType):
def __init__(
self, name: str, type_check_fn: TypeCheckFn, typing_type: t.Type, **kwargs
):
super(BuiltinScalarDagsterType, self).__init__(
key=name,
name=name,
kind=DagsterTypeKind.SCALAR,
type_check_fn=type_check_fn,
is_builtin=True,
typing_type=typing_type,
**kwargs,
)
# This is passed to the constructor of subclasses as the argument `type_check_fn`-- that's why
# it exists together with the `type_check_fn` arg.
def type_check_fn(self, _context, value) -> TypeCheck:
return self.type_check_scalar_value(value)
@abstractmethod
def type_check_scalar_value(self, _value) -> TypeCheck:
raise NotImplementedError()
def _typemismatch_error_str(value: object, expected_type_desc: str) -> str:
return f'Value "{value}" of python type "{type(value).__name__}" must be a {expected_type_desc}.'
def _fail_if_not_of_type(
value: object, value_type: t.Type[t.Any], value_type_desc: str
) -> TypeCheck:
if not isinstance(value, value_type):
return TypeCheck(
success=False, description=_typemismatch_error_str(value, value_type_desc)
)
return TypeCheck(success=True)
| BuiltinScalarDagsterType |
python | django__django | tests/wsgi/tests.py | {
"start": 3146,
"end": 4872
} | class ____(SimpleTestCase):
@override_settings(WSGI_APPLICATION="wsgi.wsgi.application")
def test_success(self):
"""
If ``WSGI_APPLICATION`` is a dotted path, the referenced object is
returned.
"""
app = get_internal_wsgi_application()
from .wsgi import application
self.assertIs(app, application)
@override_settings(WSGI_APPLICATION=None)
def test_default(self):
"""
If ``WSGI_APPLICATION`` is ``None``, the return value of
``get_wsgi_application`` is returned.
"""
# Mock out get_wsgi_application so we know its return value is used
fake_app = object()
def mock_get_wsgi_app():
return fake_app
from django.core.servers import basehttp
_orig_get_wsgi_app = basehttp.get_wsgi_application
basehttp.get_wsgi_application = mock_get_wsgi_app
try:
app = get_internal_wsgi_application()
self.assertIs(app, fake_app)
finally:
basehttp.get_wsgi_application = _orig_get_wsgi_app
@override_settings(WSGI_APPLICATION="wsgi.noexist.app")
def test_bad_module(self):
msg = "WSGI application 'wsgi.noexist.app' could not be loaded; Error importing"
with self.assertRaisesMessage(ImproperlyConfigured, msg):
get_internal_wsgi_application()
@override_settings(WSGI_APPLICATION="wsgi.wsgi.noexist")
def test_bad_name(self):
msg = (
"WSGI application 'wsgi.wsgi.noexist' could not be loaded; Error importing"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
get_internal_wsgi_application()
| GetInternalWSGIApplicationTest |
python | pytorch__pytorch | test/onnx/test_lazy_import.py | {
"start": 155,
"end": 1120
} | class ____(pytorch_test_common.ExportTestCase):
def _test_package_is_lazily_imported(self, pkg, torch_pkg="torch.onnx"):
with tempfile.TemporaryDirectory() as wd:
r = subprocess.run(
[sys.executable, "-Ximporttime", "-c", "import torch.onnx"],
capture_output=True,
text=True,
cwd=wd,
check=True,
)
# The extra space makes sure we're checking the package, not any package containing its name.
self.assertTrue(
f" {pkg}" not in r.stderr,
f"`{pkg}` should not be imported, full importtime: {r.stderr}",
)
def test_onnxruntime_is_lazily_imported(self):
self._test_package_is_lazily_imported("onnxruntime")
def test_onnxscript_is_lazily_imported(self):
self._test_package_is_lazily_imported("onnxscript")
if __name__ == "__main__":
common_utils.run_tests()
| TestLazyONNXPackages |
python | numpy__numpy | numpy/lib/tests/test_shape_base.py | {
"start": 26712,
"end": 27406
} | class ____:
def test_basic(self):
d = np.ones((50, 60))
d2 = np.ones((30, 60, 6))
assert_(np.may_share_memory(d, d))
assert_(np.may_share_memory(d, d[::-1]))
assert_(np.may_share_memory(d, d[::2]))
assert_(np.may_share_memory(d, d[1:, ::-1]))
assert_(not np.may_share_memory(d[::-1], d2))
assert_(not np.may_share_memory(d[::2], d2))
assert_(not np.may_share_memory(d[1:, ::-1], d2))
assert_(np.may_share_memory(d2[1:, ::-1], d2))
# Utility
def compare_results(res, desired):
"""Compare lists of arrays."""
for x, y in zip(res, desired, strict=False):
assert_array_equal(x, y)
| TestMayShareMemory |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/pythonic_config/io_manager.py | {
"start": 6587,
"end": 7931
} | class ____(
PartialResource[TResValue],
Generic[TResValue],
):
@cached_method
def get_resource_definition(self) -> ConfigurableIOManagerFactoryResourceDefinition: # pyright: ignore[reportIncompatibleMethodOverride]
input_config_schema = None
output_config_schema = None
if safe_is_subclass(self.resource_cls, ConfigurableIOManagerFactory):
factory_cls: type[ConfigurableIOManagerFactory] = cast(
"type[ConfigurableIOManagerFactory]", self.resource_cls
)
input_config_schema = factory_cls.input_config_schema()
output_config_schema = factory_cls.output_config_schema()
return ConfigurableIOManagerFactoryResourceDefinition(
self.resource_cls,
resource_fn=self._state__internal__.resource_fn,
config_schema=self._state__internal__.config_schema,
description=self._state__internal__.description,
nested_resources=self._state__internal__.nested_resources,
nested_partial_resources=self._state__internal__.nested_partial_resources,
input_config_schema=input_config_schema,
output_config_schema=output_config_schema,
dagster_maintained=self.resource_cls._is_dagster_maintained(), # noqa: SLF001
)
@public
| PartialIOManager |
python | spack__spack | lib/spack/spack/caches.py | {
"start": 1484,
"end": 2222
} | class ____:
def __init__(self, root, skip_unstable_versions):
self.root = os.path.abspath(root)
self.skip_unstable_versions = skip_unstable_versions
def store(self, fetcher, relative_dest):
"""Fetch and relocate the fetcher's target into our mirror cache."""
# Note this will archive package sources even if they would not
# normally be cached (e.g. the current tip of an hg/git branch)
dst = os.path.join(self.root, relative_dest)
mkdirp(os.path.dirname(dst))
fetcher.archive(dst)
#: Spack's local cache for downloaded source archives
FETCH_CACHE: "spack.fetch_strategy.FsCache"
FETCH_CACHE = spack.llnl.util.lang.Singleton(_fetch_cache) # type: ignore
| MirrorCache |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 15359,
"end": 16425
} | class ____(ASTExpression):
def __init__(self, prefix: ASTExpression, postFixes: list[ASTPostfixOp]) -> None:
self.prefix = prefix
self.postFixes = postFixes
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTPostfixExpr):
return NotImplemented
return self.prefix == other.prefix and self.postFixes == other.postFixes
def __hash__(self) -> int:
return hash((self.prefix, self.postFixes))
def _stringify(self, transform: StringifyTransform) -> str:
return ''.join([
transform(self.prefix),
*(transform(p) for p in self.postFixes),
])
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
self.prefix.describe_signature(signode, mode, env, symbol)
for p in self.postFixes:
p.describe_signature(signode, mode, env, symbol)
# Unary expressions
################################################################################
| ASTPostfixExpr |
python | kamyu104__LeetCode-Solutions | Python/maximum-twin-sum-of-a-linked-list.py | {
"start": 182,
"end": 864
} | class ____(object):
def pairSum(self, head):
"""
:type head: Optional[ListNode]
:rtype: int
"""
def reverseList(head):
dummy = ListNode()
while head:
dummy.next, head.next, head = head, dummy.next, head.next
return dummy.next
dummy = ListNode(next=head)
slow = fast = dummy
while fast.next and fast.next.next:
slow, fast = slow.next, fast.next.next
result = 0
head2 = reverseList(slow)
while head:
result = max(result, head.val+head2.val)
head, head2 = head.next, head2.next
return result
| Solution |
python | pypa__hatch | backend/src/hatchling/builders/hooks/plugin/interface.py | {
"start": 273,
"end": 4250
} | class ____(Generic[BuilderConfigBound]): # no cov
"""
Example usage:
```python tab="plugin.py"
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class SpecialBuildHook(BuildHookInterface):
PLUGIN_NAME = "special"
...
```
```python tab="hooks.py"
from hatchling.plugin import hookimpl
from .plugin import SpecialBuildHook
@hookimpl
def hatch_register_build_hook():
return SpecialBuildHook
```
"""
PLUGIN_NAME = ""
"""The name used for selection."""
def __init__(
self,
root: str,
config: dict[str, Any],
build_config: BuilderConfigBound,
metadata: ProjectMetadata,
directory: str,
target_name: str,
app: Application | None = None,
) -> None:
self.__root = root
self.__config = config
self.__build_config = build_config
self.__metadata = metadata
self.__directory = directory
self.__target_name = target_name
self.__app = app
@property
def app(self) -> Application:
"""
An instance of [Application](../utilities.md#hatchling.bridge.app.Application).
"""
if self.__app is None:
from hatchling.bridge.app import Application
self.__app = cast(Application, Application().get_safe_application())
return self.__app
@property
def root(self) -> str:
"""
The root of the project tree.
"""
return self.__root
@property
def config(self) -> dict[str, Any]:
"""
The cumulative hook configuration.
```toml config-example
[tool.hatch.build.hooks.<PLUGIN_NAME>]
[tool.hatch.build.targets.<TARGET_NAME>.hooks.<PLUGIN_NAME>]
```
"""
return self.__config
@property
def metadata(self) -> ProjectMetadata:
# Undocumented for now
return self.__metadata
@property
def build_config(self) -> BuilderConfigBound:
"""
An instance of [BuilderConfig](../utilities.md#hatchling.builders.config.BuilderConfig).
"""
return self.__build_config
@property
def directory(self) -> str:
"""
The build directory.
"""
return self.__directory
@property
def target_name(self) -> str:
"""
The plugin name of the build target.
"""
return self.__target_name
def dependencies(self) -> list[str]: # noqa: PLR6301
"""
A list of extra [dependencies](../../config/dependency.md) that must be installed
prior to builds.
!!! warning
- For this to have any effect the hook dependency itself cannot be dynamic and
must always be defined in `build-system.requires`.
- As the hook must be imported to call this method, imports that require these
dependencies must be evaluated lazily.
"""
return []
def clean(self, versions: list[str]) -> None:
"""
This occurs before the build process if the `-c`/`--clean` flag was passed to
the [`build`](../../cli/reference.md#hatch-build) command, or when invoking
the [`clean`](../../cli/reference.md#hatch-clean) command.
"""
def initialize(self, version: str, build_data: dict[str, Any]) -> None:
"""
This occurs immediately before each build.
Any modifications to the build data will be seen by the build target.
"""
def finalize(self, version: str, build_data: dict[str, Any], artifact_path: str) -> None:
"""
This occurs immediately after each build and will not run if the `--hooks-only` flag
was passed to the [`build`](../../cli/reference.md#hatch-build) command.
The build data will reflect any modifications done by the target during the build.
"""
| BuildHookInterface |
python | django-crispy-forms__django-crispy-forms | tests/forms.py | {
"start": 6583,
"end": 6638
} | class ____(forms.RadioSelect):
pass
| CustomRadioSelect |
python | kamyu104__LeetCode-Solutions | Python/minimum-swaps-to-group-all-1s-together-ii.py | {
"start": 29,
"end": 405
} | class ____(object):
def minSwaps(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = cnt = w = nums.count(1)
for i in xrange(len(nums)+(w-1)):
if i >= w:
cnt += nums[(i-w)%len(nums)]
cnt -= nums[i%len(nums)]
result = min(result, cnt)
return result
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/cloud_run.py | {
"start": 7758,
"end": 10415
} | class ____(GoogleBaseHook):
"""
Hook for the Google Cloud Run services.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
self._client: ServicesClient | None = None
super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain, **kwargs)
def get_conn(self):
if self._client is None:
self._client = ServicesClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def get_service(self, service_name: str, region: str, project_id: str = PROVIDE_PROJECT_ID):
get_service_request = GetServiceRequest(
name=f"projects/{project_id}/locations/{region}/services/{service_name}"
)
return self.get_conn().get_service(get_service_request)
@GoogleBaseHook.fallback_to_default_project_id
def create_service(
self, service_name: str, service: Service | dict, region: str, project_id: str = PROVIDE_PROJECT_ID
) -> Service:
if isinstance(service, dict):
service = Service(service)
create_request = CreateServiceRequest(
parent=f"projects/{project_id}/locations/{region}",
service=service,
service_id=service_name,
)
operation = self.get_conn().create_service(create_request)
return operation.result()
@GoogleBaseHook.fallback_to_default_project_id
def delete_service(self, service_name: str, region: str, project_id: str = PROVIDE_PROJECT_ID) -> Service:
delete_request = DeleteServiceRequest(
name=f"projects/{project_id}/locations/{region}/services/{service_name}"
)
operation = self.get_conn().delete_service(delete_request)
return operation.result()
| CloudRunServiceHook |
python | doocs__leetcode | solution/2400-2499/2442.Count Number of Distinct Integers After Reverse Operations/Solution.py | {
"start": 0,
"end": 199
} | class ____:
def countDistinctIntegers(self, nums: List[int]) -> int:
s = set(nums)
for x in nums:
y = int(str(x)[::-1])
s.add(y)
return len(s)
| Solution |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 204549,
"end": 208603
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[4, 3]", L_y_: "f32[3, 4]"):
l_x_ = L_x_
l_y_ = L_y_
tensor: "i64[1]" = torch.tensor((12,))
cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
neg: "i64[0]" = getitem.neg(); getitem = None
unbind = neg.unbind(); neg = unbind = None
chunk: "f32[12, 12]" = l_y_.new_zeros(12, 12)
diagonal: "f32[12]" = chunk.diagonal(0)
fill_: "f32[12]" = diagonal.fill_(1); diagonal = fill_ = None
child: "f32[12, 3, 4]" = chunk.view(12, 3, 4); chunk = None
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(12, 'error'); _vmap_increment_nesting = None
child_1: "f32[3, 4]" = torch._functorch.predispatch._add_batch_dim(child, 0, 1); child = None
_jvp_increment_nesting = torch._C._functorch._jvp_increment_nesting(); _jvp_increment_nesting = None
_set_fwd_grad_enabled = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled = None
_enter_dual_level = torch._C._enter_dual_level(); _enter_dual_level = None
_maybe_load_decompositions = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions = None
_make_dual: "f32[3, 4]" = torch._make_dual(l_y_, child_1, level = 0); child_1 = None
aux: "f32[4, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 2); l_x_ = None
_wrap_for_grad_1: "f32[3, 4]" = torch._C._functorch._wrap_for_grad(l_y_, 2); l_y_ = _wrap_for_grad_1 = None
result_duals: "f32[3, 4]" = _make_dual.sin(); _make_dual = None
aux_1: "f32[4, 3]" = torch._C._functorch._unwrap_for_grad(aux, 2); aux = None
_unpack_dual = torch._unpack_dual(result_duals, level = 0); result_duals = None
primal: "f32[3, 4]" = _unpack_dual[0]
dual: "f32[3, 4]" = _unpack_dual[1]; _unpack_dual = None
primals_out_unflatten: "f32[3, 4]" = torch._C._functorch._unwrap_for_grad(primal, 2); primal = primals_out_unflatten = None
tangents_out_unflatten: "f32[3, 4]" = torch._C._functorch._unwrap_for_grad(dual, 2); dual = None
_exit_dual_level = torch._C._exit_dual_level(0); _exit_dual_level = None
_set_fwd_grad_enabled_1 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_1 = None
_jvp_decrement_nesting = torch._C._functorch._jvp_decrement_nesting(); _jvp_decrement_nesting = None
results: "f32[12, 3, 4]" = torch._functorch.predispatch._remove_batch_dim(tangents_out_unflatten, 1, 12, 0); tangents_out_unflatten = None
aux_2: "f32[12, 4, 3]" = torch._functorch.predispatch._remove_batch_dim(aux_1, 1, 12, 0); aux_1 = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
aux_3: "f32[4, 3]" = aux_2[0]; aux_2 = None
movedim: "f32[3, 4, 12]" = results.movedim(0, -1); results = None
split = movedim.split((12,), dim = -1); movedim = None
jac_out_in: "f32[3, 4, 12]" = split[0]; split = None
unflatten: "f32[3, 4, 3, 4]" = jac_out_in.unflatten(-1, (3, 4)); jac_out_in = None
return (unflatten, aux_3)
""",
)
def test_jacfwd_randomness(self):
counters.clear()
def fn(x, y):
return y.sin(), x
def wrapper_fn(x, y):
return torch.func.jacfwd(fn, randomness="same")(x, y)
x = torch.randn(4, 3)
y = torch.randn(3, 4)
wrapped_gm = self._compile_check(wrapper_fn, (x, y))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | davidhalter__jedi | test/completion/pep0526_variables.py | {
"start": 752,
"end": 1245
} | class ____:
var_instance1: int = ''
var_instance2: float
var_class1: typing.ClassVar[str] = 1
var_class2: typing.ClassVar[bytes]
var_class3 = None
def __init__(self):
#? int()
d.var_instance1
#? float()
d.var_instance2
#? str()
d.var_class1
#? bytes()
d.var_class2
#? []
d.int
#? ['var_class1', 'var_class2', 'var_instance1', 'var_instance2', 'var_class3']
self.var_
| VarClass |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/multiple_models/tutorial001_py310.py | {
"start": 99,
"end": 303
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
| Hero |
python | PrefectHQ__prefect | tests/utilities/test_collections.py | {
"start": 21157,
"end": 22428
} | class ____:
def test_remove_single_key(self):
obj = {"a": "a", "b": "b", "c": "c"}
assert remove_nested_keys(["a"], obj) == {"b": "b", "c": "c"}
def test_remove_multiple_keys(self):
obj = {"a": "a", "b": "b", "c": "c"}
assert remove_nested_keys(["a", "b"], obj) == {"c": "c"}
def test_remove_keys_recursively(self):
obj = {
"title": "Test",
"description": "This is a docstring",
"type": "object",
"properties": {
"a": {"title": "A", "description": "A field", "type": "string"}
},
"required": ["a"],
"block_type_name": "Test",
"block_schema_references": {},
}
assert remove_nested_keys(["description"], obj) == {
"title": "Test",
"type": "object",
"properties": {"a": {"title": "A", "type": "string"}},
"required": ["a"],
"block_type_name": "Test",
"block_schema_references": {},
}
def test_passes_through_non_dict(self):
assert remove_nested_keys(["foo"], 1) == 1
assert remove_nested_keys(["foo"], "foo") == "foo"
assert remove_nested_keys(["foo"], b"foo") == b"foo"
| TestRemoveKeys |
python | huggingface__transformers | src/transformers/models/moonshine/modeling_moonshine.py | {
"start": 22442,
"end": 26928
} | class ____(MoonshinePreTrainedModel):
"""
Transformer encoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoonshineEncoderLayer`]
Args:
config: MoonshineConfig
"""
main_input_name = "input_values"
_can_record_outputs = {
"attentions": MoonshineAttention,
"hidden_states": MoonshineEncoderLayer,
}
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False)
self.conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3)
self.conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2)
self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=1e-5)
self.layers = nn.ModuleList(
[MoonshineEncoderLayer(config, idx) for idx in range(config.encoder_num_hidden_layers)]
)
self.layer_norm = nn.LayerNorm(embed_dim, bias=False)
self.rotary_emb = MoonshineRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.conv1
def set_input_embeddings(self, value: nn.Module):
self.conv1 = value
@check_model_inputs()
def forward(
self,
input_values: torch.FloatTensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
input_values = input_values.unsqueeze(1)
hidden_states = nn.functional.tanh(self.conv1(input_values))
hidden_states = self.groupnorm(hidden_states)
hidden_states = nn.functional.gelu(self.conv2(hidden_states))
hidden_states = nn.functional.gelu(self.conv3(hidden_states))
hidden_states = hidden_states.permute(0, 2, 1)
# attention mask downsampling
if attention_mask is not None:
mask_len = self._get_feat_extract_output_lengths(attention_mask.shape[-1])
downsample_stride = 64 * 3 * 2 # conv strides
attention_mask = attention_mask[..., ::downsample_stride][..., :mask_len]
if self.config._attn_implementation == "flash_attention_2":
attention_mask = attention_mask if (attention_mask == 0.0).any() else None
elif self.config._attn_implementation == "sdpa":
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, hidden_states.dtype)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
position_ids = torch.arange(0, hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for encoder_layer in self.layers:
hidden_states = encoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.layer_norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
)
@auto_docstring
| MoonshineEncoder |
python | doocs__leetcode | solution/2800-2899/2826.Sorting Three Groups/Solution.py | {
"start": 0,
"end": 546
} | class ____:
def minimumOperations(self, nums: List[int]) -> int:
f = [0] * 3
for x in nums:
g = [0] * 3
if x == 1:
g[0] = f[0]
g[1] = min(f[:2]) + 1
g[2] = min(f) + 1
elif x == 2:
g[0] = f[0] + 1
g[1] = min(f[:2])
g[2] = min(f) + 1
else:
g[0] = f[0] + 1
g[1] = min(f[:2]) + 1
g[2] = min(f)
f = g
return min(f)
| Solution |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_container.py | {
"start": 1775,
"end": 3330
} | class ____:
def test_init(self) -> None:
with pytest.raises(TypeError):
bcpc.ColumnData()
assert issubclass(bcpc.ColumnData, bcpc.Dict)
def test_has_ref(self) -> None:
prop = bcpc.ColumnData(String, Seq(Any))
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpc.ColumnData(String, Seq(Any))
assert str(prop) == "ColumnData(String, Seq(Any))"
def test__hinted_value_with_hint_ColumnDataChanged(self) -> None:
from bokeh.document.events import ColumnDataChangedEvent
prop = bcpc.ColumnData(String, Seq(Any))
source = ColumnDataSource(data=dict(foo=[10], bar=[20], baz=[30]))
hint = ColumnDataChangedEvent("doc", source, "data", cols=["foo"])
assert prop._hinted_value(source.data, hint) == dict(foo=[10])
def test__hinted_value_with_hint_ColumnsStreamed(self) -> None:
from bokeh.document.events import ColumnsStreamedEvent
prop = bcpc.ColumnData(String, Seq(Any))
source = ColumnDataSource(data=dict(foo=[10], bar=[20], baz=[30]))
new_data = dict(foo=[11], bar=[21], baz=[31])
hint = ColumnsStreamedEvent("doc", source, "data", new_data, rollover=10)
assert prop._hinted_value(source.data, hint) == new_data
def test__hinted_value_without_hint(self) -> None:
prop = bcpc.ColumnData(String, Seq(Any))
source = ColumnDataSource(data=dict(foo=[10], bar=[20], baz=[30]))
assert prop._hinted_value(source.data, None) == source.data
| Test_ColumnData |
python | getsentry__sentry | tests/sentry/dashboards/endpoints/test_organization_dashboard_details.py | {
"start": 165648,
"end": 168375
} | class ____(OrganizationDashboardDetailsTestCase):
def url(self, dashboard_id):
return reverse(
"sentry-api-0-organization-dashboard-visit",
kwargs={
"organization_id_or_slug": self.organization.slug,
"dashboard_id": dashboard_id,
},
)
def test_visit_dashboard(self) -> None:
assert self.dashboard.last_visited is not None
last_visited = self.dashboard.last_visited
assert self.dashboard.visits == 1
response = self.do_request("post", self.url(self.dashboard.id))
assert response.status_code == 204
dashboard = Dashboard.objects.get(id=self.dashboard.id)
assert dashboard.visits == 2
assert dashboard.last_visited is not None
assert dashboard.last_visited > last_visited
def test_visit_dashboard_no_access(self) -> None:
last_visited = self.dashboard.last_visited
assert self.dashboard.visits == 1
with self.feature({"organizations:dashboards-edit": False}):
response = self.do_request("post", self.url(self.dashboard.id))
assert response.status_code == 404
dashboard = Dashboard.objects.get(id=self.dashboard.id)
assert dashboard.visits == 1
assert dashboard.last_visited == last_visited
def test_user_visited_dashboard_creates_entry(self) -> None:
member = OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
)
assert not DashboardLastVisited.objects.filter(
dashboard=self.dashboard,
member=member,
).exists()
response = self.do_request("post", self.url(self.dashboard.id))
assert response.status_code == 204
visit = DashboardLastVisited.objects.get(
dashboard=self.dashboard,
member=member,
)
assert visit.last_visited.timestamp() == pytest.approx(timezone.now().timestamp())
def test_user_visited_dashboard_updates_entry(self) -> None:
member = OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
)
DashboardLastVisited.objects.create(
dashboard=self.dashboard,
member=member,
last_visited=timezone.now() - timedelta(days=10),
)
response = self.do_request("post", self.url(self.dashboard.id))
assert response.status_code == 204
visit = DashboardLastVisited.objects.get(
dashboard=self.dashboard,
member=member,
)
assert visit.last_visited.timestamp() == pytest.approx(timezone.now().timestamp())
| OrganizationDashboardVisitTest |
python | tensorflow__tensorflow | tensorflow/python/client/session.py | {
"start": 17507,
"end": 22214
} | class ____(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = feed_handles or {}
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, tensor.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise errors.InaccessibleTensorError(
f'Operation {op.name} has been marked as not fetchable. Typically '
'this happens when it is defined in another function or code block. '
'Use return values, explicit Python locals or TensorFlow collections '
'to access it.')
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i].ref() in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i].ref()].eval()
else:
value = self._feeds.get(self._fetches[i].ref())
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i].ref())
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
| _FetchHandler |
python | pytorch__pytorch | torch/distributed/fsdp/api.py | {
"start": 17695,
"end": 18046
} | class ____(OptimStateDictConfig):
"""
Attributes:
rank0_only (bool): If ``True``, then only rank 0 saves the full state
dict, and nonzero ranks save an empty dict. If ``False``, then all
ranks save the full state dict. (Default: ``False``)
"""
rank0_only: bool = False
@dataclass
| FullOptimStateDictConfig |
python | google__pytype | pytype/errors/error_printer.py | {
"start": 713,
"end": 4227
} | class ____:
"""Print the details of a BadCall."""
def __init__(
self,
pp: pretty_printer_base.PrettyPrinterBase,
bad_call: error_types.BadCall,
):
self.bad_call = bad_call
self._pp = pp
def _iter_sig(self):
"""Iterate through a Signature object. Focus on a bad parameter."""
sig = self.bad_call.sig
for name in sig.posonly_params:
yield "", name
if sig.posonly_params:
yield ("/", "")
for name in sig.param_names[sig.posonly_count :]:
yield "", name
if sig.varargs_name is not None:
yield "*", sig.varargs_name
elif sig.kwonly_params:
yield ("*", "")
for name in sorted(sig.kwonly_params):
yield "", name
if sig.kwargs_name is not None:
yield "**", sig.kwargs_name
def _iter_expected(self):
"""Yield the prefix, name and type information for expected parameters."""
bad_param = self.bad_call.bad_param
sig = self.bad_call.sig
for prefix, name in self._iter_sig():
suffix = " = ..." if sig.has_default(name) else ""
if bad_param and name == bad_param.name:
type_str = self._pp.print_type_of_instance(bad_param.typ)
suffix = ": " + type_str + suffix
yield prefix, name, suffix
def _iter_actual(self, literal):
"""Yield the prefix, name and type information for actual parameters."""
# We want to display the passed_args in the order they're defined in the
# signature, unless there are starargs or starstarargs.
# Map param names to their position in the list, then sort the list of
# passed args so it's in the same order as the params.
sig = self.bad_call.sig
passed_args = self.bad_call.passed_args
bad_param = self.bad_call.bad_param
keys = {param: n for n, (_, param) in enumerate(self._iter_sig())}
def key_f(arg):
arg_name = arg[0]
# starargs are given anonymous names, which won't be found in the sig.
# Instead, use the same name as the varags param itself, if present.
if arg_name not in keys and pytd_utils.ANON_PARAM.match(arg_name):
return keys.get(sig.varargs_name, len(keys) + 1)
return keys.get(arg_name, len(keys) + 1)
for name, arg in sorted(passed_args, key=key_f):
if bad_param and name == bad_param.name:
suffix = ": " + self._pp.print_type(arg, literal=literal)
else:
suffix = ""
yield "", name, suffix
def _print_args(self, arg_iter):
"""Pretty-print a list of arguments. Focus on a bad parameter."""
# (foo, bar, broken : type, ...)
bad_param = self.bad_call.bad_param
printed_params = []
found = False
for prefix, name, suffix in arg_iter:
if bad_param and name == bad_param.name:
printed_params.append(prefix + name + suffix)
found = True
elif found:
printed_params.append("...")
break
elif pytd_utils.ANON_PARAM.match(name):
printed_params.append(prefix + "_")
else:
printed_params.append(prefix + name)
return ", ".join(printed_params)
def print_call_details(self):
bad_param = self.bad_call.bad_param
expected = self._print_args(self._iter_expected())
literal = "Literal[" in expected
actual = self._print_args(self._iter_actual(literal))
if bad_param and bad_param.error_details:
mp = MatcherErrorPrinter(self._pp)
details = mp.print_error_details(bad_param.error_details)
else:
details = []
return BadCall(expected, actual, details)
| BadCallPrinter |
python | coleifer__peewee | tests/sql.py | {
"start": 76569,
"end": 82935
} | class ____(BaseTestCase):
def test_reselect(self):
query = Person.select(Person.name)
self.assertSQL(query, 'SELECT "t1"."name" FROM "person" AS "t1"', [])
query = query.columns(Person.id, Person.name, Person.dob)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name", "t1"."dob" '
'FROM "person" AS "t1"'), [])
def test_distinct_on(self):
query = (Note
.select(Person.name, Note.content)
.join(Person, on=(Note.person_id == Person.id))
.order_by(Person.name, Note.content)
.distinct(Person.name))
self.assertSQL(query, (
'SELECT DISTINCT ON ("t1"."name") '
'"t1"."name", "t2"."content" '
'FROM "note" AS "t2" '
'INNER JOIN "person" AS "t1" ON ("t2"."person_id" = "t1"."id") '
'ORDER BY "t1"."name", "t2"."content"'), [])
query = (Person
.select(Person.name)
.distinct(Person.name))
self.assertSQL(query, (
'SELECT DISTINCT ON ("t1"."name") "t1"."name" '
'FROM "person" AS "t1"'), [])
def test_distinct(self):
query = Person.select(Person.name).distinct()
self.assertSQL(query,
'SELECT DISTINCT "t1"."name" FROM "person" AS "t1"', [])
def test_distinct_count(self):
query = Person.select(fn.COUNT(Person.name.distinct()))
self.assertSQL(query, (
'SELECT COUNT(DISTINCT "t1"."name") FROM "person" AS "t1"'), [])
def test_filtered_count(self):
filtered_count = (fn.COUNT(Person.name)
.filter(Person.dob < datetime.date(2000, 1, 1)))
query = Person.select(fn.COUNT(Person.name), filtered_count)
self.assertSQL(query, (
'SELECT COUNT("t1"."name"), COUNT("t1"."name") '
'FILTER (WHERE ("t1"."dob" < ?)) '
'FROM "person" AS "t1"'), [datetime.date(2000, 1, 1)])
def test_ordered_aggregate(self):
agg = fn.array_agg(Person.name).order_by(Person.id.desc())
self.assertSQL(Person.select(agg.alias('names')), (
'SELECT array_agg("t1"."name" ORDER BY "t1"."id" DESC) AS "names" '
'FROM "person" AS "t1"'), [])
agg = fn.string_agg(Person.name, ',').order_by(Person.dob, Person.id)
self.assertSQL(Person.select(agg), (
'SELECT string_agg("t1"."name", ? ORDER BY "t1"."dob", "t1"."id")'
' FROM "person" AS "t1"'), [','])
agg = (fn.string_agg(Person.name.concat('-x'), ',')
.order_by(Person.name.desc(), Person.dob.asc()))
self.assertSQL(Person.select(agg), (
'SELECT string_agg(("t1"."name" || ?), ? ORDER BY "t1"."name" DESC'
', "t1"."dob" ASC) '
'FROM "person" AS "t1"'), ['-x', ','])
agg = agg.order_by()
self.assertSQL(Person.select(agg), (
'SELECT string_agg(("t1"."name" || ?), ?) '
'FROM "person" AS "t1"'), ['-x', ','])
def test_for_update(self):
query = (Person
.select()
.where(Person.name == 'charlie')
.for_update())
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name", "t1"."dob" '
'FROM "person" AS "t1" '
'WHERE ("t1"."name" = ?) '
'FOR UPDATE'), ['charlie'], for_update=True)
query = query.for_update('FOR SHARE NOWAIT')
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name", "t1"."dob" '
'FROM "person" AS "t1" '
'WHERE ("t1"."name" = ?) '
'FOR SHARE NOWAIT'), ['charlie'], for_update=True)
def test_for_update_nested(self):
PA = Person.alias('pa')
subq = PA.select(PA.id).where(PA.name == 'charlie').for_update()
query = (Person
.delete()
.where(Person.id.in_(subq)))
self.assertSQL(query, (
'DELETE FROM "person" WHERE ("person"."id" IN ('
'SELECT "pa"."id" FROM "person" AS "pa" '
'WHERE ("pa"."name" = ?) FOR UPDATE))'),
['charlie'],
for_update=True)
def test_for_update_options(self):
query = (Person
.select(Person.id)
.where(Person.name == 'huey')
.for_update(of=Person, nowait=True))
self.assertSQL(query, (
'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) '
'FOR UPDATE OF "t1" NOWAIT'), ['huey'], for_update=True)
# Check default behavior.
query = query.for_update()
self.assertSQL(query, (
'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) '
'FOR UPDATE'), ['huey'], for_update=True)
# Clear flag.
query = query.for_update(None)
self.assertSQL(query, (
'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?)'),
['huey'])
# Old-style is still supported.
query = query.for_update('FOR UPDATE NOWAIT')
self.assertSQL(query, (
'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) '
'FOR UPDATE NOWAIT'), ['huey'], for_update=True)
# Mix of old and new is OK.
query = query.for_update('FOR SHARE NOWAIT', of=Person)
self.assertSQL(query, (
'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) '
'FOR SHARE OF "t1" NOWAIT'), ['huey'], for_update=True)
def test_parentheses(self):
query = (Person
.select(fn.MAX(
fn.IFNULL(1, 10) * 151,
fn.IFNULL(None, 10))))
self.assertSQL(query, (
'SELECT MAX((IFNULL(?, ?) * ?), IFNULL(?, ?)) '
'FROM "person" AS "t1"'), [1, 10, 151, None, 10])
query = (Person
.select(Person.name)
.where(fn.EXISTS(
User.select(User.c.id).where(
User.c.username == Person.name))))
self.assertSQL(query, (
'SELECT "t1"."name" FROM "person" AS "t1" '
'WHERE EXISTS('
'SELECT "t2"."id" FROM "users" AS "t2" '
'WHERE ("t2"."username" = "t1"."name"))'), [])
| TestSelectFeatures |
python | encode__django-rest-framework | rest_framework/exceptions.py | {
"start": 5700,
"end": 6087
} | class ____(APIException):
status_code = status.HTTP_405_METHOD_NOT_ALLOWED
default_detail = _('Method "{method}" not allowed.')
default_code = 'method_not_allowed'
def __init__(self, method, detail=None, code=None):
if detail is None:
detail = force_str(self.default_detail).format(method=method)
super().__init__(detail, code)
| MethodNotAllowed |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol42.py | {
"start": 378,
"end": 507
} | class ____:
def __call__(self, input: Sequence[T]) -> T:
return input[0]
v1: ProtoA[Sequence[int], int] = ImplA()
| ImplA |
python | pytorch__pytorch | test/inductor/test_loop_ordering.py | {
"start": 22825,
"end": 35878
} | class ____(MockSchedulerTest):
"""Tests for memory coalescing analysis with specific tensor sizes."""
device = GPU_TYPE
_exit_stack = None
def setUp(self):
super().setUp()
metrics.reset()
def _create_buffer(self, name, sizes):
"""Create a buffer with specified sizes"""
strides = ir.FlexibleLayout.contiguous_strides(sizes)
box = ir.TensorBox.create(
ir.Buffer(
name=name,
layout=ir.FixedLayout(
torch.device(self.device),
dtype=torch.float32,
size=sizes,
stride=strides,
),
)
)
box_loader = box.make_loader()
def inner_fn(index):
return box_loader(index) * 2
buf = ir.Pointwise.create(
device=box.get_device(),
dtype=box.get_dtype(),
inner_fn=inner_fn,
ranges=box.get_size(),
)
buf.realize()
computed_buf = buf.data.data
computed_buf.decide_layout()
return computed_buf
def _create_scheduler_node(self, buf):
s = SchedulerNode(V.graph.scheduler, buf)
s.min_order = 0
s.max_order = 100
return s
@parametrize(
"inps",
(
((128, 384, 196), (768, 64, 196), (128, 6, 64, 196)),
((64,), (16, 4), (16, 4)),
((5, 6), (3, 10), (30,)),
((5, 6, 20), (3, 10, 20), (30, 20)),
),
)
def test_inferred_splits(self, inps):
"""
Test memory coalescing analysis with the specified tensor sizes.
Using direct SchedulerNode creation with sizes (128, 384, 196) and (768, 64, 196).
"""
s1, s2, expected_size = inps
# Create buffers with the specified sizes
buf1 = self._create_buffer("buffer1", s1)
buf2 = self._create_buffer("buffer2", s2)
# Create scheduler nodes
snode1 = self._create_scheduler_node(buf1)
snode2 = self._create_scheduler_node(buf2)
# Create a fused node
fused_node = torch._inductor.scheduler.FusedSchedulerNode.fuse(snode1, snode2)
from torch._inductor import tiling_utils
fused_norm_read_writes = tiling_utils.extract_normalized_read_writes(fused_node)
var_ranges = fused_norm_read_writes.var_ranges
self.assertEqual(list(var_ranges.values()), list(expected_size))
def test_remapped_reads(self):
from torch._inductor import tiling_utils
def fn(nodes):
assert len(nodes) == 1
fused_norm_read_writes = tiling_utils.extract_normalized_read_writes(
nodes[0]
)
self.assertTrue(len(fused_norm_read_writes.var_ranges) == 2)
# both reads remapped correctly
FileCheck().check("4*n0 + n1").run(
repr(fused_norm_read_writes.reads.keys())
)
FileCheck().check("n0 + 4*n1").run(
repr(fused_norm_read_writes.reads.keys())
)
return nodes
with torch._inductor.config.patch(_post_fusion_custom_pass=fn):
@torch.compile()
def foo(x, y):
return x + y
foo(
torch.rand([4, 4], device=GPU_TYPE),
torch.rand([4, 4], device=GPU_TYPE).T,
)
def test_remapped_reads_split(self):
from torch._inductor import tiling_utils
def fn(nodes):
self.assertTrue(len(nodes) == 1)
fused_norm_read_writes = tiling_utils.extract_normalized_read_writes(
nodes[0]
)
inp_node_reads = nodes[0].get_nodes()[1]._body.get_read_exprs()
node_ranges = nodes[0].get_nodes()[1]._body.var_ranges
self.assertTrue(len(node_ranges) == 1)
self.assertTrue(next(iter(node_ranges.values())) == 36)
var = next(iter(node_ranges.keys()))
r = FloorDiv(var, 6) + 6 * ModularIndexing(var, 1, 6)
self.assertTrue(r in inp_node_reads)
# mapped reads
self.assertTrue(list(fused_norm_read_writes.var_ranges.values()) == [6, 6])
n0, n1 = list(fused_norm_read_writes.var_ranges.keys())
# translation of above is n0 + 6 * n1
self.assertTrue((n0 + 6 * n1) in fused_norm_read_writes.reads)
return nodes
with torch._inductor.config.patch(_post_fusion_custom_pass=fn):
@torch.compile()
def foo(x, y):
return (
x + y
).contiguous().flatten() + torch.ops._inductor_test.realize(
(y.T + 1).flatten()
)
foo(
torch.rand([6, 6], device=GPU_TYPE),
torch.rand([6, 6], device=GPU_TYPE).T,
)
def test_reduction_pointwise(self):
# test one pw var, one red var
from torch._inductor import tiling_utils
def fn(nodes):
self.assertTrue(len(nodes) == 1)
fused_rw = tiling_utils.extract_normalized_read_writes(nodes[0])
i_vars, r_vars = fused_rw.index_vars, fused_rw.reduce_vars
self.assertTrue(len(i_vars) == 1)
self.assertTrue(len(r_vars) == 1)
# single write to index var
self.assertTrue(
fused_rw.index_vars[0] == next(iter(fused_rw.writes.keys()))
)
# the write to the fused intermediary node should be removed
self.assertTrue(len(fused_rw.writes) == 1)
# single read
self.assertTrue(len(fused_rw.reads) == 1)
# that is applied to two bufs
self.assertTrue(len(next(iter(fused_rw.reads.values()))) == 2)
# and the read should be in terms of the index + reduce var,
# even though node is pointwise
self.assertTrue(256 * i_vars[0] + r_vars[0] in fused_rw.reads)
return nodes
with torch._inductor.config.patch(_post_fusion_custom_pass=fn), torch.no_grad():
@torch.compile()
def foo(x, y):
out = torch.ops._inductor_test.realize(x + y)
return out.sum(dim=1)
foo(
torch.rand(256, 256, device=GPU_TYPE),
torch.rand(256, 256, device=GPU_TYPE),
)
def test_reduction_no_pointwise(self):
# test one pw var, one red var
from torch._inductor import tiling_utils
def fn(nodes):
self.assertTrue(len(nodes) == 1)
fused_rw = tiling_utils.extract_normalized_read_writes(nodes[0])
i_vars, r_vars = fused_rw.index_vars, fused_rw.reduce_vars
self.assertTrue(len(i_vars) == 0)
self.assertTrue(len(r_vars) == 1)
return nodes
with torch._inductor.config.patch(_post_fusion_custom_pass=fn), torch.no_grad():
@torch.compile()
def foo(x):
return x.sum()
foo(torch.rand(1024, device=GPU_TYPE))
def test_coalescing(self):
from torch._inductor import tiling_utils
# Define symbolic variables
i, j, n, m = sympy.symbols("i j n m", integer=True)
# Test cases: (expression, var_ranges, expected_result)
test_cases = [
# Simple direct case
(i + j * 5, {i: 10, j: 8}, i),
# Floor division case
(i + FloorDiv(j, 2), {i: 4, j: 8}, i),
# Modular indexing
(i * 10 + ModularIndexing(j, 1, 3), {i: 5, j: 10}, j),
# Case with no coalescing variable
(i * 2 + j * 3, {i: 8, j: 5}, None),
# Division case
(i / 2, {i: 10}, None),
# More complex floor division
(j + FloorDiv(i, 3), {i: 6, j: 12}, j),
# Addition inside modular indexing
(ModularIndexing(i + 3, 1, 6), {i: 8, j: 12}, i),
]
for expr, var_ranges, expected in test_cases:
# Test the function
result = tiling_utils.find_coalesced_var(expr, var_ranges)
self.assertEqual(result, expected)
@parametrize("downcast_transposed_v", (False, True))
def test_tiled_coalesce_analysis(self, downcast_transposed_v):
# test one pw var, one red var
from torch._inductor import tiling_utils
def fn(nodes):
self.assertTrue(len(nodes) == 1)
coalesce_analysis = tiling_utils.analyze_memory_coalescing(nodes[0])
i_vars = coalesce_analysis.norm_read_writes.index_vars
# because output is contiguous, second dimension should
# coalesce twice as many bytes as first dimension
# if not downcasted
# if downcasted, should be equal, bc larger dtype size
# we also weight writes x 2
cont_reads = coalesce_analysis.coalesced_by_var[i_vars[1]]
t_reads = coalesce_analysis.coalesced_by_var[i_vars[0]]
if not downcast_transposed_v:
self.assertEqual(cont_reads, t_reads * 3)
else:
self.assertEqual(cont_reads, t_reads * 1.5)
return nodes
with torch._inductor.config.patch(_post_fusion_custom_pass=fn), torch.no_grad():
@torch.compile()
def foo(x, y):
return x + y.to(x.dtype)
y_dtype = torch.float if not downcast_transposed_v else torch.float64
foo(
torch.rand(256, 256, device=GPU_TYPE),
torch.rand(256, 256, device=GPU_TYPE, dtype=y_dtype).T,
)
def test_solve_for_zero(self):
from torch._inductor import tiling_utils
x, y = sympy.symbols("x y", integer=True)
# Test cases: (expression, expected_result)
test_cases = [
# Simple linear expressions
(x + 5, (-5)),
(2 * x - 10, (5)),
# Constant expressions (should return None)
(sympy.Integer(7), None),
(sympy.Integer(0), None),
# FloorDiv cases (should return None per function)
(FloorDiv(x, 2), None),
(FloorDiv(x, 2) + 5, None),
# ModularIndexing cases
(ModularIndexing(x, 1, 5), (5)),
(ModularIndexing(x, 1, 3), (3)),
# Expressions with no constant solution
(x**2 + 1, None), # No real solution
]
for expr, expected in test_cases:
result = tiling_utils.solve_for_zero(expr)
self.assertEqual(result, expected)
def test_solve_for_tiling(self):
from torch._inductor import tiling_utils
x = sympy.Symbol("x", integer=True)
test_cases = [
# Simple linear cases that coalesce
(3 * x, None),
# # # # Expression with no free symbols
# (sympy.Integer(5), None),
(x / 3, 3),
(FloorDiv(x * 2, 6), 3),
# # ModularIndexing expressions
(ModularIndexing(FloorDiv(x, 4), 1, 64), 4),
(x + ModularIndexing(x, 1, 5), None),
(x**2, None), # Non-linear, diff is not constant
(4096 * (ModularIndexing(32 * x, 1, 2048)) + FloorDiv(x, 64), 64),
(4096 * (ModularIndexing(x, 1, 2048)) + FloorDiv(x, 2048), 2048),
]
for expr, expected in test_cases:
result = tiling_utils.solve_for_tiling(expr)
self.assertEqual(result, expected)
def test_induced_fused_tiling(self):
from torch._inductor import tiling_utils
def fn(nodes):
self.assertTrue(len(nodes) == 1)
coalesce_analysis = tiling_utils.analyze_memory_coalescing(nodes[0])
self.assertEqual(coalesce_analysis.suggested_split.tiling_factor, 64)
return nodes
with torch._inductor.config.patch(_post_fusion_custom_pass=fn), torch.no_grad():
def forward(permute):
clone = torch.ops.aten.clone.default(
permute, memory_format=torch.contiguous_format
)
view_2 = torch.ops.aten.view.default(clone, [-1, 32])
amax_1 = torch.ops.aten.amax.default(view_2, [1])
return amax_1
XDIM = 2048
YDIM = 4096
arg0_1 = torch.randn([XDIM, YDIM], device=GPU_TYPE, dtype=torch.bfloat16)
permute = torch.ops.aten.permute.default(arg0_1, [1, 0])
out, code = run_and_get_code(torch.compile(forward), (permute))
self.assertEqual(out, forward(permute))
FileCheck().check("YBLOCK").check("XBLOCK").run(code[0])
layouts = ("cont", "NHWC", "T")
@inductor_config.patch(
{
"triton.unique_kernel_names": True,
"loop_ordering_after_fusion": True,
"triton.coalesce_tiling_analysis": True,
}
)
@instantiate_parametrized_tests
| MemoryCoalescingTest |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc_strides.py | {
"start": 3894,
"end": 4057
} | class ____(_AbstractBinary):
params = [
[np.maximum, np.minimum, np.fmax, np.fmin, np.ldexp],
[1, 2], [1, 4], [1, 2, 4], ['f', 'd']
]
| BinaryFP |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/flows.py | {
"start": 765,
"end": 862
} | class ____(Exception):
"""Raised when Databricks jobs runs submit skips"""
| DatabricksJobSkipped |
python | rapidsai__cudf | python/cudf/cudf/core/indexing_utils.py | {
"start": 956,
"end": 1046
} | class ____:
"""An indexer for a gather map."""
key: GatherMap
@dataclass
| MapIndexer |
python | eventlet__eventlet | eventlet/hubs/selects.py | {
"start": 327,
"end": 1984
} | class ____(hub.BaseHub):
def _remove_bad_fds(self):
""" Iterate through fds, removing the ones that are bad per the
operating system.
"""
all_fds = list(self.listeners[self.READ]) + list(self.listeners[self.WRITE])
for fd in all_fds:
try:
select.select([fd], [], [], 0)
except OSError as e:
if support.get_errno(e) in BAD_SOCK:
self.remove_descriptor(fd)
def wait(self, seconds=None):
readers = self.listeners[self.READ]
writers = self.listeners[self.WRITE]
if not readers and not writers:
if seconds:
time.sleep(seconds)
return
reader_fds = list(readers)
writer_fds = list(writers)
all_fds = reader_fds + writer_fds
try:
r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds)
except OSError as e:
if support.get_errno(e) == errno.EINTR:
return
elif support.get_errno(e) in BAD_SOCK:
self._remove_bad_fds()
return
else:
raise
for fileno in er:
readers.get(fileno, hub.noop).cb(fileno)
writers.get(fileno, hub.noop).cb(fileno)
for listeners, events in ((readers, r), (writers, w)):
for fileno in events:
try:
listeners.get(fileno, hub.noop).cb(fileno)
except self.SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())
| Hub |
python | EpistasisLab__tpot | tpot/tpot_estimator/steady_state_estimator.py | {
"start": 2431,
"end": 55812
} | class ____(BaseEstimator):
def __init__(self,
search_space,
scorers= [],
scorers_weights = [],
classification = False,
cv = 10,
other_objective_functions=[], #tpot.objectives.estimator_objective_functions.number_of_nodes_objective],
other_objective_functions_weights = [],
objective_function_names = None,
bigger_is_better = True,
export_graphpipeline = False,
memory = None,
categorical_features = None,
subsets = None,
preprocessing = False,
validation_strategy = "none",
validation_fraction = .2,
disable_label_encoder = False,
initial_population_size = 50,
population_size = 50,
max_evaluated_individuals = None,
early_stop = None,
early_stop_mins = None,
scorers_early_stop_tol = 0.001,
other_objectives_early_stop_tol = None,
max_time_mins=None,
max_eval_time_mins=10,
n_jobs=1,
memory_limit = None,
client = None,
crossover_probability=.2,
mutate_probability=.7,
mutate_then_crossover_probability=.05,
crossover_then_mutate_probability=.05,
survival_selector = survival_select_NSGA2,
parent_selector = tournament_selection_dominated,
budget_range = None,
budget_scaling = .5,
individuals_until_end_budget = 1,
stepwise_steps = 5,
warm_start = False,
verbose = 0,
periodic_checkpoint_folder = None,
callback = None,
processes = True,
scatter = True,
# random seed for random number generator (rng)
random_state = None,
optuna_optimize_pareto_front = False,
optuna_optimize_pareto_front_trials = 100,
optuna_optimize_pareto_front_timeout = 60*10,
optuna_storage = "sqlite:///optuna.db",
):
'''
An sklearn baseestimator that uses genetic programming to optimize a pipeline.
Parameters
----------
scorers : (list, scorer)
A scorer or list of scorers to be used in the cross-validation process.
see https://scikit-learn.org/stable/modules/model_evaluation.html
scorers_weights : list
A list of weights to be applied to the scorers during the optimization process.
classification : bool
If True, the problem is treated as a classification problem. If False, the problem is treated as a regression problem.
Used to determine the CV strategy.
cv : int, cross-validator
- (int): Number of folds to use in the cross-validation process. By uses the sklearn.model_selection.KFold cross-validator for regression and StratifiedKFold for classification. In both cases, shuffled is set to True.
- (sklearn.model_selection.BaseCrossValidator): A cross-validator to use in the cross-validation process.
other_objective_functions : list, default=[]
A list of other objective functions to apply to the pipeline. The function takes a single parameter for the graphpipeline estimator and returns either a single score or a list of scores.
other_objective_functions_weights : list, default=[]
A list of weights to be applied to the other objective functions.
objective_function_names : list, default=None
A list of names to be applied to the objective functions. If None, will use the names of the objective functions.
bigger_is_better : bool, default=True
If True, the objective function is maximized. If False, the objective function is minimized. Use negative weights to reverse the direction.
max_size : int, default=np.inf
The maximum number of nodes of the pipelines to be generated.
linear_pipeline : bool, default=False
If True, the pipelines generated will be linear. If False, the pipelines generated will be directed acyclic graphs.
root_config_dict : dict, default='auto'
The configuration dictionary to use for the root node of the model.
If 'auto', will use "classifiers" if classification=True, else "regressors".
- 'selectors' : A selection of sklearn Selector methods.
- 'classifiers' : A selection of sklearn Classifier methods.
- 'regressors' : A selection of sklearn Regressor methods.
- 'transformers' : A selection of sklearn Transformer methods.
- 'arithmetic_transformer' : A selection of sklearn Arithmetic Transformer methods that replicate symbolic classification/regression operators.
- 'passthrough' : A node that just passes though the input. Useful for passing through raw inputs into inner nodes.
- 'feature_set_selector' : A selector that pulls out specific subsets of columns from the data. Only well defined as a leaf.
Subsets are set with the subsets parameter.
- 'skrebate' : Includes ReliefF, SURF, SURFstar, MultiSURF.
- 'MDR' : Includes MDR.
- 'ContinuousMDR' : Includes ContinuousMDR.
- 'genetic encoders' : Includes Genetic Encoder methods as used in AutoQTL.
- 'FeatureEncodingFrequencySelector': Includes FeatureEncodingFrequencySelector method as used in AutoQTL.
- list : a list of strings out of the above options to include the corresponding methods in the configuration dictionary.
inner_config_dict : dict, default=["selectors", "transformers"]
The configuration dictionary to use for the inner nodes of the model generation.
Default ["selectors", "transformers"]
- 'selectors' : A selection of sklearn Selector methods.
- 'classifiers' : A selection of sklearn Classifier methods.
- 'regressors' : A selection of sklearn Regressor methods.
- 'transformers' : A selection of sklearn Transformer methods.
- 'arithmetic_transformer' : A selection of sklearn Arithmetic Transformer methods that replicate symbolic classification/regression operators.
- 'passthrough' : A node that just passes though the input. Useful for passing through raw inputs into inner nodes.
- 'feature_set_selector' : A selector that pulls out specific subsets of columns from the data. Only well defined as a leaf.
Subsets are set with the subsets parameter.
- 'skrebate' : Includes ReliefF, SURF, SURFstar, MultiSURF.
- 'MDR' : Includes MDR.
- 'ContinuousMDR' : Includes ContinuousMDR.
- 'genetic encoders' : Includes Genetic Encoder methods as used in AutoQTL.
- 'FeatureEncodingFrequencySelector': Includes FeatureEncodingFrequencySelector method as used in AutoQTL.
- list : a list of strings out of the above options to include the corresponding methods in the configuration dictionary.
- None : If None and max_depth>1, the root_config_dict will be used for the inner nodes as well.
leaf_config_dict : dict, default=None
The configuration dictionary to use for the leaf node of the model. If set, leaf nodes must be from this dictionary.
Otherwise leaf nodes will be generated from the root_config_dict.
Default None
- 'selectors' : A selection of sklearn Selector methods.
- 'classifiers' : A selection of sklearn Classifier methods.
- 'regressors' : A selection of sklearn Regressor methods.
- 'transformers' : A selection of sklearn Transformer methods.
- 'arithmetic_transformer' : A selection of sklearn Arithmetic Transformer methods that replicate symbolic classification/regression operators.
- 'passthrough' : A node that just passes though the input. Useful for passing through raw inputs into inner nodes.
- 'feature_set_selector' : A selector that pulls out specific subsets of columns from the data. Only well defined as a leaf.
Subsets are set with the subsets parameter.
- 'skrebate' : Includes ReliefF, SURF, SURFstar, MultiSURF.
- 'MDR' : Includes MDR.
- 'ContinuousMDR' : Includes ContinuousMDR.
- 'genetic encoders' : Includes Genetic Encoder methods as used in AutoQTL.
- 'FeatureEncodingFrequencySelector': Includes FeatureEncodingFrequencySelector method as used in AutoQTL.
- list : a list of strings out of the above options to include the corresponding methods in the configuration dictionary.
- None : If None, a leaf will not be required (i.e. the pipeline can be a single root node). Leaf nodes will be generated from the inner_config_dict.
categorical_features: list or None
Categorical columns to inpute and/or one hot encode during the preprocessing step. Used only if preprocessing is not False.
- None : If None, TPOT will automatically use object columns in pandas dataframes as objects for one hot encoding in preprocessing.
- List of categorical features. If X is a dataframe, this should be a list of column names. If X is a numpy array, this should be a list of column indices
memory: Memory object or string, default=None
If supplied, pipeline will cache each transformer after calling fit with joblib.Memory. This feature
is used to avoid computing the fit transformers within a pipeline if the parameters
and input data are identical with another fitted pipeline during optimization process.
- String 'auto':
TPOT uses memory caching with a temporary directory and cleans it up upon shutdown.
- String path of a caching directory
TPOT uses memory caching with the provided directory and TPOT does NOT clean
the caching directory up upon shutdown. If the directory does not exist, TPOT will
create it.
- Memory object:
TPOT uses the instance of joblib.Memory for memory caching,
and TPOT does NOT clean the caching directory up upon shutdown.
- None:
TPOT does not use memory caching.
preprocessing : bool or BaseEstimator/Pipeline,
EXPERIMENTAL
A pipeline that will be used to preprocess the data before CV.
- bool : If True, will use a default preprocessing pipeline.
- Pipeline : If an instance of a pipeline is given, will use that pipeline as the preprocessing pipeline.
validation_strategy : str, default='none'
EXPERIMENTAL The validation strategy to use for selecting the final pipeline from the population. TPOT may overfit the cross validation score. A second validation set can be used to select the final pipeline.
- 'auto' : Automatically determine the validation strategy based on the dataset shape.
- 'reshuffled' : Use the same data for cross validation and final validation, but with different splits for the folds. This is the default for small datasets.
- 'split' : Use a separate validation set for final validation. Data will be split according to validation_fraction. This is the default for medium datasets.
- 'none' : Do not use a separate validation set for final validation. Select based on the original cross-validation score. This is the default for large datasets.
validation_fraction : float, default=0.2
EXPERIMENTAL The fraction of the dataset to use for the validation set when validation_strategy is 'split'. Must be between 0 and 1.
disable_label_encoder : bool, default=False
If True, TPOT will check if the target needs to be relabeled to be sequential ints from 0 to N. This is necessary for XGBoost compatibility. If the labels need to be encoded, TPOT will use sklearn.preprocessing.LabelEncoder to encode the labels. The encoder can be accessed via the self.label_encoder_ attribute.
If False, no additional label encoders will be used.
population_size : int, default=50
Size of the population
initial_population_size : int, default=50
Size of the initial population. If None, population_size will be used.
population_scaling : int, default=0.5
Scaling factor to use when determining how fast we move the threshold moves from the start to end percentile.
generations_until_end_population : int, default=1
Number of generations until the population size reaches population_size
generations : int, default=50
Number of generations to run
early_stop : int, default=None
Number of evaluated individuals without improvement before early stopping. Counted across all objectives independently. Triggered when all objectives have not improved by the given number of individuals.
early_stop_mins : float, default=None
Number of seconds without improvement before early stopping. All objectives must not have improved for the given number of seconds for this to be triggered.
scorers_early_stop_tol :
-list of floats
list of tolerances for each scorer. If the difference between the best score and the current score is less than the tolerance, the individual is considered to have converged
If an index of the list is None, that item will not be used for early stopping
-int
If an int is given, it will be used as the tolerance for all objectives
other_objectives_early_stop_tol :
-list of floats
list of tolerances for each of the other objective function. If the difference between the best score and the current score is less than the tolerance, the individual is considered to have converged
If an index of the list is None, that item will not be used for early stopping
-int
If an int is given, it will be used as the tolerance for all objectives
max_time_mins : float, default=float("inf")
Maximum time to run the optimization. If none or inf, will run until the end of the generations.
max_eval_time_mins : float, default=10
Maximum time to evaluate a single individual. If none or inf, there will be no time limit per evaluation.
n_jobs : int, default=1
Number of processes to run in parallel.
memory_limit : str, default=None
Memory limit for each job. See Dask [LocalCluster documentation](https://distributed.dask.org/en/stable/api.html#distributed.Client) for more information.
client : dask.distributed.Client, default=None
A dask client to use for parallelization. If not None, this will override the n_jobs and memory_limit parameters. If None, will create a new client with num_workers=n_jobs and memory_limit=memory_limit.
crossover_probability : float, default=.2
Probability of generating a new individual by crossover between two individuals.
mutate_probability : float, default=.7
Probability of generating a new individual by crossover between one individuals.
mutate_then_crossover_probability : float, default=.05
Probability of generating a new individual by mutating two individuals followed by crossover.
crossover_then_mutate_probability : float, default=.05
Probability of generating a new individual by crossover between two individuals followed by a mutation of the resulting individual.
survival_selector : function, default=survival_select_NSGA2
Function to use to select individuals for survival. Must take a matrix of scores and return selected indexes.
Used to selected population_size individuals at the start of each generation to use for mutation and crossover.
parent_selector : function, default=parent_select_NSGA2
Function to use to select pairs parents for crossover and individuals for mutation. Must take a matrix of scores and return selected indexes.
budget_range : list [start, end], default=None
A starting and ending budget to use for the budget scaling.
budget_scaling float : [0,1], default=0.5
A scaling factor to use when determining how fast we move the budget from the start to end budget.
individuals_until_end_budget : int, default=1
The number of generations to run before reaching the max budget.
stepwise_steps : int, default=1
The number of staircase steps to take when scaling the budget and population size.
threshold_evaluation_pruning : list [start, end], default=None
starting and ending percentile to use as a threshold for the evaluation early stopping.
Values between 0 and 100.
threshold_evaluation_scaling : float [0,inf), default=0.5
A scaling factor to use when determining how fast we move the threshold moves from the start to end percentile.
Must be greater than zero. Higher numbers will move the threshold to the end faster.
min_history_threshold : int, default=0
The minimum number of previous scores needed before using threshold early stopping.
selection_evaluation_pruning : list, default=None
A lower and upper percent of the population size to select each round of CV.
Values between 0 and 1.
selection_evaluation_scaling : float, default=0.5
A scaling factor to use when determining how fast we move the threshold moves from the start to end percentile.
Must be greater than zero. Higher numbers will move the threshold to the end faster.
n_initial_optimizations : int, default=0
Number of individuals to optimize before starting the evolution.
optimization_cv : int
Number of folds to use for the optuna optimization's internal cross-validation.
max_optimize_time_seconds : float, default=60*5
Maximum time to run an optimization
optimization_steps : int, default=10
Number of steps per optimization
warm_start : bool, default=False
If True, will use the continue the evolutionary algorithm from the last generation of the previous run.
verbose : int, default=1
How much information to print during the optimization process. Higher values include the information from lower values.
0. nothing
1. progress bar
3. best individual
4. warnings
>=5. full warnings trace
random_state : int, None, default=None
A seed for reproducability of experiments. This value will be passed to numpy.random.default_rng() to create an instnce of the genrator to pass to other classes
- int
Will be used to create and lock in Generator instance with 'numpy.random.default_rng()'
- None
Will be used to create Generator for 'numpy.random.default_rng()' where a fresh, unpredictable entropy will be pulled from the OS
periodic_checkpoint_folder : str, default=None
Folder to save the population to periodically. If None, no periodic saving will be done.
If provided, training will resume from this checkpoint.
callback : tpot.CallBackInterface, default=None
Callback object. Not implemented
processes : bool, default=True
If True, will use multiprocessing to parallelize the optimization process. If False, will use threading.
True seems to perform better. However, False is required for interactive debugging.
Attributes
----------
fitted_pipeline_ : GraphPipeline
A fitted instance of the GraphPipeline that inherits from sklearn BaseEstimator. This is fitted on the full X, y passed to fit.
evaluated_individuals : A pandas data frame containing data for all evaluated individuals in the run.
Columns:
- *objective functions : The first few columns correspond to the passed in scorers and objective functions
- Parents : A tuple containing the indexes of the pipelines used to generate the pipeline of that row. If NaN, this pipeline was generated randomly in the initial population.
- Variation_Function : Which variation function was used to mutate or crossover the parents. If NaN, this pipeline was generated randomly in the initial population.
- Individual : The internal representation of the individual that is used during the evolutionary algorithm. This is not an sklearn BaseEstimator.
- Generation : The generation the pipeline first appeared.
- Pareto_Front : The nondominated front that this pipeline belongs to. 0 means that its scores is not strictly dominated by any other individual.
To save on computational time, the best frontier is updated iteratively each generation.
The pipelines with the 0th pareto front do represent the exact best frontier. However, the pipelines with pareto front >= 1 are only in reference to the other pipelines in the final population.
All other pipelines are set to NaN.
- Instance : The unfitted GraphPipeline BaseEstimator.
- *validation objective functions : Objective function scores evaluated on the validation set.
- Validation_Pareto_Front : The full pareto front calculated on the validation set. This is calculated for all pipelines with Pareto_Front equal to 0. Unlike the Pareto_Front which only calculates the frontier and the final population, the Validation Pareto Front is calculated for all pipelines tested on the validation set.
pareto_front : The same pandas dataframe as evaluated individuals, but containing only the frontier pareto front pipelines.
'''
# sklearn BaseEstimator must have a corresponding attribute for each parameter.
# These should not be modified once set.
self.search_space = search_space
self.scorers = scorers
self.scorers_weights = scorers_weights
self.classification = classification
self.cv = cv
self.other_objective_functions = other_objective_functions
self.other_objective_functions_weights = other_objective_functions_weights
self.objective_function_names = objective_function_names
self.bigger_is_better = bigger_is_better
self.export_graphpipeline = export_graphpipeline
self.memory = memory
self.categorical_features = categorical_features
self.preprocessing = preprocessing
self.validation_strategy = validation_strategy
self.validation_fraction = validation_fraction
self.disable_label_encoder = disable_label_encoder
self.population_size = population_size
self.initial_population_size = initial_population_size
self.early_stop = early_stop
self.early_stop_mins = early_stop_mins
self.scorers_early_stop_tol = scorers_early_stop_tol
self.other_objectives_early_stop_tol = other_objectives_early_stop_tol
self.max_time_mins = max_time_mins
self.max_eval_time_mins = max_eval_time_mins
self.n_jobs= n_jobs
self.memory_limit = memory_limit
self.client = client
self.crossover_probability = crossover_probability
self.mutate_probability = mutate_probability
self.mutate_then_crossover_probability= mutate_then_crossover_probability
self.crossover_then_mutate_probability= crossover_then_mutate_probability
self.survival_selector=survival_selector
self.parent_selector=parent_selector
self.budget_range = budget_range
self.budget_scaling = budget_scaling
self.individuals_until_end_budget = individuals_until_end_budget
self.stepwise_steps = stepwise_steps
self.warm_start = warm_start
self.verbose = verbose
self.periodic_checkpoint_folder = periodic_checkpoint_folder
self.callback = callback
self.processes = processes
self.scatter = scatter
self.optuna_optimize_pareto_front = optuna_optimize_pareto_front
self.optuna_optimize_pareto_front_trials = optuna_optimize_pareto_front_trials
self.optuna_optimize_pareto_front_timeout = optuna_optimize_pareto_front_timeout
self.optuna_storage = optuna_storage
# create random number generator based on rngseed
self.rng = np.random.default_rng(random_state)
# save random state passed to us for other functions that use random_state
self.random_state = random_state
self.max_evaluated_individuals = max_evaluated_individuals
#Initialize other used params
if self.initial_population_size is None:
self._initial_population_size = self.population_size
else:
self._initial_population_size = self.initial_population_size
if isinstance(self.scorers, str):
self._scorers = [self.scorers]
elif callable(self.scorers):
self._scorers = [self.scorers]
else:
self._scorers = self.scorers
self._scorers = [sklearn.metrics.get_scorer(scoring) for scoring in self._scorers]
self._scorers_early_stop_tol = self.scorers_early_stop_tol
self._evolver = tpot.evolvers.SteadyStateEvolver
self.objective_function_weights = [*scorers_weights, *other_objective_functions_weights]
if self.objective_function_names is None:
obj_names = [f.__name__ for f in other_objective_functions]
else:
obj_names = self.objective_function_names
self.objective_names = [f._score_func.__name__ if hasattr(f,"_score_func") else f.__name__ for f in self._scorers] + obj_names
if not isinstance(self.other_objectives_early_stop_tol, list):
self._other_objectives_early_stop_tol = [self.other_objectives_early_stop_tol for _ in range(len(self.other_objective_functions))]
else:
self._other_objectives_early_stop_tol = self.other_objectives_early_stop_tol
if not isinstance(self._scorers_early_stop_tol, list):
self._scorers_early_stop_tol = [self._scorers_early_stop_tol for _ in range(len(self._scorers))]
else:
self._scorers_early_stop_tol = self._scorers_early_stop_tol
self.early_stop_tol = [*self._scorers_early_stop_tol, *self._other_objectives_early_stop_tol]
self._evolver_instance = None
self.evaluated_individuals = None
self.label_encoder_ = None
set_dask_settings()
def fit(self, X, y):
if self.client is not None: #If user passed in a client manually
_client = self.client
else:
if self.verbose >= 4:
silence_logs = 30
elif self.verbose >=5:
silence_logs = 40
else:
silence_logs = 50
cluster = LocalCluster(n_workers=self.n_jobs, #if no client is passed in and no global client exists, create our own
threads_per_worker=1,
processes=self.processes,
silence_logs=silence_logs,
memory_limit=self.memory_limit)
_client = Client(cluster)
if self.classification and not self.disable_label_encoder and not check_if_y_is_encoded(y):
warnings.warn("Labels are not encoded as ints from 0 to N. For compatibility with some classifiers such as sklearn, TPOT has encoded y with the sklearn LabelEncoder. When using pipelines outside the main TPOT estimator class, you can encode the labels with est.label_encoder_")
self.label_encoder_ = LabelEncoder()
y = self.label_encoder_.fit_transform(y)
self.evaluated_individuals = None
#determine validation strategy
if self.validation_strategy == 'auto':
nrows = X.shape[0]
ncols = X.shape[1]
if nrows/ncols < 20:
validation_strategy = 'reshuffled'
elif nrows/ncols < 100:
validation_strategy = 'split'
else:
validation_strategy = 'none'
else:
validation_strategy = self.validation_strategy
if validation_strategy == 'split':
if self.classification:
X, X_val, y, y_val = train_test_split(X, y, test_size=self.validation_fraction, stratify=y, random_state=self.random_state)
else:
X, X_val, y, y_val = train_test_split(X, y, test_size=self.validation_fraction, random_state=self.random_state)
X_original = X
y_original = y
if isinstance(self.cv, int) or isinstance(self.cv, float):
n_folds = self.cv
else:
n_folds = self.cv.get_n_splits(X, y)
if self.classification:
X, y = remove_underrepresented_classes(X, y, n_folds)
if self.preprocessing:
#X = pd.DataFrame(X)
if not isinstance(self.preprocessing, bool) and isinstance(self.preprocessing, sklearn.base.BaseEstimator):
self._preprocessing_pipeline = self.preprocessing
#TODO: check if there are missing values in X before imputation. If not, don't include imputation in pipeline. Check if there are categorical columns. If not, don't include one hot encoding in pipeline
else: #if self.preprocessing is True or not a sklearn estimator
pipeline_steps = []
if self.categorical_features is not None: #if categorical features are specified, use those
pipeline_steps.append(("impute_categorical", tpot.builtin_modules.ColumnSimpleImputer(self.categorical_features, strategy='most_frequent')))
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("numeric", strategy='mean')))
pipeline_steps.append(("ColumnOneHotEncoder", tpot.builtin_modules.ColumnOneHotEncoder(self.categorical_features, strategy='most_frequent')))
else:
if isinstance(X, pd.DataFrame):
categorical_columns = X.select_dtypes(include=['object']).columns
if len(categorical_columns) > 0:
pipeline_steps.append(("impute_categorical", tpot.builtin_modules.ColumnSimpleImputer("categorical", strategy='most_frequent')))
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("numeric", strategy='mean')))
pipeline_steps.append(("ColumnOneHotEncoder", tpot.builtin_modules.ColumnOneHotEncoder("categorical", strategy='most_frequent')))
else:
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("all", strategy='mean')))
else:
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("all", strategy='mean')))
self._preprocessing_pipeline = sklearn.pipeline.Pipeline(pipeline_steps)
X = self._preprocessing_pipeline.fit_transform(X, y)
else:
self._preprocessing_pipeline = None
#_, y = sklearn.utils.check_X_y(X, y, y_numeric=True)
#Set up the configuation dictionaries and the search spaces
#check if self.cv is a number
if isinstance(self.cv, int) or isinstance(self.cv, float):
if self.classification:
self.cv_gen = sklearn.model_selection.StratifiedKFold(n_splits=self.cv, shuffle=True, random_state=self.random_state)
else:
self.cv_gen = sklearn.model_selection.KFold(n_splits=self.cv, shuffle=True, random_state=self.random_state)
else:
self.cv_gen = sklearn.model_selection.check_cv(self.cv, y, classifier=self.classification)
n_samples= int(math.floor(X.shape[0]/n_folds))
n_features=X.shape[1]
if isinstance(X, pd.DataFrame):
self.feature_names = X.columns
else:
self.feature_names = None
def objective_function(pipeline_individual,
X,
y,
is_classification=self.classification,
scorers= self._scorers,
cv=self.cv_gen,
other_objective_functions=self.other_objective_functions,
export_graphpipeline=self.export_graphpipeline,
memory=self.memory,
**kwargs):
return objective_function_generator(
pipeline_individual,
X,
y,
is_classification=is_classification,
scorers= scorers,
cv=cv,
other_objective_functions=other_objective_functions,
export_graphpipeline=export_graphpipeline,
memory=memory,
**kwargs,
)
if self.classification:
n_classes = len(np.unique(y))
else:
n_classes = None
get_search_space_params = {"n_classes": n_classes,
"n_samples":len(y),
"n_features":X.shape[1],
"random_state":self.random_state}
self._search_space = get_template_search_spaces(self.search_space, classification=self.classification, inner_predictors=True, **get_search_space_params)
def ind_generator(rng):
rng = np.random.default_rng(rng)
while True:
yield self._search_space.generate(rng)
if self.scatter:
X_future = _client.scatter(X)
y_future = _client.scatter(y)
else:
X_future = X
y_future = y
#If warm start and we have an evolver instance, use the existing one
if not(self.warm_start and self._evolver_instance is not None):
self._evolver_instance = self._evolver( individual_generator=ind_generator(self.rng),
objective_functions= [objective_function],
objective_function_weights = self.objective_function_weights,
objective_names=self.objective_names,
bigger_is_better = self.bigger_is_better,
population_size= self.population_size,
initial_population_size = self._initial_population_size,
n_jobs=self.n_jobs,
verbose = self.verbose,
max_time_mins = self.max_time_mins ,
max_eval_time_mins = self.max_eval_time_mins,
periodic_checkpoint_folder = self.periodic_checkpoint_folder,
early_stop_tol = self.early_stop_tol,
early_stop= self.early_stop,
early_stop_mins = self.early_stop_mins,
budget_range = self.budget_range,
budget_scaling = self.budget_scaling,
individuals_until_end_budget = self.individuals_until_end_budget,
stepwise_steps = self.stepwise_steps,
client = _client,
objective_kwargs = {"X": X_future, "y": y_future},
survival_selector=self.survival_selector,
parent_selector=self.parent_selector,
crossover_probability = self.crossover_probability,
mutate_probability = self.mutate_probability,
mutate_then_crossover_probability= self.mutate_then_crossover_probability,
crossover_then_mutate_probability= self.crossover_then_mutate_probability,
max_evaluated_individuals = self.max_evaluated_individuals,
rng=self.rng,
)
self._evolver_instance.optimize()
#self._evolver_instance.population.update_pareto_fronts(self.objective_names, self.objective_function_weights)
self.make_evaluated_individuals()
if self.optuna_optimize_pareto_front:
pareto_front_inds = self.pareto_front['Individual'].values
all_graphs, all_scores = tpot.individual_representations.graph_pipeline_individual.simple_parallel_optuna(pareto_front_inds, objective_function, self.objective_function_weights, _client, storage=self.optuna_storage, steps=self.optuna_optimize_pareto_front_trials, verbose=self.verbose, max_eval_time_mins=self.max_eval_time_mins, max_time_mins=self.optuna_optimize_pareto_front_timeout, **{"X": X, "y": y})
all_scores = tpot.utils.eval_utils.process_scores(all_scores, len(self.objective_function_weights))
if len(all_graphs) > 0:
df = pd.DataFrame(np.column_stack((all_graphs, all_scores,np.repeat("Optuna",len(all_graphs)))), columns=["Individual"] + self.objective_names +["Parents"])
for obj in self.objective_names:
df[obj] = df[obj].apply(convert_to_float)
self.evaluated_individuals = pd.concat([self.evaluated_individuals, df], ignore_index=True)
else:
print("WARNING NO OPTUNA TRIALS COMPLETED")
tpot.utils.get_pareto_frontier(self.evaluated_individuals, column_names=self.objective_names, weights=self.objective_function_weights)
if validation_strategy == 'reshuffled':
best_pareto_front_idx = list(self.pareto_front.index)
best_pareto_front = list(self.pareto_front.loc[best_pareto_front_idx]['Individual'])
#reshuffle rows
X, y = sklearn.utils.shuffle(X, y, random_state=self.random_state)
if self.scatter:
X_future = _client.scatter(X)
y_future = _client.scatter(y)
else:
X_future = X
y_future = y
val_objective_function_list = [lambda ind,
X,
y,
is_classification=self.classification,
scorers= self._scorers,
cv=self.cv_gen,
other_objective_functions=self.other_objective_functions,
export_graphpipeline=self.export_graphpipeline,
memory=self.memory,
**kwargs: objective_function_generator(
ind,
X,
y,
is_classification=is_classification,
scorers= scorers,
cv=cv,
other_objective_functions=other_objective_functions,
export_graphpipeline=export_graphpipeline,
memory=memory,
**kwargs,
)]
objective_kwargs = {"X": X_future, "y": y_future}
val_scores, start_times, end_times, eval_errors = tpot.utils.eval_utils.parallel_eval_objective_list(best_pareto_front, val_objective_function_list, verbose=self.verbose, max_eval_time_mins=self.max_eval_time_mins, n_expected_columns=len(self.objective_names), client=_client, **objective_kwargs)
val_objective_names = ['validation_'+name for name in self.objective_names]
self.objective_names_for_selection = val_objective_names
self.evaluated_individuals.loc[best_pareto_front_idx,val_objective_names] = val_scores
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_start_times'] = start_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_end_times'] = end_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_eval_errors'] = eval_errors
self.evaluated_individuals["Validation_Pareto_Front"] = tpot.utils.get_pareto_frontier(self.evaluated_individuals, column_names=val_objective_names, weights=self.objective_function_weights)
elif validation_strategy == 'split':
if self.scatter:
X_future = _client.scatter(X)
y_future = _client.scatter(y)
X_val_future = _client.scatter(X_val)
y_val_future = _client.scatter(y_val)
else:
X_future = X
y_future = y
X_val_future = X_val
y_val_future = y_val
objective_kwargs = {"X": X_future, "y": y_future, "X_val" : X_val_future, "y_val":y_val_future }
best_pareto_front_idx = list(self.pareto_front.index)
best_pareto_front = list(self.pareto_front.loc[best_pareto_front_idx]['Individual'])
val_objective_function_list = [lambda ind,
X,
y,
X_val,
y_val,
scorers= self._scorers,
other_objective_functions=self.other_objective_functions,
export_graphpipeline=self.export_graphpipeline,
memory=self.memory,
**kwargs: val_objective_function_generator(
ind,
X,
y,
X_val,
y_val,
scorers= scorers,
other_objective_functions=other_objective_functions,
export_graphpipeline=export_graphpipeline,
memory=memory,
**kwargs,
)]
val_scores, start_times, end_times, eval_errors = tpot.utils.eval_utils.parallel_eval_objective_list(best_pareto_front, val_objective_function_list, verbose=self.verbose, max_eval_time_mins=self.max_eval_time_mins, n_expected_columns=len(self.objective_names), client=_client, **objective_kwargs)
val_objective_names = ['validation_'+name for name in self.objective_names]
self.objective_names_for_selection = val_objective_names
self.evaluated_individuals.loc[best_pareto_front_idx,val_objective_names] = val_scores
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_start_times'] = start_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_end_times'] = end_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_eval_errors'] = eval_errors
self.evaluated_individuals["Validation_Pareto_Front"] = tpot.utils.get_pareto_frontier(self.evaluated_individuals, column_names=val_objective_names, weights=self.objective_function_weights)
else:
self.objective_names_for_selection = self.objective_names
val_scores = self.evaluated_individuals[self.evaluated_individuals[self.objective_names_for_selection].isin(["TIMEOUT","INVALID"]).any(axis=1).ne(True)][self.objective_names_for_selection].astype(float)
weighted_scores = val_scores*self.objective_function_weights
if self.bigger_is_better:
best_indices = list(weighted_scores.sort_values(by=self.objective_names_for_selection, ascending=False).index)
else:
best_indices = list(weighted_scores.sort_values(by=self.objective_names_for_selection, ascending=True).index)
for best_idx in best_indices:
best_individual = self.evaluated_individuals.loc[best_idx]['Individual']
self.selected_best_score = self.evaluated_individuals.loc[best_idx]
#TODO
#best_individual_pipeline = best_individual.export_pipeline(memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv)
if self.export_graphpipeline:
best_individual_pipeline = best_individual.export_flattened_graphpipeline(memory=self.memory)
else:
best_individual_pipeline = best_individual.export_pipeline(memory=self.memory)
if self.preprocessing:
self.fitted_pipeline_ = sklearn.pipeline.make_pipeline(sklearn.base.clone(self._preprocessing_pipeline), best_individual_pipeline )
else:
self.fitted_pipeline_ = best_individual_pipeline
try:
self.fitted_pipeline_.fit(X_original,y_original) #TODO use y_original as well?
break
except Exception as e:
if self.verbose >= 4:
warnings.warn("Final pipeline failed to fit. Rarely, the pipeline might work on the objective function but fail on the full dataset. Generally due to interactions with different features being selected or transformations having different properties. Trying next pipeline")
print(e)
continue
if self.client is None: #no client was passed in
#close cluster and client
# _client.close()
# cluster.close()
try:
_client.shutdown()
cluster.close()
#catch exception
except Exception as e:
print("Error shutting down client and cluster")
Warning(e)
return self
def _estimator_has(attr):
'''Check if we can delegate a method to the underlying estimator.
First, we check the first fitted final estimator if available, otherwise we
check the unfitted final estimator.
'''
return lambda self: (self.fitted_pipeline_ is not None and
hasattr(self.fitted_pipeline_, attr)
)
@available_if(_estimator_has('predict'))
def predict(self, X, **predict_params):
check_is_fitted(self)
#X = check_array(X)
preds = self.fitted_pipeline_.predict(X,**predict_params)
if self.classification and self.label_encoder_:
preds = self.label_encoder_.inverse_transform(preds)
return preds
@available_if(_estimator_has('predict_proba'))
def predict_proba(self, X, **predict_params):
check_is_fitted(self)
#X = check_array(X)
return self.fitted_pipeline_.predict_proba(X,**predict_params)
@available_if(_estimator_has('decision_function'))
def decision_function(self, X, **predict_params):
check_is_fitted(self)
#X = check_array(X)
return self.fitted_pipeline_.decision_function(X,**predict_params)
@available_if(_estimator_has('transform'))
def transform(self, X, **predict_params):
check_is_fitted(self)
#X = check_array(X)
return self.fitted_pipeline_.transform(X,**predict_params)
@property
def classes_(self):
"""The classes labels. Only exist if the last step is a classifier."""
if self.label_encoder_:
return self.label_encoder_.classes_
else:
return self.fitted_pipeline_.classes_
@property
def _estimator_type(self):
return self.fitted_pipeline_._estimator_type
def __sklearn_tags__(self):
if hasattr(self, 'fitted_pipeline_'): #if fitted
try:
tags = copy.deepcopy(self.fitted_pipeline_.__sklearn_tags__())
except:
tags = copy.deepcopy(get_tags(self.fitted_pipeline_))
else: #if not fitted
tags = super().__sklearn_tags__()
if self.random_state is None:
tags.non_deterministic = False
if self.classification:
if tags.classifier_tags is None:
tags.classifier_tags = sklearn.utils.ClassifierTags()
tags.classifier_tags.multi_class = True
tags.classifier_tags.multi_label = True
return tags
def make_evaluated_individuals(self):
#check if _evolver_instance exists
if self.evaluated_individuals is None:
self.evaluated_individuals = self._evolver_instance.population.evaluated_individuals.copy()
objects = list(self.evaluated_individuals.index)
object_to_int = dict(zip(objects, range(len(objects))))
self.evaluated_individuals = self.evaluated_individuals.set_index(self.evaluated_individuals.index.map(object_to_int))
self.evaluated_individuals['Parents'] = self.evaluated_individuals['Parents'].apply(lambda row: convert_parents_tuples_to_integers(row, object_to_int))
self.evaluated_individuals["Instance"] = self.evaluated_individuals["Individual"].apply(lambda ind: apply_make_pipeline(ind, preprocessing_pipeline=self._preprocessing_pipeline, export_graphpipeline=self.export_graphpipeline, memory=self.memory))
return self.evaluated_individuals
@property
def pareto_front(self):
#check if _evolver_instance exists
if self.evaluated_individuals is None:
return None
else:
if "Pareto_Front" not in self.evaluated_individuals:
return self.evaluated_individuals
else:
return self.evaluated_individuals[self.evaluated_individuals["Pareto_Front"]==1]
| TPOTEstimatorSteadyState |
python | apache__airflow | providers/imap/src/airflow/providers/imap/hooks/imap.py | {
"start": 12152,
"end": 14029
} | class ____(LoggingMixin):
"""
This class simplifies working with mails returned by the imaplib client.
:param mail_body: The mail body of a mail received from imaplib client.
"""
def __init__(self, mail_body: str) -> None:
super().__init__()
self.mail = email.message_from_string(mail_body)
def has_attachments(self) -> bool:
"""
Check the mail for a attachments.
:returns: True if it has attachments and False if not.
"""
return self.mail.get_content_maintype() == "multipart"
def get_attachments_by_name(
self, name: str, check_regex: bool, find_first: bool = False
) -> list[tuple[Any, Any]]:
"""
Get all attachments by name for the mail.
:param name: The name of the attachment to look for.
:param check_regex: Checks the name for a regular expression.
:param find_first: If set to True it will only find the first match and then quit.
:returns: a list of tuples each containing name and payload
where the attachments name matches the given name.
"""
attachments = []
for attachment in self._iterate_attachments():
found_attachment = (
attachment.has_matching_name(name) if check_regex else attachment.has_equal_name(name)
)
if found_attachment:
file_name, file_payload = attachment.get_file()
self.log.info("Found attachment: %s", file_name)
attachments.append((file_name, file_payload))
if find_first:
break
return attachments
def _iterate_attachments(self) -> Iterable[MailPart]:
for part in self.mail.walk():
mail_part = MailPart(part)
if mail_part.is_attachment():
yield mail_part
| Mail |
python | google__pytype | pytype/abstract/abstract_test.py | {
"start": 443,
"end": 1236
} | class ____(test_base.UnitTest):
def setUp(self):
super().setUp()
options = config.Options.create(
python_version=self.python_version, color="never"
)
self._ctx = test_utils.make_context(options)
self._program = self._ctx.program
self._node = self._ctx.root_node.ConnectNew("test_node")
def new_var(self, *values):
"""Create a Variable bound to the given values."""
var = self._program.NewVariable()
for value in values:
var.AddBinding(value, source_set=(), where=self._node)
return var
def new_dict(self, **kwargs):
"""Create a Dict from keywords mapping names to Variable objects."""
d = abstract.Dict(self._ctx)
for name, var in kwargs.items():
d.set_str_item(self._node, name, var)
return d
| AbstractTestBase |
python | astropy__astropy | astropy/visualization/wcsaxes/frame.py | {
"start": 9378,
"end": 10754
} | class ____(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = "bt"
_spine_auto_position_order = "bt"
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
super().update_spines()
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(
x,
y,
linewidth=self._linewidth,
color=self._color,
zorder=1000,
transform=self.parent_axes.transData,
)
line.draw(renderer)
| RectangularFrame1D |
python | kamyu104__LeetCode-Solutions | Python/reformat-date.py | {
"start": 29,
"end": 433
} | class ____(object):
def reformatDate(self, date):
"""
:type date: str
:rtype: str
"""
lookup = {"Jan":1, "Feb":2, "Mar":3, "Apr":4,
"May":5, "Jun":6, "Jul":7, "Aug":8,
"Sep":9, "Oct":10, "Nov":11, "Dec":12}
return "{:04d}-{:02d}-{:02d}".format(int(date[-4:]), lookup[date[-8:-5]], int(date[:date.index(' ')-2]))
| Solution |
python | davidhalter__jedi | jedi/api/exceptions.py | {
"start": 40,
"end": 361
} | class ____(_JediError):
"""
This error might happen a subprocess is crashing. The reason for this is
usually broken C code in third party libraries. This is not a very common
thing and it is safe to use Jedi again. However using the same calls might
result in the same error again.
"""
| InternalError |
python | urllib3__urllib3 | src/urllib3/util/timeout.py | {
"start": 554,
"end": 10346
} | class ____:
"""Timeout configuration.
Timeouts can be defined as a default for a pool:
.. code-block:: python
import urllib3
timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
http = urllib3.PoolManager(timeout=timeout)
resp = http.request("GET", "https://example.com/")
print(resp.status)
Or per-request (which overrides the default for the pool):
.. code-block:: python
response = http.request("GET", "https://example.com/", timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``:
.. code-block:: python
no_timeout = Timeout(connect=None, read=None)
response = http.request("GET", "https://example.com/", timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: int, float, or None
:param connect:
The maximum amount of time (in seconds) to wait for a connection
attempt to a server to succeed. Omitting the parameter will default the
connect timeout to the system default, probably `the global default
timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: int, float, or None
:param read:
The maximum amount of time (in seconds) to wait between consecutive
read operations for a response from the server. Omitting the parameter
will default the read timeout to the system default, probably `the
global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: int, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT
def __init__(
self,
total: _TYPE_TIMEOUT = None,
connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
) -> None:
self._connect = self._validate_timeout(connect, "connect")
self._read = self._validate_timeout(read, "read")
self.total = self._validate_timeout(total, "total")
self._start_connect: float | None = None
def __repr__(self) -> str:
return f"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})"
# __str__ provided for backwards compatibility
__str__ = __repr__
@staticmethod
def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:
return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout
@classmethod
def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:
"""Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If it is a numeric value less than or equal to
zero, or the type is not an integer, float, or None.
"""
if value is None or value is _DEFAULT_TIMEOUT:
return value
if isinstance(value, bool):
raise ValueError(
"Timeout cannot be a boolean value. It must "
"be an int, float or None."
)
try:
float(value)
except (TypeError, ValueError):
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
) from None
try:
if value <= 0:
raise ValueError(
"Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than or equal to 0." % (name, value)
)
except TypeError:
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
) from None
return value
@classmethod
def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:
"""Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self) -> Timeout:
"""Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read, total=self.total)
def start_connect(self) -> float:
"""Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = time.monotonic()
return self._start_connect
def get_connect_duration(self) -> float:
"""Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time in seconds.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError(
"Can't get connect duration for timer that has not started."
)
return time.monotonic() - self._start_connect
@property
def connect_timeout(self) -> _TYPE_TIMEOUT:
"""Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is _DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total) # type: ignore[type-var]
@property
def read_timeout(self) -> float | None:
"""Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (
self.total is not None
and self.total is not _DEFAULT_TIMEOUT
and self._read is not None
and self._read is not _DEFAULT_TIMEOUT
):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(), self._read))
elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self.resolve_default_timeout(self._read)
| Timeout |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/inprocess.py | {
"start": 447,
"end": 1733
} | class ____(SuperQObject, InProcessChannel):
# Emitted when the channel is started.
started = QtCore.Signal()
# Emitted when the channel is stopped.
stopped = QtCore.Signal()
# Emitted when any message is received.
message_received = QtCore.Signal(object)
def start(self):
""" Reimplemented to emit signal.
"""
super().start()
self.started.emit()
def stop(self):
""" Reimplemented to emit signal.
"""
super().stop()
self.stopped.emit()
def call_handlers_later(self, *args, **kwds):
""" Call the message handlers later.
"""
do_later = lambda: self.call_handlers(*args, **kwds)
QtCore.QTimer.singleShot(0, do_later)
def call_handlers(self, msg):
self.message_received.emit(msg)
def process_events(self):
""" Process any pending GUI events.
"""
QtCore.QCoreApplication.instance().processEvents()
def flush(self, timeout=1.0):
""" Reimplemented to ensure that signals are dispatched immediately.
"""
super().flush()
self.process_events()
def closed(self):
""" Function to ensure compatibility with the QtZMQSocketChannel."""
return False
| QtInProcessChannel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec49.py | {
"start": 454,
"end": 2264
} | class ____:
dispatcher: Dispatcher
def method1(self, stub: TaskDeclaration[P]) -> Any:
def inner0(*args: P.args, **kwargs: P.kwargs) -> None:
self.dispatcher.dispatch(stub, 1, *args, **kwargs)
def inner1(*args: P.args, **kwargs: P.kwargs) -> None:
# This should generate an error because a positional argument
# cannot appear after an unpacked keyword argument.
self.dispatcher.dispatch(stub, 1, **kwargs, *args)
def inner2(*args: P.args, **kwargs: P.kwargs) -> None:
# This should generate an error because it's missing
# a positional argument for 'count'.
self.dispatcher.dispatch(stub, *args, **kwargs)
def inner3(*args: P.args, **kwargs: P.kwargs) -> None:
# This should generate an error because it has an
# additional positional argument.
self.dispatcher.dispatch(stub, 1, 1, *args, **kwargs)
def inner4(*args: P.args, **kwargs: P.kwargs) -> None:
# This should generate an error because it is missing
# the *args argument.
self.dispatcher.dispatch(stub, 1, **kwargs)
def inner5(*args: P.args, **kwargs: P.kwargs) -> None:
# This should generate an error because it is missing
# the *kwargs argument.
self.dispatcher.dispatch(stub, 1, *args)
def inner6(*args: P.args, **kwargs: P.kwargs) -> None:
# This should generate an error because it has an
# extra *args argument.
self.dispatcher.dispatch(stub, 1, *args, *args, **kwargs)
# This should generate an error because it has an
# extra **kwargs argument.
self.dispatcher.dispatch(stub, 1, *args, **kwargs, **kwargs)
| Queue |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP050.py | {
"start": 565,
"end": 619
} | class ____(metaclass=type):
...
import builtins
| Foo |
python | getsentry__sentry | src/sentry/auth_v2/types.py | {
"start": 64,
"end": 1053
} | class ____(SessionBase):
"""
These are the keys available when calling request.session.keys()
https://docs.djangoproject.com/en/5.2/topics/http/sessions/#django.contrib.sessions.backends.base.SessionBase
"""
# Flags to control the authentication flow on frontend.
# Keep the keys sorted in order of importance!!
# Maintaining the hierarchy is good context for future engineers.
todo_email_verification: bool | None
todo_2fa_verification: bool | None
todo_password_reset: bool | None
todo_2fa_setup: bool | None
# Django's internal session data
_auth_user_id: str | None # Django's internal user ID storage
_auth_user_backend: str | None # Authentication backend used
_auth_user_hash: str | None # Hash of user's authentication data
# Sentry-specific session data
session_orgs: list[str] | None # List of org IDs
# Any other custom session data
# [key: str]: Union[str, int, bool, Dict, list, None]
| SessionData |
python | tox-dev__tox | src/tox/tox_env/package.py | {
"start": 525,
"end": 561
} | class ____:
"""package."""
| Package |
python | realpython__materials | pyqt-calculator-tutorial/examples/dialog.py | {
"start": 181,
"end": 994
} | class ____(QDialog):
def __init__(self):
super().__init__(parent=None)
self.setWindowTitle("QDialog")
dialogLayout = QVBoxLayout()
formLayout = QFormLayout()
formLayout.addRow("Name:", QLineEdit())
formLayout.addRow("Age:", QLineEdit())
formLayout.addRow("Job:", QLineEdit())
formLayout.addRow("Hobbies:", QLineEdit())
dialogLayout.addLayout(formLayout)
buttons = QDialogButtonBox()
buttons.setStandardButtons(
QDialogButtonBox.StandardButton.Cancel
| QDialogButtonBox.StandardButton.Ok
)
dialogLayout.addWidget(buttons)
self.setLayout(dialogLayout)
if __name__ == "__main__":
app = QApplication([])
window = Window()
window.show()
sys.exit(app.exec())
| Window |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modeling_lfm2_moe.py | {
"start": 22979,
"end": 27082
} | class ____(nn.Module):
def __init__(
self,
config: Lfm2MoeConfig,
layer_idx: int,
):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.L_cache = config.conv_L_cache
self.bias = config.conv_bias
self.conv = nn.Conv1d(
in_channels=config.hidden_size,
out_channels=config.hidden_size,
kernel_size=self.L_cache,
groups=config.hidden_size,
bias=self.bias,
padding=self.L_cache - 1,
)
self.in_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=self.bias)
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=self.bias)
def cuda_kernels_forward(
self,
x: torch.Tensor,
past_key_values: Optional[Lfm2MoeHybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
x = apply_mask_to_padding_states(x, attention_mask)
BCx = self.in_proj(x).transpose(-1, -2)
B, C, x = BCx.chunk(3, dim=-2)
Bx = B * x
conv_weights = self.conv.weight.view(self.conv.weight.size(0), self.conv.weight.size(2))
if past_key_values is not None and cache_position[0] > 0:
conv_out = causal_conv1d_update(
Bx.squeeze(-1),
past_key_values.conv_cache[self.layer_idx],
conv_weights,
self.conv.bias,
None,
)
conv_out = conv_out.unsqueeze(-1)
else:
if past_key_values is not None:
conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0))
past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
conv_out = causal_conv1d_fn(Bx, conv_weights, self.conv.bias, activation=None)
y = C * conv_out
y = self.out_proj(y.transpose(-1, -2).contiguous())
return y
def slow_forward(
self,
x: torch.Tensor,
past_key_values: Optional[Lfm2MoeHybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
seqlen = x.shape[1]
x = apply_mask_to_padding_states(x, attention_mask)
BCx = self.in_proj(x).transpose(-1, -2)
B, C, x = BCx.chunk(3, dim=-2)
Bx = B * x
if past_key_values is not None and cache_position[0] > 0:
conv_state = past_key_values.conv_cache[self.layer_idx]
cache_position = cache_position.clamp(0, self.L_cache - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
conv_state[:, :, cache_position] = Bx.to(device=conv_state.device, dtype=conv_state.dtype)
past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
conv_out = torch.sum(conv_state.to(Bx.device) * self.conv.weight[:, 0, :], dim=-1)
if self.bias:
conv_out += self.conv.bias
conv_out = conv_out.unsqueeze(-1)
else:
if past_key_values is not None:
conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0))
past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
conv_out = self.conv(Bx)[..., :seqlen]
y = C * conv_out
y = y.transpose(-1, -2).contiguous()
y = self.out_proj(y)
return y
def forward(
self,
hidden_states: torch.Tensor,
past_key_values: Optional[Lfm2MoeHybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
if is_fast_path_available and "cuda" in hidden_states.device.type and not is_torchdynamo_compiling():
return self.cuda_kernels_forward(hidden_states, past_key_values, cache_position, attention_mask)
return self.slow_forward(hidden_states, past_key_values, cache_position, attention_mask)
| Lfm2MoeShortConv |
python | Textualize__textual | src/textual/worker.py | {
"start": 2310,
"end": 2541
} | class ____:
"""Shim to insert a word into the Worker's repr."""
def __init__(self, text: str) -> None:
self.text = text
def __repr__(self) -> str:
return self.text
@rich.repr.auto(angular=True)
| _ReprText |
python | django__django | tests/model_forms/models.py | {
"start": 8108,
"end": 8400
} | class ____(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = ("suffix1", "suffix2")
abstract = True
| BookXtra |
python | huggingface__transformers | tests/models/univnet/test_modeling_univnet.py | {
"start": 7453,
"end": 13177
} | class ____(unittest.TestCase):
def tearDown(self):
super().tearDown()
cleanup(torch_device, gc_collect=True)
def _load_datasamples(self, num_samples, sampling_rate=24000):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
ds = ds.cast_column("audio", Audio(sampling_rate=sampling_rate))
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples]
def get_inputs(self, device, num_samples: int = 3, noise_length: int = 10, seed: int = 0):
generator = torch.manual_seed(seed)
# Note: hardcode model_in_channels -> 64
if num_samples == 1:
noise_sequence_shape = (64, noise_length)
else:
noise_sequence_shape = (num_samples, 64, noise_length)
# Explicitly generate noise_sequence on CPU for consistency.
noise_sequence = torch.randn(noise_sequence_shape, generator=generator, dtype=torch.float32, device="cpu")
# Put noise_sequence on the desired device.
noise_sequence = noise_sequence.to(device)
# Note: hardcode num_mel_channels -> 100
if num_samples == 1:
spectrogram_shape = [100, noise_length]
else:
spectrogram_shape = [num_samples, 100, noise_length]
spectrogram = floats_tensor(spectrogram_shape, scale=1.0, rng=random.Random(seed))
# Note: spectrogram should already be on torch_device
# Permute to match diffusers implementation
if num_samples == 1:
noise_sequence = noise_sequence.transpose(1, 0)
spectrogram = spectrogram.transpose(1, 0)
else:
noise_sequence = noise_sequence.transpose(2, 1)
spectrogram = spectrogram.transpose(2, 1)
inputs = {
"input_features": spectrogram,
"noise_sequence": noise_sequence,
"generator": generator,
}
return inputs
def test_model_inference_batched(self):
# Load sample checkpoint from Tortoise TTS
model = UnivNetModel.from_pretrained("dg845/univnet-dev")
model.eval().to(torch_device)
# Get batched noise and spectrogram inputs.
input_speech = self.get_inputs(torch_device, num_samples=3)
with torch.no_grad():
waveform = model(**input_speech)[0]
waveform = waveform.cpu()
waveform_mean = torch.mean(waveform)
waveform_stddev = torch.std(waveform)
waveform_slice = waveform[-1, -9:].flatten()
EXPECTED_MEAN = torch.tensor(-0.19989729)
EXPECTED_STDDEV = torch.tensor(0.35230172)
EXPECTED_SLICE = torch.tensor([-0.3408, -0.6045, -0.5052, 0.1160, -0.1556, -0.0405, -0.3024, -0.5290, -0.5019])
torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, rtol=5e-4, atol=5e-4)
def test_model_inference_unbatched(self):
# Load sample checkpoint from Tortoise TTS
model = UnivNetModel.from_pretrained("dg845/univnet-dev")
model.eval().to(torch_device)
# Get unbatched noise and spectrogram inputs.
input_speech = self.get_inputs(torch_device, num_samples=1)
with torch.no_grad():
waveform = model(**input_speech)[0]
waveform = waveform.cpu()
waveform_mean = torch.mean(waveform)
waveform_stddev = torch.std(waveform)
waveform_slice = waveform[-1, -9:].flatten()
EXPECTED_MEAN = torch.tensor(-0.22895093)
EXPECTED_STDDEV = torch.tensor(0.33986747)
EXPECTED_SLICE = torch.tensor([-0.3276, -0.5504, -0.3484, 0.3574, -0.0373, -0.1826, -0.4880, -0.6431, -0.5162])
torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, rtol=1e-3, atol=1e-3)
def test_integration(self):
feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev")
model = UnivNetModel.from_pretrained("dg845/univnet-dev")
model.eval().to(torch_device)
audio, sr = self._load_datasamples(1, sampling_rate=feature_extractor.sampling_rate)
input_features = feature_extractor(audio, sampling_rate=sr[0], return_tensors="pt").input_features
input_features = input_features.to(device=torch_device)
input_speech = self.get_inputs(torch_device, num_samples=1, noise_length=input_features.shape[1])
input_speech["input_features"] = input_features
with torch.no_grad():
waveform = model(**input_speech)[0]
waveform = waveform.cpu()
waveform_mean = torch.mean(waveform)
waveform_stddev = torch.std(waveform)
waveform_slice = waveform[-1, -9:].flatten()
EXPECTED_MEAN = torch.tensor(0.00051374)
EXPECTED_STDDEV = torch.tensor(0.058105603)
# fmt: off
EXPECTED_SLICE = torch.tensor([-4.3934e-04, -1.8203e-04, -3.3033e-04, -3.8716e-04, -1.6125e-04, 3.5389e-06, -3.3149e-04, -3.7613e-04, -2.3331e-04])
# fmt: on
torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, rtol=5e-6, atol=5e-6)
torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, rtol=5e-6, atol=5e-6)
| UnivNetModelIntegrationTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/asyncio/exc.py | {
"start": 405,
"end": 522
} | class ____(exc.InvalidRequestError):
"""a startable context manager has not been started."""
| AsyncContextNotStarted |
python | vyperlang__vyper | vyper/venom/passes/memmerging.py | {
"start": 2912,
"end": 17213
} | class ____(IRPass):
dfg: DFGAnalysis
_copies: list[_Copy]
# %1 = mload 5 => {%1: 5}
# this represents the available loads, which have not been invalidated.
_loads: dict[IRVariable, int]
def run_pass(self):
self.dfg = self.analyses_cache.request_analysis(DFGAnalysis)
self.updater = InstUpdater(self.dfg)
for bb in self.function.get_basic_blocks():
self._merge_mstore_dload(bb)
self._handle_bb_memzero(bb)
self._handle_bb(bb, "calldataload", "calldatacopy", allow_dst_overlaps_src=True)
self._handle_bb(bb, "dload", "dloadbytes", allow_dst_overlaps_src=True)
if version_check(begin="cancun"):
# mcopy is available
self._handle_bb(bb, "mload", "mcopy")
self.analyses_cache.invalidate_analysis(LivenessAnalysis)
def _flush_copies(
self, bb: IRBasicBlock, copies: list[_Copy], copy_opcode: str, load_opcode: str
):
for copy in copies:
copy.insts.sort(key=bb.instructions.index)
pin_inst = None
inst = copy.insts[-1]
if copy.length != 32 or load_opcode == "dload":
ops: list[IROperand] = [
IRLiteral(copy.length),
IRLiteral(copy.src),
IRLiteral(copy.dst),
]
self.updater.update(inst, copy_opcode, ops)
elif inst.opcode == "mstore":
# we already have a load which is the val for this mstore;
# leave it in place.
var, _ = inst.operands
assert isinstance(var, IRVariable) # help mypy
pin_inst = self.dfg.get_producing_instruction(var)
assert pin_inst is not None # help mypy
else:
# we are converting an mcopy into an mload+mstore (mload+mstore
# is 1 byte smaller than mcopy).
val = self.updater.add_before(inst, load_opcode, [IRLiteral(copy.src)])
assert val is not None # help mypy
self.updater.update(inst, "mstore", [val, IRLiteral(copy.dst)])
to_nop: list[IRInstruction] = []
for inst in copy.insts[:-1]:
if inst.opcode == load_opcode:
if inst is pin_inst:
continue
# if the load is used by any instructions besides the ones
# we are removing, we can't delete it. (in the future this
# may be handled by "remove unused effects" pass).
uses = self.dfg.get_uses(inst.output)
if not all(use in copy.insts for use in uses):
continue
to_nop.append(inst)
self.updater.nop_multi(to_nop)
# need copy, since `copies` might be the same object as `self._copies`
for c in copies.copy():
self._copies.remove(c)
def _invalidate_loads(self, interval: _Interval):
for var, ptr in self._loads.copy().items():
if _Interval(ptr, 32).overlaps(interval):
del self._loads[var]
def _write_after_write_hazards(self, new_copy: _Copy) -> list[_Copy]:
"""
check if there is an ordering hazard between new_copy
and anything in self._copies. if new_copy and any existing
copy write to the same destination, we need to preserve
both writes (unless they can be fused into a single copy).
"""
res = []
for copy in self._copies:
if copy.can_merge(new_copy) or new_copy.can_merge(copy):
# safe
continue
# note, these are the same:
# - new_copy.overwrites(copy.dst_interval())
# - copy.overwrites(new_copy.dst_interval())
if new_copy.overwrites(copy.dst_interval()):
res.append(copy)
return res
def _read_after_write_hazards(self, new_copy: _Copy) -> list[_Copy]:
"""
check if any copies in self._copies overwrite the read interval
of new_copy
"""
return self._copies_that_overwrite(new_copy.src_interval())
def _write_after_read_hazards(self, new_copy: _Copy) -> list[_Copy]:
"""
check if new_copy overwrites the read interval of anything in
self._copies
"""
res = []
for copy in self._copies:
if new_copy.overwrites(copy.src_interval()):
res.append(copy)
return res
def _find_insertion_point(self, new_copy: _Copy):
return bisect_left(self._copies, new_copy.dst, key=lambda c: c.dst)
def _add_copy(self, new_copy: _Copy):
index = self._find_insertion_point(new_copy)
self._copies.insert(index, new_copy)
i = max(index - 1, 0)
while i < min(index + 1, len(self._copies) - 1):
if self._copies[i].can_merge(self._copies[i + 1]):
self._copies[i].merge(self._copies[i + 1])
del self._copies[i + 1]
else:
i += 1
def _copies_that_overwrite(self, read_interval: _Interval) -> list[_Copy]:
# check if any of self._copies tramples the interval
return [c for c in self._copies if c.overwrites(read_interval)]
def _handle_bb(
self,
bb: IRBasicBlock,
load_opcode: str,
copy_opcode: str,
allow_dst_overlaps_src: bool = False,
):
self._loads = {}
self._copies = []
def _hard_barrier():
# hard barrier. flush everything
_barrier_for(self._copies)
assert len(self._copies) == 0
self._loads.clear()
def _barrier_for(copies: list[_Copy]):
self._flush_copies(bb, copies, copy_opcode, load_opcode)
# copy in necessary because there is a possibility
# of insertion in optimizations
for inst in bb.instructions.copy():
if inst.opcode == load_opcode:
src_op = inst.operands[0]
if not isinstance(src_op, IRLiteral):
_hard_barrier()
continue
read_interval = _Interval(src_op.value, 32)
# flush any existing copies that trample read_interval
if not allow_dst_overlaps_src:
copies = self._copies_that_overwrite(read_interval)
if len(copies) > 0:
_barrier_for(copies)
self._loads[inst.output] = src_op.value
elif inst.opcode == "mstore":
var, dst = inst.operands
if not isinstance(var, IRVariable) or not isinstance(dst, IRLiteral):
_hard_barrier()
continue
# unknown memory (not writing the result of an available load)
if var not in self._loads:
_hard_barrier()
continue
src_ptr = self._loads[var]
if not allow_dst_overlaps_src:
self._invalidate_loads(_Interval(dst.value, 32))
load_inst = self.dfg.get_producing_instruction(var)
assert load_inst is not None # help mypy
n_copy = _Copy(dst.value, src_ptr, 32, [inst, load_inst])
write_hazards = self._write_after_write_hazards(n_copy)
if len(write_hazards) > 0:
_barrier_for(write_hazards)
# for mem2mem, we need to check if n_copy overwrites any
# existing copies, or if any existing copies overwrite n_copy.
if not allow_dst_overlaps_src:
read_hazards = self._read_after_write_hazards(n_copy)
# we are performing a store, so it's impossible to have a
# read hazard. (if a read hazard happened, it was already
# handled when we handled the load instruction).
assert len(read_hazards) == 0, "read hazard should never happened here"
read_hazards = self._write_after_read_hazards(n_copy)
if len(read_hazards) > 0:
_barrier_for(read_hazards)
self._add_copy(n_copy)
elif inst.opcode == copy_opcode:
if not all(isinstance(op, IRLiteral) for op in inst.operands):
_hard_barrier()
continue
length, src, dst = inst.operands
n_copy = _Copy(dst.value, src.value, length.value, [inst])
if not allow_dst_overlaps_src:
self._invalidate_loads(_Interval(dst.value, length.value))
write_hazards = self._write_after_write_hazards(n_copy)
if len(write_hazards) > 0:
_barrier_for(write_hazards)
# for mem2mem, we need to check if n_copy overwrites any
# existing copies, or if any existing copies overwrite n_copy.
if not allow_dst_overlaps_src:
read_hazards = self._read_after_write_hazards(n_copy)
if len(read_hazards) > 0:
_barrier_for(read_hazards)
read_hazards = self._write_after_read_hazards(n_copy)
if len(read_hazards) > 0:
_barrier_for(read_hazards)
self._add_copy(n_copy)
elif _volatile_memory(inst):
_hard_barrier()
_hard_barrier()
# optimize memzeroing operations
def _optimize_memzero(self, bb: IRBasicBlock):
for copy in self._copies:
inst = copy.insts[-1]
if copy.length == 32:
new_ops: list[IROperand] = [IRLiteral(0), IRLiteral(copy.dst)]
self.updater.update(inst, "mstore", new_ops)
else:
calldatasize = self.updater.add_before(inst, "calldatasize", [])
assert calldatasize is not None # help mypy
new_ops = [IRLiteral(copy.length), calldatasize, IRLiteral(copy.dst)]
self.updater.update(inst, "calldatacopy", new_ops)
for inst in copy.insts[:-1]:
self.updater.nop(inst)
self._copies.clear()
self._loads.clear()
def _handle_bb_memzero(self, bb: IRBasicBlock):
self._loads = {}
self._copies = []
def _barrier():
self._optimize_memzero(bb)
# copy in necessary because there is a possibility
# of insertion in optimizations
for inst in bb.instructions.copy():
if inst.opcode == "mstore":
val = inst.operands[0]
dst = inst.operands[1]
is_zero_literal = isinstance(val, IRLiteral) and val.value == 0
if not (isinstance(dst, IRLiteral) and is_zero_literal):
_barrier()
continue
n_copy = _Copy.memzero(dst.value, 32, [inst])
assert len(self._write_after_write_hazards(n_copy)) == 0
self._add_copy(n_copy)
elif inst.opcode == "calldatacopy":
length, var, dst = inst.operands
if not isinstance(var, IRVariable):
_barrier()
continue
if not isinstance(dst, IRLiteral) or not isinstance(length, IRLiteral):
_barrier()
continue
src_inst = self.dfg.get_producing_instruction(var)
assert src_inst is not None, f"bad variable {var}"
if src_inst.opcode != "calldatasize":
_barrier()
continue
n_copy = _Copy.memzero(dst.value, length.value, [inst])
assert len(self._write_after_write_hazards(n_copy)) == 0
self._add_copy(n_copy)
elif _volatile_memory(inst):
_barrier()
continue
_barrier()
# This pass is necessary for trivial cases of dload/mstore merging
# where the src and dst pointers are variables, which are not handled
# in the other merging passes.
def _merge_mstore_dload(self, bb: IRBasicBlock):
for inst in bb.instructions:
if inst.opcode != "dload":
continue
dload = inst
src = dload.operands[0]
dload_out = dload.output
uses = self.dfg.get_uses(dload_out)
if len(uses) == 1:
mstore: IRInstruction = uses.first()
if mstore.opcode != "mstore":
continue
_, dst = mstore.operands
# merge simple
self.updater.update(mstore, "dloadbytes", [IRLiteral(32), src, dst])
self.updater.nop(dload)
continue
# we can only merge when the mstore is the first instruction
# that uses dload. If we would not restrain ourself to basic
# block we would have to check if the mstore dominates all of
# the other uses
uses_bb = dload.parent.get_uses().get(dload_out, OrderedSet())
if len(uses_bb) == 0:
continue
# relies on order of bb.get_uses!
# if this invariant would be broken
# it must be handled differently
mstore = uses_bb.first()
if mstore.opcode != "mstore":
continue
var, dst = mstore.operands
if var != dload_out:
continue
new_var = bb.parent.get_next_variable()
self.updater.add_before(mstore, "dloadbytes", [IRLiteral(32), src, dst])
self.updater.update(mstore, "mload", [dst], new_output=new_var)
mload = mstore # clarity
self.updater.move_uses(dload_out, mload)
self.updater.nop(dload)
def _volatile_memory(inst):
inst_effects = inst.get_read_effects() | inst.get_write_effects()
return Effects.MEMORY in inst_effects or Effects.MSIZE in inst_effects
| MemMergePass |
python | google__jax | docs/autodidax2_part1.py | {
"start": 16594,
"end": 17331
} | class ____:
parameters : list[Var] # The function's formal parameters (arguments)
equations : list[Equation] # The body of the function, a list of instructions/equations
return_val : Atom # The function's return value
def __str__(self):
lines = []
lines.append(', '.join(b for b in self.parameters) + ' ->')
for eqn in self.equations:
args_str = ', '.join(str(arg) for arg in eqn.args)
lines.append(f' {eqn.var} = {eqn.op}({args_str})')
lines.append(self.return_val)
return '\n'.join(lines)
# -
# To build the IR from a Python function we define a `StagingInterpreter` that
# takes each operation and adds it to a growing list of all the operations we've
# seen so far:
# +
| Jaxpr |
python | allegroai__clearml | examples/distributed/pytorch_distributed_example.py | {
"start": 1209,
"end": 1527
} | class ____(object):
""" Dataset partitioning helper """
def __init__(self, data, index):
self.data = data
self.index = index
def __len__(self):
return len(self.index)
def __getitem__(self, index):
data_idx = self.index[index]
return self.data[data_idx]
| Partition |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_looker.py | {
"start": 1583,
"end": 5336
} | class ____:
def setup_method(self):
with mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection") as conn:
conn.return_value.extra_dejson = CONN_EXTRA
self.hook = LookerHook(looker_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.looker.requests_transport")
def test_get_looker_sdk(self, _):
"""
Test that get_looker_sdk is setting up the sdk properly
Note: `requests_transport` is mocked so we don't have to test
looker_sdk's functionality, just LookerHook's usage of it
"""
self.hook.get_connection = mock.MagicMock()
sdk = self.hook.get_looker_sdk()
# Attempting to use the instantiated SDK should not throw an error
with does_not_raise():
sdk.get(path="/", structure=None)
sdk.delete(path="/", structure=None)
# The post/patch/put methods call a method internally to serialize
# the body if LookerHook sets the wrong serialize function on init
# there will be TypeErrors thrown
# The body we pass here must be a list, dict, or model.Model
sdk.post(path="/", structure=None, body=[])
sdk.patch(path="/", structure=None, body=[])
sdk.put(path="/", structure=None, body=[])
@mock.patch(HOOK_PATH.format("pdt_build_status"))
def test_wait_for_job(self, mock_pdt_build_status):
# replace pdt_build_status invocation with mock status
mock_pdt_build_status.side_effect = [
{"status": JobStatus.RUNNING.value},
{"status": JobStatus.ERROR.value, "message": "test"},
]
# call hook in mock context (w/ no wait b/w job checks)
with pytest.raises(AirflowException):
self.hook.wait_for_job(
materialization_id=JOB_ID,
wait_time=0,
)
# assert pdt_build_status called twice: first RUNNING, then ERROR
calls = [
mock.call(materialization_id=JOB_ID),
mock.call(materialization_id=JOB_ID),
]
mock_pdt_build_status.assert_has_calls(calls)
@mock.patch(HOOK_PATH.format("get_looker_sdk"))
def test_check_pdt_build(self, mock_sdk):
# call hook in mock context
self.hook.check_pdt_build(materialization_id=JOB_ID)
# assert sdk constructor called once
mock_sdk.assert_called_once_with()
# assert sdk.check_pdt_build called once
mock_sdk.return_value.check_pdt_build.assert_called_once_with(materialization_id=JOB_ID)
@mock.patch(HOOK_PATH.format("get_looker_sdk"))
def test_start_pdt_build(self, mock_sdk):
# replace looker version invocation with mock response
mock_sdk.return_value.versions.return_value.looker_release_version = "22.2.0"
# call hook in mock context
self.hook.start_pdt_build(
model=MODEL,
view=VIEW,
)
# assert sdk constructor called once
mock_sdk.assert_called_once_with()
# assert sdk.start_pdt_build called once
mock_sdk.return_value.start_pdt_build.assert_called_once_with(
model_name=MODEL,
view_name=VIEW,
source=SOURCE,
)
@mock.patch(HOOK_PATH.format("get_looker_sdk"))
def test_stop_pdt_build(self, mock_sdk):
# call hook in mock context
self.hook.stop_pdt_build(materialization_id=JOB_ID)
# assert sdk constructor called once
mock_sdk.assert_called_once_with()
# assert sdk.stop_pdt_build called once
mock_sdk.return_value.stop_pdt_build.assert_called_once_with(
materialization_id=JOB_ID,
source=SOURCE,
)
| TestLookerHook |
python | Textualize__textual | tests/test_command.py | {
"start": 250,
"end": 791
} | class ____(ModalScreen[bool]):
"""Screen with a dialog to quit."""
def compose(self) -> ComposeResult:
yield Grid(
Label("Are you sure you want to quit?", id="question"),
Button("Quit", variant="error", id="quit"),
Button("Cancel", variant="primary", id="cancel"),
id="dialog",
)
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button.id == "quit":
self.dismiss(True)
else:
self.dismiss(False)
| QuitScreen |
python | tensorflow__tensorflow | tensorflow/python/framework/experimental/thread_local_stack.py | {
"start": 767,
"end": 1140
} | class ____(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(ThreadLocalStack, self).__init__()
self._stack = []
def peek(self):
return self._stack[-1] if self._stack else None
def push(self, ctx):
return self._stack.append(ctx)
def pop(self):
self._stack.pop()
| ThreadLocalStack |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 13148,
"end": 15590
} | class ____(BaseModel):
base: Base
"""
)
m = module.What(base=module.Base(literal=1))
assert m.base.literal == 1
def test_nested_forward_ref():
class NestedTuple(BaseModel):
x: tuple[int, Optional['NestedTuple']]
obj = NestedTuple.model_validate({'x': ('1', {'x': ('2', {'x': ('3', None)})})})
assert obj.model_dump() == {'x': (1, {'x': (2, {'x': (3, None)})})}
def test_discriminated_union_forward_ref(create_module):
@create_module
def module():
from typing import Literal, Union
from pydantic import BaseModel, Field
class Pet(BaseModel):
pet: Union['Cat', 'Dog'] = Field(discriminator='type')
class Cat(BaseModel):
type: Literal['cat']
class Dog(BaseModel):
type: Literal['dog']
assert module.Pet.__pydantic_complete__ is False
with pytest.raises(
ValidationError,
match="Input tag 'pika' found using 'type' does not match any of the expected tags: 'cat', 'dog'",
):
module.Pet.model_validate({'pet': {'type': 'pika'}})
# Ensure the rebuild has happened automatically despite validation failure
assert module.Pet.__pydantic_complete__ is True
# insert_assert(module.Pet.model_json_schema())
assert module.Pet.model_json_schema() == {
'title': 'Pet',
'required': ['pet'],
'type': 'object',
'properties': {
'pet': {
'title': 'Pet',
'discriminator': {'mapping': {'cat': '#/$defs/Cat', 'dog': '#/$defs/Dog'}, 'propertyName': 'type'},
'oneOf': [{'$ref': '#/$defs/Cat'}, {'$ref': '#/$defs/Dog'}],
}
},
'$defs': {
'Cat': {
'title': 'Cat',
'type': 'object',
'properties': {'type': {'const': 'cat', 'title': 'Type', 'type': 'string'}},
'required': ['type'],
},
'Dog': {
'title': 'Dog',
'type': 'object',
'properties': {'type': {'const': 'dog', 'title': 'Type', 'type': 'string'}},
'required': ['type'],
},
},
}
def test_class_var_as_string(create_module):
module = create_module(
# language=Python
"""
from __future__ import annotations
from typing import Annotated, ClassVar, ClassVar as CV
from pydantic import BaseModel
| What |
python | python-openxml__python-docx | tests/oxml/text/test_hyperlink.py | {
"start": 225,
"end": 1467
} | class ____:
"""Unit-test suite for the CT_Hyperlink (<w:hyperlink>) element."""
def it_has_a_relationship_that_contains_the_hyperlink_address(self):
cxml = 'w:hyperlink{r:id=rId6}/w:r/w:t"post"'
hyperlink = cast(CT_Hyperlink, element(cxml))
rId = hyperlink.rId
assert rId == "rId6"
@pytest.mark.parametrize(
("cxml", "expected_value"),
[
# -- default (when omitted) is True, somewhat surprisingly --
("w:hyperlink{r:id=rId6}", True),
("w:hyperlink{r:id=rId6,w:history=0}", False),
("w:hyperlink{r:id=rId6,w:history=1}", True),
],
)
def it_knows_whether_it_has_been_clicked_on_aka_visited(self, cxml: str, expected_value: bool):
hyperlink = cast(CT_Hyperlink, element(cxml))
assert hyperlink.history is expected_value
def it_has_zero_or_more_runs_containing_the_hyperlink_text(self):
cxml = 'w:hyperlink{r:id=rId6,w:history=1}/(w:r/w:t"blog",w:r/w:t" post")'
hyperlink = cast(CT_Hyperlink, element(cxml))
rs = hyperlink.r_lst
assert [type(r) for r in rs] == [CT_R, CT_R]
assert rs[0].text == "blog"
assert rs[1].text == " post"
| DescribeCT_Hyperlink |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/core/test_tracker.py | {
"start": 6144,
"end": 6889
} | class ____:
"""Test task result management functionality."""
@pytest.mark.parametrize(
"result_value",
[
Mock(spec=State),
None,
],
)
def test_task_result_set_and_get(self, sample_node_id, result_value):
"""Test that set_task_result stores and get_task_result retrieves the result."""
tracker = NodeTaskTracker()
# Set result
tracker.set_task_result(sample_node_id, result_value)
# Get result
retrieved_result = tracker.get_task_result(sample_node_id)
# Verify result matches
assert retrieved_result == result_value
assert tracker._task_results[sample_node_id] == result_value
| TestNodeTaskTrackerTaskResults |
python | pytorch__pytorch | torch/_inductor/autoheuristic/artifacts/_PadMMA100.py | {
"start": 427,
"end": 4931
} | class ____(LearnedHeuristicRegression):
def __init__(self) -> None:
pass
def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
return (
metadata.name == self.get_name()
and metadata.shared_memory == 166912
and str(metadata.device_capa) == "(8, 0)"
)
def get_feedback(self, context: AHContext, choice: Choice) -> float:
context.context_dict[CHOICE_COL] = choice
return self.predict(context)
def get_confidence_threshold(self) -> float:
return 1.7025303314066
def get_name(self) -> str:
return 'pad_mm'
def predict(self, context: AHContext) -> float:
if str(context.get_value('choice')) != 'pad':
if str(context.get_value('using_tf32')) != 'False':
if context.get_value('m*n') <= 4171264.0:
if context.get_value('m*k') <= 3999308.0:
return 1.8751469764071178
else:
if str(context.get_value('n_multiple_32')) != 'True':
return 0.9117231355626345
else:
return 1.1607689608873861
else:
if str(context.get_value('n_multiple_2')) != 'True':
if str(context.get_value('using_tf32')) != 'True':
return 0.7430382200435992
else:
return 0.8531269794448678
else:
if str(context.get_value('k_multiple_2')) != 'True':
return 0.7577181972719917
else:
return 0.8977349440424219
else:
if context.get_value('m*n') <= 1299712.0:
return 1.1669723418995592
else:
if context.get_value('mat2_stride_1') <= 45217.5:
if context.get_value('m*n') <= 55884158.0:
return 1.0262769936909601
else:
return 1.0022677428470845
else:
if context.get_value('m') <= 18478.0:
return 1.1127066261894312
else:
return 1.0337740659894263
else:
if str(context.get_value('mat1_dtype')) != 'torch.float32':
if str(context.get_value('n_multiple_2')) != 'False':
if str(context.get_value('k_multiple_2')) != 'True':
if context.get_value('mat1_stride_0') <= 561.0:
return 1.2900382135142956
else:
return 1.5761737616057887
else:
if context.get_value('num_dims_needs_padding') <= 1.5:
return 1.0472263310239422
else:
return 1.1727673465762514
else:
if context.get_value('k') <= 28238.5:
if context.get_value('k/(m*n)') <= 0.00026227018679492176:
return 1.6770542505397175
else:
return 1.3974785435105923
else:
if str(context.get_value('mat1_dtype')) != 'torch.bfloat16':
return 1.3952699800111992
else:
return 1.5759286511628336
else:
if str(context.get_value('using_tf32')) != 'False':
if context.get_value('m*n') <= 14119424.0:
return 0.8875772670422478
else:
if str(context.get_value('mat2_innermost_needs_padding')) != 'True':
return 1.1467728924377265
else:
return 1.215842963532998
else:
if context.get_value('arith_intensity') <= 396.8774871826172:
return 0.89940161869551
else:
if context.get_value('mat2_stride_1') <= 45217.5:
return 0.9964328169353532
else:
return 0.9493479238294826
| PadMMA100 |
python | pytorch__pytorch | torch/fx/experimental/proxy_tensor.py | {
"start": 64115,
"end": 65309
} | class ____(fx.proxy.GraphAppendingTracer):
script_object_tracker: MutableMapping[_AnyScriptObjectType, Proxy]
symnode_tracker: MutableMapping[PySymType, _PySymProxyType]
tensor_tracker: MutableMapping[Tensor, _ProxyTensor]
sympy_expr_tracker: dict[sympy.Symbol, _SympyExprTrackerValue]
torch_fn_metadata: Optional[OpOverload]
torch_fn_counts: dict[OpOverload, int]
enable_thunkify: bool = False
def __init__(self, graph: fx.graph.Graph) -> None:
super().__init__(graph)
self.symnode_tracker = weakref.WeakKeyDictionary()
self.tensor_tracker = WeakTensorKeyDictionary()
self.sympy_expr_tracker = {}
self.script_object_tracker = WeakIdKeyDictionary(
dict=None, ref_type=_WeakHashRef
)
# Stores the torch function that was called during tracing
self.torch_fn_metadata = None
# Stores the counts for every torch function called. This is to help
# distinguish between different calls to the same torch function.
self.torch_fn_counts = {}
# TODO: I'm not sure what the point of this class is; you can just
# make_fx through a regular Interpreter
| _GraphAppendingTracerEx |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_code_mappings.py | {
"start": 1537,
"end": 5248
} | class ____(CamelSnakeModelSerializer):
repository_id = serializers.IntegerField(required=True)
project_id = serializers.IntegerField(required=True)
stack_root = gen_path_regex_field()
source_root = gen_path_regex_field()
default_branch = serializers.RegexField(
r"^(^(?![\/]))([\w\.\/-]+)(?<![\/])$",
required=False, # Validated in validate_default_branch based on integration type
allow_blank=True, # Perforce allows empty streams
error_messages={"invalid": _(BRANCH_NAME_ERROR_MESSAGE)},
)
instance: RepositoryProjectPathConfig | None
class Meta:
model = RepositoryProjectPathConfig
fields = [
"repository_id",
"project_id",
"stack_root",
"source_root",
"default_branch",
]
extra_kwargs: dict[str, Any] = {}
@property
def org_integration(self):
return self.context["organization_integration"]
@property
def organization(self):
return self.context["organization"]
def validate(self, attrs):
query = RepositoryProjectPathConfig.objects.filter(
project_id=attrs.get("project_id"), stack_root=attrs.get("stack_root")
)
if self.instance:
query = query.exclude(id=self.instance.id)
if query.exists():
raise serializers.ValidationError(
"Code path config already exists with this project and stack trace root"
)
return attrs
def validate_repository_id(self, repository_id):
# validate repo exists on this org
repo_query = Repository.objects.filter(
id=repository_id, organization_id=self.organization.id
)
# validate that repo exists on integration
repo_query = repo_query.filter(
integration_id=self.org_integration.integration_id,
)
if not repo_query.exists():
raise serializers.ValidationError("Repository does not exist")
return repository_id
def validate_project_id(self, project_id):
# validate project exists on this org
project_query = Project.objects.filter(id=project_id, organization_id=self.organization.id)
if not project_query.exists():
raise serializers.ValidationError("Project does not exist")
return project_id
def validate_default_branch(self, default_branch):
# Get the integration to check if it's Perforce
integration = integration_service.get_integration(
integration_id=self.org_integration.integration_id
)
# For Perforce, allow empty branch (streams are part of depot path)
# For other integrations, branch is required
if (
not default_branch
and integration
and integration.provider != IntegrationProviderSlug.PERFORCE
):
raise serializers.ValidationError("This field is required.")
return default_branch
def create(self, validated_data):
return RepositoryProjectPathConfig.objects.create(
organization_integration_id=self.org_integration.id,
organization_id=self.context["organization"].id,
integration_id=self.context["organization_integration"].integration_id,
**validated_data,
)
def update(self, instance, validated_data):
if "id" in validated_data:
validated_data.pop("id")
if self.instance:
for key, value in validated_data.items():
setattr(self.instance, key, value)
self.instance.save()
return self.instance
| RepositoryProjectPathConfigSerializer |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/functional.py | {
"start": 2119,
"end": 56661
} | class ____(training_lib.Model):
"""A `Functional` model is a `Model` defined as a directed graph of layers.
Three types of `Model` exist: subclassed `Model`, `Functional` model,
and `Sequential` (a special case of `Functional`).
In general, more Keras features are supported with `Functional`
than with subclassed `Model`s, specifically:
- Model cloning (`keras.models.clone`)
- Serialization (`model.get_config()/from_config`, `model.to_json()`
- Whole-model saving (`model.save()`)
A `Functional` model can be instantiated by passing two arguments to
`__init__`. The first argument is the `keras.Input` Tensors that represent
the inputs to the model. The second argument specifies the output
tensors that represent the outputs of this model. Both arguments can be a
nested structure of tensors.
Example:
```
inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}
t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
outputs = keras.layers.Add()([t, inputs['x2'])
model = keras.Model(inputs, outputs)
```
A `Functional` model constructed using the Functional API can also include raw
TensorFlow functions, with the exception of functions that create Variables
or assign ops.
Example:
```
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(1)(inputs)
outputs = tf.nn.relu(x)
model = keras.Model(inputs, outputs)
```
Args:
inputs: List of input tensors (must be created via `tf.keras.Input()`).
outputs: List of output tensors.
name: String, optional. Name of the model.
trainable: Boolean, optional. If the model's variables should be trainable.
"""
# See tf.Module for the usage of this property.
# The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to
# flatten the key since it is trying to convert Trackable/Layer to a string.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_layer_call_argspecs', '_compiled_trainable_state',
'_output_mask_cache', '_output_tensor_cache', '_output_shape_cache'),
training_lib.Model._TF_MODULE_IGNORED_PROPERTIES
))
@trackable.no_automatic_dependency_tracking
def __init__(self, inputs, outputs, name=None, trainable=True,
**kwargs):
# This is used by the Model class, since we have some logic to swap the
# class in the __new__ method, which will lead to __init__ get invoked
# twice. Using the skip_init to skip one of the invocation of __init__ to
# avoid any side effects
skip_init = kwargs.pop('skip_init', False)
if skip_init:
return
generic_utils.validate_kwargs(kwargs, {})
super(Functional, self).__init__(name=name, trainable=trainable)
self._init_graph_network(inputs, outputs)
@trackable.no_automatic_dependency_tracking
def _init_graph_network(self, inputs, outputs):
# This method is needed for Sequential to reinitialize graph network when
# layer is added or removed.
self._is_graph_network = True
# Normalize and set self.inputs, self.outputs.
if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:
inputs = inputs[0]
if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:
outputs = outputs[0]
self._nested_inputs = inputs
self._nested_outputs = outputs
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
# Models constructed with a single Tensor or list of Tensors can
# be called with a dict, where the keys of the dict are the names
# of the `Input` objects. Extra keys are ignored with warning.
if not nest.is_nested(self._nested_inputs):
self._enable_dict_to_input_mapping = True
elif (isinstance(self._nested_inputs, (list, tuple)) and
not any(nest.is_nested(t) for t in self._nested_inputs)):
self._enable_dict_to_input_mapping = True
elif (isinstance(self._nested_inputs, dict) and
not any(nest.is_nested(t) for t in self._nested_inputs.values())):
self._enable_dict_to_input_mapping = True
else:
self._enable_dict_to_input_mapping = False
if not ops.executing_eagerly_outside_functions():
if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
base_layer_utils.create_keras_history(self._nested_outputs)
self._validate_graph_inputs_and_outputs()
# A Network does not create weights of its own, thus it is already
# built.
self.built = True
self._build_input_shape = nest.map_structure(lambda x: x.shape, inputs)
self._compute_output_and_mask_jointly = True
# `_expects_training_arg` is True since the `training` argument is always
# present in the signature of the `call` method of a graph network.
self._expects_training_arg = True
self._expects_mask_arg = True
# A graph network does not autocast inputs, as its layers will cast them
# instead.
self._autocast = False
self._input_layers = []
self._output_layers = []
self._input_coordinates = []
self._output_coordinates = []
# This is for performance optimization when calling the Network on new
# inputs. Every time the Network is called on a set on input tensors,
# we compute the output tensors, output masks and output shapes in one pass,
# then cache them here. When any of these outputs is queried later, we
# retrieve it from there instead of recomputing it.
self._output_mask_cache = {}
self._output_tensor_cache = {}
self._output_shape_cache = {}
# Build self._output_layers:
for x in self.outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
self._output_layers.append(layer)
self._output_coordinates.append((layer, node_index, tensor_index))
# Build self._input_layers:
for x in self.inputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
# It's supposed to be an input layer, so only one node
# and one tensor output.
assert node_index == 0
assert tensor_index == 0
self._input_layers.append(layer)
self._input_coordinates.append((layer, node_index, tensor_index))
# Keep track of the network's nodes and layers.
nodes, nodes_by_depth, layers, _ = _map_graph_network(
self.inputs, self.outputs)
self._network_nodes = nodes
self._nodes_by_depth = nodes_by_depth
self._self_tracked_trackables = layers
self._layer_call_argspecs = {}
for layer in self._self_tracked_trackables:
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
# Build self.input_names and self.output_names.
self._set_output_names()
self.input_names = []
self._feed_input_names = []
self._feed_inputs = []
self._feed_input_shapes = []
for layer in self._input_layers:
self.input_names.append(layer.name)
if layer.is_placeholder:
self._feed_input_names.append(layer.name)
# Use batch_input_shape here because non-eager composite tensors may not
# have a shape attribute that's meaningful (sparse, for instance, has
# a tensor that's non-constant and needs to be fed). This means that
# input layers that create placeholders will need to have the
# batch_input_shape attr to allow for input shape validation.
self._feed_input_shapes.append(layer._batch_input_shape)
self._feed_inputs.append(layer.input)
self._compute_tensor_usage_count()
self._set_save_spec(self._nested_inputs)
tf_utils.assert_no_legacy_layers(self.layers)
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
return self._nested_inputs
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
return nest.map_structure(backend.int_shape, self.input)
@property
def input_spec(self):
if hasattr(self, '_manual_input_spec'):
return self._manual_input_spec
if (isinstance(self._nested_inputs, (dict, list, tuple)) and
len(self._nested_inputs) != len(self.inputs)):
# Case where we have a nested structure.
# In such a case we can't safely run any checks.
return None
if isinstance(self._nested_inputs, dict):
# Case where `_nested_inputs` is a plain dict of Inputs.
names = sorted(self._nested_inputs.keys())
return [input_spec.InputSpec(
shape=shape_with_no_batch_size(self._nested_inputs[name]),
allow_last_axis_squeeze=True, name=name) for name in names]
else:
# Single input, or list / tuple of inputs.
# The data may be passed as a dict keyed by input name.
return [input_spec.InputSpec(
shape=shape_with_no_batch_size(x), allow_last_axis_squeeze=True,
name=x._keras_history.layer.name) for x in self.inputs]
@input_spec.setter
def input_spec(self, value):
self._manual_input_spec = value
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
return self._nested_outputs
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
return nest.map_structure(backend.int_shape, self.output)
def _set_output_names(self):
"""Assigns unique names to the Network's outputs.
Output layers with multiple output tensors would otherwise lead to duplicate
names in self.output_names.
"""
uniquified = []
output_names = set()
prefix_count = {}
for layer in self._output_layers:
proposal = layer.name
while proposal in output_names:
existing_count = prefix_count.get(layer.name, 1)
proposal = '{}_{}'.format(layer.name, existing_count)
prefix_count[layer.name] = existing_count + 1
output_names.add(proposal)
uniquified.append(proposal)
self.output_names = uniquified
@property
def _layer_checkpoint_dependencies(self):
"""Dictionary of layer dependencies to be included in the checkpoint."""
weight_layer_index = 0
dependencies = collections.OrderedDict()
for layer_index, layer in enumerate(self.layers):
try:
if layer.weights:
# Keep a separate index for layers which have weights. This allows
# users to insert Layers without weights anywhere in the network
# without breaking checkpoints.
dependencies['layer_with_weights-%d' % weight_layer_index] = layer
weight_layer_index += 1
except ValueError:
# The layer might have weights, but may not be built yet. We just treat
# it as layer without weight.
pass
# Even if it doesn't have weights, we should still track everything in
# case it has/will have Trackable dependencies.
dependencies['layer-%d' % layer_index] = layer
return dependencies
def _trackable_children(self,
save_type=trackable.SaveType.CHECKPOINT,
**kwargs):
dependencies = self._layer_checkpoint_dependencies
dependencies.update(
super(Functional, self)._trackable_children(save_type, **kwargs))
return dependencies
def _lookup_dependency(self, name):
layer_dependencies = self._layer_checkpoint_dependencies
if name in layer_dependencies:
return layer_dependencies[name]
return super(Functional, self)._lookup_dependency(name)
def _handle_deferred_layer_dependencies(self, layers):
"""Handles layer checkpoint dependencies that are added after init."""
layer_checkpoint_dependencies = self._layer_checkpoint_dependencies
layer_to_name = {v: k for k, v in layer_checkpoint_dependencies.items()}
for layer in layers:
if layer in layer_to_name:
self._handle_deferred_dependencies(name=layer_to_name[layer],
trackable=layer)
@property
def _should_compute_mask(self):
return True
def compute_mask(self, inputs, mask):
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
output_tensors = self._run_internal_graph(inputs, mask=mask)
return nest.map_structure(lambda t: getattr(t, '_keras_mask', None),
output_tensors)
@doc_controls.do_not_doc_inheritable
def call(self, inputs, training=None, mask=None):
"""Calls the model on new inputs.
In this case `call` just reapplies
all ops in the graph to the new inputs
(e.g. build a new computational graph from the provided inputs).
Args:
inputs: A tensor or list of tensors.
training: Boolean or boolean scalar tensor, indicating whether to run
the `Network` in training mode or inference mode.
mask: A mask or list of masks. A mask can be
either a tensor or None (no mask).
Returns:
A tensor if there is a single output, or
a list of tensors if there are more than one outputs.
"""
return self._run_internal_graph(
inputs, training=training, mask=mask)
def compute_output_shape(self, input_shape):
# Convert any shapes in tuple format to TensorShapes.
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)):
raise ValueError('Invalid input_shape argument ' + str(input_shape) +
': model has ' + str(len(self._input_layers)) +
' tensor inputs.')
# Use the tuple of TensorShape as the cache key, since tuple is hashable
# and can be used as hash key.
try:
cache_key = tuple(tf_utils.convert_shapes(input_shape, to_tuples=True))
if cache_key in self._output_shape_cache:
# Cache hit. Return shapes as TensorShapes.
return self._output_shape_cache[cache_key]
except ValueError:
# In case there are unknown TensorShape, eg for sparse tensor input,
# We skip the caching since the shape is unknown.
pass
layers_to_output_shapes = {}
for layer, shape in zip(self._input_layers, nest.flatten(input_shape)):
# It's an input layer: then `compute_output_shape` is identity,
# and there is only one node and one tensor..
shape_key = layer.name + '_0_0'
layers_to_output_shapes[shape_key] = shape
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Iterate over nodes, by depth level.
if len(depth_keys) > 1:
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
layer = node.layer
if layer in self._input_layers:
# We've already covered the input layers
# a few lines above.
continue
# Get the input shapes for the first argument of the node
layer_input_shapes = []
layer_inputs = node.call_args[0]
for layer_input in nest.flatten(layer_inputs):
kh = layer_input._keras_history
input_layer_key = kh.layer.name + '_%s_%s' % (kh.node_index,
kh.tensor_index)
layer_input_shapes.append(layers_to_output_shapes[input_layer_key])
layer_input_shapes = nest.pack_sequence_as(layer_inputs,
layer_input_shapes)
# Layers expect shapes to be tuples for `compute_output_shape`.
layer_input_shapes = tf_utils.convert_shapes(
layer_input_shapes, to_tuples=True)
layer_output_shapes = layer.compute_output_shape(layer_input_shapes)
# Convert back to TensorShapes.
layer_output_shapes = tf_utils.convert_shapes(
layer_output_shapes, to_tuples=False)
node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access
for j, shape in enumerate(nest.flatten(layer_output_shapes)):
shape_key = layer.name + '_%s_%s' % (node_index, j)
layers_to_output_shapes[shape_key] = shape
# Read final output shapes from layers_to_output_shapes.
output_shapes = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
output_shapes.append(layers_to_output_shapes[shape_key])
output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes)
# Store in cache.
self._output_shape_cache[cache_key] = output_shapes
# Return shapes as TensorShapes.
return output_shapes
def _init_set_name(self, name, zero_based=True):
if not name:
cls_name = self.__class__.__name__
if self.__class__ == Functional:
# Hide the functional class name from user, since its not a public
# visible class. Use "Model" instead,
cls_name = 'Model'
self._name = backend.unique_object_name(
generic_utils.to_snake_case(cls_name),
zero_based=zero_based)
else:
self._name = name
def _run_internal_graph(self, inputs, training=None, mask=None):
"""Computes output tensors for new inputs.
# Note:
- Can be run on non-Keras tensors.
Args:
inputs: Tensor or nested structure of Tensors.
training: Boolean learning phase.
mask: (Optional) Tensor or nested structure of Tensors.
Returns:
output_tensors
"""
inputs = self._flatten_to_reference_inputs(inputs)
if mask is None:
masks = [None] * len(inputs)
else:
masks = self._flatten_to_reference_inputs(mask)
for input_t, mask in zip(inputs, masks):
input_t._keras_mask = mask
# Dictionary mapping reference tensors to computed tensors.
tensor_dict = {}
tensor_usage_count = self._tensor_usage_count
for x, y in zip(self.inputs, inputs):
y = self._conform_to_reference_input(y, ref_input=x)
x_id = str(id(x))
tensor_dict[x_id] = [y] * tensor_usage_count[x_id]
nodes_by_depth = self._nodes_by_depth
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = nodes_by_depth[depth]
for node in nodes:
if node.is_input:
continue # Input tensors already exist.
if any(t_id not in tensor_dict for t_id in node.flat_input_ids):
continue # Node is not computable, try skipping.
args, kwargs = node.map_arguments(tensor_dict)
outputs = node.layer(*args, **kwargs)
# Update tensor_dict.
for x_id, y in zip(node.flat_output_ids, nest.flatten(outputs)):
tensor_dict[x_id] = [y] * tensor_usage_count[x_id]
output_tensors = []
for x in self.outputs:
x_id = str(id(x))
assert x_id in tensor_dict, 'Could not compute output ' + str(x)
output_tensors.append(tensor_dict[x_id].pop())
return nest.pack_sequence_as(self._nested_outputs, output_tensors)
def _flatten_to_reference_inputs(self, tensors):
"""Maps `tensors` to their respective `keras.Input`."""
if self._enable_dict_to_input_mapping and isinstance(tensors, dict):
ref_inputs = self._nested_inputs
if not nest.is_nested(ref_inputs):
ref_inputs = [self._nested_inputs]
if isinstance(ref_inputs, dict):
# In the case that the graph is constructed with dict input tensors,
# We will use the original dict key to map with the keys in the input
# data. Note that the model.inputs is using nest.flatten to process the
# input tensors, which means the dict input tensors are ordered by their
# keys.
ref_input_names = sorted(ref_inputs.keys())
else:
ref_input_names = [inp._keras_history.layer.name for inp in ref_inputs]
# Raise an warning if there are more input data comparing to input tensor
if len(tensors) > len(ref_input_names):
warnings.warn(
'Input dict contained keys {} which did not match any model input. '
'They will be ignored by the model.'.format(
[n for n in tensors.keys() if n not in ref_input_names])
)
try:
# Flatten in the order `Input`s were passed during Model construction.
return [tensors[n] for n in ref_input_names]
except KeyError:
# TODO(b/151582614)
return nest.flatten(tensors)
# Otherwise both self.inputs and tensors will already be in same order.
return nest.flatten(tensors)
def _conform_to_reference_input(self, tensor, ref_input):
"""Set shape and dtype based on `keras.Input`s."""
if isinstance(tensor, tensor_lib.Tensor):
# Allow (None,) and (None, 1) Tensors to be passed interchangeably. Use
# the shape specified by the `keras.Input`.
t_shape = tensor.shape
t_rank = t_shape.rank
ref_shape = ref_input.shape
ref_rank = ref_shape.rank
keras_history = getattr(tensor, '_keras_history', None)
if t_rank is not None and ref_rank is not None:
# Should squeeze last dimension.
# True if tensor is (BATCH, ..., 1) and reference is (BATCH, ...).
if (t_rank == ref_rank + 1 and t_shape[-1] == 1):
tensor = array_ops.squeeze_v2(tensor, axis=-1)
# Should expand last_dimension.
# True if tensor is (BATCH, ...) and reference is (BATCH, ..., 1).
elif (t_rank == ref_rank - 1 and ref_shape[-1] == 1):
tensor = array_ops.expand_dims_v2(tensor, axis=-1)
if keras_history is not None: # Restore keras history.
tensor._keras_history = keras_history
# Add shape hints to Tensors that may have None shape dims but have shapes
# defined by the `keras.Input` (not applicable in eager mode).
if not context.executing_eagerly():
try:
tensor.set_shape(tensor.shape.merge_with(ref_input.shape))
except ValueError:
logging.warning(
'Model was constructed with shape {} for input {}, but it was '
'called on an input with incompatible shape {}.'.format(
ref_input.shape, ref_input, tensor.shape))
# Dtype casting.
tensor = math_ops.cast(tensor, dtype=ref_input.dtype)
elif tf_utils.is_extension_type(tensor):
# Dtype casting (If the extension type has a non-variant dtype and
# supports being cast)
ref_input_dtype = getattr(ref_input, 'dtype', None)
if ref_input_dtype is not None and ref_input_dtype != dtypes.variant:
tensor = math_ops.cast(tensor, dtype=ref_input_dtype)
return tensor
def get_config(self):
return copy.deepcopy(get_network_config(self))
@classmethod
def from_config(cls, config, custom_objects=None):
"""Instantiates a Model from its config (output of `get_config()`).
Args:
config: Model config dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A model instance.
Raises:
ValueError: In case of improperly formatted config dict.
"""
with generic_utils.SharedObjectLoadingScope():
input_tensors, output_tensors, created_layers = reconstruct_from_config(
config, custom_objects)
model = cls(inputs=input_tensors, outputs=output_tensors,
name=config.get('name'))
connect_ancillary_layers(model, created_layers)
return model
def _validate_graph_inputs_and_outputs(self):
"""Validates the inputs and outputs of a Graph Network."""
# Check for redundancy in inputs.
if len({id(i) for i in self.inputs}) != len(self.inputs):
raise ValueError('The list of inputs passed to the model '
'is redundant. '
'All inputs should only appear once.'
' Found: ' + str(self.inputs))
for x in self.inputs:
# Check that x has appropriate `_keras_history` metadata.
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Input tensors to a ' + cls_name + ' ' +
'must come from `tf.keras.Input`. '
'Received: ' + str(x) +
' (missing previous layer metadata).')
# Check that x is an input tensor.
# pylint: disable=protected-access
layer = x._keras_history.layer
if len(layer._inbound_nodes) > 1 or (
layer._inbound_nodes and not layer._inbound_nodes[0].is_input):
cls_name = self.__class__.__name__
logging.warning(cls_name + ' model inputs must come from '
'`tf.keras.Input` (thus holding past layer metadata), '
'they cannot be the output of '
'a previous non-Input layer. '
'Here, a tensor specified as '
'input to "' + self.name + '" was not an Input tensor, '
'it was generated by layer ' + layer.name + '.\n'
'Note that input tensors are '
'instantiated via `tensor = tf.keras.Input(shape)`.\n'
'The tensor that caused the issue was: ' + str(x.name))
# Check compatibility of batch sizes of Input Layers.
input_batch_sizes = [
training_utils.get_static_batch_size(x._keras_history.layer)
for x in self.inputs
]
consistent_batch_size = None
for batch_size in input_batch_sizes:
if batch_size is not None:
if (consistent_batch_size is not None and
batch_size != consistent_batch_size):
raise ValueError('The specified batch sizes of the Input Layers'
' are incompatible. Found batch sizes: {}'.format(
input_batch_sizes))
consistent_batch_size = batch_size
for x in self.outputs:
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Output tensors of a ' + cls_name + ' model must be '
'the output of a TensorFlow `Layer` '
'(thus holding past layer metadata). Found: ' + str(x))
  def _insert_layers(self, layers, relevant_nodes=None):
    """Inserts Layers into the Network after Network creation.

    This is only valid for Keras Graph Networks. Layers added via this
    function will be included in the `call` computation and `get_config` of
    this Network. They will not be added to the Network's outputs.

    Args:
      layers: Arbitrary nested structure of Layers. Layers must be reachable
        from one or more of the `keras.Input` Tensors that correspond to this
        Network's inputs.
      relevant_nodes: Nodes from the Layers that should be considered part of
        this Network. If `None`, all Nodes will be considered part of this
        Network.

    Raises:
      ValueError: If the layers depend on `Input`s not found in this Model.
    """
    layers = nest.flatten(layers)
    tf_utils.assert_no_legacy_layers(layers)
    # Map every already-known node to its depth so new nodes can be slotted
    # into the existing topology relative to their inbound nodes.
    node_to_depth = {}
    for depth, nodes in self._nodes_by_depth.items():
      node_to_depth.update({node: depth for node in nodes})
    # The nodes of these Layers that are relevant to this Network. If not
    # provided, assume all Nodes are relevant
    if not relevant_nodes:
      relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])
    network_nodes = set(relevant_nodes + list(node_to_depth.keys()))

    def _get_min_depth(node):
      """Gets the minimum depth at which node can be computed."""
      min_depth = 0
      for layer, node_id, _, _ in node.iterate_inbound():
        inbound_node = layer._inbound_nodes[node_id]
        if inbound_node in node_to_depth:
          min_depth = min(min_depth, node_to_depth[inbound_node])
        elif inbound_node not in network_nodes:
          # The inbound node is outside this Network entirely; ignore it.
          continue
        else:
          # Previous relevant nodes haven't been processed yet.
          return None
      # New node is one shallower than its shallowest input.
      return min_depth - 1

    # Insert nodes into `_nodes_by_depth` and other node attrs.
    unprocessed_nodes = copy.copy(relevant_nodes)
    i = 0
    while unprocessed_nodes:
      i += 1
      # Do a sanity check. This can occur if `Input`s from outside this Model
      # are being relied on.
      if i > 10000:
        raise ValueError('Layers could not be added due to missing '
                         'dependencies.')

      node = unprocessed_nodes.pop(0)
      depth = _get_min_depth(node)
      if depth is None:  # Defer until inbound nodes are processed.
        unprocessed_nodes.append(node)
        continue
      node_key = _make_node_key(node.layer.name,
                                node.layer._inbound_nodes.index(node))
      # Only record nodes this Network has not seen before.
      if node_key not in self._network_nodes:
        node_to_depth[node] = depth
        self._network_nodes.add(node_key)
        self._nodes_by_depth[depth].append(node)

    # Insert layers and update other layer attrs.
    layer_set = set(self._self_tracked_trackables)
    deferred_layers = []
    for layer in layers:
      if layer not in layer_set:
        self._self_tracked_trackables.append(layer)
        deferred_layers.append(layer)
        self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
        layer_set.add(layer)
    self._handle_deferred_layer_dependencies(deferred_layers)

    self._compute_tensor_usage_count()
def _compute_tensor_usage_count(self):
"""Compute the #. of tensor usages for all the output tensors of layers.
The computed tensor usage count is saved as `self._tensor_usage_count`. This
is later used for saving memory in eager computation by releasing
no-longer-needed tensors as early as possible.
"""
tensor_usage_count = collections.Counter()
available_tensors = set(str(id(tensor)) for tensor in self.inputs)
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
depth_keys = depth_keys[1:]
for depth in depth_keys:
for node in self._nodes_by_depth[depth]:
input_tensors = {
str(id(tensor)) for tensor in nest.flatten(node.keras_inputs)
}
if input_tensors.issubset(available_tensors):
for tensor in nest.flatten(node.keras_inputs):
tensor_usage_count[str(id(tensor))] += 1
for output_tensor in nest.flatten(node.outputs):
available_tensors.add(str(id(output_tensor)))
for tensor in self.outputs:
tensor_usage_count[str(id(tensor))] += 1
self._tensor_usage_count = tensor_usage_count
def _assert_weights_created(self):
# Override the implementation in Model.
# The Functional model should always have weight created already.
return
def _graph_network_add_loss(self, symbolic_loss):
new_nodes, new_layers = _map_subgraph_network(self.inputs, [symbolic_loss])
# Losses must be keyed on inputs no matter what in order to be supported in
# DistributionStrategy.
add_loss_layer = base_layer.AddLoss(
unconditional=False, dtype=symbolic_loss.dtype)
add_loss_layer(symbolic_loss)
new_nodes.extend(add_loss_layer.inbound_nodes)
new_layers.append(add_loss_layer)
self._insert_layers(new_layers, new_nodes)
def _graph_network_add_metric(self, value, aggregation, name):
new_nodes, new_layers = _map_subgraph_network(self.inputs, [value])
add_metric_layer = base_layer.AddMetric(
aggregation, name, dtype=value.dtype)
add_metric_layer(value)
new_nodes.extend(add_metric_layer.inbound_nodes)
new_layers.append(add_metric_layer)
self._insert_layers(new_layers, new_nodes)
@property
def _trackable_saved_model_saver(self):
return network_serialization.NetworkSavedModelSaver(self)
def _get_save_spec(self, dynamic_batch=True):
if getattr(self, '_has_explicit_input_shape', True):
# Functional models and Sequential models that have an explicit input
# shape should use the batch size set by the input layer.
dynamic_batch = False
return super(Functional, self)._get_save_spec(dynamic_batch)
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
def _map_graph_network(inputs, outputs):
  """Validates a network's topology and gather its layers and nodes.

  Args:
    inputs: List of input tensors.
    outputs: List of outputs tensors.

  Returns:
    A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.
    - nodes: list of Node instances.
    - nodes_by_depth: dict mapping ints (depth) to lists of node instances.
    - layers: list of Layer instances.
    - layers_by_depth: dict mapping ints (depth) to lists of layer instances.

  Raises:
    ValueError: In case the network is not valid (e.g. disconnected graph),
      or if a layer name is used more than once.
  """
  # "depth" is number of layers between output Node and the Node.
  # Nodes are ordered from inputs -> outputs.
  nodes_in_decreasing_depth, layer_indices = _build_map(outputs)
  network_nodes = {
      _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node))
      for node in nodes_in_decreasing_depth
  }

  nodes_depths = {}  # dict {node: depth value}
  layers_depths = {}  # dict {layer: depth value}

  # Walk nodes from outputs towards inputs, propagating depths backwards.
  for node in reversed(nodes_in_decreasing_depth):
    # If the depth is not set, the node has no outbound nodes (depth 0).
    depth = nodes_depths.setdefault(node, 0)

    # Update the depth of the corresponding layer
    previous_depth = layers_depths.get(node.layer, 0)
    # If we've seen this layer before at a higher depth,
    # we should use that depth instead of the node depth.
    # This is necessary for shared layers that have inputs at different
    # depth levels in the graph.
    depth = max(depth, previous_depth)
    layers_depths[node.layer] = depth
    nodes_depths[node] = depth

    # Update the depth of inbound nodes.
    # The "depth" of a node is the max of the depths
    # of all nodes it is connected to + 1.
    for node_dep in node.parent_nodes:
      previous_depth = nodes_depths.get(node_dep, 0)
      nodes_depths[node_dep] = max(depth + 1, previous_depth)

  # Handle inputs that are not connected to outputs.
  # We do not error out here because the inputs may be used to compute losses
  # and metrics.
  for input_t in inputs:
    input_layer = input_t._keras_history[0]
    if input_layer not in layers_depths:
      layers_depths[input_layer] = 0
      # -1 sorts disconnected inputs before every traversed layer below.
      layer_indices[input_layer] = -1
      nodes_depths[input_layer._inbound_nodes[0]] = 0
      network_nodes.add(_make_node_key(input_layer.name, 0))

  # Build a dict {depth: list of nodes with this depth}
  nodes_by_depth = collections.defaultdict(list)
  for node, depth in nodes_depths.items():
    nodes_by_depth[depth].append(node)

  # Build a dict {depth: list of layers with this depth}
  layers_by_depth = collections.defaultdict(list)
  for layer, depth in layers_depths.items():
    layers_by_depth[depth].append(layer)

  # Get sorted list of layer depths.
  depth_keys = list(layers_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Set self.layers ordered by depth.
  layers = []
  for depth in depth_keys:
    layers_for_depth = layers_by_depth[depth]
    # Network.layers needs to have a deterministic order:
    # here we order them by traversal order.
    layers_for_depth.sort(key=lambda x: layer_indices[x])
    layers.extend(layers_for_depth)

  # Get sorted list of node depths.
  depth_keys = list(nodes_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Check that all tensors required are computable.
  # computable_tensors: all tensors in the graph
  # that can be computed from the inputs provided.
  computable_tensors = set()
  for x in inputs:
    computable_tensors.add(id(x))

  layers_with_complete_input = []  # To provide a better error msg.
  for depth in depth_keys:
    for node in nodes_by_depth[depth]:
      layer = node.layer
      if layer and not node.is_input:
        for x in nest.flatten(node.keras_inputs):
          if id(x) not in computable_tensors:
            raise ValueError('Graph disconnected: '
                             'cannot obtain value for tensor ' + str(x) +
                             ' at layer "' + layer.name + '". '
                             'The following previous layers '
                             'were accessed without issue: ' +
                             str(layers_with_complete_input))
        for x in nest.flatten(node.outputs):
          computable_tensors.add(id(x))
        layers_with_complete_input.append(layer.name)

  # Ensure name unicity, which will be crucial for serialization
  # (since serialized nodes refer to layers by their name).
  all_names = [layer.name for layer in layers]
  for name in all_names:
    if all_names.count(name) != 1:
      raise ValueError('The name "' + name + '" is used ' +
                       str(all_names.count(name)) + ' times in the model. '
                       'All layer names should be unique.')
  return network_nodes, nodes_by_depth, layers, layers_by_depth
def _build_map(outputs):
  """Topologically sorts nodes in order from inputs to outputs.

  Uses a depth-first search to topologically sort the nodes that appear in
  the `_keras_history` connectivity metadata of `outputs`.

  Args:
    outputs: the output tensors whose _keras_history metadata should be
      walked. This may be an arbitrary nested structure.

  Returns:
    A tuple `(ordered_nodes, layer_to_first_traversal_index)`:
    ordered_nodes: nodes from the keras history, topologically sorted from
      original inputs to `outputs`. (If outputs have different sets of
      ancestors, the inputs to one output may appear after a different
      output.)
    layer_to_first_traversal_index: dict mapping each layer to the DFS index
      at which it was seen. If a layer is shared by several nodes, only the
      index of the *first* sighting is stored.
  """
  done = set()
  on_stack = set()
  ordered_nodes = []  # nodes from inputs -> outputs.
  first_seen = {}  # layer -> position in traversal order.
  for out_tensor in nest.flatten(outputs):
    _build_map_helper(out_tensor, done, on_stack, ordered_nodes, first_seen)
  return ordered_nodes, first_seen
def _build_map_helper(tensor, finished_nodes, nodes_in_progress,
                      nodes_in_decreasing_depth, layer_indices):
  """Recursive DFS step for `_build_map`."""
  layer, node_index, _ = tensor._keras_history  # pylint: disable=protected-access
  node = layer._inbound_nodes[node_index]  # pylint: disable=protected-access

  # Shared subgraphs are only walked once.
  if node in finished_nodes:
    return
  # A node already on the DFS stack means the graph contains a cycle.
  if node in nodes_in_progress:
    raise ValueError('The tensor ' + str(tensor) + ' at layer "' + layer.name +
                     '" is part of a cycle.')

  # Record the traversal index the first time each layer is encountered.
  layer_indices.setdefault(layer, len(layer_indices))

  nodes_in_progress.add(node)
  # Recurse into every tensor feeding this node (input nodes have none).
  if not node.is_input:
    for parent_tensor in node.keras_inputs:
      _build_map_helper(parent_tensor, finished_nodes, nodes_in_progress,
                        nodes_in_decreasing_depth, layer_indices)

  finished_nodes.add(node)
  nodes_in_progress.remove(node)
  nodes_in_decreasing_depth.append(node)
def _map_subgraph_network(inputs, outputs):
  """Returns the nodes and layers in the topology from `inputs` to `outputs`.

  Args:
    inputs: List of input tensors.
    outputs: List of output tensors.

  Returns:
    A tuple of List[Node] and List[Layer].
  """
  if not ops.executing_eagerly_outside_functions():
    base_layer_utils.create_keras_history(outputs)
  # Keep only nodes and layers in the topology between inputs and outputs.
  _, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)
  node_lists = list(nodes_by_depth.values())
  return nest.flatten(node_lists), layers
def _should_skip_first_node(layer):
  """Returns True if the first layer node should not be saved or loaded."""
  # Networks that are constructed with an Input layer/shape start with a
  # pre-existing node linking their input to output. This node is excluded
  # from the network config.
  if not isinstance(layer, Functional):
    return False
  tracked = layer._self_tracked_trackables
  if not tracked:
    # Filter out Sequential models without an input shape.
    return True
  return isinstance(tracked[0], input_layer_module.InputLayer)
def connect_ancillary_layers(model, created_layers):
  """Adds layers that are not connected to the outputs to the model."""
  # Layers not connected to outputs, such as those added in `add_loss`.
  ancillary_layers = [
      layer for layer in created_layers.values() if layer not in model.layers
  ]
  if ancillary_layers:
    node_groups = []
    for layer in created_layers.values():
      nodes = layer.inbound_nodes
      if _should_skip_first_node(layer):
        # Drop the pre-existing input->output node; it is not part of the
        # serialized network config.
        nodes = nodes[1:]
      node_groups.append(nodes)
    model._insert_layers(ancillary_layers, nest.flatten(node_groups))
  return model
def reconstruct_from_config(config, custom_objects=None, created_layers=None):
  """Reconstructs graph from config object.

  Args:
    config: Dictionary returned from Network.get_config()
    custom_objects: Optional dictionary mapping names (strings) to custom
      classes or functions to be considered during deserialization.
    created_layers: Optional dictionary mapping names to Layer objects. Any
      layer not in this dictionary will be created and added to the dict.
      This function will add new nodes to all layers (excluding InputLayers),
      instead of re-using pre-existing nodes in the layers.

  Returns:
    Tuple of (input tensors, output tensors, dictionary of created layers)
  """
  # Layer instances created during the graph reconstruction process.
  created_layers = created_layers or collections.OrderedDict()

  # Maps input data (tuple of inbound layer name, node index) from the config
  # to node indices in the newly generated model. The node indices may be
  # different if the layers have already been called previously.
  node_index_map = {}
  node_count_by_layer = {}

  # Dictionary mapping layer instances to
  # node data that specifies a layer call.
  # It acts as a queue that maintains any unprocessed
  # layer call until it becomes possible to process it
  # (i.e. until the input tensors to the call all exist).
  unprocessed_nodes = {}

  def add_unprocessed_node(layer, node_data):
    """Re-enqueues a layer call whose inputs are not all available yet."""
    if layer not in unprocessed_nodes:
      unprocessed_nodes[layer] = [node_data]
    else:
      unprocessed_nodes[layer].append(node_data)

  def get_node_index(layer, config_node_index):
    """Returns node index in layer (might differ from config_node_index)."""
    if isinstance(layer, input_layer_module.InputLayer):
      # InputLayers always have exactly one pre-existing node.
      return 0
    return node_index_map.get((layer.name, config_node_index), None)

  def _deserialize_keras_tensors(kwargs, layer_map):
    """Deserializes Keras Tensors passed to `call`."""

    def _deserialize_keras_tensor(t):
      """Deserializes a single Keras Tensor passed to `call`."""
      if isinstance(t, tf_utils.ListWrapper):
        t = t.as_list()
        layer_name = t[0]
        node_index = t[1]
        tensor_index = t[2]

        layer = layer_map[layer_name]
        new_node_index = get_node_index(layer, node_index)
        if new_node_index is None:
          # The inbound node may not have been processed yet,
          # (This can happen e.g. if it depends on a different set
          # of inputs than those that have been processed already).
          # raise an IndexError so that the current node puts itself
          # back on the unprocessed queue.
          # Caution: This may lead to infinite loops for malformed
          # network configurations! (or when there is a bug in
          # the network config loading code).
          raise IndexError
        node = layer._inbound_nodes[new_node_index]
        return nest.flatten(node.outputs)[tensor_index]
      return t

    kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)
    return nest.map_structure(_deserialize_keras_tensor, kwargs)

  def process_node(layer, node_data):
    """Deserialize a node.

    Args:
      layer: layer instance.
      node_data: Nested structure of `ListWrapper`.

    Raises:
      ValueError: In case of improperly formatted `node_data`.
    """
    input_tensors = []
    for input_data in nest.flatten(node_data):
      input_data = input_data.as_list()
      inbound_layer_name = input_data[0]
      inbound_node_index = input_data[1]
      inbound_tensor_index = input_data[2]
      if len(input_data) == 3:
        kwargs = {}
      elif len(input_data) == 4:
        kwargs = input_data[3]
        try:
          kwargs = _deserialize_keras_tensors(kwargs, created_layers)
        except IndexError:
          # Happens if keras tensors in kwargs are still unprocessed
          add_unprocessed_node(layer, node_data)
          return
      else:
        raise ValueError('Improperly formatted model config.')

      if inbound_layer_name != node_module._CONSTANT_VALUE:
        inbound_layer = created_layers[inbound_layer_name]
        inbound_node_index = get_node_index(inbound_layer, inbound_node_index)

        if inbound_node_index is None:
          # Inbound node not built yet; defer this whole layer call.
          add_unprocessed_node(layer, node_data)
          return
        inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
        input_tensors.append(
            nest.flatten(inbound_node.outputs)[inbound_tensor_index])
      else:
        # We received a constant w/ no Keras history attached
        input_tensors.append(inbound_tensor_index)
    input_tensors = nest.pack_sequence_as(node_data, input_tensors)
    # Call layer on its inputs, thus creating the node
    # and building the layer if needed.
    if input_tensors is not None:
      if not layer._preserve_input_structure_in_config:
        input_tensors = (
            base_layer_utils.unnest_if_single_tensor(input_tensors))
      output_tensors = layer(input_tensors, **kwargs)

      # Update node index map.
      output_index = nest.flatten(output_tensors)[0]._keras_history.node_index
      node_index_map[(layer.name, node_count_by_layer[layer])] = output_index
      node_count_by_layer[layer] += 1

  def process_layer(layer_data):
    """Deserializes a layer, then call it on appropriate inputs.

    Args:
      layer_data: layer config dict.

    Raises:
      ValueError: In case of improperly formatted `layer_data` dict.
    """
    layer_name = layer_data['name']

    if layer_name in created_layers:
      layer = created_layers[layer_name]
    else:
      # Instantiate layer.
      from tensorflow.python.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top

      layer = deserialize_layer(layer_data, custom_objects=custom_objects)
      created_layers[layer_name] = layer
    # Models built from an Input start with one pre-existing node: that node
    # is counted but new nodes created below will be numbered after it.
    node_count_by_layer[layer] = int(_should_skip_first_node(layer))

    # Gather layer inputs and convert to `ListWrapper` objects.
    inbound_nodes_data = layer_data['inbound_nodes']
    inbound_nodes_data = tf_utils.convert_inner_node_data(
        inbound_nodes_data, wrap=True)
    for node_data in inbound_nodes_data:
      # We don't process nodes (i.e. make layer calls)
      # on the fly because the inbound node may not yet exist,
      # in case of layer shared at different topological depths
      # (e.g. a model such as A(B(A(B(x)))))
      add_unprocessed_node(layer, node_data)

  # First, we create all layers and enqueue nodes to be processed
  for layer_data in config['layers']:
    process_layer(layer_data)
  # Then we process nodes in order of layer depth.
  # Nodes that cannot yet be processed (if the inbound node
  # does not yet exist) are re-enqueued, and the process
  # is repeated until all nodes are processed.
  while unprocessed_nodes:
    for layer_data in config['layers']:
      layer = created_layers[layer_data['name']]
      if layer in unprocessed_nodes:
        for node_data in unprocessed_nodes.pop(layer):
          process_node(layer, node_data)

  input_tensors = []
  output_tensors = []

  input_layers = tf_utils.convert_inner_node_data(
      config['input_layers'], wrap=True)
  for layer_data in nest.flatten(input_layers):
    layer_name, node_index, tensor_index = layer_data.as_list()
    assert layer_name in created_layers
    layer = created_layers[layer_name]
    node_index = get_node_index(layer, node_index)
    layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
    input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])

  output_layers = tf_utils.convert_inner_node_data(
      config['output_layers'], wrap=True)
  for layer_data in nest.flatten(output_layers):
    layer_name, node_index, tensor_index = layer_data.as_list()
    assert layer_name in created_layers
    layer = created_layers[layer_name]
    node_index = get_node_index(layer, node_index)
    layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
    output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])

  input_tensors = nest.pack_sequence_as(input_layers, input_tensors)
  output_tensors = nest.pack_sequence_as(output_layers, output_tensors)
  return input_tensors, output_tensors, created_layers
def get_network_config(network, serialize_layer_fn=None):
  """Builds the config, which consists of the node graph and serialized layers.

  Args:
    network: A Network object.
    serialize_layer_fn: Function used to serialize layers.

  Returns:
    Config dictionary.
  """
  serialize_layer_fn = (
      serialize_layer_fn or generic_utils.serialize_keras_object)
  config = {
      'name': network.name,
  }
  # Map each relevant node key to its index among the *kept* nodes of its
  # layer, so serialized node references are independent of any extra nodes
  # the layer may have from earlier calls.
  node_conversion_map = {}
  for layer in network.layers:
    kept_nodes = 1 if _should_skip_first_node(layer) else 0
    for original_node_index, node in enumerate(layer._inbound_nodes):
      node_key = _make_node_key(layer.name, original_node_index)
      if node_key in network._network_nodes:
        node_conversion_map[node_key] = kept_nodes
        kept_nodes += 1

  layer_configs = []
  with generic_utils.SharedObjectSavingScope():
    for layer in network.layers:  # From the earliest layers on.
      filtered_inbound_nodes = []
      for original_node_index, node in enumerate(layer._inbound_nodes):
        node_key = _make_node_key(layer.name, original_node_index)
        if node_key in network._network_nodes and not node.is_input:
          # The node is relevant to the model:
          # add to filtered_inbound_nodes.
          node_data = node.serialize(_make_node_key, node_conversion_map)
          filtered_inbound_nodes.append(node_data)

      layer_config = serialize_layer_fn(layer)
      layer_config['name'] = layer.name
      layer_config['inbound_nodes'] = filtered_inbound_nodes
      layer_configs.append(layer_config)
  config['layers'] = layer_configs

  # Gather info about inputs and outputs.
  model_inputs = []
  for i in range(len(network._input_layers)):
    layer, node_index, tensor_index = network._input_coordinates[i]
    node_key = _make_node_key(layer.name, node_index)
    # Skip coordinates that refer to nodes not kept in this network.
    if node_key not in network._network_nodes:
      continue
    new_node_index = node_conversion_map[node_key]
    model_inputs.append(
        tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
  model_inputs = nest.pack_sequence_as(network._nested_inputs, model_inputs)
  # Preserve external Keras compat for Models with single input.
  if not nest.is_nested(model_inputs):
    model_inputs = [model_inputs]
  model_inputs = tf_utils.convert_inner_node_data(model_inputs)
  config['input_layers'] = model_inputs

  model_outputs = []
  for i in range(len(network._output_layers)):
    layer, node_index, tensor_index = network._output_coordinates[i]
    node_key = _make_node_key(layer.name, node_index)
    if node_key not in network._network_nodes:
      continue
    new_node_index = node_conversion_map[node_key]
    model_outputs.append(
        tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
  model_outputs = nest.pack_sequence_as(network._nested_outputs, model_outputs)
  # Preserve external Keras compat for Models with single output.
  if not nest.is_nested(model_outputs):
    model_outputs = [model_outputs]
  model_outputs = tf_utils.convert_inner_node_data(model_outputs)
  config['output_layers'] = model_outputs
  return config
def shape_with_no_batch_size(x):
  """Returns the shape of `x` with its first (batch) dimension set to None.

  Returns None when the rank of `x` is unknown. Scalars (rank 0) yield an
  empty list unchanged.
  """
  if x.shape.rank is None:
    return None
  dims = x.shape.as_list()
  if dims:
    dims[0] = None
  return dims
| Functional |
python | joke2k__faker | faker/providers/internet/es_ES/__init__.py | {
"start": 46,
"end": 481
} | class ____(InternetProvider):
safe_email_tlds = ("com", "net", "es", "es")
tlds = ("com", "com", "com", "net", "org", "es", "es", "es")
replacements = (
("à", "a"),
("â", "a"),
("ã", "a"),
("á", "a"),
("ç", "c"),
("é", "e"),
("ê", "e"),
("í", "i"),
("ô", "o"),
("ö", "o"),
("õ", "o"),
("ó", "o"),
("ú", "u"),
)
| Provider |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.