language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | jazzband__django-oauth-toolkit | tests/test_scopes.py | {
"start": 11832,
"end": 16043
} | class ____(BaseTest):
def get_access_token(self, scopes):
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": scopes,
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
return content["access_token"]
def test_improperly_configured(self):
self.oauth2_settings.SCOPES = {"scope1": "Scope 1"}
request = self.factory.get("/fake")
view = ReadWriteResourceView.as_view()
self.assertRaises(ImproperlyConfigured, view, request)
self.oauth2_settings.SCOPES = {"read": "Read Scope", "write": "Write Scope"}
self.oauth2_settings.READ_SCOPE = "ciccia"
view = ReadWriteResourceView.as_view()
self.assertRaises(ImproperlyConfigured, view, request)
def test_properly_configured(self):
self.oauth2_settings.SCOPES = {"scope1": "Scope 1"}
request = self.factory.get("/fake")
view = ReadWriteResourceView.as_view()
self.assertRaises(ImproperlyConfigured, view, request)
self.oauth2_settings.SCOPES = {"read": "Read Scope", "write": "Write Scope"}
self.oauth2_settings.READ_SCOPE = "ciccia"
view = ReadWriteResourceView.as_view()
self.assertRaises(ImproperlyConfigured, view, request)
def test_has_read_scope(self):
access_token = self.get_access_token("read")
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ReadWriteResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a read protected resource")
def test_no_read_scope(self):
access_token = self.get_access_token("scope1")
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ReadWriteResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
def test_has_write_scope(self):
access_token = self.get_access_token("write")
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.post("/fake-resource", **auth_headers)
request.user = self.test_user
view = ReadWriteResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a write protected resource")
def test_no_write_scope(self):
access_token = self.get_access_token("scope1")
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.post("/fake-resource", **auth_headers)
request.user = self.test_user
view = ReadWriteResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
| TestReadWriteScope |
python | django__django | tests/timezones/models.py | {
"start": 401,
"end": 538
} | class ____(models.Model):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
| Timestamp |
python | redis__redis-py | tests/test_parsers/test_errors.py | {
"start": 4062,
"end": 5939
} | class ____:
def setup_method(self):
"""Set up test fixtures with mocked sockets."""
self.mock_sockets = []
self.original_socket = socket.socket
# Mock socket creation to return our mock sockets
def mock_socket_factory(*args, **kwargs):
mock_sock = MockSocket()
self.mock_sockets.append(mock_sock)
return mock_sock
self.socket_patcher = patch("socket.socket", side_effect=mock_socket_factory)
self.socket_patcher.start()
# Mock select.select to simulate data availability for reading
def mock_select(rlist, wlist, xlist, timeout=0):
# Check if any of the sockets in rlist have data available
ready_sockets = []
for sock in rlist:
if hasattr(sock, "connected") and sock.connected and not sock.closed:
# Only return socket as ready if it actually has data to read
if hasattr(sock, "pending_responses") and sock.pending_responses:
ready_sockets.append(sock)
# Don't return socket as ready just because it received commands
# Only when there are actual responses available
return (ready_sockets, [], [])
self.select_patcher = patch("select.select", side_effect=mock_select)
self.select_patcher.start()
def teardown_method(self):
"""Clean up test fixtures."""
self.socket_patcher.stop()
self.select_patcher.stop()
@pytest.mark.parametrize("protocol_version", [2, 3])
def test_external_auth_provider_error(self, protocol_version):
client = Redis(
protocol=protocol_version,
)
client.set("hello", "world")
with pytest.raises(ExternalAuthProviderError):
client.get("ldap_error")
| TestErrorParsing |
python | sympy__sympy | sympy/functions/special/hyper.py | {
"start": 32285,
"end": 33055
} | class ____(HyperRep):
""" Represent hyper([1, 1], [3/2], z) == asin(sqrt(z))/sqrt(z)/sqrt(1-z). """
# TODO this can be nicer
@classmethod
def _expr_small(cls, z):
return HyperRep_asin1._expr_small(z) \
/HyperRep_power1._expr_small(S.Half, z)
@classmethod
def _expr_small_minus(cls, z):
return HyperRep_asin1._expr_small_minus(z) \
/HyperRep_power1._expr_small_minus(S.Half, z)
@classmethod
def _expr_big(cls, z, n):
return HyperRep_asin1._expr_big(z, n) \
/HyperRep_power1._expr_big(S.Half, z, n)
@classmethod
def _expr_big_minus(cls, z, n):
return HyperRep_asin1._expr_big_minus(z, n) \
/HyperRep_power1._expr_big_minus(S.Half, z, n)
| HyperRep_asin2 |
python | fastai__fastai | fastai/layers.py | {
"start": 24086,
"end": 24501
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return _swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
return _swish_jit_bwd(x, grad_output)
# %% ../nbs/01_layers.ipynb 160
def swish(x, inplace=False): F.silu(x, inplace=inplace)
# %% ../nbs/01_layers.ipynb 161
| _SwishJitAutoFn |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 6727,
"end": 9392
} | class ____(GroupByApplyConcatApply, GroupByBase):
"""Single groupby aggregation
This is an abstract class. Sub-classes must implement
the following methods:
- `groupby_chunk`: Applied to each group within
the `chunk` method of `GroupByApplyConcatApply`
- `groupby_aggregate`: Applied to each group within
the `aggregate` method of `GroupByApplyConcatApply`
Parameters
----------
frame: Expr
Dataframe- or series-like expression to group.
by: str, list or Series
The key for grouping
observed:
Passed through to dataframe backend.
dropna:
Whether rows with NA values should be dropped.
chunk_kwargs:
Key-word arguments to pass to `groupby_chunk`.
aggregate_kwargs:
Key-word arguments to pass to `aggregate_chunk`.
"""
_parameters = [
"frame",
"observed",
"dropna",
"chunk_kwargs",
"aggregate_kwargs",
"_slice",
"split_every",
"split_out",
"sort",
"shuffle_method",
]
_defaults = {
"observed": None,
"dropna": None,
"chunk_kwargs": None,
"aggregate_kwargs": None,
"_slice": None,
"split_every": 8,
"split_out": None,
"sort": None,
"shuffle_method": None,
}
groupby_chunk: Callable | None = None
groupby_aggregate: Callable | None = None
@classmethod
def chunk(cls, df, *by, **kwargs):
return _apply_chunk(df, *by, **kwargs)
@classmethod
def aggregate(cls, inputs, **kwargs):
return _groupby_aggregate(_concat(inputs), **kwargs)
@property
def chunk_kwargs(self) -> dict: # type: ignore[override]
chunk_kwargs = self.operand("chunk_kwargs") or {}
columns = self._slice
return {
"chunk": self.groupby_chunk,
"columns": columns,
**_as_dict("observed", self.observed),
**_as_dict("dropna", self.dropna),
**chunk_kwargs,
}
@property
def aggregate_kwargs(self) -> dict: # type: ignore[override]
aggregate_kwargs = self.operand("aggregate_kwargs") or {}
groupby_aggregate = self.groupby_aggregate or self.groupby_chunk
return {
"aggfunc": groupby_aggregate,
"levels": self.levels,
"sort": self.sort,
**_as_dict("observed", self.observed),
**_as_dict("dropna", self.dropna),
**aggregate_kwargs,
}
def _simplify_up(self, parent, dependents):
return groupby_projection(self, parent, dependents)
| SingleAggregation |
python | cherrypy__cherrypy | cherrypy/_cprequest.py | {
"start": 2493,
"end": 6385
} | class ____(dict):
"""A map of call points to lists of callbacks (Hook objects)."""
def __new__(cls, points=None):
"""Construct a fresh hook map instance."""
d = dict.__new__(cls)
for p in points or []:
d[p] = []
return d
def __init__(self, *a, **kw):
"""Initialize a hook map instance post-construction."""
pass
def attach(self, point, callback, failsafe=None, priority=None, **kwargs):
"""Append a new Hook made from the supplied arguments."""
self[point].append(Hook(callback, failsafe, priority, **kwargs))
def run(self, point):
"""Execute all registered Hooks (callbacks) for the given point."""
self.run_hooks(iter(sorted(self[point])))
@classmethod
def run_hooks(cls, hooks):
"""Execute the indicated hooks, trapping errors.
Hooks with ``.failsafe == True`` are guaranteed to run
even if others at the same hookpoint fail. In this case,
log the failure and proceed on to the next hook. The only
way to stop all processing from one of these hooks is
to raise a BaseException like SystemExit or
KeyboardInterrupt and stop the whole server.
"""
assert isinstance(hooks, collections.abc.Iterator)
quiet_errors = (
cherrypy.HTTPError,
cherrypy.HTTPRedirect,
cherrypy.InternalRedirect,
)
safe = filter(operator.attrgetter('failsafe'), hooks)
for hook in hooks:
try:
hook()
except quiet_errors:
cls.run_hooks(safe)
raise
except Exception:
cherrypy.log(traceback=True, severity=40)
cls.run_hooks(safe)
raise
def __copy__(self):
"""Duplicate object per the copy protocol."""
newmap = self.__class__()
# We can't just use 'update' because we want copies of the
# mutable values (each is a list) as well.
for k, v in self.items():
newmap[k] = v[:]
return newmap
copy = __copy__
def __repr__(self):
"""Render a string representation of :class:`HookMap`."""
cls = self.__class__
return '%s.%s(points=%r)' % (cls.__module__, cls.__name__, list(self))
# Config namespace handlers
def hooks_namespace(k, v):
"""Attach bare hooks declared in config."""
# Use split again to allow multiple hooks for a single
# hookpoint per path (e.g. "hooks.before_handler.1").
# Little-known fact you only get from reading source ;)
hookpoint = k.split('.', 1)[0]
if isinstance(v, str):
v = cherrypy.lib.reprconf.attributes(v)
if not isinstance(v, Hook):
v = Hook(v)
cherrypy.serving.request.hooks[hookpoint].append(v)
def request_namespace(k, v):
"""Attach request attributes declared in config."""
# Provides config entries to set request.body attrs (like
# attempt_charsets).
if k[:5] == 'body.':
setattr(cherrypy.serving.request.body, k[5:], v)
else:
setattr(cherrypy.serving.request, k, v)
def response_namespace(k, v):
"""Attach response attributes declared in config."""
# Provides config entries to set default response headers
# http://cherrypy.dev/ticket/889
if k[:8] == 'headers.':
cherrypy.serving.response.headers[k.split('.', 1)[1]] = v
else:
setattr(cherrypy.serving.response, k, v)
def error_page_namespace(k, v):
"""Attach error pages declared in config."""
if k != 'default':
k = int(k)
cherrypy.serving.request.error_page[k] = v
hookpoints = [
'on_start_resource',
'before_request_body',
'before_handler',
'before_finalize',
'on_end_resource',
'on_end_request',
'before_error_response',
'after_error_response',
]
| HookMap |
python | python-openxml__python-docx | src/docx/oxml/numbering.py | {
"start": 1381,
"end": 1901
} | class ____(BaseOxmlElement):
"""``<w:lvlOverride>`` element, which identifies a level in a list definition to
override with settings it contains."""
startOverride = ZeroOrOne("w:startOverride", successors=("w:lvl",))
ilvl = RequiredAttribute("w:ilvl", ST_DecimalNumber)
def add_startOverride(self, val):
"""Return a newly added CT_DecimalNumber element having tagname
``w:startOverride`` and ``val`` attribute set to `val`."""
return self._add_startOverride(val=val)
| CT_NumLvl |
python | great-expectations__great_expectations | great_expectations/core/validation_definition.py | {
"start": 2204,
"end": 15992
} | class ____(BaseModel):
"""
Responsible for running a suite against data and returning a validation result.
Args:
name: The name of the validation.
data: A batch definition to validate.
suite: A grouping of expectations to validate against the data.
id: A unique identifier for the validation; added when persisted with a store.
"""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True # Necessary for compatibility with suite's Marshmallow dep
copy_on_model_validation = (
"none" # Necessary to prevent cloning when passing to a checkpoint
)
validate_assignment = True
"""
When serialized, the suite and data fields should be encoded as a set of identifiers.
These will be used as foreign keys to retrieve the actual objects from the appropriate stores.
Example:
{
"name": "my_validation",
"data": {
"datasource": {
"name": "my_datasource",
"id": "a758816-64c8-46cb-8f7e-03c12cea1d67"
},
"asset": {
"name": "my_asset",
"id": "b5s8816-64c8-46cb-8f7e-03c12cea1d67"
},
"batch_definition": {
"name": "my_batch_definition",
"id": "3a758816-64c8-46cb-8f7e-03c12cea1d67"
}
},
"suite": {
"name": "my_suite",
"id": "8r2g816-64c8-46cb-8f7e-03c12cea1d67"
},
"id": "20dna816-64c8-46cb-8f7e-03c12cea1d67"
}
""" # noqa: E501 # FIXME CoP
json_encoders = {
ExpectationSuite: lambda e: e.identifier_bundle(),
BatchDefinition: lambda b: b.identifier_bundle(),
}
name: str
data: BatchDefinition
suite: ExpectationSuite
id: Union[str, None] = None
@property
@public_api
def batch_definition(self) -> BatchDefinition:
"""
The Batch Definition to validate.
"""
return self.data
@property
@public_api
def asset(self) -> DataAsset:
"""
The parent Data Asset of the Batch Definition.
"""
return self.data.data_asset
@property
def data_source(self) -> Datasource:
return self.asset.datasource
@property
def _validation_results_store(self) -> ValidationResultsStore:
return project_manager.get_validation_results_store()
def is_fresh(self) -> ValidationDefinitionFreshnessDiagnostics:
validation_definition_diagnostics = ValidationDefinitionFreshnessDiagnostics(
errors=[] if self.id else [ValidationDefinitionNotAddedError(name=self.name)]
)
suite_diagnostics = self.suite.is_fresh()
data_diagnostics = self.data.is_fresh()
validation_definition_diagnostics.update_with_children(suite_diagnostics, data_diagnostics)
if not validation_definition_diagnostics.success:
return validation_definition_diagnostics
store = project_manager.get_validation_definition_store()
key = store.get_key(name=self.name, id=self.id)
try:
validation_definition = store.get(key=key)
except (
StoreBackendError, # Generic error from stores
InvalidKeyError, # Ephemeral context error
):
return ValidationDefinitionFreshnessDiagnostics(
errors=[ValidationDefinitionNotFoundError(name=self.name)]
)
return ValidationDefinitionFreshnessDiagnostics(
errors=[]
if self == validation_definition
else [ValidationDefinitionNotFreshError(name=self.name)]
)
@validator("suite", pre=True)
def _validate_suite(cls, v: dict | ExpectationSuite):
# Input will be a dict of identifiers if being deserialized or a suite object if being constructed by a user. # noqa: E501 # FIXME CoP
if isinstance(v, dict):
return cls._decode_suite(v)
elif isinstance(v, ExpectationSuite):
return v
raise ValueError( # noqa: TRY003 # FIXME CoP
"Suite must be a dictionary (if being deserialized) or an ExpectationSuite object."
)
@validator("data", pre=True)
def _validate_data(cls, v: dict | BatchDefinition):
# Input will be a dict of identifiers if being deserialized or a rich type if being constructed by a user. # noqa: E501 # FIXME CoP
if isinstance(v, dict):
return cls._decode_data(v)
elif isinstance(v, BatchDefinition):
return v
raise ValueError( # noqa: TRY003 # FIXME CoP
"Data must be a dictionary (if being deserialized) or a BatchDefinition object."
)
@classmethod
def _decode_suite(cls, suite_dict: dict) -> ExpectationSuite:
# Take in raw JSON, ensure it contains appropriate identifiers, and use them to retrieve the actual suite. # noqa: E501 # FIXME CoP
try:
suite_identifiers = _IdentifierBundle.parse_obj(suite_dict)
except ValidationError as e:
raise ValueError("Serialized suite did not contain expected identifiers") from e # noqa: TRY003 # FIXME CoP
name = suite_identifiers.name
id = suite_identifiers.id
expectation_store = project_manager.get_expectations_store()
key = expectation_store.get_key(name=name, id=id)
try:
config: dict = expectation_store.get(key)
except gx_exceptions.InvalidKeyError as e:
raise ValueError(f"Could not find suite with name: {name} and id: {id}") from e # noqa: TRY003 # FIXME CoP
suite = ExpectationSuite(**config)
if suite._include_rendered_content:
suite.render()
return suite
@classmethod
def _decode_data(cls, data_dict: dict) -> BatchDefinition:
# Take in raw JSON, ensure it contains appropriate identifiers, and use them to retrieve the actual data. # noqa: E501 # FIXME CoP
try:
data_identifiers = _EncodedValidationData.parse_obj(data_dict)
except ValidationError as e:
raise ValueError("Serialized data did not contain expected identifiers") from e # noqa: TRY003 # FIXME CoP
ds_name = data_identifiers.datasource.name
asset_name = data_identifiers.asset.name
batch_definition_name = data_identifiers.batch_definition.name
datasource_dict = project_manager.get_datasources()
try:
ds = datasource_dict[ds_name]
except KeyError as e:
raise ValueError(f"Could not find datasource named '{ds_name}'.") from e # noqa: TRY003 # FIXME CoP
try:
asset = ds.get_asset(asset_name)
except LookupError as e:
raise ValueError( # noqa: TRY003 # FIXME CoP
f"Could not find asset named '{asset_name}' within '{ds_name}' datasource."
) from e
try:
batch_definition = asset.get_batch_definition(batch_definition_name)
except KeyError as e:
raise ValueError( # noqa: TRY003 # FIXME CoP
f"Could not find batch definition named '{batch_definition_name}' within '{asset_name}' asset and '{ds_name}' datasource." # noqa: E501 # FIXME CoP
) from e
return batch_definition
@public_api
def run(
self,
*,
checkpoint_id: Optional[str] = None,
batch_parameters: Optional[BatchParameters] = None,
expectation_parameters: Optional[SuiteParameterDict] = None,
result_format: ResultFormatUnion = DEFAULT_RESULT_FORMAT,
run_id: RunIdentifier | None = None,
) -> ExpectationSuiteValidationResult:
"""
Runs a validation using the configured data and suite.
Args:
batch_parameters: The dictionary of parameters necessary for selecting the
correct batch to run the validation on. The keys are strings that are determined
by the BatchDefinition used to instantiate this ValidationDefinition. For example:
- whole table -> None
- yearly -> year
- monthly -> year, month
- daily -> year, month, day
expectation_parameters: A dictionary of parameters values for any expectations using
parameterized values (the $PARAMETER syntax). The keys are the parameter names
and the values are the values to be used for this validation run.
result_format: A parameter controlling how much diagnostic information the result
contains.
checkpoint_id: This is used by the checkpoints code when it runs a validation
definition. Otherwise, it should be None.
run_id: An identifier for this run. Typically, this should be set to None and it will
be generated by this call.
"""
diagnostics = self.is_fresh()
if not diagnostics.success:
# The validation definition itself is not added but all children are - we can add it for the user # noqa: E501 # FIXME CoP
if not diagnostics.parent_added and diagnostics.children_added:
self._add_to_store()
else:
diagnostics.raise_for_error()
validator = Validator(
batch_definition=self.batch_definition,
batch_parameters=batch_parameters,
result_format=result_format,
)
results = validator.validate_expectation_suite(self.suite, expectation_parameters)
results.meta["validation_id"] = self.id
results.meta["checkpoint_id"] = checkpoint_id
# NOTE: We should promote this to a top-level field of the result.
# Meta should be reserved for user-defined information.
if not run_id:
run_id = RunIdentifier(run_time=datetime.datetime.now(datetime.timezone.utc))
results.meta["run_id"] = run_id
results.meta["validation_time"] = run_id.run_time
if batch_parameters:
batch_parameters_copy = {k: v for k, v in batch_parameters.items()}
if "dataframe" in batch_parameters_copy:
batch_parameters_copy["dataframe"] = DATAFRAME_REPLACEMENT_STR
results.meta["batch_parameters"] = batch_parameters_copy
else:
results.meta["batch_parameters"] = None
(
expectation_suite_identifier,
validation_result_id,
) = self._get_expectation_suite_and_validation_result_ids(
validator=validator, run_id=run_id
)
ref = self._validation_results_store.store_validation_results(
suite_validation_result=results,
suite_validation_result_identifier=validation_result_id,
expectation_suite_identifier=expectation_suite_identifier,
)
if isinstance(ref, GXCloudResourceRef):
results.id = ref.id
# FIXME(cdkini): There is currently a bug in GX Cloud where the result_url is None
results.result_url = self._validation_results_store.parse_result_url_from_gx_cloud_ref(
ref
)
return results
def _get_expectation_suite_and_validation_result_ids(
self,
validator: Validator,
run_id: RunIdentifier | None = None,
) -> (
tuple[GXCloudIdentifier, GXCloudIdentifier]
| tuple[ExpectationSuiteIdentifier, ValidationResultIdentifier]
):
expectation_suite_identifier: GXCloudIdentifier | ExpectationSuiteIdentifier
validation_result_id: GXCloudIdentifier | ValidationResultIdentifier
if self._validation_results_store.cloud_mode:
expectation_suite_identifier = GXCloudIdentifier(
resource_type=GXCloudRESTResource.EXPECTATION_SUITE,
id=self.suite.id,
)
validation_result_id = GXCloudIdentifier(
resource_type=GXCloudRESTResource.VALIDATION_RESULT
)
return expectation_suite_identifier, validation_result_id
else:
run_id = run_id or RunIdentifier(
run_time=datetime.datetime.now(tz=datetime.timezone.utc)
)
expectation_suite_identifier = ExpectationSuiteIdentifier(name=self.suite.name)
validation_result_id = ValidationResultIdentifier(
batch_identifier=validator.active_batch_id,
expectation_suite_identifier=expectation_suite_identifier,
run_id=run_id,
)
return expectation_suite_identifier, validation_result_id
def identifier_bundle(self) -> _IdentifierBundle:
# Utilized as a custom json_encoder
diagnostics = self.is_fresh()
diagnostics.raise_for_error()
return _IdentifierBundle(name=self.name, id=self.id)
@public_api
def save(self) -> None:
"""Save the current state of this ValidationDefinition."""
store = project_manager.get_validation_definition_store()
key = store.get_key(name=self.name, id=self.id)
store.update(key=key, value=self)
def _add_to_store(self) -> None:
"""This is used to persist a validation_definition before we run it.
We need to persist a validation_definition before it can be run. If user calls runs but
hasn't persisted it we add it for them."""
store = project_manager.get_validation_definition_store()
key = store.get_key(name=self.name, id=self.id)
store.add(key=key, value=self)
| ValidationDefinition |
python | ipython__ipython | tests/test_oinspect.py | {
"start": 3531,
"end": 3758
} | class ____(object):
"""This is the class docstring."""
__signature__ = Signature([Parameter("test", Parameter.POSITIONAL_OR_KEYWORD)])
def __init__(self, *args):
"""This is the init docstring"""
| HasSignature |
python | EpistasisLab__tpot | tpot/search_spaces/pipelines/tree.py | {
"start": 1828,
"end": 2303
} | class ____(GraphPipelineIndividual):
def __init__(self,
**kwargs) -> None:
super().__init__(**kwargs)
self.crossover_methods_list = [self._crossover_swap_branch, self._crossover_swap_node, self._crossover_nodes]
self.mutate_methods_list = [self._mutate_insert_leaf, self._mutate_insert_inner_node, self._mutate_remove_node, self._mutate_node]
self.merge_duplicated_nodes_toggle = False
| TreePipelineIndividual |
python | facebook__pyre-check | client/commands/tests/language_server_test.py | {
"start": 28790,
"end": 35271
} | class ____(testslide.TestCase):
@setup.async_test
async def test_clear_type_errors_for_client(self) -> None:
server_state = server_setup.create_server_state_with_options()
server_state.diagnostics = {
Path("/foo.py"): [],
}
bytes_writer = connections.MemoryBytesWriter()
client_output_channel = connections.AsyncTextWriter(bytes_writer)
error_handler = type_error_handler.ClientTypeErrorHandler(
client_output_channel=client_output_channel,
server_state=server_state,
)
await error_handler.clear_type_errors_for_client()
client_messages = [x.decode("utf-8") for x in bytes_writer.items()]
self.assertEqual(len(client_messages), 1)
print(client_messages)
message = client_messages[0]
self.assertIn("textDocument/publishDiagnostics", message)
self.assertIn('{"uri": "file:///foo.py", "diagnostics": []}}', message)
@setup.async_test
async def test_update_type_errors_and_show_type_errors_to_client(self) -> None:
server_state = server_setup.create_server_state_with_options()
bytes_writer = connections.MemoryBytesWriter()
client_output_channel = connections.AsyncTextWriter(bytes_writer)
error_handler = type_error_handler.ClientTypeErrorHandler(
client_output_channel=client_output_channel,
server_state=server_state,
)
error_handler.update_type_errors(
[
error.Error(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("derp.py"),
code=42,
name="name",
description="first error",
),
error.Error(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("derp.py"),
code=42,
name="name",
description="second error",
),
]
)
await error_handler.show_type_errors_to_client()
self.assertDictEqual(
server_state.diagnostics,
{
Path("derp.py"): [
lsp.Diagnostic(
range=lsp.LspRange(
start=lsp.LspPosition(line=0, character=1),
end=lsp.LspPosition(line=1, character=2),
),
message="first error",
severity=lsp.DiagnosticSeverity.ERROR,
code="name [42]",
source="Pyre",
code_description=lsp.CodeDescription(
href="https://pyre-check.org/docs/errors/#42-missing-overload-implementation"
),
),
lsp.Diagnostic(
range=lsp.LspRange(
start=lsp.LspPosition(line=0, character=1),
end=lsp.LspPosition(line=1, character=2),
),
message="second error",
severity=lsp.DiagnosticSeverity.ERROR,
code="name [42]",
source="Pyre",
code_description=lsp.CodeDescription(
href="https://pyre-check.org/docs/errors/#42-missing-overload-implementation"
),
),
]
},
)
client_messages = [x.decode("utf-8") for x in bytes_writer.items()]
print(client_messages)
self.assertEqual(len(client_messages), 1)
message = client_messages[0]
self.assertIn("textDocument/publishDiagnostics", message)
@setup.async_test
async def test_show_overlay_type_errors__non_empty(self) -> None:
server_state = server_setup.create_server_state_with_options()
bytes_writer = connections.MemoryBytesWriter()
client_output_channel = connections.AsyncTextWriter(bytes_writer)
error_handler = type_error_handler.ClientTypeErrorHandler(
client_output_channel=client_output_channel,
server_state=server_state,
)
await error_handler.show_overlay_type_errors(
Path("derp.py"),
[
error.Error(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("derp.py"),
code=42,
name="name",
description="first error",
),
error.Error(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("derp.py"),
code=42,
name="name",
description="second error",
),
],
)
client_messages = [x.decode("utf-8") for x in bytes_writer.items()]
print(client_messages)
self.assertEqual(len(client_messages), 1)
message = client_messages[0]
self.assertIn("textDocument/publishDiagnostics", message)
self.assertIn("derp.py", message)
self.assertIn("first error", message)
@setup.async_test
async def test_show_overlay_type_errors__empty(self) -> None:
server_state = server_setup.create_server_state_with_options()
bytes_writer = connections.MemoryBytesWriter()
client_output_channel = connections.AsyncTextWriter(bytes_writer)
error_handler = type_error_handler.ClientTypeErrorHandler(
client_output_channel=client_output_channel,
server_state=server_state,
)
await error_handler.show_overlay_type_errors(Path("derp.py"), [])
client_messages = [x.decode("utf-8") for x in bytes_writer.items()]
print(client_messages)
self.assertEqual(len(client_messages), 1)
message = client_messages[0]
self.assertIn("textDocument/publishDiagnostics", message)
self.assertIn("derp.py", message)
self.assertIn('"diagnostics": []', message)
| ClientTypeErrorHandlerTest |
python | pandas-dev__pandas | asv_bench/benchmarks/package.py | {
"start": 84,
"end": 624
} | class ____:
def time_import(self):
# on py37+ we the "-X importtime" usage gives us a more precise
# measurement of the import time we actually care about,
# without the subprocess or interpreter overhead
cmd = [sys.executable, "-X", "importtime", "-c", "import pandas as pd"]
p = subprocess.run(cmd, stderr=subprocess.PIPE, check=True)
line = p.stderr.splitlines()[-1]
field = line.split(b"|")[-2].strip()
total = int(field) # microseconds
return total
| TimeImport |
python | huggingface__transformers | src/transformers/models/mvp/modeling_mvp.py | {
"start": 69121,
"end": 73665
} | class ____(MvpPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.decoder.embed_tokens.weight"}
def __init__(self, config):
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = MvpDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def set_lightweight_tuning(self):
self.model.set_lightweight_tuning()
self.lm_head.requires_grad_(False)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MvpForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
>>> model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 8, 50267]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
# Only compute necessary logits
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
__all__ = [
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
| MvpForCausalLM |
python | getsentry__sentry | src/sentry/issues/endpoints/group_integrations.py | {
"start": 1269,
"end": 3374
} | class ____(IntegrationSerializer):
def __init__(self, group: Group) -> None:
self.group = group
def get_attrs(
self,
item_list: Sequence[RpcIntegration],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> MutableMapping[RpcIntegration, MutableMapping[str, Any]]:
external_issues = ExternalIssue.objects.filter(
id__in=GroupLink.objects.get_group_issues(self.group).values_list(
"linked_id", flat=True
),
integration_id__in=[i.id for i in item_list],
)
issues_by_integration = defaultdict(list)
for ei in external_issues:
# TODO(jess): move into an external issue serializer?
integration = integration_service.get_integration(integration_id=ei.integration_id)
if integration is None:
continue
installation = integration.get_installation(organization_id=self.group.organization.id)
if hasattr(installation, "get_issue_url") and hasattr(
installation, "get_issue_display_name"
):
issues_by_integration[ei.integration_id].append(
{
"id": str(ei.id),
"key": ei.key,
"url": installation.get_issue_url(ei.key),
"title": ei.title,
"description": ei.description,
"displayName": installation.get_issue_display_name(ei),
}
)
return {
item: {"external_issues": issues_by_integration.get(item.id, [])} for item in item_list
}
def serialize(
self,
obj: Integration | RpcIntegration,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> MutableMapping[str, Any]:
data = super().serialize(obj, attrs, user)
data["externalIssues"] = attrs.get("external_issues", [])
return data
@region_silo_endpoint
| IntegrationIssueSerializer |
python | kamyu104__LeetCode-Solutions | Python/course-schedule-iv.py | {
"start": 33,
"end": 833
} | class ____(object):
def checkIfPrerequisite(self, n, prerequisites, queries):
"""
:type n: int
:type prerequisites: List[List[int]]
:type queries: List[List[int]]
:rtype: List[bool]
"""
def floydWarshall(n, graph):
reachable = set(map(lambda x: x[0]*n+x[1], graph))
for k in xrange(n):
for i in xrange(n):
for j in xrange(n):
if i*n+j not in reachable and (i*n+k in reachable and k*n+j in reachable):
reachable.add(i*n+j)
return reachable
reachable = floydWarshall(n, prerequisites)
return [i*n+j in reachable for i, j in queries]
# Time: O(n * q)
# Space: O(p + n)
import collections
| Solution |
python | networkx__networkx | networkx/classes/tests/test_multidigraph.py | {
"start": 303,
"end": 9009
} | class ____(BaseMultiGraphTester):
def test_edges(self):
G = self.K3
edges = [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
assert sorted(G.edges()) == edges
assert sorted(G.edges(0)) == [(0, 1), (0, 2)]
pytest.raises((KeyError, nx.NetworkXError), G.edges, -1)
def test_edges_data(self):
G = self.K3
edges = [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})]
assert sorted(G.edges(data=True)) == edges
assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})]
pytest.raises((KeyError, nx.NetworkXError), G.neighbors, -1)
def test_edges_multi(self):
G = self.K3
assert sorted(G.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
assert sorted(G.edges(0)) == [(0, 1), (0, 2)]
G.add_edge(0, 1)
assert sorted(G.edges()) == [
(0, 1),
(0, 1),
(0, 2),
(1, 0),
(1, 2),
(2, 0),
(2, 1),
]
def test_out_edges(self):
G = self.K3
assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)]
pytest.raises((KeyError, nx.NetworkXError), G.out_edges, -1)
assert sorted(G.out_edges(0, keys=True)) == [(0, 1, 0), (0, 2, 0)]
def test_out_edges_multi(self):
G = self.K3
assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)]
G.add_edge(0, 1, 2)
assert sorted(G.out_edges()) == [
(0, 1),
(0, 1),
(0, 2),
(1, 0),
(1, 2),
(2, 0),
(2, 1),
]
def test_out_edges_data(self):
G = self.K3
assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})]
G.remove_edge(0, 1)
G.add_edge(0, 1, data=1)
assert sorted(G.edges(0, data=True)) == [(0, 1, {"data": 1}), (0, 2, {})]
assert sorted(G.edges(0, data="data")) == [(0, 1, 1), (0, 2, None)]
assert sorted(G.edges(0, data="data", default=-1)) == [(0, 1, 1), (0, 2, -1)]
def test_in_edges(self):
G = self.K3
assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)]
pytest.raises((KeyError, nx.NetworkXError), G.in_edges, -1)
G.add_edge(0, 1, 2)
assert sorted(G.in_edges()) == [
(0, 1),
(0, 1),
(0, 2),
(1, 0),
(1, 2),
(2, 0),
(2, 1),
]
assert sorted(G.in_edges(0, keys=True)) == [(1, 0, 0), (2, 0, 0)]
def test_in_edges_no_keys(self):
G = self.K3
assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)]
G.add_edge(0, 1, 2)
assert sorted(G.in_edges()) == [
(0, 1),
(0, 1),
(0, 2),
(1, 0),
(1, 2),
(2, 0),
(2, 1),
]
assert sorted(G.in_edges(data=True, keys=False)) == [
(0, 1, {}),
(0, 1, {}),
(0, 2, {}),
(1, 0, {}),
(1, 2, {}),
(2, 0, {}),
(2, 1, {}),
]
def test_in_edges_data(self):
G = self.K3
assert sorted(G.in_edges(0, data=True)) == [(1, 0, {}), (2, 0, {})]
G.remove_edge(1, 0)
G.add_edge(1, 0, data=1)
assert sorted(G.in_edges(0, data=True)) == [(1, 0, {"data": 1}), (2, 0, {})]
assert sorted(G.in_edges(0, data="data")) == [(1, 0, 1), (2, 0, None)]
assert sorted(G.in_edges(0, data="data", default=-1)) == [(1, 0, 1), (2, 0, -1)]
def is_shallow(self, H, G):
# graph
assert G.graph["foo"] == H.graph["foo"]
G.graph["foo"].append(1)
assert G.graph["foo"] == H.graph["foo"]
# node
assert G.nodes[0]["foo"] == H.nodes[0]["foo"]
G.nodes[0]["foo"].append(1)
assert G.nodes[0]["foo"] == H.nodes[0]["foo"]
# edge
assert G[1][2][0]["foo"] == H[1][2][0]["foo"]
G[1][2][0]["foo"].append(1)
assert G[1][2][0]["foo"] == H[1][2][0]["foo"]
def is_deep(self, H, G):
# graph
assert G.graph["foo"] == H.graph["foo"]
G.graph["foo"].append(1)
assert G.graph["foo"] != H.graph["foo"]
# node
assert G.nodes[0]["foo"] == H.nodes[0]["foo"]
G.nodes[0]["foo"].append(1)
assert G.nodes[0]["foo"] != H.nodes[0]["foo"]
# edge
assert G[1][2][0]["foo"] == H[1][2][0]["foo"]
G[1][2][0]["foo"].append(1)
assert G[1][2][0]["foo"] != H[1][2][0]["foo"]
def test_to_undirected(self):
# MultiDiGraph -> MultiGraph changes number of edges so it is
# not a copy operation... use is_shallow, not is_shallow_copy
G = self.K3
self.add_attributes(G)
H = nx.MultiGraph(G)
# self.is_shallow(H,G)
# the result is traversal order dependent so we
# can't use the is_shallow() test here.
try:
assert edges_equal(H.edges(), [(0, 1), (1, 2), (2, 0)])
except AssertionError:
assert edges_equal(H.edges(), [(0, 1), (1, 2), (1, 2), (2, 0)])
H = G.to_undirected()
self.is_deep(H, G)
def test_has_successor(self):
G = self.K3
assert G.has_successor(0, 1)
assert not G.has_successor(0, -1)
def test_successors(self):
G = self.K3
assert sorted(G.successors(0)) == [1, 2]
pytest.raises((KeyError, nx.NetworkXError), G.successors, -1)
def test_has_predecessor(self):
G = self.K3
assert G.has_predecessor(0, 1)
assert not G.has_predecessor(0, -1)
def test_predecessors(self):
G = self.K3
assert sorted(G.predecessors(0)) == [1, 2]
pytest.raises((KeyError, nx.NetworkXError), G.predecessors, -1)
def test_degree(self):
G = self.K3
assert sorted(G.degree()) == [(0, 4), (1, 4), (2, 4)]
assert dict(G.degree()) == {0: 4, 1: 4, 2: 4}
assert G.degree(0) == 4
assert list(G.degree(iter([0]))) == [(0, 4)]
G.add_edge(0, 1, weight=0.3, other=1.2)
assert sorted(G.degree(weight="weight")) == [(0, 4.3), (1, 4.3), (2, 4)]
assert sorted(G.degree(weight="other")) == [(0, 5.2), (1, 5.2), (2, 4)]
def test_in_degree(self):
G = self.K3
assert sorted(G.in_degree()) == [(0, 2), (1, 2), (2, 2)]
assert dict(G.in_degree()) == {0: 2, 1: 2, 2: 2}
assert G.in_degree(0) == 2
assert list(G.in_degree(iter([0]))) == [(0, 2)]
assert G.in_degree(0, weight="weight") == 2
def test_out_degree(self):
G = self.K3
assert sorted(G.out_degree()) == [(0, 2), (1, 2), (2, 2)]
assert dict(G.out_degree()) == {0: 2, 1: 2, 2: 2}
assert G.out_degree(0) == 2
assert list(G.out_degree(iter([0]))) == [(0, 2)]
assert G.out_degree(0, weight="weight") == 2
def test_size(self):
G = self.K3
assert G.size() == 6
assert G.number_of_edges() == 6
G.add_edge(0, 1, weight=0.3, other=1.2)
assert round(G.size(weight="weight"), 2) == 6.3
assert round(G.size(weight="other"), 2) == 7.2
def test_to_undirected_reciprocal(self):
G = self.Graph()
G.add_edge(1, 2)
assert G.to_undirected().has_edge(1, 2)
assert not G.to_undirected(reciprocal=True).has_edge(1, 2)
G.add_edge(2, 1)
assert G.to_undirected(reciprocal=True).has_edge(1, 2)
def test_reverse_copy(self):
G = nx.MultiDiGraph([(0, 1), (0, 1)])
R = G.reverse()
assert sorted(R.edges()) == [(1, 0), (1, 0)]
R.remove_edge(1, 0)
assert sorted(R.edges()) == [(1, 0)]
assert sorted(G.edges()) == [(0, 1), (0, 1)]
def test_reverse_nocopy(self):
G = nx.MultiDiGraph([(0, 1), (0, 1)])
R = G.reverse(copy=False)
assert sorted(R.edges()) == [(1, 0), (1, 0)]
pytest.raises(nx.NetworkXError, R.remove_edge, 1, 0)
def test_di_attributes_cached(self):
G = self.K3.copy()
assert id(G.in_edges) == id(G.in_edges)
assert id(G.out_edges) == id(G.out_edges)
assert id(G.in_degree) == id(G.in_degree)
assert id(G.out_degree) == id(G.out_degree)
assert id(G.succ) == id(G.succ)
assert id(G.pred) == id(G.pred)
| BaseMultiDiGraphTester |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/io_ops/reader_ops_test.py | {
"start": 13922,
"end": 21740
} | class ____(TFCompressionTestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
self._hop_bytes = 2
def _Record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _OverlappedRecord(self, f, r):
record_str = "".join([
str(i)[0]
for i in range(r * self._hop_bytes,
r * self._hop_bytes + self._record_bytes)
])
return compat.as_bytes(record_str)
# gap_bytes=hop_bytes-record_bytes
def _CreateFiles(self, num_records, gap_bytes):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_records > 0:
f.write(self._Record(i, 0))
for j in range(1, num_records):
if gap_bytes > 0:
f.write(b"G" * gap_bytes)
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def _CreateOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _CreateGzipFiles(self, num_records, gap_bytes):
filenames = self._CreateFiles(num_records, gap_bytes)
for fn in filenames:
# compress inplace.
self._GzipCompressFile(fn, fn)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _CreateZlibFiles(self, num_records, gap_bytes):
filenames = self._CreateFiles(num_records, gap_bytes)
for fn in filenames:
# compress inplace.
self._ZlibCompressFile(fn, fn)
return filenames
def _CreateGzipOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with gzip.GzipFile(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
def _CreateZlibOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn + ".tmp", "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
self._ZlibCompressFile(fn + ".tmp", fn)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _TestOneEpoch(self, files, num_records, gap_bytes, encoding=None):
hop_bytes = 0 if gap_bytes == 0 else self._record_bytes + gap_bytes
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=hop_bytes,
encoding=encoding,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(num_records):
k, v = self.evaluate([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
def _TestOneEpochWithHopBytes(self,
files,
num_overlapped_records,
encoding=None):
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=self._hop_bytes,
encoding=encoding,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(num_overlapped_records):
k, v = self.evaluate([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._OverlappedRecord(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
@test_util.run_deprecated_v1
def testOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes)
@test_util.run_deprecated_v1
def testGzipOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateGzipFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes, encoding="GZIP")
@test_util.run_deprecated_v1
def testZlibOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateZlibFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes, encoding="ZLIB")
@test_util.run_deprecated_v1
def testOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
files = self._CreateOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(files, num_overlapped_records)
@test_util.run_deprecated_v1
def testGzipOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
files = self._CreateGzipOverlappedRecordFiles(num_overlapped_records,)
self._TestOneEpochWithHopBytes(
files, num_overlapped_records, encoding="GZIP")
@test_util.run_deprecated_v1
def testZlibOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
files = self._CreateZlibOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(
files, num_overlapped_records, encoding="ZLIB")
| FixedLengthRecordReaderTest |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 16750,
"end": 17048
} | class ____(BaseEstimator):
def fit(self, X, y):
validate_data(self, X, y)
return self
def partial_fit(self, X, y):
reset = not hasattr(self, "_fitted")
validate_data(self, X, y, reset=reset)
self._fitted = True
return self
| PartialFitChecksName |
python | django__django | tests/middleware_exceptions/tests.py | {
"start": 5816,
"end": 6201
} | class ____(SimpleTestCase):
@override_settings(ROOT_URLCONF=None)
def test_missing_root_urlconf(self):
# Removing ROOT_URLCONF is safe, as override_settings will restore
# the previously defined settings.
del settings.ROOT_URLCONF
with self.assertRaises(AttributeError):
self.client.get("/middleware_exceptions/view/")
| RootUrlconfTests |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 50398,
"end": 50821
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("project_id", "content_id", "client_mutation_id")
project_id = sgqlc.types.Field(ID, graphql_name="projectId")
content_id = sgqlc.types.Field(ID, graphql_name="contentId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AddProjectNextItemInput |
python | huggingface__transformers | src/transformers/utils/logging.py | {
"start": 10703,
"end": 11257
} | class ____:
"""Dummy tqdm which doesn't do anything."""
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self._iterator = args[0] if args else None
def __iter__(self):
return iter(self._iterator)
def __getattr__(self, _):
"""Return empty function."""
def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
return
| EmptyTqdm |
python | walkccc__LeetCode | solutions/2081. Sum of k-Mirror Numbers/2081.py | {
"start": 0,
"end": 643
} | class ____:
def kMirror(self, k: int, n: int) -> int:
ans = 0
A = ['0']
def nextKMirror(A: list[str]) -> list[str]:
for i in range(len(A) // 2, len(A)):
nextNum = int(A[i]) + 1
if nextNum < k:
A[i] = str(nextNum)
A[~i] = str(nextNum)
for j in range(len(A) // 2, i):
A[j] = '0'
A[~j] = '0'
return A
return ['1'] + ['0'] * (len(A) - 1) + ['1']
for _ in range(n):
while True:
A = nextKMirror(A)
num = int(''.join(A), k)
if str(num)[::-1] == str(num):
break
ans += num
return ans
| Solution |
python | ijl__orjson | test/test_dataclass.py | {
"start": 786,
"end": 852
} | class ____(Dataclass1):
additional: bool
@dataclass
| Datasubclass |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/relativity/relativity.py | {
"start": 7580,
"end": 7931
} | class ____(pTypes.GroupParameter):
def __init__(self):
pTypes.GroupParameter.__init__(self, name="Objects", addText="Add New..", addList=['Clock', 'Grid'])
def addNew(self, typ):
if typ == 'Clock':
self.addChild(ClockParam())
elif typ == 'Grid':
self.addChild(GridParam())
| ObjectGroupParam |
python | walkccc__LeetCode | solutions/2155. All Divisions With the Highest Score of a Binary Array/2155.py | {
"start": 0,
"end": 562
} | class ____:
def maxScoreIndices(self, nums: list[int]) -> list[int]:
zeros = nums.count(0)
ones = len(nums) - zeros
ans = [0] # the division at index 0
leftZeros = 0
leftOnes = 0
maxScore = ones # `leftZeros` + `rightOnes`
for i, num in enumerate(nums):
leftZeros += num == 0
leftOnes += num == 1
rightOnes = ones - leftOnes
score = leftZeros + rightOnes
if maxScore == score:
ans.append(i + 1)
elif maxScore < score:
maxScore = score
ans = [i + 1]
return ans
| Solution |
python | django__django | django/test/runner.py | {
"start": 21348,
"end": 23488
} | class ____:
"""
This class implements shuffling with a special consistency property.
Consistency means that, for a given seed and key function, if two sets of
items are shuffled, the resulting order will agree on the intersection of
the two sets. For example, if items are removed from an original set, the
shuffled order for the new set will be the shuffled order of the original
set restricted to the smaller set.
"""
# This doesn't need to be cryptographically strong, so use what's fastest.
hash_algorithm = "md5"
@classmethod
def _hash_text(cls, text):
h = hashlib.new(cls.hash_algorithm, usedforsecurity=False)
h.update(text.encode("utf-8"))
return h.hexdigest()
def __init__(self, seed=None):
if seed is None:
# Limit seeds to 10 digits for simpler output.
seed = random.randint(0, 10**10 - 1)
seed_source = "generated"
else:
seed_source = "given"
self.seed = seed
self.seed_source = seed_source
@property
def seed_display(self):
return f"{self.seed!r} ({self.seed_source})"
def _hash_item(self, item, key):
text = "{}{}".format(self.seed, key(item))
return self._hash_text(text)
def shuffle(self, items, key):
"""
Return a new list of the items in a shuffled order.
The `key` is a function that accepts an item in `items` and returns
a string unique for that item that can be viewed as a string id. The
order of the return value is deterministic. It depends on the seed
and key function but not on the original order.
"""
hashes = {}
for item in items:
hashed = self._hash_item(item, key)
if hashed in hashes:
msg = "item {!r} has same hash {!r} as item {!r}".format(
item,
hashed,
hashes[hashed],
)
raise RuntimeError(msg)
hashes[hashed] = item
return [hashes[hashed] for hashed in sorted(hashes)]
| Shuffler |
python | pytorch__pytorch | test/inductor/test_helion_kernels.py | {
"start": 343,
"end": 2369
} | class ____(TestCase):
@requires_helion()
def test_add_kernel(self):
@helion.kernel(config=helion.Config(block_sizes=[1, 2]))
def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
# match pytorch broadcasting rules
x, y = torch.broadcast_tensors(x, y)
out = torch.empty(
x.shape,
# match type promotion of torch.add
dtype=torch.promote_types(x.dtype, y.dtype),
device=x.device,
)
# tile will be a tuple of blocks
for tile in hl.tile(out.size()):
out[tile] = x[tile] + y[tile]
return out
def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return add(x, y)
x = torch.randn(4, 8, device=GPU_TYPE, dtype=torch.float16)
y = torch.randn(4, 8, device=GPU_TYPE, dtype=torch.float16)
out = add(x, y)
compiled_add = torch.compile(f, fullgraph=True, backend="inductor")
compiled_out = compiled_add(x, y)
self.assertEqual(out, x + y)
self.assertEqual(compiled_out, x + y)
@requires_helion()
def test_softmax_view_reshape(self):
@helion.kernel(config={"block_size": 1})
def softmax(x: torch.Tensor) -> torch.Tensor:
n, _m = x.size()
out = torch.empty_like(x)
for tile_n in hl.tile(n):
values = x[tile_n, :]
amax = torch.amax(values, dim=1).view(tile_n, 1)
exp = torch.exp(values - amax)
sum_exp = torch.reshape(torch.sum(exp, dim=1), [tile_n, 1])
out[tile_n, :] = exp / sum_exp
return out
x = torch.randn([1024, 1024], device=GPU_TYPE, dtype=torch.float16)
result = softmax(x)
self.assertEqual(
result, torch.nn.functional.softmax(x, dim=1), rtol=1e-2, atol=1e-1
)
instantiate_parametrized_tests(HelionTests)
if __name__ == "__main__":
run_tests()
| HelionTests |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modeling_lfm2_moe.py | {
"start": 10438,
"end": 19383
} | class ____:
"""
Attention and conv cache for Lfm2Moe.
It stores the Key and Value states as a list of tensors, one for each layer.
Attention layer cache shape: `[batch_size, num_heads, seq_len, head_dim]`.
Conv layer cache shape: `[batch_size, hidden_size, L_cache-1]`.
"""
# Override @property existing in Cache
max_batch_size = None
is_compileable = False
key_cache = None
value_cache = None
def __init__(
self,
config: Lfm2MoeConfig,
max_batch_size: int,
dtype: torch.dtype = torch.float32,
device: Union[torch.device, str, None] = None,
):
self.key_cache = []
self.value_cache = []
self.max_batch_size = max_batch_size
self.layer_types = config.layer_types
self.first_attention_layer = self.layer_types.index("full_attention")
self.conv_L_cache = config.conv_L_cache
self._dtype = dtype
self.conv_cache: list[torch.Tensor] = []
device = torch.device(device) if device is not None else None
for _ in range(config.num_hidden_layers):
conv_state = torch.zeros(
self.max_batch_size,
config.hidden_size,
self.conv_L_cache,
dtype=self._dtype,
device=device,
)
self.conv_cache.append(conv_state)
self.key_cache.append(torch.tensor([]))
self.value_cache.append(torch.tensor([]))
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
Parameters:
key_states (`torch.Tensor`):
The new key states to cache.
value_states (`torch.Tensor`):
The new value states to cache.
layer_idx (`int`):
The index of the layer to cache the states for.
cache_kwargs (`Dict[str, Any]`, `optional`):
Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
Return:
A tuple containing the updated key and value states.
"""
# Update the cache
if self.key_cache[layer_idx].numel() == 0:
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
for layer_idx in range(len(self.key_cache)):
if self.key_cache[layer_idx].numel():
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
if self.conv_cache[layer_idx].numel():
device = self.conv_cache[layer_idx].device
self.conv_cache[layer_idx] = self.conv_cache[layer_idx].index_select(0, beam_idx.to(device))
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
# take any layer that contains cache and not empty tensor
layer_idx = self.first_attention_layer if self.layer_types[layer_idx] != "full_attention" else layer_idx
if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
return 0
return self.key_cache[layer_idx].shape[-2]
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
"""
Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for
the given layer at `layer_idx`.
The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns (i.e. sliding_window, chunk_size),
for each layer.
"""
full_mask_kv_offset = 0
query_length = cache_position.shape[0]
past_seen_tokens = self.get_seq_length()
kv_length = query_length + past_seen_tokens
return kv_length, full_mask_kv_offset
def crop(self, max_length: int):
"""Crop the cache to the given length"""
if max_length < 0:
max_length = self.get_seq_length() - abs(max_length)
if self.get_seq_length() <= max_length:
return
for idx in range(len(self.key_cache)):
if self.key_cache[idx].numel():
self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
def __len__(self) -> int:
return len(self.key_cache)
def reset(self):
for layer_idx in range(len(self.conv_cache)):
# In-place ops prevent breaking the static address
self.conv_cache[layer_idx].zero_()
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Repeat each key/value head ``n_rep`` times along the head axis.

    Equivalent to ``torch.repeat_interleave(hidden_states, n_rep, dim=1)``:
    (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim).
    """
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        # Nothing to repeat; hand back the original tensor untouched.
        return hidden_states
    expanded = hidden_states[:, :, None, :, :].expand(
        batch, num_kv_heads, n_rep, seq_len, head_dim
    )
    return expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Plain (non-fused) scaled dot-product attention with GQA support.

    Keys/values are first expanded to the full head count via ``repeat_kv``,
    then scores are computed, masked, softmaxed in float32, dropped out, and
    applied to the values. Returns ``(attn_output, attn_weights)`` with the
    output transposed to (batch, seq, heads, head_dim) and made contiguous.
    """
    keys = repeat_kv(key, module.num_key_value_groups)
    values = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, keys.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Slice the mask to the key length so cached prefixes line up.
        scores = scores + attention_mask[:, :, :, : keys.shape[-2]]

    # Softmax in float32 for numerical stability, then cast back.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    output = torch.matmul(probs, values).transpose(1, 2).contiguous()
    return output, probs
| Lfm2MoeHybridConvCache |
python | pallets__flask | src/flask/sansio/app.py | {
"start": 1928,
"end": 39459
} | class ____(Scaffold):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the :file:`__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea of what
belongs to your application. This name is used to find resources
on the filesystem, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in :file:`yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
.. versionadded:: 0.11
The `root_path` parameter was added.
.. versionadded:: 1.0
The ``host_matching`` and ``static_host`` parameters were added.
.. versionadded:: 1.0
The ``subdomain_matching`` parameter was added. Subdomain
matching needs to be enabled manually now. Setting
:data:`SERVER_NAME` does not implicitly enable it.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: The folder with static files that is served at
``static_url_path``. Relative to the application ``root_path``
or an absolute path. Defaults to ``'static'``.
:param static_host: the host to use when adding the static route.
Defaults to None. Required when using ``host_matching=True``
with a ``static_folder`` configured.
:param host_matching: set ``url_map.host_matching`` attribute.
Defaults to False.
:param subdomain_matching: consider the subdomain relative to
:data:`SERVER_NAME` when matching routes. Defaults to False.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to ``True`` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
:param root_path: The path to the root of the application files.
This should only be set manually when it can't be detected
automatically, such as for namespace packages.
"""
#: The class of the object assigned to :attr:`aborter`, created by
#: :meth:`create_aborter`. That object is called by
#: :func:`flask.abort` to raise HTTP errors, and can be
#: called directly as well.
#:
#: Defaults to :class:`werkzeug.exceptions.Aborter`.
#:
#: .. versionadded:: 2.2
aborter_class = Aborter
#: The class that is used for the Jinja environment.
#:
#: .. versionadded:: 0.11
jinja_environment = Environment
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on unexpected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: .. versionadded:: 0.10
#: Renamed from ``request_globals_class`.
app_ctx_globals_class = _AppCtxGlobals
#: The class that is used for the ``config`` attribute of this app.
#: Defaults to :class:`~flask.Config`.
#:
#: Example use cases for a custom class:
#:
#: 1. Default values for certain config options.
#: 2. Access to config values through attributes in addition to keys.
#:
#: .. versionadded:: 0.11
config_class = Config
#: The testing flag. Set this to ``True`` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate test helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: ``TESTING`` configuration key. Defaults to ``False``.
testing = ConfigAttribute[bool]("TESTING")
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: :data:`SECRET_KEY` configuration key. Defaults to ``None``.
secret_key = ConfigAttribute[str | bytes | None]("SECRET_KEY")
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute[timedelta](
"PERMANENT_SESSION_LIFETIME",
get_converter=_make_timedelta, # type: ignore[arg-type]
)
json_provider_class: type[JSONProvider] = DefaultJSONProvider
"""A subclass of :class:`~flask.json.provider.JSONProvider`. An
instance is created and assigned to :attr:`app.json` when creating
the app.
The default, :class:`~flask.json.provider.DefaultJSONProvider`, uses
Python's built-in :mod:`json` library. A different provider can use
a different JSON library.
.. versionadded:: 2.2
"""
#: Options that are passed to the Jinja environment in
#: :meth:`create_jinja_environment`. Changing these options after
#: the environment is created (accessing :attr:`jinja_env`) will
#: have no effect.
#:
#: .. versionchanged:: 1.1.0
#: This is a ``dict`` instead of an ``ImmutableDict`` to allow
#: easier configuration.
#:
jinja_options: dict[str, t.Any] = {}
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: The map object to use for storing the URL rules and routing
#: configuration parameters. Defaults to :class:`werkzeug.routing.Map`.
#:
#: .. versionadded:: 1.1.0
url_map_class = Map
#: The :meth:`test_client` method creates an instance of this test
#: client class. Defaults to :class:`~flask.testing.FlaskClient`.
#:
#: .. versionadded:: 0.7
test_client_class: type[FlaskClient] | None = None
#: The :class:`~click.testing.CliRunner` subclass, by default
#: :class:`~flask.testing.FlaskCliRunner` that is used by
#: :meth:`test_cli_runner`. Its ``__init__`` method should take a
#: Flask app object as the first argument.
#:
#: .. versionadded:: 1.0
test_cli_runner_class: type[FlaskCliRunner] | None = None
default_config: dict[str, t.Any]
response_class: type[Response]
def __init__(
    self,
    import_name: str,
    static_url_path: str | None = None,
    static_folder: str | os.PathLike[str] | None = "static",
    static_host: str | None = None,
    host_matching: bool = False,
    subdomain_matching: bool = False,
    template_folder: str | os.PathLike[str] | None = "templates",
    instance_path: str | None = None,
    instance_relative_config: bool = False,
    root_path: str | None = None,
) -> None:
    """Initialize the application object.

    See the class docstring for the meaning of each parameter. Raises
    ``ValueError`` if ``instance_path`` is given but is not absolute.
    """
    super().__init__(
        import_name=import_name,
        static_folder=static_folder,
        static_url_path=static_url_path,
        template_folder=template_folder,
        root_path=root_path,
    )

    # Derive the instance folder when not given; a provided path must be
    # absolute so config loading is unambiguous.
    if instance_path is None:
        instance_path = self.auto_find_instance_path()
    elif not os.path.isabs(instance_path):
        raise ValueError(
            "If an instance path is provided it must be absolute."
            " A relative path was given instead."
        )

    #: Holds the path to the instance folder.
    #:
    #: .. versionadded:: 0.8
    self.instance_path = instance_path

    #: The configuration dictionary as :class:`Config`. This behaves
    #: exactly like a regular dictionary but supports additional methods
    #: to load a config from files.
    self.config = self.make_config(instance_relative_config)

    #: An instance of :attr:`aborter_class` created by
    #: :meth:`make_aborter`. This is called by :func:`flask.abort`
    #: to raise HTTP errors, and can be called directly as well.
    #:
    #: .. versionadded:: 2.2
    #:     Moved from ``flask.abort``, which calls this object.
    self.aborter = self.make_aborter()

    self.json: JSONProvider = self.json_provider_class(self)
    """Provides access to JSON methods. Functions in ``flask.json``
    will call methods on this provider when the application context
    is active. Used for handling JSON requests and responses.

    An instance of :attr:`json_provider_class`. Can be customized by
    changing that attribute on a subclass, or by assigning to this
    attribute afterwards.

    The default, :class:`~flask.json.provider.DefaultJSONProvider`,
    uses Python's built-in :mod:`json` library. A different provider
    can use a different JSON library.

    .. versionadded:: 2.2
    """

    #: A list of functions that are called by
    #: :meth:`handle_url_build_error` when :meth:`.url_for` raises a
    #: :exc:`~werkzeug.routing.BuildError`. Each function is called
    #: with ``error``, ``endpoint`` and ``values``. If a function
    #: returns ``None`` or raises a ``BuildError``, it is skipped.
    #: Otherwise, its return value is returned by ``url_for``.
    #:
    #: .. versionadded:: 0.9
    self.url_build_error_handlers: list[
        t.Callable[[Exception, str, dict[str, t.Any]], str]
    ] = []

    #: A list of functions that are called when the application context
    #: is destroyed. Since the application context is also torn down
    #: if the request ends this is the place to store code that disconnects
    #: from databases.
    #:
    #: .. versionadded:: 0.9
    self.teardown_appcontext_funcs: list[ft.TeardownCallable] = []

    #: A list of shell context processor functions that should be run
    #: when a shell context is created.
    #:
    #: .. versionadded:: 0.11
    self.shell_context_processors: list[ft.ShellContextProcessorCallable] = []

    #: Maps registered blueprint names to blueprint objects. The
    #: dict retains the order the blueprints were registered in.
    #: Blueprints can be registered multiple times, this dict does
    #: not track how often they were attached.
    #:
    #: .. versionadded:: 0.7
    self.blueprints: dict[str, Blueprint] = {}

    #: a place where extensions can store application specific state. For
    #: example this is where an extension could store database engines and
    #: similar things.
    #:
    #: The key must match the name of the extension module. For example in
    #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
    #: ``'foo'``.
    #:
    #: .. versionadded:: 0.7
    self.extensions: dict[str, t.Any] = {}

    #: The :class:`~werkzeug.routing.Map` for this instance. You can use
    #: this to change the routing converters after the class was created
    #: but before any routes are connected. Example::
    #:
    #:    from werkzeug.routing import BaseConverter
    #:
    #:    class ListConverter(BaseConverter):
    #:        def to_python(self, value):
    #:            return value.split(',')
    #:        def to_url(self, values):
    #:            return ','.join(super(ListConverter, self).to_url(value)
    #:                            for value in values)
    #:
    #:    app = Flask(__name__)
    #:    app.url_map.converters['list'] = ListConverter
    self.url_map = self.url_map_class(host_matching=host_matching)

    self.subdomain_matching = subdomain_matching

    # tracks internally if the application already handled at least one
    # request.
    self._got_first_request = False
def _check_setup_finished(self, f_name: str) -> None:
    """Raise if a setup method is called after the first request.

    Once a request has been handled, late registrations would apply
    inconsistently, so they are rejected outright.
    """
    if not self._got_first_request:
        return
    raise AssertionError(
        f"The setup method '{f_name}' can no longer be called"
        " on the application. It has already handled its first"
        " request, any changes will not be applied"
        " consistently.\n"
        "Make sure all imports, decorators, functions, etc."
        " needed to set up the application are done before"
        " running it."
    )
@cached_property
def name(self) -> str:
    """The display name of the application.

    Normally the import name; when the app was started from a script run
    as ``__main__``, the script's file name (without extension) is used
    instead. May be assigned to override the guessed value.

    .. versionadded:: 0.8
    """
    if self.import_name != "__main__":
        return self.import_name

    main_file: str | None = getattr(sys.modules["__main__"], "__file__", None)
    if main_file is None:
        return "__main__"
    return os.path.splitext(os.path.basename(main_file))[0]
@cached_property
def logger(self) -> logging.Logger:
    """A standard Python :class:`~logging.Logger` for the app, with the
    same name as :attr:`name`.

    In debug mode the logger's level is set to ``DEBUG``, and if no
    handlers are configured a default one is added. See :doc:`/logging`.

    .. versionchanged:: 1.1.0
        The logger takes the same name as :attr:`name` rather than
        hard-coding ``"flask.app"``.

    .. versionchanged:: 1.0.0
        Behavior was simplified: the level is only set during
        configuration, one format is used, no handlers are removed, and
        a handler is only added if none are configured.

    .. versionadded:: 0.3
    """
    return create_logger(self)
@cached_property
def jinja_env(self) -> Environment:
    """The Jinja environment used to load templates.

    Created lazily on first access; later changes to
    :attr:`jinja_options` are not picked up.
    """
    return self.create_jinja_environment()
def create_jinja_environment(self) -> Environment:
    # The sans-IO base has no template machinery; concrete subclasses
    # (the WSGI ``Flask`` app) must provide the environment.
    raise NotImplementedError()
def make_config(self, instance_relative: bool = False) -> Config:
    """Create the :attr:`config` object for this application.

    :param instance_relative: when ``True`` the config's root path is the
        instance folder rather than the application root (mirrors the
        ``instance_relative_config`` constructor argument).

    .. versionadded:: 0.8
    """
    base_path = self.instance_path if instance_relative else self.root_path
    defaults = dict(self.default_config)
    defaults["DEBUG"] = get_debug_flag()
    return self.config_class(base_path, defaults)
def make_aborter(self) -> Aborter:
    """Create the object assigned to :attr:`aborter`, which
    :func:`flask.abort` calls to raise HTTP errors.

    Instantiates :attr:`aborter_class`, by default
    :class:`werkzeug.exceptions.Aborter`.

    .. versionadded:: 2.2
    """
    factory = self.aborter_class
    return factory()
def auto_find_instance_path(self) -> str:
    """Locate the default instance folder when none was given.

    For an installed package this is ``<prefix>/var/<name>-instance``;
    otherwise it is the ``instance`` folder next to the main file or
    package.

    .. versionadded:: 0.8
    """
    prefix, package_path = find_package(self.import_name)
    if prefix is not None:
        return os.path.join(prefix, "var", f"{self.name}-instance")
    return os.path.join(package_path, "instance")
def create_global_jinja_loader(self) -> DispatchingJinjaLoader:
    """Create the template loader for the Jinja environment.

    The returned loader dispatches between the application's loader and
    those of the individual blueprints. Overriding this method is
    discouraged; override :meth:`jinja_loader` instead.

    .. versionadded:: 0.7
    """
    return DispatchingJinjaLoader(self)
def select_jinja_autoescape(self, filename: str) -> bool:
    """Decide whether autoescaping is enabled for a template name.

    ``None`` (no template name) enables autoescaping; otherwise it is
    enabled for HTML/XML-like extensions, including ``.svg``.

    .. versionchanged:: 2.2
        Autoescaping is now enabled by default for ``.svg`` files.

    .. versionadded:: 0.5
    """
    if filename is None:
        return True
    autoescaped_suffixes = (".html", ".htm", ".xml", ".xhtml", ".svg")
    return filename.endswith(autoescaped_suffixes)
@property
def debug(self) -> bool:
    """Whether debug mode is enabled (the :data:`DEBUG` config key).

    When using ``flask run``, debug mode shows the interactive debugger
    for unhandled exceptions and reloads the server on code changes.
    Setting it late may not behave as expected.

    **Do not enable debug mode when deploying in production.**

    Default: ``False``
    """
    return self.config["DEBUG"]  # type: ignore[no-any-return]

@debug.setter
def debug(self, value: bool) -> None:
    self.config["DEBUG"] = value

    # Unless explicitly configured, template auto-reload follows debug.
    if self.config["TEMPLATES_AUTO_RELOAD"] is None:
        self.jinja_env.auto_reload = value
@setupmethod
def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
    """Register a :class:`~flask.Blueprint` on the application.

    Keyword arguments override the defaults set on the blueprint and are
    passed to :class:`~flask.blueprints.BlueprintSetupState`, where they
    can be accessed in :meth:`~flask.Blueprint.record` callbacks.
    Recognized options include ``url_prefix``, ``subdomain``,
    ``url_defaults`` and ``name`` (to register the same blueprint more
    than once under unique names for ``url_for``).

    The blueprint's :meth:`~flask.Blueprint.register` method performs the
    actual registration, including recording it in :attr:`blueprints`.

    :param blueprint: The blueprint to register.

    .. versionchanged:: 2.0.1
        The ``name`` option can be used to change the (pre-dotted)
        name the blueprint is registered with.

    .. versionadded:: 0.7
    """
    blueprint.register(self, options)
def iter_blueprints(self) -> t.ValuesView[Blueprint]:
    """Iterates over all blueprints by the order they were registered.

    .. versionadded:: 0.11
    """
    # dicts preserve insertion order, so this view is registration order.
    return self.blueprints.values()
@setupmethod
def add_url_rule(
    self,
    rule: str,
    endpoint: str | None = None,
    view_func: ft.RouteCallable | None = None,
    provide_automatic_options: bool | None = None,
    **options: t.Any,
) -> None:
    """Register a URL rule and (optionally) its view function.

    :param rule: The URL rule string.
    :param endpoint: Endpoint name for the rule; defaults to the view
        function's name.
    :param view_func: The view function to associate with the endpoint.
    :param provide_automatic_options: Whether ``OPTIONS`` is answered
        automatically; ``None`` means decide from the view function and
        the ``PROVIDE_AUTOMATIC_OPTIONS`` config.
    :param options: Extra keyword arguments passed to
        :attr:`url_rule_class`.

    :raises TypeError: if ``methods`` is a plain string.
    :raises AssertionError: if the endpoint is already mapped to a
        different view function.
    """
    if endpoint is None:
        endpoint = _endpoint_from_view_func(view_func)  # type: ignore

    options["endpoint"] = endpoint

    methods = options.pop("methods", None)
    # if the methods are not given and the view_func object knows its
    # methods we can use that instead. If neither exists, we go with
    # a tuple of only ``GET`` as default.
    if methods is None:
        methods = getattr(view_func, "methods", None) or ("GET",)

    if isinstance(methods, str):
        raise TypeError(
            "Allowed methods must be a list of strings, for"
            ' example: @app.route(..., methods=["POST"])'
        )

    methods = {item.upper() for item in methods}

    # Methods that should always be added
    required_methods: set[str] = set(getattr(view_func, "required_methods", ()))

    # starting with Flask 0.8 the view_func object can disable and
    # force-enable the automatic options handling.
    if provide_automatic_options is None:
        provide_automatic_options = getattr(
            view_func, "provide_automatic_options", None
        )

    if provide_automatic_options is None:
        if "OPTIONS" not in methods and self.config["PROVIDE_AUTOMATIC_OPTIONS"]:
            provide_automatic_options = True
            required_methods.add("OPTIONS")
        else:
            provide_automatic_options = False

    # Add the required methods now.
    methods |= required_methods

    rule_obj = self.url_rule_class(rule, methods=methods, **options)
    rule_obj.provide_automatic_options = provide_automatic_options  # type: ignore[attr-defined]

    self.url_map.add(rule_obj)

    if view_func is not None:
        old_func = self.view_functions.get(endpoint)
        # Guard against silently replacing an existing endpoint's view.
        if old_func is not None and old_func != view_func:
            raise AssertionError(
                "View function mapping is overwriting an existing"
                f" endpoint function: {endpoint}"
            )

        self.view_functions[endpoint] = view_func
# Overload: used directly as ``@app.template_filter`` (no parentheses).
@t.overload
def template_filter(self, name: T_template_filter) -> T_template_filter: ...

# Overload: used as ``@app.template_filter("name")`` (returns a decorator).
@t.overload
def template_filter(
    self, name: str | None = None
) -> t.Callable[[T_template_filter], T_template_filter]: ...

@setupmethod
def template_filter(
    self, name: T_template_filter | str | None = None
) -> T_template_filter | t.Callable[[T_template_filter], T_template_filter]:
    """Decorate a function to register it as a custom Jinja filter. The name
    is optional. The decorator may be used without parentheses.

    .. code-block:: python

        @app.template_filter("reverse")
        def reverse_filter(s):
            return reversed(s)

    The :meth:`add_template_filter` method may be used to register a
    function later rather than decorating.

    :param name: The name to register the filter as. If not given, uses the
        function's name.
    """
    if callable(name):
        # Bare decorator usage: ``name`` is the decorated function itself.
        self.add_template_filter(name)
        return name

    def decorator(f: T_template_filter) -> T_template_filter:
        self.add_template_filter(f, name=name)
        return f

    return decorator
@setupmethod
def add_template_filter(
    self, f: ft.TemplateFilterCallable, name: str | None = None
) -> None:
    """Register ``f`` as a custom Jinja filter.

    The :meth:`template_filter` decorator is the decorating equivalent.

    :param f: The function to register.
    :param name: The name to register the filter as. If not given, uses
        the function's name.
    """
    filter_name = name or f.__name__
    self.jinja_env.filters[filter_name] = f
# Overload: used directly as ``@app.template_test`` (no parentheses).
@t.overload
def template_test(self, name: T_template_test) -> T_template_test: ...

# Overload: used as ``@app.template_test("name")`` (returns a decorator).
@t.overload
def template_test(
    self, name: str | None = None
) -> t.Callable[[T_template_test], T_template_test]: ...

@setupmethod
def template_test(
    self, name: T_template_test | str | None = None
) -> T_template_test | t.Callable[[T_template_test], T_template_test]:
    """Decorate a function to register it as a custom Jinja test. The name
    is optional. The decorator may be used without parentheses.

    .. code-block:: python

        @app.template_test("prime")
        def is_prime_test(n):
            if n == 2:
                return True
            for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
                if n % i == 0:
                    return False
            return True

    The :meth:`add_template_test` method may be used to register a function
    later rather than decorating.

    :param name: The name to register the filter as. If not given, uses the
        function's name.

    .. versionadded:: 0.10
    """
    if callable(name):
        # Bare decorator usage: ``name`` is the decorated function itself.
        self.add_template_test(name)
        return name

    def decorator(f: T_template_test) -> T_template_test:
        self.add_template_test(f, name=name)
        return f

    return decorator
@setupmethod
def add_template_test(
    self, f: ft.TemplateTestCallable, name: str | None = None
) -> None:
    """Register ``f`` as a custom Jinja test.

    The :meth:`template_test` decorator is the decorating equivalent.

    :param f: The function to register.
    :param name: The name to register the test as. If not given, uses the
        function's name.

    .. versionadded:: 0.10
    """
    test_name = name or f.__name__
    self.jinja_env.tests[test_name] = f
# Overload: used directly as ``@app.template_global`` (no parentheses).
@t.overload
def template_global(self, name: T_template_global) -> T_template_global: ...

# Overload: used as ``@app.template_global("name")`` (returns a decorator).
@t.overload
def template_global(
    self, name: str | None = None
) -> t.Callable[[T_template_global], T_template_global]: ...

@setupmethod
def template_global(
    self, name: T_template_global | str | None = None
) -> T_template_global | t.Callable[[T_template_global], T_template_global]:
    """Decorate a function to register it as a custom Jinja global. The name
    is optional. The decorator may be used without parentheses.

    .. code-block:: python

        @app.template_global
        def double(n):
            return 2 * n

    The :meth:`add_template_global` method may be used to register a
    function later rather than decorating.

    :param name: The name to register the global as. If not given, uses the
        function's name.

    .. versionadded:: 0.10
    """
    if callable(name):
        # Bare decorator usage: ``name`` is the decorated function itself.
        self.add_template_global(name)
        return name

    def decorator(f: T_template_global) -> T_template_global:
        self.add_template_global(f, name=name)
        return f

    return decorator
@setupmethod
def add_template_global(
    self, f: ft.TemplateGlobalCallable, name: str | None = None
) -> None:
    """Register ``f`` as a custom Jinja global.

    The :meth:`template_global` decorator is the decorating equivalent.

    :param f: The function to register.
    :param name: The name to register the global as. If not given, uses
        the function's name.

    .. versionadded:: 0.10
    """
    global_name = name or f.__name__
    self.jinja_env.globals[global_name] = f
@setupmethod
def teardown_appcontext(self, f: T_teardown) -> T_teardown:
    """Register ``f`` to run when the application context is popped.

    The context pops at the end of a request, CLI command, or manual
    ``with app.app_context():`` block, just before the context becomes
    inactive. If the context is torn down because of an unhandled
    exception, the function is passed the error object — unless a
    registered :meth:`errorhandler` already handled it.

    Teardown functions must avoid raising: wrap risky code in a
    ``try``/``except`` block and log any errors. Return values are
    ignored.

    .. versionadded:: 0.9
    """
    self.teardown_appcontext_funcs.append(f)
    return f
@setupmethod
def shell_context_processor(
    self, f: T_shell_context_processor
) -> T_shell_context_processor:
    """Register ``f`` as a shell context processor, run when a shell
    context is created.

    .. versionadded:: 0.11
    """
    self.shell_context_processors.append(f)
    return f
def _find_error_handler(
    self, e: Exception, blueprints: list[str]
) -> ft.ErrorHandlerCallable | None:
    """Return a registered error handler for an exception in this order:
    blueprint handler for a specific code, app handler for a specific code,
    blueprint handler for an exception class, app handler for an exception
    class, or ``None`` if a suitable handler is not found.
    """
    exc_class, code = self._get_exc_class_and_code(type(e))
    # ``None`` appended last means the app-level handlers (no blueprint).
    names = (*blueprints, None)

    # Code-specific handlers take priority over class-only handlers.
    for c in (code, None) if code is not None else (None,):
        for name in names:
            handler_map = self.error_handler_spec[name][c]

            if not handler_map:
                continue

            # Walk the MRO so a handler registered for a base exception
            # class also catches its subclasses.
            for cls in exc_class.__mro__:
                handler = handler_map.get(cls)

                if handler is not None:
                    return handler
    return None
def trap_http_exception(self, e: Exception) -> bool:
    """Decide whether an HTTP exception raised by a view should be
    re-raised as a regular traceback instead of being handled.

    Returns ``True`` when ``TRAP_HTTP_EXCEPTIONS`` is set; otherwise bad
    request errors are trapped according to ``TRAP_BAD_REQUEST_ERRORS``
    (when that is unset, :exc:`BadRequestKeyError` is trapped in debug
    mode). Trapped exceptions skip the error handler and show up as
    plain tracebacks — helpful for debugging implicitly raised HTTP
    exceptions.

    .. versionchanged:: 1.0
        Bad request errors are not trapped by default in debug mode.

    .. versionadded:: 0.8
    """
    if self.config["TRAP_HTTP_EXCEPTIONS"]:
        return True

    trap_bad_request = self.config["TRAP_BAD_REQUEST_ERRORS"]

    if trap_bad_request is None:
        # Unset: trap key errors only while debugging.
        return bool(self.debug and isinstance(e, BadRequestKeyError))

    return bool(trap_bad_request) and isinstance(e, BadRequest)
def should_ignore_error(self, error: BaseException | None) -> bool:
    """Hook deciding whether teardown handlers should be told about
    ``error``. The base implementation never ignores errors; subclasses
    may override to return ``True`` and withhold the error.

    .. versionadded:: 0.10
    """
    return False
def redirect(self, location: str, code: int = 302) -> BaseResponse:
    """Create a redirect response object.

    Called by :func:`flask.redirect`; may be called directly as well.
    Uses :attr:`response_class` as the response type.

    :param location: The URL to redirect to.
    :param code: The status code for the redirect.

    .. versionadded:: 2.2
        Moved from ``flask.redirect``, which calls this method.
    """
    return _wz_redirect(
        location, code=code, Response=self.response_class  # type: ignore[arg-type]
    )
def inject_url_defaults(self, endpoint: str, values: dict[str, t.Any]) -> None:
    """Apply registered URL default functions for ``endpoint`` to
    ``values`` in place. Called automatically during URL building.

    .. versionadded:: 0.7
    """
    # App-level defaults (key ``None``) always apply. Blueprint names
    # are parsed from the endpoint because url_for may be called
    # outside a request context.
    names: t.Iterable[str | None] = (None,)

    if "." in endpoint:
        blueprint_path = _split_blueprint_path(endpoint.rpartition(".")[0])
        names = chain(names, reversed(blueprint_path))

    for name in names:
        if name in self.url_default_functions:
            for func in self.url_default_functions[name]:
                func(endpoint, values)
def handle_url_build_error(
self, error: BuildError, endpoint: str, values: dict[str, t.Any]
) -> str:
"""Called by :meth:`.url_for` if a
:exc:`~werkzeug.routing.BuildError` was raised. If this returns
a value, it will be returned by ``url_for``, otherwise the error
will be re-raised.
Each function in :attr:`url_build_error_handlers` is called with
``error``, ``endpoint`` and ``values``. If a function returns
``None`` or raises a ``BuildError``, it is skipped. Otherwise,
its return value is returned by ``url_for``.
:param error: The active ``BuildError`` being handled.
:param endpoint: The endpoint being built.
:param values: The keyword arguments passed to ``url_for``.
"""
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
except BuildError as e:
# make error available outside except block
error = e
else:
if rv is not None:
return rv
# Re-raise if called with an active exception, otherwise raise
# the passed in exception.
if error is sys.exc_info()[1]:
raise
raise error
| App |
python | getsentry__sentry | src/sentry/api/serializers/models/organization.py | {
"start": 31300,
"end": 33720
} | class ____(DetailedOrganizationSerializer):
def get_attrs(
self, item_list: Sequence[Organization], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> MutableMapping[Organization, MutableMapping[str, Any]]:
return super().get_attrs(item_list, user)
def _project_list(self, organization: Organization, access: Access) -> list[Project]:
project_list = list(
Project.objects.filter(organization=organization, status=ObjectStatus.ACTIVE).order_by(
"slug"
)
)
for project in project_list:
project.set_cached_field_value("organization", organization)
return project_list
def _team_list(self, organization: Organization, access: Access) -> list[Team]:
team_list = list(
Team.objects.filter(organization=organization, status=TeamStatus.ACTIVE).order_by(
"slug"
)
)
for team in team_list:
team.set_cached_field_value("organization", organization)
return team_list
def serialize( # type: ignore[override]
self,
obj: Organization,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
access: Access,
**kwargs: Any,
) -> DetailedOrganizationSerializerWithProjectsAndTeamsResponse:
from sentry.api.serializers.models.project import (
LATEST_DEPLOYS_KEY,
ProjectSummarySerializer,
)
from sentry.api.serializers.models.team import TeamSerializer
context = cast(
DetailedOrganizationSerializerWithProjectsAndTeamsResponse,
super().serialize(obj, attrs, user, access, **kwargs),
)
team_list = self._team_list(obj, access)
project_list = self._project_list(obj, access)
context["teams"] = serialize(team_list, user, TeamSerializer(access=access))
collapse_projects: set[str] = set()
if killswitch_matches_context(
"api.organization.disable-last-deploys",
{
"organization_id": obj.id,
},
):
collapse_projects = {LATEST_DEPLOYS_KEY}
context["projects"] = serialize(
project_list, user, ProjectSummarySerializer(access=access, collapse=collapse_projects)
)
return context
| DetailedOrganizationSerializerWithProjectsAndTeams |
python | google__jax | jax/_src/error_check.py | {
"start": 1215,
"end": 1695
} | class ____(ValueError):
"""Exception raised for runtime errors detected within JAX computations."""
#: The default error code for no error.
#:
#: This value is chosen because we can use `jnp.min()` to obtain the
#: first error when performing reductions.
_NO_ERROR = np.iinfo(np.uint32).max
_error_list_lock = threading.RLock()
# (error_message, traceback) pairs. Traceback is `str` when imported from AOT.
_error_list: list[tuple[str, TracebackType | str]] = []
| JaxValueError |
python | ansible__ansible | test/units/config/test_manager.py | {
"start": 1154,
"end": 7586
} | class ____:
def __eq__(self, other): ...
@pytest.mark.parametrize("value, value_type, expected_value", [
(None, 'str', None), # all types share a common short-circuit for None
(Unhashable(), 'bool', False),
('y', 'bool', True),
('yes', 'bool', True),
('on', 'bool', True),
('1', 'bool', True),
('true', 'bool', True),
('t', 'bool', True),
(1, 'bool', True),
(1.0, 'bool', True),
(True, 'bool', True),
('n', 'bool', False),
('no', 'bool', False),
('off', 'bool', False),
('0', 'bool', False),
('false', 'bool', False),
('f', 'bool', False),
(0, 'bool', False),
(0.0, 'bool', False),
(False, 'bool', False),
(False, 'boolean', False), # alias
('10', 'int', 10),
(20, 'int', 20),
(True, 'int', 1),
(False, 'int', 0),
(42.0, 'int', 42),
(-42.0, 'int', -42),
(-42.0, 'integer', -42), # alias
('2', 'float', 2.0),
('0.10', 'float', 0.10),
(0.2, 'float', 0.2),
('a,b', 'list', ['a', 'b']),
(['a', 1], 'list', ['a', 1]),
(('a', 1), 'list', ['a', 1]),
('None', 'none', None),
('/p1', 'pathspec', ['/p1']),
('/p1:/p2', 'pathspec', ['/p1', '/p2']),
('/p1:/p2', 'pathspec', ['/p1', '/p2']),
(['/p1', '/p2'], 'pathspec', ['/p1', '/p2']),
('/tmp/test.yml,/home/test2.yml', 'pathlist', ['/tmp/test.yml', '/home/test2.yml']),
('a', 'str', 'a'),
('Café', 'str', 'Café'),
('', 'str', ''),
('29', 'str', '29'),
('13.37', 'str', '13.37'),
('123j', 'str', '123j'),
('0x123', 'str', '0x123'),
('true', 'str', 'true'),
('True', 'str', 'True'),
(0, 'str', '0'),
(29, 'str', '29'),
(13.37, 'str', '13.37'),
(123j, 'str', '123j'),
(0x123, 'str', '291'),
(True, 'str', 'True'),
(True, 'string', 'True'), # alias
(CustomMapping(dict(a=1)), 'dict', dict(a=1)),
(dict(a=1), 'dict', dict(a=1)),
(dict(a=1), 'dictionary', dict(a=1)), # alias
(123, 'bogustype', 123), # unknown non-string types pass through unmodified
])
def test_ensure_type(value: object, value_type: str, expected_value: object) -> None:
value = ensure_type(value, value_type)
assert isinstance(value, type(expected_value))
assert value == expected_value
@pytest.mark.parametrize("value, value_type, expected_msg_substring", [
('a', 'int', "Invalid value provided for 'int': 'a'"),
('NaN', 'int', "Invalid value provided for 'int': 'NaN'"),
(b'10', 'int', "Invalid value provided for 'int': b'10'"),
(1.1, 'int', "Invalid value provided for 'int': 1.1"),
('1.1', 'int', "Invalid value provided for 'int': '1.1'"),
(-1.1, 'int', "Invalid value provided for 'int': -1.1"),
('a', 'float', "Invalid value provided for 'float': 'a'"),
(b'a', 'float', "Invalid value provided for 'float': b'a'"),
(1, 'list', "Invalid value provided for 'list': 1"),
(b'a', 'list', "Invalid value provided for 'list': b'a'"),
(1, 'none', "Invalid value provided for 'none': 1"),
(1, 'path', "Invalid value provided for 'path': 1"),
(1, 'tmp', "Invalid value provided for 'tmp': 1"),
(1, 'pathspec', "Invalid value provided for 'pathspec': 1"),
(b'a', 'pathspec', "Invalid value provided for 'pathspec': b'a'"),
([b'a'], 'pathspec', "Invalid value provided for 'pathspec': [b'a']"),
(1, 'pathlist', "Invalid value provided for 'pathlist': 1"),
(b'a', 'pathlist', "Invalid value provided for 'pathlist': b'a'"),
([b'a'], 'pathlist', "Invalid value provided for 'pathlist': [b'a']"),
(1, 'dict', "Invalid value provided for 'dict': 1"),
([1], 'str', "Invalid value provided for 'str': [1]"),
])
def test_ensure_type_failure(value: object, value_type: str, expected_msg_substring: str) -> None:
with pytest.raises(ValueError, match=re.escape(expected_msg_substring)):
ensure_type(value, value_type)
@pytest.mark.parametrize("value, expected_value, value_type, origin, origin_ftype", [
('"value"', '"value"', 'str', 'env: ENVVAR', None),
('"value"', '"value"', 'str', os.path.join(curdir, 'test.yml'), 'yaml'),
('"value"', 'value', 'str', cfg_file, 'ini'),
('\'value\'', 'value', 'str', cfg_file, 'ini'),
('\'\'value\'\'', '\'value\'', 'str', cfg_file, 'ini'),
('""value""', '"value"', 'str', cfg_file, 'ini'),
('"x"', 'x', 'bogustype', cfg_file, 'ini'), # unknown string types are unquoted
])
def test_ensure_type_unquoting(value: str, expected_value: str, value_type: str, origin: str | None, origin_ftype: str | None) -> None:
actual_value = ensure_type(value, value_type, origin, origin_ftype)
assert actual_value == expected_value
test_origin = Origin(description='abc')
@pytest.mark.parametrize("value, type", (
(test_origin.tag('a,b,c'), 'list'),
(test_origin.tag(('a', 'b')), 'list'),
(test_origin.tag('1'), 'int'),
(test_origin.tag('plainstr'), 'str'),
))
def test_ensure_type_tag_propagation(value: object, type: str) -> None:
result = ensure_type(value, type)
if value == result:
assert value is result # if the value wasn't transformed, it should be the same instance
if isinstance(value, str) and isinstance(result, list):
# split a str list; each value should be tagged
assert all(Origin.is_tagged_on(v) for v in result)
# the result should always be tagged
assert Origin.is_tagged_on(result)
@pytest.mark.parametrize("value, type", (
(test_origin.tag('plainstr'), 'tmp'),
))
def test_ensure_type_no_tag_propagation(value: object, type: str) -> None:
result = ensure_type(value, type, origin='/tmp')
assert not AnsibleTagHelper.tags(result)
@pytest.mark.parametrize("value, type", (
('blah1', 'temppath'),
('blah2', 'tmp'),
('blah3', 'tmppath'),
))
def test_ensure_type_temppath(value: object, type: str, tmp_path: pathlib.Path) -> None:
path = ensure_type(value, type, origin=str(tmp_path))
assert os.path.isdir(path)
assert value in path
assert os.listdir(path) == []
def test_ensure_type_vaulted(_vault_secrets_context: VaultTestHelper) -> None:
raw = "secretvalue"
origin = Origin(description='test')
es = _vault_secrets_context.make_encrypted_string(raw)
es = origin.tag(es)
result = ensure_type(es, 'str')
assert isinstance(result, str)
assert result == raw
assert VaultedValue.is_tagged_on(result)
assert Origin.get_tag(result) is origin
| Unhashable |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_dataspec.py | {
"start": 11849,
"end": 17225
} | class ____:
def test_field(self) -> None:
class Foo(HasProps):
x = bcpd.NumberSpec("xfield")
f = Foo()
assert f.x == "xfield"
assert Foo.__dict__["x"].get_value(f) == Field("xfield")
f.x = "my_x"
assert f.x == "my_x"
assert Foo.__dict__["x"].get_value(f) == Field("my_x")
def test_value(self) -> None:
class Foo(HasProps):
x = bcpd.NumberSpec("xfield")
f = Foo()
assert f.x == "xfield"
f.x = 12
assert f.x == 12
assert Foo.__dict__["x"].get_value(f) == Value(12)
f.x = 15
assert f.x == 15
assert Foo.__dict__["x"].get_value(f) == Value(15)
f.x = dict(value=32)
assert Foo.__dict__["x"].get_value(f) == Value(32)
def tests_accepts_timedelta(self):
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=BokehDeprecationWarning)
class Foo(HasProps):
dt = bcpd.NumberSpec("dt", accept_datetime=True)
ndt = bcpd.NumberSpec("ndt", accept_datetime=False)
f = Foo()
# FYI Numpy erroneously raises an annoying warning about elementwise
# comparison below because a timedelta is compared to a float.
# https://github.com/numpy/numpy/issues/10095
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=DeprecationWarning)
f.dt = datetime.timedelta(3, 54)
assert f.dt == 259254000.0
# counts as number.Real out of the box
f.dt = np.timedelta64(3000, "ms")
assert f.dt == np.timedelta64(3000, "ms")
f.ndt = datetime.timedelta(3, 54)
assert f.ndt == 259254000.0
# counts as number.Real out of the box
f.ndt = np.timedelta64(3000, "ms")
assert f.ndt == np.timedelta64(3000, "ms")
def tests_accepts_timedelta_with_pandas(self):
pd = pytest.importorskip("pandas")
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=BokehDeprecationWarning)
class Foo(HasProps):
dt = bcpd.NumberSpec("dt", accept_datetime=True)
ndt = bcpd.NumberSpec("ndt", accept_datetime=False)
f = Foo()
# counts as number.Real out of the box
f.dt = pd.Timedelta("3000ms")
assert f.dt == 3000.0
f.ndt = pd.Timedelta("3000ms")
assert f.ndt == 3000.0
def test_accepts_datetime(self) -> None:
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=BokehDeprecationWarning)
class Foo(HasProps):
dt = bcpd.NumberSpec("dt", accept_datetime=True)
ndt = bcpd.NumberSpec("ndt", accept_datetime=False)
f = Foo()
f.dt = datetime.datetime(2016, 5, 11)
assert f.dt == 1462924800000.0
f.dt = np.datetime64("2016-05-11")
assert f.dt == 1462924800000.0
f.dt = datetime.date(2016, 5, 11)
assert f.dt == 1462924800000.0
with pytest.raises(ValueError):
f.ndt = datetime.datetime(2016, 5, 11)
with pytest.raises(ValueError):
f.ndt = datetime.date(2016, 5, 11)
with pytest.raises(ValueError):
f.ndt = np.datetime64("2016-05-11")
def test_default(self) -> None:
class Foo(HasProps):
y = bcpd.NumberSpec(default=12)
f = Foo()
assert f.y == 12
assert Foo.__dict__["y"].get_value(f) == Value(12)
f.y = "y1"
assert f.y == "y1"
# Once we set a concrete value, the default is ignored, because it is unused
f.y = 32
assert f.y == 32
assert Foo.__dict__["y"].get_value(f) == Value(32)
def test_multiple_instances(self) -> None:
class Foo(HasProps):
x = bcpd.NumberSpec("xfield")
a = Foo()
b = Foo()
a.x = 13
b.x = 14
assert a.x == 13
assert b.x == 14
assert Foo.__dict__["x"].get_value(a) == Value(13)
assert Foo.__dict__["x"].get_value(b) == Value(14)
b.x = {"field": "x3"}
assert Foo.__dict__["x"].get_value(a) == Value(13)
assert Foo.__dict__["x"].get_value(b) == Field("x3")
def test_set_from_json_keeps_mode(self) -> None:
class Foo(HasProps):
x = bcpd.NumberSpec(default=-1)
a = Foo()
assert a.x == -1
# set as a value
a.x = 14
assert a.x == 14
# set_from_json keeps the previous dict-ness or lack thereof
a.set_from_json('x', dict(value=16))
assert a.x == 16
# but regular assignment overwrites the previous dict-ness
a.x = dict(value=17)
assert a.x == value(17)
# set as a field
a.x = "bar"
assert a.x == "bar"
# set_from_json keeps the previous dict-ness or lack thereof
a.set_from_json('x', dict(field="foo"))
assert a.x == "foo"
# but regular assignment overwrites the previous dict-ness
a.x = dict(field="baz")
assert a.x == field("baz")
| Test_NumberSpec |
python | getsentry__sentry | src/sentry/api/endpoints/project_trace_item_details.py | {
"start": 8459,
"end": 8773
} | class ____(serializers.Serializer):
trace_id = serializers.UUIDField(format="hex", required=True)
item_type = serializers.ChoiceField([e.value for e in SupportedTraceItemType], required=True)
referrer = serializers.CharField(required=False)
@region_silo_endpoint
| ProjectTraceItemDetailsEndpointSerializer |
python | run-llama__llama_index | llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py | {
"start": 1126,
"end": 2087
} | class ____(BaseModel):
modalities: List[str] = Field(default=["text", "audio"])
instructions: str = Field(default="You are a helpful assistant.")
voice: str = Field(default="sage")
input_audio_format: str = Field(default="pcm16")
output_audio_format: str = Field(default="pcm16")
input_audio_transcription: Dict[Literal["model"], str] = Field(
max_length=1, default={"model": "whisper-1"}
)
turn_detection: ConversationVAD = Field(default_factory=ConversationVAD)
tools: List[ConversationTool] = Field(
default_factory=list,
)
tool_choice: Literal["auto", "none", "required"] = Field(default="auto")
temperature: float = Field(default=0.8, ge=0.6)
max_response_output_tokens: Union[Literal["inf"], int] = Field(
default="inf",
ge=1,
le=4096,
)
speed: float = Field(default=1.1)
tracing: Union[Literal["auto"], Dict] = Field(default="auto")
| ConversationSession |
python | pytorch__pytorch | torch/_dynamo/mutation_guard.py | {
"start": 899,
"end": 2291
} | class ____:
db: ExactWeakKeyDictionary = ExactWeakKeyDictionary()
def __init__(self) -> None:
self.mutation_count: int = 0
self.watchers: list[weakref.ReferenceType[Any]] = []
def on_mutation(self, name: str) -> None:
self.mutation_count += 1
tmp = self.watchers
self.watchers = []
for ref in tmp:
guarded = ref()
if guarded is not None:
guarded.invalidate(ref)
def track(self, guarded_code: Any) -> None:
self.watchers.append(weakref.ref(guarded_code))
def watch(obj: Any, guarded_code: Any) -> None:
"""invalidate guarded_code when obj is mutated"""
ensure_patched(type(obj))
if obj not in MutationTracker.db:
MutationTracker.db[obj] = MutationTracker()
tracker = MutationTracker.db[obj]
tracker.track(guarded_code)
def ensure_patched(cls: Any) -> None:
if getattr(cls, "___needs_mutation_patch", True):
cls.___needs_mutation_patch = False
original_setattr = cls.__setattr__
@functools.wraps(original_setattr)
def custom_setattr(self: Any, key: str, value: Any) -> None:
try:
MutationTracker.db[self].on_mutation(key)
except KeyError:
pass
return original_setattr(self, key, value)
cls.__setattr__ = custom_setattr
| MutationTracker |
python | tensorflow__tensorflow | tensorflow/python/ops/accumulate_n_benchmark.py | {
"start": 1281,
"end": 5283
} | class ____(test.Benchmark):
def _AccumulateNTemplate(self, inputs, init, shape, validate_shape):
var = gen_state_ops.temporary_variable(
shape=shape, dtype=inputs[0].dtype.base_dtype)
ref = state_ops.assign(var, init, validate_shape=validate_shape)
update_ops = [
state_ops.assign_add(
ref, tensor, use_locking=True).op for tensor in inputs
]
with ops.control_dependencies(update_ops):
return gen_state_ops.destroy_temporary_variable(ref, var_name=var.op.name)
def _AccumulateNInitializedWithFirst(self, inputs):
return self._AccumulateNTemplate(
inputs,
init=array_ops.zeros_like(inputs[0]),
shape=inputs[0].get_shape(),
validate_shape=True)
def _AccumulateNInitializedWithMerge(self, inputs):
return self._AccumulateNTemplate(
inputs,
init=array_ops.zeros_like(gen_control_flow_ops.merge(inputs)[0]),
shape=tensor_shape.TensorShape([0]),
validate_shape=False)
def _AccumulateNInitializedWithShape(self, inputs):
return self._AccumulateNTemplate(
inputs,
init=array_ops.zeros(
shape=inputs[0].get_shape(), dtype=inputs[0].dtype.base_dtype),
shape=inputs[0].get_shape(),
validate_shape=True)
def _GenerateUnorderedInputs(self, size, n):
inputs = [random_ops.random_uniform(shape=[size]) for _ in range(n)]
random.shuffle(inputs)
return inputs
def _GenerateReplicatedInputs(self, size, n):
return n * self._GenerateUnorderedInputs(size, 1)
def _GenerateOrderedInputs(self, size, n):
inputs = self._GenerateUnorderedInputs(size, 1)
queue = data_flow_ops.FIFOQueue(
capacity=1, dtypes=[inputs[0].dtype], shapes=[inputs[0].get_shape()])
for _ in range(n - 1):
op = queue.enqueue(inputs[-1])
with ops.control_dependencies([op]):
inputs.append(math_ops.tanh(1.0 + queue.dequeue()))
return inputs
def _GenerateReversedInputs(self, size, n):
inputs = self._GenerateOrderedInputs(size, n)
inputs.reverse()
return inputs
def _SetupAndRunBenchmark(self, graph, inputs, repeats, format_args):
with graph.as_default():
add_n = math_ops.add_n(inputs)
acc_n_first = self._AccumulateNInitializedWithFirst(inputs)
acc_n_merge = self._AccumulateNInitializedWithMerge(inputs)
acc_n_shape = self._AccumulateNInitializedWithShape(inputs)
test_ops = (("AddN", add_n.op),
("AccNFirst", acc_n_first.op),
("AccNMerge", acc_n_merge.op),
("AccNShape", acc_n_shape.op))
with session.Session(graph=graph):
for tag, op in test_ops:
for _ in range(100):
op.run() # Run for warm up.
start = time.time()
for _ in range(repeats):
op.run()
duration = time.time() - start
args = format_args + (tag, duration)
print(self._template.format(*args))
def _RunBenchmark(self, tag, input_fn, sizes, ninputs, repeats):
for size in sizes:
for ninput in ninputs:
graph = ops.Graph()
with graph.as_default():
inputs = input_fn(size, ninput)
format_args = (tag, size, ninput, repeats)
self._SetupAndRunBenchmark(graph, inputs, repeats, format_args)
def benchmarkAccumulateN(self):
self._template = "{:<15}" * 6
args = {
"sizes": (128, 128**2),
"ninputs": (1, 10, 100, 300),
"repeats": 100
}
benchmarks = (("Replicated", self._GenerateReplicatedInputs),
("Unordered", self._GenerateUnorderedInputs),
("Ordered", self._GenerateOrderedInputs),
("Reversed", self._GenerateReversedInputs))
print(self._template.format("", "Size", "#Inputs", "#Repeat", "Method",
"Duration"))
print("-" * 90)
for benchmark in benchmarks:
self._RunBenchmark(*benchmark, **args)
if __name__ == "__main__":
test.main()
| AccumulateNBenchmark |
python | Textualize__textual | docs/examples/widgets/input_types.py | {
"start": 79,
"end": 325
} | class ____(App):
def compose(self) -> ComposeResult:
yield Input(placeholder="An integer", type="integer")
yield Input(placeholder="A number", type="number")
if __name__ == "__main__":
app = InputApp()
app.run()
| InputApp |
python | google__pytype | pytype/file_utils_test.py | {
"start": 162,
"end": 3143
} | class ____(unittest.TestCase):
"""Test file and path utilities."""
def test_replace_extension(self):
self.assertEqual("foo.bar", file_utils.replace_extension("foo.txt", "bar"))
self.assertEqual("foo.bar", file_utils.replace_extension("foo.txt", ".bar"))
self.assertEqual(
"a.b.c.bar", file_utils.replace_extension("a.b.c.txt", ".bar")
)
self.assertEqual(
file_utils.replace_separator("a.b/c.bar"),
file_utils.replace_extension(
file_utils.replace_separator("a.b/c.d"), ".bar"
),
)
self.assertEqual("xyz.bar", file_utils.replace_extension("xyz", "bar"))
def test_tempdir(self):
with test_utils.Tempdir() as d:
filename1 = d.create_file("foo.txt")
filename2 = d.create_file("bar.txt", "\tdata2")
filename3 = d.create_file("baz.txt", "data3")
filename4 = d.create_file(
file_utils.replace_separator("d1/d2/qqsv.txt"), " data4.1\n data4.2"
)
filename5 = d.create_directory("directory")
self.assertEqual(filename1, d["foo.txt"])
self.assertEqual(filename2, d["bar.txt"])
self.assertEqual(filename3, d["baz.txt"])
self.assertEqual(
filename4, d[file_utils.replace_separator("d1/d2/qqsv.txt")]
)
self.assertTrue(path_utils.isdir(d.path))
self.assertTrue(path_utils.isfile(filename1))
self.assertTrue(path_utils.isfile(filename2))
self.assertTrue(path_utils.isfile(filename3))
self.assertTrue(path_utils.isfile(filename4))
self.assertTrue(path_utils.isdir(path_utils.join(d.path, "d1")))
self.assertTrue(path_utils.isdir(path_utils.join(d.path, "d1", "d2")))
self.assertTrue(path_utils.isdir(filename5))
self.assertEqual(
filename4, path_utils.join(d.path, "d1", "d2", "qqsv.txt")
)
for filename, contents in [
(filename1, ""),
(filename2, "data2"), # dedented
(filename3, "data3"),
(filename4, "data4.1\ndata4.2"), # dedented
]:
with open(filename) as fi:
self.assertEqual(fi.read(), contents)
self.assertFalse(path_utils.isdir(d.path))
self.assertFalse(path_utils.isfile(filename1))
self.assertFalse(path_utils.isfile(filename2))
self.assertFalse(path_utils.isfile(filename3))
self.assertFalse(path_utils.isdir(path_utils.join(d.path, "d1")))
self.assertFalse(path_utils.isdir(path_utils.join(d.path, "d1", "d2")))
self.assertFalse(path_utils.isdir(filename5))
def test_cd(self):
with test_utils.Tempdir() as d:
d.create_directory("foo")
d1 = path_utils.getcwd()
with file_utils.cd(d.path):
self.assertTrue(path_utils.isdir("foo"))
d2 = path_utils.getcwd()
self.assertEqual(d1, d2)
def test_cd_noop(self):
d = path_utils.getcwd()
with file_utils.cd(None):
self.assertEqual(path_utils.getcwd(), d)
with file_utils.cd(""):
self.assertEqual(path_utils.getcwd(), d)
| FileUtilsTest |
python | cython__cython | docs/examples/userguide/buffer/matrix.py | {
"start": 94,
"end": 355
} | class ____:
ncols: cython.uint
v: vector[cython.float]
def __cinit__(self, ncols: cython.uint):
self.ncols = ncols
def add_row(self):
"""Adds a row, initially zero-filled."""
self.v.resize(self.v.size() + self.ncols)
| Matrix |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_dispatch_test.py | {
"start": 2293,
"end": 55022
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def assertSameShape(self, x, y):
"""Checks that x and y have the same shape (including ragged shapes)."""
if ragged_tensor.is_ragged(x):
self.assertTrue(ragged_tensor.is_ragged(y))
self.assertEqual(x.ragged_rank, y.ragged_rank)
for (x_splits, y_splits) in zip(x.nested_row_splits, y.nested_row_splits):
self.assertAllEqual(x_splits, y_splits)
self.assertAllEqual(
array_ops.shape(x.flat_values), array_ops.shape(y.flat_values))
else:
self.assertIsInstance(y, tensor.Tensor)
self.assertAllEqual(array_ops.shape(x), array_ops.shape(y))
@parameterized.parameters(
#=========================================================================
# Test different input shapes.
#=========================================================================
[
# 0-dimensional input
{'x': 12},
# 1-dimensional input
{'x': [1, -2, 3]},
# 2-dimensional input
{'x': [[-2, 3], [-3, 4]]},
{'x': ragged_factory_ops.constant_value(
[[-2, 3], [-3]], ragged_rank=1)},
# 3-dimensional inputs
{'x': [[[-2, 3], [3, 4]], [[7, 6], [5, 4]]]},
{'x': ragged_factory_ops.constant_value(
[[[-2, 3], [3, 4]], [[7, 6]]],
ragged_rank=1)},
{'x': ragged_factory_ops.constant_value(
[[[-2, 3, 4], []], [[7, 6]], []],
ragged_rank=2)},
] +
#=========================================================================
# Test each unary op.
#=========================================================================
[{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]), 'op': op}
for op in test_ops.UNARY_FLOAT_OPS] +
[{'x': ragged_factory_ops.constant_value([[True, False], [True]]),
'op': op}
for op in test_ops.UNARY_BOOL_OPS] +
[{'x': ragged_factory_ops.constant_value([[18, 512], [12412]], np.int32),
'op': op}
for op in test_ops.UNARY_INT_OPS] +
[{'x': ragged_factory_ops.constant_value([['abcd', 'efgh'],
['aabbccdd']]),
'op': op}
for op in test_ops.UNARY_STRING_OPS] +
[
{'op': clip_ops.clip_by_value,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'clip_value_min': 0.1, 'clip_value_max': 4.0},
{'op': math_ops.cast,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'dtype': dtypes.int32},
{'op': math_ops.saturate_cast,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'dtype': dtypes.int32},
{'op': string_ops.string_to_hash_bucket,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000},
{'op': string_ops.string_to_hash_bucket_v1,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000},
{'op': string_ops.string_to_hash_bucket_fast,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000},
{'op': string_ops.string_to_hash_bucket_strong,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000,
'key': [1231, 12512]},
{'op': string_ops.string_to_number,
'x': ragged_factory_ops.constant_value([['-2.0', '3.0'], ['-3.0']])},
{'op': string_ops.regex_full_match,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pattern': r'\w+'},
{'op': string_ops.regex_replace,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pattern': r'\d',
'rewrite': '#'},
{'op': string_ops.substr,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pos': 2, 'len': 3},
{'op': string_ops.substr_deprecated,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pos': 2, 'len': 3},
{'op': string_ops.substr_v2,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pos': 2, 'len': 3},
{'op': array_ops.check_numerics,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'message': 'check-numerics'},
{'op': nn_ops.dropout,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'rate': 0.5,
'seed': 1},
{'op': nn_ops.stateless_dropout,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'rate': 0.5,
'seed': [1, 0],
'rng_alg': 'auto_select'},
{'op': math_ops.nextafter,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'x2': 0},
{'op': math_ops.to_bfloat16,
'x': ragged_factory_ops.constant_value(
[[2.0, 3.0], [3.0]], dtype=dtypes.float32),
'expected_dtype': dtypes.bfloat16},
{'op': math_ops.to_complex128,
'x': ragged_factory_ops.constant_value(
[[2.0, 3.0], [3.0]], dtype=dtypes.float32),
'expected_dtype': dtypes.complex128},
{'op': math_ops.to_complex64,
'x': ragged_factory_ops.constant_value(
[[2.0, 3.0], [3.0]], dtype=dtypes.float32),
'expected_dtype': dtypes.complex64},
{'op': math_ops.to_double,
'x': ragged_factory_ops.constant_value(
[[2.0, 3.0], [3.0]], dtype=dtypes.float32),
'expected_dtype': dtypes.double},
{'op': math_ops.to_float,
'x': ragged_factory_ops.constant_value(
[[2.0, 3.0], [3.0]], dtype=dtypes.int32),
'expected_dtype': dtypes.float32},
{'op': math_ops.to_int32,
'x': ragged_factory_ops.constant_value(
[[2, 3], [3]], dtype=dtypes.int64),
'expected_dtype': dtypes.int32},
{'op': math_ops.to_int64,
'x': ragged_factory_ops.constant_value(
[[2, 3], [3]], dtype=dtypes.int32),
'expected_dtype': dtypes.int64},
{'op': image_ops_impl.convert_image_dtype,
'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'dtype': dtypes.float32,
'expected_dtype': dtypes.float32},
{'op': image_ops_impl.adjust_brightness,
'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'delta': 0.2},
{'op': image_ops_impl.adjust_gamma,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'gamma': 2,
'gain': 1.2},
{'op': image_ops_impl.stateless_random_brightness,
'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'max_delta': 0.2,
'seed': (1, 2)},
{'op': image_ops_impl.random_brightness,
'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'max_delta': 0.2,
'seed': 12},
{'op': string_ops.unicode_transcode,
'x': ragged_factory_ops.constant_value(
[['tensor', 'flower'], ['2.0']]),
'input_encoding': 'UTF-8',
'output_encoding': 'UTF-16-BE'},
]
) # pyformat: disable
def testUnaryElementwiseOp(self,
x,
op=math_ops.abs,
expected_dtype=None,
**extra_args):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x)
random_seed.set_random_seed(1234)
result = op(x, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_x = x.flat_values if ragged_tensor.is_ragged(x) else x
random_seed.set_random_seed(1234)
expected_flat_values = array_ops.reshape(op(dense_x, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(x, result)
# Check that the result has the expected (flattened) values.
if ragged_tensor.is_ragged(result):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
if expected_dtype is not None:
self.assertEqual(result.dtype, expected_dtype)
  @parameterized.parameters(
      [
          #=====================================================================
          # Without broadcasting -- i.e., shapes match exactly.
          #=====================================================================
          # Shapes: x:(), y:()
          {'x': 12,
           'y': 8},
          # Shapes: x:(3,), y:(3,)
          {'x': [7, 8, 9],
           'y': [1, -2, 3]},
          # Shapes: x:(2, 2), y:(2, 2)
          {'x': [[-2, 3], [-3, -4]],
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(2, None), y:(2, None)
          {'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
           'y': ragged_factory_ops.constant_value([[5, 6], [7]])},
          # Shapes: x:(2, 2, 2), y:(2, 2, 2)
          {'x': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
           'y': [[[9, 3], [3, 4]], [[5, 2], [7, 6]]]},
          # Shapes: x:(2, None, None), y: (2, None, None)
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
           'y': ragged_factory_ops.constant_value(
               [[[3, 8], [2], [5]], [[], [1, 9, 8]]])},
          # Shapes: x:(2, None, 2), y: (2, None, 2)
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
              ragged_rank=1),
           'y': ragged_factory_ops.constant_value(
               [[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
               ragged_rank=1)},
          #=====================================================================
          # With broadcasting
          #=====================================================================
          # Shapes: x:(), y:(3,)
          {'x': 12,                                 # Broadcast () -> (3,)
           'y': [1, -2, 3]},
          # Shapes: x:(1,), y:(3,)
          {'x': [12],                               # Broadcast (1,) -> (3,)
           'y': [1, -2, 3]},
          # Shapes: x:(), y:(2, 2)
          {'x': 12,                                 # Broadcast () -> (2, 2)
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(1,), y:(2, 2)
          {'x': 12,                                 # Broadcast (1,) -> (2, 2)
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(2, 1), y:(2, 2)
          {'x': [[10], [20]],                       # Broadcast (2, 1) -> (2, 2)
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(), y:(2, None)
          {'x': 10,                                 # Broadcast () -> (2, None)
           'y': ragged_factory_ops.constant_value(
               [[1, 2], [3]], dtype=np.int32)},
          # TODO(edloper): Add tests for more advanced broadcasting, once we add
          # support for it.
          #=====================================================================
          # Keyword Args
          #=====================================================================
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
           'y': ragged_factory_ops.constant_value(
               [[[3, 8], [2], [5]], [[], [1, 9, 8]]]),
           'use_kwargs': {'x': 'x', 'y': 'y'}},
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
              ragged_rank=1),
           'y': ragged_factory_ops.constant_value(
               [[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
               ragged_rank=1),
           'use_kwargs': {'x': 'x', 'y': 'y'}},
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
              ragged_rank=1),
           'y': ragged_factory_ops.constant_value(
               [[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
               ragged_rank=1),
           'use_kwargs': {'y': 'y'}},
      ] +
      #=========================================================================
      # Test each binary op.
      #=========================================================================
      [{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
        'y': ragged_factory_ops.constant_value([[5.0, 1.0], [12.0]]),
        'op': op}
       for op in test_ops.BINARY_FLOAT_OPS] +
      [{'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
        'y': ragged_factory_ops.constant_value([[5, 1], [12]]),
        'op': op}
       for op in test_ops.BINARY_INT_OPS] +
      [{'x': ragged_factory_ops.constant_value([[True, True], [False]]),
        'y': ragged_factory_ops.constant_value([[False, True], [False]]),
        'op': op}
       for op in test_ops.BINARY_BOOL_OPS] +
      #=========================================================================
      # Test ops whose signatures differ from a plain (x, y) pair
      # (scalar_mul variants, and kwarg-only signatures).
      #=========================================================================
      [
          {'x': 3,
           'y': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
           'op': math_ops.scalar_mul},
          {'x': 3,
           'y': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
           'op': math_ops.scalar_mul_v2},
          {'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
           'y': ragged_factory_ops.constant_value([[5.0, 1.0], [12.0]]),
           'op': nn_impl.sigmoid_cross_entropy_with_logits_v2,
           'use_kwargs': {'x': 'labels', 'y': 'logits'}},
      ])  # pyformat: disable
  def testBinaryElementwiseOp(self, x, y, op=math_ops.add, **extra_args):
    """Checks that `op(x, y)` matches `op` applied to the dense flat values.

    Args:
      x: First (possibly ragged) operand.
      y: Second (possibly ragged) operand.
      op: The binary elementwise op under test.
      **extra_args: May contain `use_kwargs`, a dict mapping 'x' and/or 'y' to
        the keyword names under which the operands should be passed to `op`;
        all remaining entries are forwarded to `op` unchanged.
    """
    use_kwargs = extra_args.pop('use_kwargs', {})
    def compute(x, y):
      # Pass the operands positionally or by keyword, as requested by the
      # test case's `use_kwargs` mapping.
      if 'x' in use_kwargs and 'y' in use_kwargs:
        extra_args[use_kwargs['x']] = x
        extra_args[use_kwargs['y']] = y
        return op(**extra_args)
      elif 'y' in use_kwargs:
        extra_args[use_kwargs['y']] = y
        return op(x, **extra_args)
      else:
        # 'x' by keyword with 'y' positional is not a supported combination.
        assert 'x' not in use_kwargs, use_kwargs
        return op(x, y, **extra_args)
    result = compute(x, y)
    # Run the wrapped op on the dense values, for comparison.
    dense_x = x.flat_values if ragged_tensor.is_ragged(x) else x
    dense_y = y.flat_values if ragged_tensor.is_ragged(y) else y
    expected_flat_values = array_ops.reshape(compute(dense_x, dense_y), [-1])
    # Check that the result has the expected shape.
    self.assertSameShape(y, result)
    # Check that the result has the expected (flattened) values.
    if ragged_tensor.is_ragged(result):
      result_flat_values = array_ops.reshape(result.flat_values, [-1])
    else:
      result_flat_values = array_ops.reshape(result, [-1])
    self.assertAllEqual(expected_flat_values, result_flat_values)
  @parameterized.parameters(
      [
          #=====================================================================
          # Without broadcasting -- i.e., shapes match exactly.
          #=====================================================================
          # Shapes: x:(), y:()
          {'x': 12,
           'y': 8},
          # Shapes: x:(3,), y:(3,)
          {'x': [7, 8, 9],
           'y': [1, -2, 3]},
          # Shapes: x:(2, 2), y:(2, 2)
          {'x': [[-2, 3], [-3, -4]],
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(2, None), y:(2, None)
          {'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
           'y': ragged_factory_ops.constant_value([[5, 6], [7]])},
          # Shapes: x:(2, 2, 2), y:(2, 2, 2)
          {'x': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
           'y': [[[9, 3], [3, 4]], [[5, 2], [7, 6]]]},
          # Shapes: x:(2, None, None), y: (2, None, None)
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
           'y': ragged_factory_ops.constant_value(
               [[[3, 8], [2], [5]], [[], [1, 9, 8]]])},
          # Shapes: x:(2, None, 2), y: (2, None, 2)
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
              ragged_rank=1),
           'y': ragged_factory_ops.constant_value(
               [[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
               ragged_rank=1)},
          #=====================================================================
          # With broadcasting
          #=====================================================================
          # Shapes: x:(), y:(3,)
          {'x': 12,                                 # Broadcast () -> (3,)
           'y': [1, -2, 3]},
          # Shapes: x:(1,), y:(3,)
          {'x': [12],                               # Broadcast (1,) -> (3,)
           'y': [1, -2, 3]},
          # Shapes: x:(), y:(2, 2)
          {'x': 12,                                 # Broadcast () -> (2, 2)
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(1,), y:(2, 2)
          {'x': 12,                                 # Broadcast (1,) -> (2, 2)
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(2, 1), y:(2, 2)
          {'x': [[10], [20]],                       # Broadcast (2, 1) -> (2, 2)
           'y': [[1, 2], [3, 4]]},
          # Shapes: x:(), y:(2, None)
          {'x': 10,                                 # Broadcast () -> (2, None)
           'y': ragged_factory_ops.constant_value(
               [[1, 2], [3]], dtype=np.int32)},
          #=====================================================================
          # Keyword Args
          #=====================================================================
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
           'y': ragged_factory_ops.constant_value(
               [[[3, 8], [2], [5]], [[], [1, 9, 8]]]),
           'use_kwargs': {'x': 'x', 'y': 'y'}},
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
              ragged_rank=1),
           'y': ragged_factory_ops.constant_value(
               [[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
               ragged_rank=1),
           'use_kwargs': {'x': 'x', 'y': 'y'}},
          {'x': ragged_factory_ops.constant_value(
              [[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
              ragged_rank=1),
           'y': ragged_factory_ops.constant_value(
               [[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
               ragged_rank=1),
           'use_kwargs': {'y': 'y'}},
      ] +
      #=========================================================================
      # Test each binary assert op.
      #=========================================================================
      [{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
        'y': ragged_factory_ops.constant_value([[5.0, 1.0], [12.0]]),
        'op': op}
       for op in test_ops.BINARY_ASSERT_OPS] +
      [{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
        'y': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
        'op': op}
       for op in test_ops.BINARY_ASSERT_OPS] +
      [{'x': ragged_factory_ops.constant_value([[5, 1], [12]]),
        'y': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
        'op': op}
       for op in test_ops.BINARY_ASSERT_OPS] +
      [{'x': ragged_factory_ops.constant_value([[True, True], [False]]),
        'y': ragged_factory_ops.constant_value([[False, True], [False]]),
        'op': op}
       for op in (check_ops.assert_equal_v2, check_ops.assert_none_equal_v2)
      ])  # pyformat: disable
  def testBinaryAssertOp(self, x, y, op=check_ops.assert_equal_v2,
                         **extra_args):
    """Test the binary assert functions for ragged tensors.

    The ragged assert must pass exactly when the same assert applied to the
    operands' dense (flat) values passes.
    """
    def check_binary_assert_pass(assert_op, x, y):
      # Returns True iff `assert_op(x, y)` neither raises at construction nor
      # fails when its returned op is evaluated (graph mode only).
      assert_passed = True
      try:
        result = assert_op(x, y)
        if result is not None:  # in graph mode
          with ops.control_dependencies([result]):
            eval_tensor = array_ops.zeros([])
          self.evaluate(eval_tensor)
      except (ValueError, errors.InvalidArgumentError):
        assert_passed = False
      return assert_passed
    op_assert_pass = check_binary_assert_pass(op, x, y)
    dense_x = x.flat_values if ragged_tensor.is_ragged(x) else x
    dense_y = y.flat_values if ragged_tensor.is_ragged(y) else y
    # Run the wrapped op on the converted tensor values, for comparison.
    expected_assert_pass = check_binary_assert_pass(op, dense_x, dense_y)
    self.assertEqual(op_assert_pass, expected_assert_pass)
  @parameterized.parameters(
      [
          # Inputs of increasing structural complexity: scalars, dense lists,
          # ragged values, and ragged values with a uniform inner dimension.
          {'inputs': (12, 8, 3)},
          {'inputs': ([1, 2, 3], [7, 8, 9], [3, 6, 9])},
          {'inputs': ([[1, 2]], [[3, 4]], [[5, 6]])},
          {'inputs': (ragged_factory_ops.constant_value([[1, 3], [-3]]),
                      ragged_factory_ops.constant_value([[4, 7], [88]]),
                      ragged_factory_ops.constant_value([[2, 9], [12]]))},
          {'inputs': (ragged_factory_ops.constant_value(
              [[[1, 3], [-3]], [[1]]]),
                      ragged_factory_ops.constant_value(
                          [[[4, 7], [88]], [[2]]]),
                      ragged_factory_ops.constant_value(
                          [[[2, 9], [12]], [[8]]]))},
          {'inputs': (
              ragged_factory_ops.constant_value([[[1, 3], [3, 4]], [[1, 5]]],
                                                ragged_rank=1),
              ragged_factory_ops.constant_value([[[4, 7], [1, 2]], [[2, 2]]],
                                                ragged_rank=1),
              ragged_factory_ops.constant_value([[[2, 9], [5, 2]], [[8, 0]]],
                                                ragged_rank=1))},
          {'inputs': (
              ragged_factory_ops.constant_value([[[1, 3], [-3]], [[1]]]),
              ragged_factory_ops.constant_value([[[4, 7], [88]], [[2]]]),
              ragged_factory_ops.constant_value([[[2, 9], [12]], [[8]]])),
           'use_kwargs': True},
      ] + [
          # Explicitly-specified ops (the default op is math_ops.add_n).
          {'op': math_ops.add_n,
           'inputs': (ragged_factory_ops.constant_value([[1, 3], [-3]]),
                      ragged_factory_ops.constant_value([[4, 7], [88]]),
                      ragged_factory_ops.constant_value([[2, 9], [12]]))},
          {'op': string_ops.string_join,
           'inputs': (
               ragged_factory_ops.constant_value([['a', 'b'], ['c']]),
               ragged_factory_ops.constant_value([['foo', 'bar'], ['baz']]),
               ragged_factory_ops.constant_value([['2', '9'], ['12']]))},
      ])  # pyformat: disable
def testListValuedElementwiseOp(self,
inputs,
op=math_ops.add_n,
**extra_args):
use_kwargs = extra_args.pop('use_kwargs', False)
if use_kwargs:
result = op(inputs=inputs, **extra_args)
else:
result = op(inputs, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_inputs = [
x.flat_values if ragged_tensor.is_ragged(x) else x for x in inputs
]
expected_flat_values = array_ops.reshape(
op(dense_inputs, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(inputs[0], result)
# Check that the result has the expected (flattened) values.
if ragged_tensor.is_ragged(result):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
def testAllElementwiseOpsAreIncludedInRaggedTensorTestOps(self):
other_tested_ops = [
# Elementwise ops that have explicit/bespoke test cases in this file.
string_ops.string_to_hash_bucket,
string_ops.string_to_hash_bucket_v1,
string_ops.string_to_hash_bucket_fast,
string_ops.string_to_hash_bucket_strong,
string_ops.string_to_number,
string_ops.regex_full_match,
string_ops.regex_replace,
string_ops.substr,
string_ops.substr_v2,
string_ops.substr_deprecated,
string_ops.unicode_transcode,
clip_ops.clip_by_value,
array_ops.check_numerics,
math_ops.cast,
math_ops.saturate_cast,
math_ops.nextafter,
math_ops.tensor_equals,
math_ops.tensor_not_equals,
math_ops.to_bfloat16,
math_ops.to_complex128,
math_ops.to_complex64,
math_ops.to_double,
math_ops.to_float,
math_ops.to_int32,
math_ops.to_int64,
math_ops.scalar_mul,
math_ops.scalar_mul_v2,
image_ops_impl.adjust_brightness,
image_ops_impl.adjust_gamma,
image_ops_impl.stateless_random_brightness,
image_ops_impl.random_brightness,
image_ops_impl.convert_image_dtype,
nn_impl.sigmoid_cross_entropy_with_logits_v2,
]
untested_ops = (
set(dispatch.unary_elementwise_apis() +
dispatch.binary_elementwise_apis()) -
set(test_ops.UNARY_FLOAT_OPS + test_ops.UNARY_BOOL_OPS +
test_ops.UNARY_STRING_OPS + test_ops.UNARY_INT_OPS +
test_ops.BINARY_FLOAT_OPS + test_ops.BINARY_BOOL_OPS +
test_ops.BINARY_INT_OPS + other_tested_ops))
untested_ops = sorted(f'{x.__module__}.{x.__name__}' for x in untested_ops)
self.assertEmpty(
untested_ops, 'One or more ops elementwise are not tested; please'
' add them to ragged_tensor_test_ops.py or ragged_dispatch_test.py')
def testElementwiseOpUnknownRankError(self):
if context.executing_eagerly():
return
x = ragged_factory_ops.constant([[1, 2], [3]])
y = ragged_tensor.RaggedTensor.from_row_splits(
array_ops.placeholder_with_default([1, 2, 3], shape=None), x.row_splits)
with self.assertRaisesRegex(ValueError,
r'Unable to broadcast: unknown rank'):
math_ops.add(x, y)
  @parameterized.parameters([
      # Ragged-vs-dense and mixed-ragged-rank broadcasting cases for `+`.
      dict(
          x=ragged_factory_ops.constant_value([[1, 2], [3]]),
          y=[[10]],
          expected=[[11, 12], [13]]),
      dict(
          x=ragged_factory_ops.constant_value([[[1, 2], [3, 4]], [[5]]],
                                              ragged_rank=2),
          y=ragged_factory_ops.constant_value([[[10], [20]], [[30]]],
                                              ragged_rank=1),
          expected=[[[11, 12], [23, 24]], [[35]]]),
      dict(
          x=ragged_factory_ops.constant_value([[[1]]]),
          y=ragged_factory_ops.constant_value([[1]]),
          expected=[[[2]]]),
  ])
def testElementwiseOpBroadcast(self, x, y, expected):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32)
result = x + y
self.assertAllEqual(result, expected)
  @parameterized.parameters([
      # Broadcasting a dense operand against a ragged one: the result must
      # keep the ragged operand's row_splits dtype (int32 or int64).
      dict(
          x=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]],
                                              row_splits_dtype=dtypes.int64),
          y=[1],
          expected=[[2, 3, 4], [5, 6]],
          expected_row_splits_dtype=dtypes.int64),
      dict(
          x=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]],
                                              row_splits_dtype=dtypes.int32),
          y=[1],
          expected=[[2, 3, 4], [5, 6]],
          expected_row_splits_dtype=dtypes.int32),
      dict(
          x=[1],
          y=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]],
                                              row_splits_dtype=dtypes.int64),
          expected=[[2, 3, 4], [5, 6]],
          expected_row_splits_dtype=dtypes.int64),
      dict(
          x=[1],
          y=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]],
                                              row_splits_dtype=dtypes.int32),
          expected=[[2, 3, 4], [5, 6]],
          expected_row_splits_dtype=dtypes.int32),
  ])
def testElementwiseOpBroadcastTensorAndRaggedTensor(
self, x, y, expected, expected_row_splits_dtype):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32)
result = x + y
self.assertAllEqual(result, expected)
self.assertEqual(result.row_splits.dtype, expected_row_splits_dtype)
def testElementwiseOpShapeMismatch(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [4, 5, 6]])
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
self.evaluate(math_ops.add(x, y))
def testBinaryOpSparseAndRagged(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
y = sparse_tensor.SparseTensor([[0, 0], [0, 1], [2, 0]], [1, 2, 3], [3, 2])
with self.assertRaises((TypeError, ValueError)):
self.evaluate(math_ops.add(x, y))
with self.assertRaises((TypeError, ValueError)):
self.evaluate(math_ops.add_n([x, y]))
  @parameterized.parameters([
      # Each case gives the op under test, its args/kwargs, and the expected
      # result.  Values wrapped in zero-arg lambdas are constructed lazily
      # inside testRaggedDispatch (they build tensors, which must not happen
      # at decoration time).
      dict(
          op=array_ops.batch_gather,
          args=(ragged_factory_ops.constant_value([[5, 6, 7], [8, 9]]),
                ragged_factory_ops.constant_value([[2, 1, 0], [1]])),
          expected=ragged_factory_ops.constant_value([[7, 6, 5], [9]])),
      dict(
          op=array_ops.concat,
          args=([
              ragged_factory_ops.constant_value([[1, 2, 3], [4]],
                                                dtype=np.int32),
              np.array([[5, 6]], dtype=np.int32)
          ],),
          kwargs={'axis': 0},
          expected=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]])),
      dict(
          op=array_ops.expand_dims,
          kwargs={
              'input': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'axis': 0
          },
          expected=ragged_factory_ops.constant_value([[[1, 2], [3]]])),
      dict(
          op=array_ops.expand_dims_v2,
          kwargs={
              'input': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'axis': -1
          },
          expected=ragged_factory_ops.constant_value([[[1], [2]], [[3]]],
                                                     ragged_rank=1),
      ),
      dict(
          op=array_ops.gather,
          kwargs={
              'params': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'indices': [1, 0, 1]
          },
          expected=ragged_factory_ops.constant_value([[3], [1, 2], [3]])),
      dict(
          op=array_ops.gather_v2,
          kwargs={
              'params': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'indices': ragged_factory_ops.constant_value([[1, 0], [1]])
          },
          expected=ragged_factory_ops.constant_value([[[3], [1, 2]], [[3]]])),
      dict(
          op=array_ops.gather_nd,
          kwargs={
              'params': ragged_factory_ops.constant_value([[7, 8], [9]]),
              'indices': [[0, 1], [1, 0], [0, 0]]
          },
          expected=ragged_factory_ops.constant_value([8, 9, 7])),
      dict(
          op=array_ops.one_hot,
          kwargs={
              'indices':
                  ragged_factory_ops.constant_value([[1, 2, 3], [0]],
                                                    dtype=np.int32),
              'depth':
                  4,
              'axis':
                  -1
          },
          expected=ragged_factory_ops.constant_value(
              [[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], [[1, 0, 0, 0]]],
              ragged_rank=1)),
      dict(
          op=array_ops_stack.stack,
          args=([
              ragged_factory_ops.constant_value([[1, 2, 3], [4]],
                                                dtype=np.int32),
              np.array([[5, 6]], dtype=np.int32)
          ],),
          expected=ragged_factory_ops.constant_value([[[1, 2, 3], [4]],
                                                      [[5, 6]]])),
      dict(
          op=array_ops.tile,
          args=([
              ragged_factory_ops.constant_value([[1, 2], [3]], dtype=np.int32),
              [2, 3]
          ]),
          expected=ragged_factory_ops.constant_value([[1, 2, 1, 2, 1, 2],
                                                      [3, 3, 3],
                                                      [1, 2, 1, 2, 1, 2],
                                                      [3, 3, 3]])),
      dict(
          op=array_ops.where,
          args=(ragged_factory_ops.constant_value([[True, False], [True]]),
                ragged_factory_ops.constant_value([[b'A', b'B'], [b'C']]),
                ragged_factory_ops.constant_value([[b'a', b'b'], [b'c']])),
          expected=ragged_factory_ops.constant_value([[b'A', b'b'], [b'C']])),
      dict(
          op=array_ops.where,
          args=(ragged_factory_ops.constant_value([[True, False], [True]]),),
          expected=[[0, 0], [1, 0]]),
      dict(
          op=array_ops.where_v2,
          args=(ragged_factory_ops.constant_value([[True, False], [True]]),
                ragged_factory_ops.constant_value([[b'A', b'B'], [b'C']]),
                ragged_factory_ops.constant_value([[b'a', b'b'], [b'c']])),
          expected=ragged_factory_ops.constant_value([[b'A', b'b'], [b'C']])),
      dict(
          op=math_ops.unsorted_segment_sum,
          kwargs={
              'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'segment_ids': ragged_factory_ops.constant_value([[0, 2], [0]]),
              'num_segments': 3
          },
          expected=[4, 0, 2]),
      dict(
          op=math_ops.unsorted_segment_prod,
          kwargs={
              'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'segment_ids': ragged_factory_ops.constant_value([[0, 2], [0]]),
              'num_segments': 3
          },
          expected=[3, 1, 2]),
      dict(
          op=math_ops.unsorted_segment_min,
          kwargs={
              'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
              'num_segments': 2
          },
          expected=[1, 2]),
      dict(
          op=math_ops.unsorted_segment_max,
          kwargs={
              'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
              'num_segments': 2
          },
          expected=[3, 2]),
      dict(
          op=math_ops.unsorted_segment_mean,
          kwargs={
              'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
              'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
              'num_segments': 2
          },
          expected=[2, 2]),
      dict(
          op=math_ops.unsorted_segment_sqrt_n,
          kwargs={
              'data':
                  ragged_factory_ops.constant_value([[1.0, 2.0],
                                                     [3.0, 4.0, 6.0]]),
              'segment_ids':
                  ragged_factory_ops.constant_value([[0, 1], [0, 0, 0]]),
              'num_segments':
                  2
          },
          expected=[7.0, 2.0],
          rtol=1e-12,
      ),
      dict(
          op=math_ops.reduce_sum,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
              'axis':
                  1
          },
          expected=[3, 12]),
      dict(
          op=math_ops.reduce_prod,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
              'axis':
                  1
          },
          expected=[2, 60]),
      dict(
          op=math_ops.reduce_min,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
              'axis':
                  1
          },
          expected=[1, 3]),
      dict(
          op=math_ops.reduce_max,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
              'axis':
                  1
          },
          expected=[2, 5]),
      dict(
          op=math_ops.reduce_mean,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[1, 3], [3, 4, 5]]),
              'axis':
                  1
          },
          expected=[2, 4]),
      dict(
          op=math_ops.reduce_variance,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[1, 3], [3, 6, 9]]),
              'axis':
                  1
          },
          expected=[1., 6.]),
      dict(
          op=math_ops.reduce_std,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[1, 3], [1, 2, 2, 1]]),
              'axis':
                  1
          },
          expected=[1., 0.5]),
      dict(
          op=math_ops.reduce_any,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[True, False],
                                                     [True, True, True]]),
              'axis':
                  1
          },
          expected=[True, True]),
      dict(
          op=math_ops.matmul,
          kwargs={
              'a': ragged_factory_ops.constant_value([[1, 2, 3], [4, 5, 6]]),
              'b': ragged_factory_ops.constant_value([[5], [4], [3]])
          },
          expected=[[22], [58]]),
      dict(
          op=string_ops.reduce_join,
          kwargs={
              'inputs':
                  ragged_factory_ops.constant_value([[
                      b'this', b'is', b'a', b'test', b'for', b'ragged',
                      b'tensors'
                  ], [b'please', b'do', b'not', b'panic', b'!']]),
              'axis':
                  0,
              'keepdims':
                  False,
              'separator':
                  ''
          },
          expected=[
              b'thisplease', b'isdo', b'anot', b'testpanic', b'for!', b'ragged',
              b'tensors'
          ]),
      dict(
          op=math_ops.reduce_all,
          kwargs={
              'input_tensor':
                  ragged_factory_ops.constant_value([[True, False],
                                                     [True, True, True]]),
              'axis':
                  1
          },
          expected=[False, True]),
      dict(
          op=array_ops.rank,
          kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
          expected=2),
      dict(
          op=array_ops.size,
          kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
          expected=3),
      dict(
          op=array_ops.size_v2,
          kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
          expected=3),
      dict(
          op=array_ops.squeeze,
          kwargs={
              'input': ragged_factory_ops.constant_value([[[1, 2, 3], [4, 5]]]),
              'axis': [0]
          },
          expected=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]])),
      dict(
          op=array_ops.squeeze_v2,
          kwargs={
              'input': ragged_factory_ops.constant_value([[[1, 2, 3], [4, 5]]]),
              'axis': [0]
          },
          expected=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]])),
      dict(
          op=data_flow_ops.dynamic_partition,
          kwargs={
              'data': ragged_factory_ops.constant_value([[1], [2, 3, 4], [5]]),
              'partitions': [2, 1, 1],
              'num_partitions': 3
          },
          expected=[
              ragged_factory_ops.constant_value([], ragged_rank=1),
              ragged_factory_ops.constant_value([[2, 3, 4], [5]]),
              ragged_factory_ops.constant_value([[1]])
          ],
          result_is_list=True),
      dict(
          op=array_ops.reverse,
          kwargs={
              'tensor': ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]]),
              'axis': [0, -1]
          },
          expected=ragged_factory_ops.constant_value([[5, 4], [3, 2, 1]])),
      dict(
          op=string_ops.string_format,
          kwargs={
              'template': 'Hi {}',
              'inputs': [ragged_factory_ops.constant_value([[1, 2], [3]])]
          },
          expected='Hi [[1, 2], [3]]'),
      dict(
          op=nn_ops.softmax_v2,
          kwargs={
              'logits':
                  ragged_factory_ops.constant_value([[1., 2., 3.], [4., 5.]]),
          },
          expected=ragged_factory_ops.constant_value([
              [
                  np.exp(1) / (np.exp(1) + np.exp(2) + np.exp(3)),
                  np.exp(2) / (np.exp(1) + np.exp(2) + np.exp(3)),
                  np.exp(3) / (np.exp(1) + np.exp(2) + np.exp(3)),
              ],
              [
                  np.exp(4) / (np.exp(4) + np.exp(5)),
                  np.exp(5) / (np.exp(4) + np.exp(5)),
              ],
          ]),
          rtol=1e-6),
      dict(
          op=array_ops.bitcast,
          kwargs={
              'input':
                  ragged_factory_ops.constant_value([[1, 2], [-1]],
                                                    dtype=dtypes.int64),
              'type':
                  dtypes.uint64
          },
          expected=ragged_factory_ops.constant_value([[1, 2], [-1]],
                                                     dtype=dtypes.uint64)),
      dict(
          op=array_ops.split,
          kwargs={
              'value': ragged_factory_ops.constant_value([[1], [2, 3, 4]]),
              'num_or_size_splits': 2,
          },
          result_is_list=True,
          expected=[
              ragged_factory_ops.constant_value([[1]]),
              ragged_factory_ops.constant_value([[2, 3, 4]]),
          ]),
      # The remaining cases exercise DynamicRaggedShape-valued arguments and
      # results; they are wrapped in lambdas for lazy construction.
      dict(
          op=array_ops.reshape,
          kwargs=lambda: {
              'tensor': ragged_factory_ops.constant([[1, 2], [3]]),
              'shape': DynamicRaggedShape.from_lengths([3, (1, 0, 2)]),
          },
          expected=[[1], [], [2, 3]]),
      dict(
          op=array_ops.reshape,
          kwargs=lambda: {
              'tensor': [[1, 2], [3, 4]],
              'shape': DynamicRaggedShape.from_lengths([3, (1, 0, 3)]),
          },
          expected=[[1], [], [2, 3, 4]]),
      dict(
          op=array_ops.reshape,
          kwargs=lambda: {
              'tensor': ragged_factory_ops.constant([[1, 2], [3]]),
              'shape': [3],
          },
          expected=[1, 2, 3]),
      dict(
          op=array_ops.broadcast_to,
          kwargs=lambda: {
              'input': 3,
              'shape': DynamicRaggedShape.from_lengths([3, (1, 0, 2)])
          },
          expected=[[3], [], [3, 3]]),
      dict(
          op=array_ops.shape,
          kwargs=lambda: {
              'input': ragged_factory_ops.constant([(1, 2), (3,)]),
              'out_type': dtypes.int64
          },
          expected=lambda: DynamicRaggedShape.from_lengths([2, (2, 1)])),
      dict(
          op=array_ops.shape_v2,
          kwargs=lambda: {
              'input': ragged_factory_ops.constant([(1, 2), (3,)]),
              'out_type': dtypes.int64
          },
          expected=lambda: DynamicRaggedShape.from_lengths([2, (2, 1)])),
      dict(
          op=array_ops.broadcast_dynamic_shape,
          kwargs=lambda: {
              'shape_x': DynamicRaggedShape.from_lengths([2, (2, 3), 1]),
              'shape_y': DynamicRaggedShape.from_lengths([5])
          },
          expected=lambda: DynamicRaggedShape.from_lengths([2, (2, 3), 5])),
      dict(
          op=array_ops.broadcast_dynamic_shape,
          kwargs=lambda: {
              'shape_x': DynamicRaggedShape.from_lengths([2, (2, 3), 1]),
              'shape_y': [5],
          },
          expected=lambda: DynamicRaggedShape.from_lengths([2, (2, 3), 5])),
      dict(
          op=array_ops.ones,
          kwargs=lambda: {
              'shape': DynamicRaggedShape.from_lengths([2, (2, 3)]),
          },
          expected=[[1.0, 1.0], [1.0, 1.0, 1.0]]),
      dict(
          op=array_ops.zeros,
          kwargs=lambda: {
              'shape': DynamicRaggedShape.from_lengths([2, (2, 3)]),
          },
          expected=[[0.0, 0.0], [0.0, 0.0, 0.0]]),
      dict(
          op=array_ops.fill,
          kwargs=lambda: {
              'dims': DynamicRaggedShape.from_lengths([2, (2, 3)]),
              'value': 5
          },
          expected=[[5.0, 5.0], [5.0, 5.0, 5.0]]),
  ])
def testRaggedDispatch(self,
op,
expected,
args=(),
result_is_list=False,
rtol=None,
kwargs=None):
# For some tests, the inputs/outputs to the function need to be
# constructed late, because they contain tensors.
if callable(kwargs):
kwargs = kwargs()
if callable(args):
args = args()
if callable(expected):
expected = expected()
kwargs = kwargs or {}
if rtol is not None:
assert_fn = lambda x, y: self.assertAllClose(x, y, rtol=rtol)
else:
assert_fn = self.assertAllEqual
result = op(*args, **kwargs)
if isinstance(expected, DynamicRaggedShape):
self.assertDynamicRaggedShapeEqual(expected, result)
elif result_is_list:
self.assertLen(result, len(expected))
for (r, e) in zip(result, expected):
assert_fn(r, e)
else:
assert_fn(result, expected)
  def testTensorEquals(self):
    """Checks ragged dispatch of tensor_equals / tensor_not_equals (`==`)."""
    a = ragged_factory_ops.constant([[1, 2], [3]])
    b = ragged_factory_ops.constant([[4, 5], [3]])
    c = 2  # scalar, broadcast against `a`
    d = ragged_factory_ops.constant([[4, 5], [3, 2, 1]])  # not broadcastable with `a`
    if tf2.enabled() and ops.executing_eagerly_outside_functions():
      # Value-based equality:
      self.assertAllEqual(
          math_ops.tensor_equals(a, b), [[False, False], [True]])
      self.assertAllEqual(
          math_ops.tensor_not_equals(a, b), [[True, True], [False]])
      # Value-based equality (w/ broadcasting):
      self.assertAllEqual(
          math_ops.tensor_equals(a, c), [[False, True], [False]])
      self.assertAllEqual(
          math_ops.tensor_not_equals(a, c), [[True, False], [True]])
      # Incompatible shapes yield a scalar False / True instead of raising.
      self.assertFalse(math_ops.tensor_equals(a, d),
                       msg='not broadcast-compatible')
      self.assertTrue(math_ops.tensor_not_equals(a, d),
                      msg='not broadcast-compatible')
    else:
      # Identity-based equality:
      self.assertAllEqual(math_ops.tensor_equals(a, a), True)
      self.assertAllEqual(math_ops.tensor_equals(a, b), False)
      self.assertAllEqual(math_ops.tensor_not_equals(a, b), True)
def testUnaryElementwiseOpsPreserveUniformRowLength(self):
# Unary elementwise op
rt = ragged_tensor.RaggedTensor.from_uniform_row_length(
ragged_factory_ops.constant([[1, 2], [3]]), uniform_row_length=2)
self.assertAllEqual(rt.uniform_row_length,
array_ops.zeros_like(rt).uniform_row_length)
# Unary-list elementwise op
rt = ragged_tensor.RaggedTensor.from_uniform_row_length(
ragged_factory_ops.constant([[1, 2], [3]]), uniform_row_length=2)
self.assertAllEqual(rt.uniform_row_length,
math_ops.add_n([rt, rt]).uniform_row_length)
  def test_ragged_op_list(self):
    """The generated ragged-op documentation lists the right ops per version.

    `ragged_dispatch.ragged_op_list` must include every op in `supported_ops`
    for both TF versions, plus the version-specific ops, and must exclude ops
    that only exist in the other version.
    """
    # Ops that should be listed as supported in both v1 and v2.
    supported_ops = [
        'bitcast', 'bitwise.bitwise_and', 'bitwise.bitwise_or',
        'bitwise.bitwise_xor', 'bitwise.invert', 'bitwise.left_shift',
        'bitwise.right_shift', 'clip_by_value', 'concat',
        'debugging.assert_equal', 'debugging.assert_near',
        'debugging.assert_none_equal', 'debugging.assert_greater',
        'debugging.assert_greater_equal', 'debugging.assert_less',
        'debugging.assert_less_equal', 'debugging.check_numerics',
        'cast', 'dtypes.complex',
        'dtypes.saturate_cast', 'expand_dims', 'gather_nd', 'gather',
        'io.decode_base64', 'io.decode_compressed', 'io.encode_base64',
        'math.abs', 'math.acos', 'math.acosh', 'math.add_n', 'math.add',
        'math.angle', 'math.asin', 'math.asinh', 'math.atan2', 'math.atan',
        'math.atanh', 'math.bessel_i0', 'math.bessel_i0e', 'math.bessel_i1',
        'math.bessel_i1e', 'math.ceil', 'math.conj', 'math.cos', 'math.cosh',
        'math.digamma', 'math.divide_no_nan', 'math.divide', 'math.equal',
        'math.erf', 'math.erfc', 'math.erfcinv', 'math.erfinv', 'math.exp',
        'math.expm1', 'math.floor', 'math.floordiv', 'math.floormod',
        'math.greater_equal', 'math.greater', 'math.imag', 'math.is_finite',
        'math.is_inf', 'math.is_nan', 'math.less_equal', 'math.less',
        'math.lgamma', 'math.log1p', 'math.log_sigmoid', 'math.log',
        'math.logical_and', 'math.logical_not', 'math.logical_or',
        'math.logical_xor', 'math.maximum', 'math.minimum',
        'math.multiply_no_nan', 'math.multiply', 'math.negative',
        'math.nextafter', 'math.not_equal', 'math.pow', 'math.real',
        'math.reciprocal', 'math.reciprocal_no_nan', 'math.reduce_any',
        'math.reduce_max', 'math.reduce_mean', 'math.reduce_variance',
        'math.reduce_std', 'math.reduce_min', 'math.reduce_prod',
        'math.reduce_sum', 'math.rint', 'math.round', 'math.rsqrt', 'math.sign',
        'math.sigmoid', 'math.sin', 'math.sinh', 'math.softplus', 'math.sqrt',
        'math.square', 'math.squared_difference', 'math.subtract', 'math.tan',
        'math.tanh', 'math.truediv', 'math.unsorted_segment_max',
        'math.unsorted_segment_mean', 'math.unsorted_segment_min',
        'math.unsorted_segment_prod', 'math.unsorted_segment_sqrt_n',
        'math.unsorted_segment_sum', 'one_hot', 'ones_like', 'rank', 'realdiv',
        'math.reduce_all', 'size', 'split', 'squeeze', 'stack',
        'strings.as_string', 'strings.join', 'strings.length',
        'strings.reduce_join', 'strings.regex_full_match',
        'strings.regex_replace', 'strings.strip', 'strings.substr',
        'strings.to_hash_bucket_fast', 'strings.to_hash_bucket_strong',
        'strings.to_hash_bucket', 'strings.to_number', 'strings.unicode_script',
        'tile', 'truncatediv', 'truncatemod', 'zeros_like', 'dynamic_partition',
        'reverse', 'nn.dropout', 'strings.format', 'print'
    ]
    # Ops that should be listed as supported in v1 only.
    supported_ops_v1 = ['batch_gather']
    # Ops that should be listed as supported in v2 only.
    supported_ops_v2 = ['nn.softmax']
    v1_ragged_ops = ragged_dispatch.ragged_op_list(tf_version=1)
    for element in supported_ops + supported_ops_v1:
      self.assertIn('`tf.' + element + '`', v1_ragged_ops)
    for element in supported_ops_v2:
      self.assertNotIn('`tf.' + element + '`', v1_ragged_ops)
    v2_ragged_ops = ragged_dispatch.ragged_op_list(tf_version=2)
    for element in supported_ops + supported_ops_v2:
      self.assertIn('`tf.' + element + '`', v2_ragged_ops)
    for element in supported_ops_v1:
      self.assertNotIn('`tf.' + element + '`', v2_ragged_ops)
def testDispatchWithVariable(self):
x = ragged_factory_ops.constant([[1, 2], [3, 4, 5]])
v = variables.Variable(10)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(math_ops.add(x, v), [[11, 12], [13, 14, 15]])
def testAssertType(self):
x = ragged_factory_ops.constant([[1., 2.], [3.]])
with ops.control_dependencies(
[check_ops.assert_type(x, dtypes.float32)]):
y = array_ops.identity(x)
self.assertAllEqual(x, y)
def assertDynamicRaggedShapeEqual(self, expected, result):
self.assertIsInstance(result, DynamicRaggedShape)
self.assertTrue(expected._type_spec.is_compatible_with(result))
for (e, r) in zip(
nest.flatten(expected, expand_composites=True),
nest.flatten(result, expand_composites=True)):
self.assertAllEqual(e, r)
if __name__ == '__main__':
googletest.main()
| RaggedDispatchTest |
python | doocs__leetcode | solution/3200-3299/3232.Find if Digit Game Can Be Won/Solution.py | {
"start": 0,
"end": 175
} | class ____:
def canAliceWin(self, nums: List[int]) -> bool:
a = sum(x for x in nums if x < 10)
b = sum(x for x in nums if x > 9)
return a != b
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_format21.py | {
"start": 315,
"end": 1057
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("format21.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with automatic color."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format(
{
"font_color": "automatic",
"fg_color": "automatic",
"bg_color": "red",
"pattern": 6,
}
)
worksheet.write(0, 0, "Foo", format1)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 7430,
"end": 8378
} | class ____(Node):
"""A function or a method, defined by one or more PyTD signatures.
Attributes:
name: The name of this function.
signatures: Tuple of possible parameter type combinations for this function.
kind: The kind of function (e.g., MethodKind.STATICMETHOD).
flags: A bitfield of flags like is_abstract
"""
name: str
signatures: tuple[Signature, ...]
kind: MethodKind
flags: MethodFlag = MethodFlag.NONE
decorators: tuple[Alias, ...] = ()
@property
def is_abstract(self):
return bool(self.flags & MethodFlag.ABSTRACT)
@property
def is_coroutine(self):
return bool(self.flags & MethodFlag.COROUTINE)
@property
def is_final(self):
return bool(self.flags & MethodFlag.FINAL)
def with_flag(self, flag, value):
"""Return a copy of self with flag set to value."""
new_flags = self.flags | flag if value else self.flags & ~flag
return self.Replace(flags=new_flags)
| Function |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 69820,
"end": 70431
} | class ____(themeable):
"""
y-axis minor-tick padding
Parameters
----------
theme_element : float
Note
----
Padding is not applied when the
[](`~plotnine.theme.themeables.axis_ticks_minor_y`) are
blank, but it does apply when the
[](`~plotnine.theme.themeables.axis_ticks_length_minor_y`)
is zero.
"""
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
val = self.properties["value"]
for t in ax.yaxis.get_minor_ticks():
_val = val if t.tick1line.get_visible() else 0
t.set_pad(_val)
| axis_ticks_pad_minor_y |
python | pytorch__pytorch | torch/_inductor/codegen/simd_kernel_features.py | {
"start": 8083,
"end": 14460
} | class ____:
"""
Estimate various properties of the kernel for use in heuristics.
We simulate the memory effects of CSE/buffer elimination in codegen.
"""
kernel_sizes: tuple[sympy.Expr, ...]
outside_loop: MemoryEstimate
loops: list[MemoryEstimate]
persistent: MemoryEstimate
symbols: list[sympy.Symbol]
def __init__(self, features: SIMDKernelFeatures, groups: Sequence[sympy.Expr]):
self.features = features
self.inside_reduction = features.is_reduction()
self.store_buffer_names: OrderedSet[str] = OrderedSet()
self.must_keep_buffers: OrderedSet[str] = OrderedSet()
self.num_reductions_dims = 1
self.groups = groups
self.symbols = [make_symbol(SymT.INDEX, i) for i in range(len(groups))]
# We are doing two estimates simultaneously:
# 1) the first is a for a non-persistent (aka looped) reduction, using self.outside_loop/self.loops
# we add an item to loops each corresponding to each reduction loop in the kernel
# outside_loop is only used for broadcasting or point-wise ops that don't use the reduction dimension
# 2) the second is for a persistent kernel, using self.persistent
# persistent kernels don't have loops, so we only have one MemoryEstimate()
# for point-wise ops the two estimates will be the same, they matter for reductions only
self.outside_loop = MemoryEstimate()
self.loops = [MemoryEstimate()]
self.persistent = MemoryEstimate()
self.simulate_codegen()
self.remove_kernel_local()
def simulate_codegen(self) -> None:
from .simd import SIMDKernel
kernel_size_outside_loop = (*self.groups[:-1], sympy.S.One)
kernel_size_inside_loop = tuple(self.groups)
self.kernel_sizes = kernel_size_inside_loop
for node in self.features.node_schedule:
if node is DisableReduction:
self.inside_reduction = False
self.kernel_sizes = kernel_size_outside_loop
continue
elif node is EnableReduction:
self.inside_reduction = True
self.kernel_sizes = kernel_size_inside_loop
self.loops.append(MemoryEstimate())
continue
assert isinstance(node, SchedulerNode)
rw = extract_loop_body_with_args(
node._body,
SIMDKernel.map_kernel_groups_to_node_sizes(
self.kernel_sizes, node.get_ranges(), self.set_ranges
),
dict(zip(self.symbols, self.kernel_sizes)),
)
for dep in rw._reads:
if not isinstance(dep, MemoryDep):
continue
dep = dep.simplify_with_ranges()
if not self.persistent.writes.get(dep.name): # cache miss?
self.persistent.reads[dep.name].add(dep)
# the cache behavior of looped kernels is more complex than the persistent case above
# some operations are lifted outside the loop (if they don't use the reduction dimension)
# other operations are inside the loop, and can only be reused within the same loop
if not (
self.outside_loop.writes.get(dep.name)
or self.loops[-1].writes.get(dep.name)
):
self.scope(dep).reads[dep.name].add(dep)
if dep.name in self.store_buffer_names and self.loops[-1].reads.get(
dep.name
):
self.must_keep_buffers.add(dep.name)
for dep in rw._writes:
if not isinstance(dep, MemoryDep):
continue
dep = dep.simplify_with_ranges()
self.store_buffer_names.add(dep.name)
self.persistent.writes[dep.name].add(dep)
self.scope(dep).writes[dep.name].add(dep)
def remove_kernel_local(self) -> None:
# Remove any kernel-local buffers
fused_node_names = OrderedSet(
[n.get_name() for n in self.features.scheduler_nodes()]
)
for name in self.store_buffer_names:
if not self.persistent.reads.get(
name
) and V.graph.scheduler.can_buffer_be_removed_through_fusion(
name, fused_node_names
):
self.persistent.remove(name)
if name not in self.must_keep_buffers:
# we can also remove this from the looped kernel
self.outside_loop.remove(name)
for loop in self.loops:
loop.remove(name)
if not self.loops[-1]:
self.loops.pop() # for pointwise ops
def scope(self, dep: MemoryDep) -> MemoryEstimate:
"""Determine how a read/write should be categorized"""
if self.inside_reduction and (
self.has_reduction_var(dep.index) or dep.is_indirect()
):
return self.loops[-1]
return self.outside_loop
def has_reduction_var(self, index: sympy.Expr) -> bool:
for sym in self.symbols[-self.num_reductions_dims :]:
if isinstance(sym, sympy.Symbol) and sym in index.free_symbols:
return True
return False
def set_ranges(self, *lengths: list[list[sympy.Expr]]) -> list[list[sympy.Expr]]:
assert len(self.kernel_sizes) == len(lengths)
return [
self.make_flat_range(sym, numel, length)
for sym, numel, length in zip(self.symbols, self.kernel_sizes, lengths)
]
@staticmethod
def make_flat_range(
sym: sympy.Symbol, numel: sympy.Expr, lengths: list[sympy.Expr]
) -> list[sympy.Expr]:
if len(lengths) == 1 and numel == lengths[0]:
return [sym]
divisor = sympy.S.One
itervars = []
for length in reversed(lengths):
if V.graph.sizevars.statically_known_equals(divisor * length, numel):
expr = FloorDiv(sym, divisor)
else:
expr = ModularIndexing(sym, divisor, length)
itervars.append(expr)
divisor = divisor * length
return [*reversed(itervars)]
@dataclasses.dataclass
| MemoryEstimator |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/splice_vh/package.py | {
"start": 217,
"end": 948
} | class ____(Package):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/splice-vh-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
variant("foo", default=False, description="nope")
variant("bar", default=False, description="nope")
variant("baz", default=False, description="nope")
depends_on("splice-z")
depends_on("splice-z+foo", when="+foo")
provides("something")
def install(self, spec, prefix):
with open(prefix.join("splice-vh"), "w", encoding="utf-8") as f:
f.write("splice-vh: {0}".format(prefix))
f.write("splice-z: {0}".format(spec["splice-z"].prefix))
| SpliceVh |
python | coleifer__peewee | tests/postgres.py | {
"start": 32467,
"end": 33086
} | class ____(ModelTestCase):
database = db
requires = [Register]
def test_postgres_cte_materialization(self):
Register.insert_many([(i,) for i in (1, 2, 3)]).execute()
for materialized in (None, False, True):
cte = Register.select().cte('t', materialized=materialized)
query = (cte
.select_from(cte.c.value)
.where(cte.c.value != 2)
.order_by(cte.c.value))
self.assertEqual([r.value for r in query], [1, 3])
@skip_unless(pg93(), 'lateral join requires pg >= 9.3')
| TestPostgresCTEMaterialization |
python | huggingface__transformers | tests/models/sam3/test_modeling_sam3.py | {
"start": 1643,
"end": 4693
} | class ____:
def __init__(
self,
parent,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=64,
num_channels=3,
image_size=224,
patch_size=14,
window_size=8,
global_attn_indexes=None,
fpn_hidden_size=32,
scale_factors=None,
batch_size=2,
is_training=False,
):
if global_attn_indexes is None:
global_attn_indexes = [0, 1]
if scale_factors is None:
scale_factors = [4.0, 2.0, 1.0, 0.5]
self.parent = parent
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.window_size = window_size
self.global_attn_indexes = global_attn_indexes
self.fpn_hidden_size = fpn_hidden_size
self.scale_factors = scale_factors
self.batch_size = batch_size
self.is_training = is_training
def get_config(self):
backbone_config = Sam3ViTConfig(
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
num_channels=self.num_channels,
image_size=self.image_size,
patch_size=self.patch_size,
window_size=self.window_size,
global_attn_indexes=self.global_attn_indexes,
)
return Sam3VisionConfig(
backbone_config=backbone_config,
fpn_hidden_size=self.fpn_hidden_size,
scale_factors=self.scale_factors,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def create_and_check_model(self, config, pixel_values):
model = Sam3VisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# Check FPN outputs
self.parent.assertEqual(len(result.fpn_hidden_states), len(self.scale_factors))
self.parent.assertEqual(len(result.fpn_position_encoding), len(self.scale_factors))
# Check last hidden state shape
expected_seq_len = (self.image_size // self.patch_size) ** 2
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| Sam3VisionModelTester |
python | ansible__ansible | test/units/module_utils/facts/test_facts.py | {
"start": 5855,
"end": 6038
} | class ____(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = network.netbsd.NetBSDNetwork
collector_class = network.netbsd.NetBSDNetworkCollector
| TestNetBSDNetwork |
python | OmkarPathak__pygorithm | pygorithm/geometry/vector2.py | {
"start": 104,
"end": 13732
} | class ____(object):
"""
Define a simple two-dimensional, mutable vector.
.. important::
Equality is not overriden on vectors, because it is expected that
vectors will be used mutably by directly modifying x and y. However, all
functions on vectors are immutable (they return a copy)
:ivar x: The first component of this vector.
:vartype x: :class:`numbers.Number`
:ivar y: The second component of this vector.
:vartype y: :class:`numbers.Number`
"""
def __init__(self, *args, **kwargs):
"""
Create a new Vector2 from the two components.
Accepts a pair of unnamed parameters, a pair of named x, y parameters,
another Vector2, or a tuple with 2 numerics. Examples of each:
.. code-block:: python
from pygorithm.geometry import vector2
# A pair of unnamed parameters
vec1 = vector2.Vector2(0, 5)
# A pair of named parameters
vec2 = vector2.Vector2(x = 0, y = 5)
# Another vector2
vec3 = vector2.Vector2(vec2)
# A tuple with two numerics
vec4 = vector2.Vector2( (0, 5) )
:param args: unnamed arguments (purpose guessed by order)
:param kwargs: named arguments (purpose known by name)
"""
if len(args) == 2:
self.x = args[0]
self.y = args[1]
elif len(args) == 1:
if type(args[0]) == tuple:
self.x = args[0][0]
self.y = args[0][1]
else:
self.x = args[0].x
self.y = args[0].y
else:
assert(len(args) == 0)
self.x = kwargs['x']
self.y = kwargs['y']
def __add__(self, other):
"""
Adds the two vectors component wise.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(0, 3)
vec2 = vector2.Vector2(2, 4)
vec3 = vec1 + vec2
# prints <2, 7>
print(vec3)
:param other: the vector to add to this one
:type other: :class:`pygorithm.geometry.vector2.Vector2`
:returns: a new vector that is the sum of self and other
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
"""
return Vector2(self.x + other.x, self.y + other.y)
def __sub__(self, other):
"""
Subtract the two vectors component wise.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(5, 5)
vec2 = vector2.Vector2(2, 3)
vec3 = vec1 - vec2
vec4 = vec2 - vec1
# prints <3, 2>
print(vec3)
# prints <2, 3>
print(vec4)
:param other: the vector to subtract from this one
:type other: :class:`pygorithm.geometry.vector2.Vector2`
:returns: a new vector two that is the difference of self and other
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
"""
return Vector2(self.x - other.x, self.y - other.y)
def __mul__(self, scale_factor):
"""
Scale the vector by the specified factor.
.. caution::
This will never perform a dot product. If scale_factor is a Vector2, an
exception is thrown.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(4, 8)
vec2 = vec1 * 0.5
# prints <2, 4>
print(vec2)
:param: scale_factor the amount to scale this vector by
:type scale_factor: :class:`numbers.Number`
:returns: a new vector that is self scaled by scale_factor
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
:raises TypeError: if scale_factor is a Vector2
"""
if type(scale_factor) == Vector2:
raise TypeError('scale_factor cannot be a Vector2 (use dot!)')
return Vector2(self.x * scale_factor, self.y * scale_factor)
def __rmul__(self, scale_factor):
"""
Scale the vector by the specified factor.
.. caution::
This will never perform a dot product. If scale_factor is a Vector2, an
exception is thrown.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(4, 8)
vec2 = 2 * vec1
# prints <8, 16>
print(vec2)
:param: scale_factor the amount to scale this vector by
:type scale_factor: :class:`numbers.Number`
:returns: a new vector that is self scaled by scale_factor
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
:raises TypeError: if scale_factor is a Vector2
"""
if type(scale_factor) == Vector2:
raise TypeError('scale_factor cannot be a Vector2 (use dot!)')
return Vector2(self.x * scale_factor, self.y * scale_factor)
def __repr__(self):
"""
Create an unambiguous representation of this vector
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec = vector2.Vector2(3, 5)
# prints vector2(x=3, y=5)
print(repr(vec))
:returns: an unambiguous representation of this vector
:rtype: string
"""
return "vector2(x={}, y={})".format(self.x, self.y)
def __str__(self):
"""
Create a human-readable representation of this vector.
Rounds to 3 decimal places if there are more.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec = vector2.Vector2(7, 11)
# prints <7, 11>
print(str(vec))
# also prints <7, 11>
print(vec)
:returns: a human-readable representation of this vector
:rtype: string
"""
pretty_x = round(self.x * 1000) / 1000
if pretty_x == math.floor(pretty_x):
pretty_x = math.floor(pretty_x)
pretty_y = round(self.y * 1000) / 1000
if pretty_y == math.floor(pretty_y):
pretty_y = math.floor(pretty_y)
return "<{}, {}>".format(pretty_x, pretty_y)
def dot(self, other):
"""
Calculate the dot product between this vector and other.
The dot product of two vectors is calculated as so::
Let v1 be a vector such that v1 = <v1_x, v1_y>
Let v2 be a vector such that v2 = <v2_x, v2_y>
v1 . v2 = v1_x * v2_x + v1_y * v2_y
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(3, 5)
vec2 = vector2.Vector2(7, 11)
dot_12 = vec1.dot(vec2)
# prints 76
print(dot_12)
:param other: the other vector
:type other: :class:`pygorithm.geometry.vector2.Vector2`
:returns: the dot product of self and other
:rtype: :class:`numbers.Number`
"""
return self.x * other.x + self.y * other.y
def cross(self, other):
"""
Calculate the z-component of the cross product between this vector and other.
The cross product of two vectors is calculated as so::
Let v1 be a vector such that v1 = <v1_x, v1_y>
Let v2 be a vector such that v2 = <v2_x, v2_y>
v1 x v2 = v1.x * v2.y - v1.y * v2.x
.. caution::
This is the special case of a cross product in 2 dimensions returning 1
value. This is really a vector in the z direction!
"""
return self.x * other.y - self.y * other.x
def rotate(self, *args, **kwargs):
"""
The named argument "degrees" or "radians" may be passed in to rotate
this vector by the specified amount in degrees (or radians),
respectively. If both are omitted, the first unnamed argument is
assumed to be the amount to rotate in radians.
Additionally, the named argument "about" may be passed in to specify
about what the vector should be rotated. If omitted then the first
unconsumed unnamed argument is assumed to be the vector. If there are
no unconsumed unnamed arguments then the origin is assumed.
Examples:
.. code-block:: python
from pygorithm.geometry import vector2
import math
vec1 = vector2.Vector2(1, 0)
vec2 = vec1.rotate(math.pi * 0.25)
# prints <0.707, 0.707>
print(vec2)
vec3 = vec1.rotate(degrees = 45)
# prints <0.707, 0.707>
print(vec3)
# The following operations are all identical
vec4 = vec1.rotate(math.pi, vector2.Vector2(1, 1))
vec5 = vec1.rotate(radians = math.pi, about = vector2.Vector2(1, 1))
vec6 = vec1.rotate(degrees = 180, about = vector2.Vector2(1, 1))
vec7 = vec1.rotate(vector2.Vector2(1, 1), degrees = 180)
# prints <1, 2>
print(vec4)
:param args: the unnamed arguments (purpose guessed by position)
:param kwargs: the named arguments (purpose known by name)
:returns: the new vector formed by rotating this vector
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
"""
args_counter = 0
deg_rads = None
about = None
if 'radians' in kwargs:
deg_rads = kwargs['radians']
elif 'degrees' in kwargs:
deg_rads = kwargs['degrees'] * math.pi / 180
else:
deg_rads = args[args_counter]
args_counter = args_counter + 1
if 'about' in kwargs:
about = kwargs['about']
else:
if len(args) > args_counter:
about = args[args_counter]
fixed_x = self.x
fixed_y = self.y
if about is not None:
fixed_x -= about.x
fixed_y -= about.y
rotated_x = fixed_x * math.cos(deg_rads) - fixed_y * math.sin(deg_rads)
rotated_y = fixed_y * math.cos(deg_rads) + fixed_x * math.sin(deg_rads)
final_x = rotated_x
final_y = rotated_y
if about is not None:
final_x += about.x
final_y += about.y
return Vector2(final_x, final_y)
def normalize(self):
"""
Create the normalized version of this vector
The normalized version will go in the same direction but will
have magnitude of 1.
.. note::
This will never return self, even if this vector is already
normalized.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(2, 0)
vec2 = vec1.normalize()
# prints <1, 0>
print(vec2)
:returns: a new normalized version of this vector
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
"""
return self * (1 / self.magnitude())
def magnitude_squared(self):
"""
Calculate the square of the magnitude of this vector.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(5, 12)
magn_sq = vec1.magnitude_squared()
# prints 169 (13^2)
print(magn_sq)
:returns: square of the magnitude of this vector
:rtype: :class:`numbers.Number`
"""
return self.x * self.x + self.y * self.y
def magnitude(self):
"""
Calculate the magnitude of this vector
.. note::
It is substantially faster to operate on magnitude squared
where possible.
Example:
.. code-block:: python
from pygorithm.geometry import vector2
vec1 = vector2.Vector2(3, 4)
magn = vec1.magnitude()
# prints 5
print(magn)
:returns: magnitude of this vector
:rtype: :class:`numbers.Number`
"""
return math.sqrt(self.magnitude_squared()) | Vector2 |
python | huggingface__transformers | src/transformers/models/zamba/modeling_zamba.py | {
"start": 55860,
"end": 60856
} | class ____(ZambaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ZambaModel(config)
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
__all__ = ["ZambaForCausalLM", "ZambaForSequenceClassification", "ZambaModel", "ZambaPreTrainedModel"]
| ZambaForSequenceClassification |
python | python__mypy | mypy/types.py | {
"start": 63664,
"end": 64435
} | class ____(ProperType):
"""Abstract base class for function types."""
__slots__ = ("fallback",)
fallback: Instance
def __init__(self, line: int = -1, column: int = -1) -> None:
super().__init__(line, column)
self._can_be_false = False
@abstractmethod
def is_type_obj(self) -> bool:
pass
@abstractmethod
def type_object(self) -> mypy.nodes.TypeInfo:
pass
@property
@abstractmethod
def items(self) -> list[CallableType]:
pass
@abstractmethod
def with_name(self, name: str) -> FunctionLike:
pass
@abstractmethod
def get_name(self) -> str | None:
pass
def bound(self) -> bool:
return bool(self.items) and self.items[0].is_bound
| FunctionLike |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 54156,
"end": 54471
} | class ____(themeable):
"""
Place panel background & gridlines over/under the data layers
Parameters
----------
theme_element : bool
Default is False.
"""
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
ax.set_axisbelow(not self.properties["value"])
| panel_ontop |
python | doocs__leetcode | solution/1000-1099/1038.Binary Search Tree to Greater Sum Tree/Solution.py | {
"start": 192,
"end": 557
} | class ____:
def bstToGst(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
def dfs(root: Optional[TreeNode]):
if root is None:
return
dfs(root.right)
nonlocal s
s += root.val
root.val = s
dfs(root.left)
s = 0
dfs(root)
return root
| Solution |
python | python__mypy | mypy/constraints.py | {
"start": 1458,
"end": 27683
} | class ____:
"""A representation of a type constraint.
It can be either T <: type or T :> type (T is a type variable).
"""
type_var: TypeVarId
op = 0 # SUBTYPE_OF or SUPERTYPE_OF
target: Type
def __init__(self, type_var: TypeVarLikeType, op: int, target: Type) -> None:
self.type_var = type_var.id
self.op = op
# TODO: should we add "assert not isinstance(target, UnpackType)"?
# UnpackType is a synthetic type, and is never valid as a constraint target.
self.target = target
self.origin_type_var = type_var
# These are additional type variables that should be solved for together with type_var.
# TODO: A cleaner solution may be to modify the return type of infer_constraints()
# to include these instead, but this is a rather big refactoring.
self.extra_tvars: list[TypeVarLikeType] = []
def __repr__(self) -> str:
op_str = "<:"
if self.op == SUPERTYPE_OF:
op_str = ":>"
return f"{self.type_var} {op_str} {self.target}"
def __hash__(self) -> int:
return hash((self.type_var, self.op, self.target))
def __eq__(self, other: object) -> bool:
if not isinstance(other, Constraint):
return False
return (self.type_var, self.op, self.target) == (other.type_var, other.op, other.target)
def infer_constraints_for_callable(
callee: CallableType,
arg_types: Sequence[Type | None],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
formal_to_actual: list[list[int]],
context: ArgumentInferContext,
) -> list[Constraint]:
"""Infer type variable constraints for a callable and actual arguments.
Return a list of constraints.
"""
constraints: list[Constraint] = []
mapper = ArgTypeExpander(context)
param_spec = callee.param_spec()
param_spec_arg_types = []
param_spec_arg_names = []
param_spec_arg_kinds = []
incomplete_star_mapping = False
for i, actuals in enumerate(formal_to_actual): # TODO: isn't this `enumerate(arg_types)`?
for actual in actuals:
if actual is None and callee.arg_kinds[i] in (ARG_STAR, ARG_STAR2): # type: ignore[unreachable]
# We can't use arguments to infer ParamSpec constraint, if only some
# are present in the current inference pass.
incomplete_star_mapping = True # type: ignore[unreachable]
break
for i, actuals in enumerate(formal_to_actual):
if isinstance(callee.arg_types[i], UnpackType):
unpack_type = callee.arg_types[i]
assert isinstance(unpack_type, UnpackType)
# In this case we are binding all the actuals to *args,
# and we want a constraint that the typevar tuple being unpacked
# is equal to a type list of all the actuals.
actual_types = []
unpacked_type = get_proper_type(unpack_type.type)
if isinstance(unpacked_type, TypeVarTupleType):
tuple_instance = unpacked_type.tuple_fallback
elif isinstance(unpacked_type, TupleType):
tuple_instance = unpacked_type.partial_fallback
else:
assert False, "mypy bug: unhandled constraint inference case"
for actual in actuals:
actual_arg_type = arg_types[actual]
if actual_arg_type is None:
continue
expanded_actual = mapper.expand_actual_type(
actual_arg_type,
arg_kinds[actual],
callee.arg_names[i],
callee.arg_kinds[i],
allow_unpack=True,
)
if arg_kinds[actual] != ARG_STAR or isinstance(
get_proper_type(actual_arg_type), TupleType
):
actual_types.append(expanded_actual)
else:
# If we are expanding an iterable inside * actual, append a homogeneous item instead
actual_types.append(
UnpackType(tuple_instance.copy_modified(args=[expanded_actual]))
)
if isinstance(unpacked_type, TypeVarTupleType):
constraints.append(
Constraint(
unpacked_type,
SUPERTYPE_OF,
TupleType(actual_types, unpacked_type.tuple_fallback),
)
)
elif isinstance(unpacked_type, TupleType):
# Prefixes get converted to positional args, so technically the only case we
# should have here is like Tuple[Unpack[Ts], Y1, Y2, Y3]. If this turns out
# not to hold we can always handle the prefixes too.
inner_unpack = unpacked_type.items[0]
assert isinstance(inner_unpack, UnpackType)
inner_unpacked_type = get_proper_type(inner_unpack.type)
suffix_len = len(unpacked_type.items) - 1
if isinstance(inner_unpacked_type, TypeVarTupleType):
# Variadic item can be either *Ts...
constraints.append(
Constraint(
inner_unpacked_type,
SUPERTYPE_OF,
TupleType(
actual_types[:-suffix_len], inner_unpacked_type.tuple_fallback
),
)
)
else:
# ...or it can be a homogeneous tuple.
assert (
isinstance(inner_unpacked_type, Instance)
and inner_unpacked_type.type.fullname == "builtins.tuple"
)
for at in actual_types[:-suffix_len]:
constraints.extend(
infer_constraints(inner_unpacked_type.args[0], at, SUPERTYPE_OF)
)
# Now handle the suffix (if any).
if suffix_len:
for tt, at in zip(unpacked_type.items[1:], actual_types[-suffix_len:]):
constraints.extend(infer_constraints(tt, at, SUPERTYPE_OF))
else:
assert False, "mypy bug: unhandled constraint inference case"
else:
for actual in actuals:
actual_arg_type = arg_types[actual]
if actual_arg_type is None:
continue
if param_spec and callee.arg_kinds[i] in (ARG_STAR, ARG_STAR2):
# If actual arguments are mapped to ParamSpec type, we can't infer individual
# constraints, instead store them and infer single constraint at the end.
# It is impossible to map actual kind to formal kind, so use some heuristic.
# This inference is used as a fallback, so relying on heuristic should be OK.
if not incomplete_star_mapping:
param_spec_arg_types.append(
mapper.expand_actual_type(
actual_arg_type, arg_kinds[actual], None, arg_kinds[actual]
)
)
actual_kind = arg_kinds[actual]
param_spec_arg_kinds.append(
ARG_POS if actual_kind not in (ARG_STAR, ARG_STAR2) else actual_kind
)
param_spec_arg_names.append(arg_names[actual] if arg_names else None)
else:
actual_type = mapper.expand_actual_type(
actual_arg_type,
arg_kinds[actual],
callee.arg_names[i],
callee.arg_kinds[i],
)
c = infer_constraints(callee.arg_types[i], actual_type, SUPERTYPE_OF)
constraints.extend(c)
if (
param_spec
and not any(c.type_var == param_spec.id for c in constraints)
and not incomplete_star_mapping
):
# Use ParamSpec constraint from arguments only if there are no other constraints,
# since as explained above it is quite ad-hoc.
constraints.append(
Constraint(
param_spec,
SUPERTYPE_OF,
Parameters(
arg_types=param_spec_arg_types,
arg_kinds=param_spec_arg_kinds,
arg_names=param_spec_arg_names,
imprecise_arg_kinds=True,
),
)
)
if any(isinstance(v, ParamSpecType) for v in callee.variables):
# As a perf optimization filter imprecise constraints only when we can have them.
constraints = filter_imprecise_kinds(constraints)
return constraints
def infer_constraints(
    template: Type, actual: Type, direction: int, skip_neg_op: bool = False
) -> list[Constraint]:
    """Infer type constraints.

    Match a template type, which may contain type variable references,
    recursively against a type which does not contain (the same) type
    variable references. The result is a list of type constrains of
    form 'T is a supertype/subtype of x', where T is a type variable
    present in the template and x is a type without reference to type
    variables present in the template.

    Assume T and S are type variables. Now the following results can be
    calculated (read as '(template, actual) --> result'):

        (T, X)           --> T :> X
        (X[T], X[Y])     --> T <: Y and T :> Y
        ((T, T), (X, Y)) --> T :> X and T :> Y
        ((T, S), (X, Y)) --> T :> X and S :> Y
        (X[T], Any)      --> T <: Any and T :> Any

    The constraints are represented as Constraint objects. If skip_neg_op == True,
    then skip adding reverse (polymorphic) constraints (since this is already a call
    to infer such constraints).
    """
    if any(
        get_proper_type(template) == get_proper_type(t)
        and get_proper_type(actual) == get_proper_type(a)
        for (t, a) in reversed(type_state.inferring)
    ):
        # This exact (template, actual) pair is already being inferred further
        # up the call stack: cut the cycle to avoid infinite recursion on
        # recursive types.
        return []
    if has_recursive_types(template) or isinstance(get_proper_type(template), Instance):
        # This case requires special care because it may cause infinite recursion.
        # Note that we include Instances because the may be recursive as str(Sequence[str]).
        if not has_type_vars(template):
            # Return early on an empty branch.
            return []
        # Record the pair so that the re-entrancy check above can detect it.
        type_state.inferring.append((template, actual))
        res = _infer_constraints(template, actual, direction, skip_neg_op)
        type_state.inferring.pop()
        return res
    return _infer_constraints(template, actual, direction, skip_neg_op)
def _infer_constraints(
    template: Type, actual: Type, direction: int, skip_neg_op: bool
) -> list[Constraint]:
    """Implementation of infer_constraints() (see its docstring for the contract).

    Assumes the recursion guard in infer_constraints() has already been applied.
    """
    # Keep the original (possibly recursive alias) template: it must be passed
    # on intact when recursing over union items of `actual` below.
    orig_template = template
    template = get_proper_type(template)
    actual = get_proper_type(actual)
    # Type inference shouldn't be affected by whether union types have been simplified.
    # We however keep any ErasedType items, so that the caller will see it when using
    # checkexpr.has_erased_component().
    if isinstance(template, UnionType):
        template = mypy.typeops.make_simplified_union(template.items, keep_erased=True)
    if isinstance(actual, UnionType):
        actual = mypy.typeops.make_simplified_union(actual.items, keep_erased=True)
    # Ignore Any types from the type suggestion engine to avoid them
    # causing us to infer Any in situations where a better job could
    # be done otherwise. (This can produce false positives but that
    # doesn't really matter because it is all heuristic anyway.)
    if isinstance(actual, AnyType) and actual.type_of_any == TypeOfAny.suggestion_engine:
        return []
    # type[A | B] is always represented as type[A] | type[B] internally.
    # This makes our constraint solver choke on type[T] <: type[A] | type[B],
    # solving T as generic meet(A, B) which is often `object`. Force unwrap such unions
    # if both sides are type[...] or unions thereof. See `testTypeVarType` test
    type_type_unwrapped = False
    if _is_type_type(template) and _is_type_type(actual):
        type_type_unwrapped = True
        template = _unwrap_type_type(template)
        actual = _unwrap_type_type(actual)
    # If the template is simply a type variable, emit a Constraint directly.
    # We need to handle this case before handling Unions for two reasons:
    #  1. "T <: Union[U1, U2]" is not equivalent to "T <: U1 or T <: U2",
    #     because T can itself be a union (notably, Union[U1, U2] itself).
    #  2. "T :> Union[U1, U2]" is logically equivalent to "T :> U1 and
    #     T :> U2", but they are not equivalent to the constraint solver,
    #     which never introduces new Union types (it uses join() instead).
    if isinstance(template, TypeVarType):
        return [Constraint(template, direction, actual)]
    if (
        isinstance(actual, TypeVarType)
        and not actual.id.is_meta_var()
        and direction == SUPERTYPE_OF
    ):
        # Unless template is also a type variable (or a union that contains one), using the upper
        # bound for inference will usually give better result for actual that is a type variable.
        if not isinstance(template, UnionType) or not any(
            isinstance(t, TypeVarType) for t in template.items
        ):
            actual = get_proper_type(actual.upper_bound)
    # Now handle the case of either template or actual being a Union.
    # For a Union to be a subtype of another type, every item of the Union
    # must be a subtype of that type, so concatenate the constraints.
    if direction == SUBTYPE_OF and isinstance(template, UnionType):
        res = []
        for t_item in template.items:
            res.extend(infer_constraints(t_item, actual, direction))
        return res
    if direction == SUPERTYPE_OF and isinstance(actual, UnionType):
        res = []
        for a_item in actual.items:
            # `orig_template` has to be preserved intact in case it's recursive.
            # If we unwrapped ``type[...]`` previously, wrap the item back again,
            # as ``type[...]`` can't be removed from `orig_template`.
            if type_type_unwrapped:
                a_item = TypeType.make_normalized(a_item)
            res.extend(infer_constraints(orig_template, a_item, direction))
        return res
    # Now the potential subtype is known not to be a Union or a type
    # variable that we are solving for. In that case, for a Union to
    # be a supertype of the potential subtype, some item of the Union
    # must be a supertype of it.
    if direction == SUBTYPE_OF and isinstance(actual, UnionType):
        # If some of items is not a complete type, disregard that.
        items = simplify_away_incomplete_types(actual.items)
        # We infer constraints eagerly -- try to find constraints for a type
        # variable if possible. This seems to help with some real-world
        # use cases.
        return any_constraints(
            [infer_constraints_if_possible(template, a_item, direction) for a_item in items],
            eager=True,
        )
    if direction == SUPERTYPE_OF and isinstance(template, UnionType):
        # When the template is a union, we are okay with leaving some
        # type variables indeterminate. This helps with some special
        # cases, though this isn't very principled.
        result = any_constraints(
            [
                infer_constraints_if_possible(t_item, actual, direction)
                for t_item in template.items
            ],
            eager=isinstance(actual, AnyType),
        )
        if result:
            return result
        elif has_recursive_types(template) and not has_recursive_types(actual):
            # No consistent constraints from the items individually; fall back
            # to the recursive-union heuristic (split typevar/non-typevar parts).
            return handle_recursive_union(template, actual, direction)
        return []
    # Remaining cases are handled by ConstraintBuilderVisitor.
    return template.accept(ConstraintBuilderVisitor(actual, direction, skip_neg_op))
def _is_type_type(tp: ProperType) -> TypeGuard[TypeType | UnionType]:
    """Is ``tp`` a ``type[...]`` or a union thereof?

    ``Type[A | B]`` is internally represented as ``type[A] | type[B]``, and this
    troubles the solver sometimes.
    """
    if isinstance(tp, TypeType):
        return True
    if not isinstance(tp, UnionType):
        return False
    # A union qualifies only when every member is itself a type[...].
    return all(isinstance(get_proper_type(item), TypeType) for item in tp.items)
def _unwrap_type_type(tp: TypeType | UnionType) -> ProperType:
    """Extract the inner type from a ``type[...]`` expression or a union thereof."""
    if isinstance(tp, TypeType):
        return tp.item
    # A union of type[...] items: strip one level of type[] from every member.
    inner_items = []
    for member in tp.items:
        inner_items.append(cast(TypeType, get_proper_type(member)).item)
    return UnionType.make_union(inner_items)
def infer_constraints_if_possible(
    template: Type, actual: Type, direction: int
) -> list[Constraint] | None:
    """Like infer_constraints, but return None if the input relation is
    known to be unsatisfiable, for example if template=List[T] and actual=int.
    (In this case infer_constraints would return [], just like it would for
    an automatically satisfied relation like template=List[T] and actual=object.)
    """
    erased_template = erase_typevars(template)
    if direction == SUBTYPE_OF:
        if not mypy.subtypes.is_subtype(erased_template, actual):
            return None
    if direction == SUPERTYPE_OF:
        if not mypy.subtypes.is_subtype(actual, erased_template):
            return None
        if isinstance(template, TypeVarType) and not mypy.subtypes.is_subtype(
            actual, erase_typevars(template.upper_bound)
        ):
            # Not caught by the check just above because erase_typevars() turns
            # the type variable itself into 'Any'; check the bound explicitly.
            return None
    return infer_constraints(template, actual, direction)
def select_trivial(options: Sequence[list[Constraint] | None]) -> list[list[Constraint]]:
    """Select only those lists where each item is a constraint against Any."""
    return [
        option
        for option in options
        if option is not None
        and all(isinstance(get_proper_type(c.target), AnyType) for c in option)
    ]
def merge_with_any(constraint: Constraint) -> Constraint:
    """Transform a constraint target into a union with given Any type."""
    old_target = constraint.target
    if is_union_with_any(old_target):
        # The target already contains an Any item; adding one more would be redundant.
        return constraint
    # TODO: if we will support multiple sources Any, use this here instead.
    widened_target = UnionType.make_union(
        [old_target, AnyType(TypeOfAny.implementation_artifact)],
        old_target.line,
        old_target.column,
    )
    return Constraint(constraint.origin_type_var, constraint.op, widened_target)
def handle_recursive_union(template: UnionType, actual: Type, direction: int) -> list[Constraint]:
    # This is a hack to special-case things like Union[T, Inst[T]] in recursive types. Although
    # it is quite arbitrary, it is a relatively common pattern, so we should handle it well.
    # This function may be called when inferring against such union resulted in different
    # constraints for each item. Normally we give up in such case, but here we instead split
    # the union in two parts, and try inferring sequentially.
    type_var_items: list[Type] = []
    other_items: list[Type] = []
    for item in template.items:
        if isinstance(item, TypeVarType):
            type_var_items.append(item)
        else:
            other_items.append(item)
    # Try the non-typevar part first; only fall back to the bare typevars if it
    # yields nothing.
    return infer_constraints(
        UnionType.make_union(other_items), actual, direction
    ) or infer_constraints(UnionType.make_union(type_var_items), actual, direction)
def any_constraints(options: list[list[Constraint] | None], *, eager: bool) -> list[Constraint]:
    """Deduce what we can from a collection of constraint lists.

    It's a given that at least one of the lists must be satisfied. A
    None element in the list of options represents an unsatisfiable
    constraint and is ignored. Ignore empty constraint lists if eager
    is true -- they are always trivially satisfiable.
    """
    if eager:
        # Drop both None (unsatisfiable) and [] (trivially satisfiable) options.
        valid_options = [option for option in options if option]
    else:
        valid_options = [option for option in options if option is not None]
    if not valid_options:
        return []
    if len(valid_options) == 1:
        return valid_options[0]
    if all(is_same_constraints(valid_options[0], c) for c in valid_options[1:]):
        # Multiple sets of constraints that are all the same. Just pick any one of them.
        return valid_options[0]
    if all(is_similar_constraints(valid_options[0], c) for c in valid_options[1:]):
        # All options have same structure. In this case we can merge-in trivial
        # options (i.e. those that only have Any) and try again.
        # TODO: More generally, if a given (variable, direction) pair appears in
        # every option, combine the bounds with meet/join always, not just for Any.
        trivial_options = select_trivial(valid_options)
        if trivial_options and len(trivial_options) < len(valid_options):
            merged_options = []
            for option in valid_options:
                if option in trivial_options:
                    continue
                # Widen targets of the non-trivial options with Any, so that the
                # trivial (all-Any) options become subsumed by them.
                merged_options.append([merge_with_any(c) for c in option])
            return any_constraints(list(merged_options), eager=eager)
    # If normal logic didn't work, try excluding trivially unsatisfiable constraint (due to
    # upper bounds) from each option, and comparing them again.
    filtered_options = [filter_satisfiable(o) for o in options]
    if filtered_options != options:
        return any_constraints(filtered_options, eager=eager)
    # Try harder: if that didn't work, try to strip typevars that aren't meta vars.
    # Note this is what we would always do, but unfortunately some callers may not
    # set the meta var status correctly (for historical reasons), so we use this as
    # a fallback only.
    filtered_options = [exclude_non_meta_vars(o) for o in options]
    if filtered_options != options:
        return any_constraints(filtered_options, eager=eager)
    # Otherwise, there are either no valid options or multiple, inconsistent valid
    # options. Give up and deduce nothing.
    return []
def filter_satisfiable(option: list[Constraint] | None) -> list[Constraint] | None:
    """Keep only constraints that can possibly be satisfied.

    Currently, we filter out constraints where target is not a subtype of the upper bound.
    Since those can be never satisfied. We may add more cases in future if it improves type
    inference.
    """
    if not option:
        # None or empty: nothing to filter.
        return option
    kept: list[Constraint] = []
    for constraint in option:
        origin = constraint.origin_type_var
        if isinstance(origin, TypeVarType) and origin.values:
            # A values-constrained TypeVar: the target must match one of the
            # explicitly listed values.
            if any(mypy.subtypes.is_subtype(constraint.target, value) for value in origin.values):
                kept.append(constraint)
        elif mypy.subtypes.is_subtype(constraint.target, origin.upper_bound):
            kept.append(constraint)
    return kept or None
def exclude_non_meta_vars(option: list[Constraint] | None) -> list[Constraint] | None:
    """Strip constraints on non-meta type variables from an option."""
    # If we had an empty list (or None), keep it intact.
    if not option:
        return option
    # However, if none of the constraints actually references meta vars, better
    # remove this option entirely.
    meta_only = [c for c in option if c.type_var.is_meta_var()]
    return meta_only if meta_only else None
def is_same_constraints(x: list[Constraint], y: list[Constraint]) -> bool:
    """Are the two constraint lists equal as sets (up to is_same_constraint)?"""
    if not all(any(is_same_constraint(cx, cy) for cy in y) for cx in x):
        return False
    return all(any(is_same_constraint(cy, cx) for cx in x) for cy in y)
def is_same_constraint(c1: Constraint, c2: Constraint) -> bool:
    """Do the two constraints bind the same variable to the same target?"""
    # Ignore direction when comparing constraints against Any.
    both_targets_any = isinstance(get_proper_type(c1.target), AnyType) and isinstance(
        get_proper_type(c2.target), AnyType
    )
    if c1.type_var != c2.type_var:
        return False
    if c1.op != c2.op and not both_targets_any:
        return False
    return mypy.subtypes.is_same_type(c1.target, c2.target)
def is_similar_constraints(x: list[Constraint], y: list[Constraint]) -> bool:
    """Check that two lists of constraints have similar structure.

    This means that each list has same type variable plus direction pairs (i.e we
    ignore the target). Except for constraints where target is Any type, there
    we ignore direction as well.
    """
    # Check both directions: each one-sided check only verifies that the first
    # list is "covered" by the second.
    return _is_similar_constraints(x, y) and _is_similar_constraints(y, x)
def _is_similar_constraints(x: list[Constraint], y: list[Constraint]) -> bool:
    """Check that every constraint in the first list has a similar one in the second.

    See docstring above for definition of similarity.
    """

    def similar(a: Constraint, b: Constraint) -> bool:
        # Ignore direction when either constraint is against Any.
        either_target_any = isinstance(get_proper_type(a.target), AnyType) or isinstance(
            get_proper_type(b.target), AnyType
        )
        return a.type_var == b.type_var and (a.op == b.op or either_target_any)

    return all(any(similar(c1, c2) for c2 in y) for c1 in x)
def simplify_away_incomplete_types(types: Iterable[Type]) -> list[Type]:
    """Drop incomplete types from ``types`` if at least one complete type remains.

    The parameter is annotated as an arbitrary iterable, so materialize it
    exactly once up front: otherwise a one-shot iterator would be consumed by
    the filtering pass, and the fallback ``list(types)`` would silently return
    an empty list.
    """
    all_types = list(types)
    complete = [typ for typ in all_types if is_complete_type(typ)]
    return complete if complete else all_types
def is_complete_type(typ: Type) -> bool:
    """Is a type complete?

    A complete doesn't have uninhabited type components or (when not in strict
    optional mode) None components.
    """
    # Delegate the recursive component check to the CompleteTypeVisitor query.
    return typ.accept(CompleteTypeVisitor())
| Constraint |
python | pexpect__pexpect | pexpect/fdpexpect.py | {
"start": 1502,
"end": 5991
} | class ____(SpawnBase):
'''This is like pexpect.spawn but allows you to supply your own open file
descriptor. For example, you could use it to read through a file looking
for patterns, or to control a modem or serial device. '''
def __init__ (self, fd, args=None, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, encoding=None, codec_errors='strict', use_poll=False):
'''This takes a file descriptor (an int) or an object that support the
fileno() method (returning an int). All Python file-like objects
support fileno(). '''
if type(fd) != type(0) and hasattr(fd, 'fileno'):
fd = fd.fileno()
if type(fd) != type(0):
raise ExceptionPexpect('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')
try: # make sure fd is a valid file descriptor
os.fstat(fd)
except OSError:
raise ExceptionPexpect('The fd argument is not a valid file descriptor.')
self.args = None
self.command = None
SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
encoding=encoding, codec_errors=codec_errors)
self.child_fd = fd
self.own_fd = False
self.closed = False
self.name = '<file descriptor %d>' % fd
self.use_poll = use_poll
def close (self):
"""Close the file descriptor.
Calling this method a second time does nothing, but if the file
descriptor was closed elsewhere, :class:`OSError` will be raised.
"""
if self.child_fd == -1:
return
self.flush()
os.close(self.child_fd)
self.child_fd = -1
self.closed = True
def isalive (self):
'''This checks if the file descriptor is still valid. If :func:`os.fstat`
does not raise an exception then we assume it is alive. '''
if self.child_fd == -1:
return False
try:
os.fstat(self.child_fd)
return True
except:
return False
def terminate (self, force=False): # pragma: no cover
'''Deprecated and invalid. Just raises an exception.'''
raise ExceptionPexpect('This method is not valid for file descriptors.')
# These four methods are left around for backwards compatibility, but not
# documented as part of fdpexpect. You're encouraged to use os.write
# directly.
def send(self, s):
"Write to fd, return number of bytes written"
s = self._coerce_send_string(s)
self._log(s, 'send')
b = self._encoder.encode(s, final=False)
return os.write(self.child_fd, b)
def sendline(self, s):
"Write to fd with trailing newline, return number of bytes written"
s = self._coerce_send_string(s)
return self.send(s + self.linesep)
def write(self, s):
"Write to fd, return None"
self.send(s)
def writelines(self, sequence):
"Call self.write() for each item in sequence"
for s in sequence:
self.write(s)
def read_nonblocking(self, size=1, timeout=-1):
"""
Read from the file descriptor and return the result as a string.
The read_nonblocking method of :class:`SpawnBase` assumes that a call
to os.read will not block (timeout parameter is ignored). This is not
the case for POSIX file-like objects such as sockets and serial ports.
Use :func:`select.select`, timeout is implemented conditionally for
POSIX systems.
:param int size: Read at most *size* bytes.
:param int timeout: Wait timeout seconds for file descriptor to be
ready to read. When -1 (default), use self.timeout. When 0, poll.
:return: String containing the bytes read
"""
if os.name == 'posix':
if timeout == -1:
timeout = self.timeout
rlist = [self.child_fd]
wlist = []
xlist = []
if self.use_poll:
rlist = poll_ignore_interrupts(rlist, timeout)
else:
rlist, wlist, xlist = select_ignore_interrupts(
rlist, wlist, xlist, timeout
)
if self.child_fd not in rlist:
raise TIMEOUT('Timeout exceeded.')
return super(fdspawn, self).read_nonblocking(size)
| fdspawn |
python | numba__numba | numba/cuda/simulator/kernel.py | {
"start": 1305,
"end": 1522
} | class ____(dict):
def __getitem__(self, key):
# Always return a fake overload for any signature, as we don't keep
# track of overloads in the simulator.
return FakeOverload()
| FakeOverloadDict |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 6314,
"end": 6681
} | class ____(TwitterClientHandler):
@gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
if user is None:
raise Exception("user is None")
self.finish(user)
return
yield self.authorize_redirect()
| TwitterClientLoginHandler |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/test_component_scaffolding.py | {
"start": 873,
"end": 1096
} | class ____(dg.Scaffolder[TestParamsModelWithoutDefaults]):
@classmethod
def get_scaffold_params(cls) -> type[TestParamsModelWithoutDefaults]:
return TestParamsModelWithoutDefaults
| TestScaffolderWithoutDefaults |
python | ipython__ipython | IPython/extensions/deduperreload/deduperreload.py | {
"start": 5554,
"end": 25277
} | class ____(DeduperReloaderPatchingMixin):
"""
This version of autoreload detects when we can leverage targeted recompilation of a subset of a module and patching
existing function/method objects to reflect these changes.
Detects what functions/methods can be reloaded by recursively comparing the old/new AST of module-level classes,
module-level classes' methods, recursing through nested classes' methods. If other changes are made, original
autoreload algorithm is called directly.
"""
def __init__(self) -> None:
self._to_autoreload: AutoreloadTree = AutoreloadTree()
self.source_by_modname: dict[str, str] = {}
self.dependency_graph: dict[tuple[str, ...], list[DependencyNode]] = {}
self._enabled = True
@property
def enabled(self) -> bool:
return self._enabled and platform.python_implementation() == "CPython"
@enabled.setter
def enabled(self, value: bool) -> None:
self._enabled = value
def update_sources(self) -> None:
"""
Update dictionary source_by_modname with current modules' source codes.
"""
if not self.enabled:
return
for new_modname in sys.modules.keys() - self.source_by_modname.keys():
new_module = sys.modules[new_modname]
if (
(fname := get_module_file_name(new_module)) is None
or "site-packages" in fname
or "dist-packages" in fname
or not os.access(fname, os.R_OK)
):
self.source_by_modname[new_modname] = ""
continue
with open(fname, "r") as f:
try:
self.source_by_modname[new_modname] = f.read()
except Exception:
self.source_by_modname[new_modname] = ""
constexpr_detector = ConstexprDetector()
@staticmethod
def is_enum_subclass(node: ast.Module | ast.ClassDef) -> bool:
if isinstance(node, ast.Module):
return False
for base in node.bases:
if isinstance(base, ast.Name) and base.id == "Enum":
return True
elif (
isinstance(base, ast.Attribute)
and base.attr == "Enum"
and isinstance(base.value, ast.Name)
and base.value.id == "enum"
):
return True
return False
@classmethod
def is_constexpr_assign(
cls, node: ast.AST, parent_node: ast.Module | ast.ClassDef
) -> bool:
if not isinstance(node, (ast.Assign, ast.AnnAssign)) or node.value is None:
return False
if cls.is_enum_subclass(parent_node):
return False
for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
if not isinstance(target, ast.Name):
return False
return cls.constexpr_detector(node.value)
@classmethod
def _gather_children(
cls, body: list[ast.stmt], parent_node: ast.Module | ast.ClassDef
) -> GatherResult:
"""
Given list of ast elements, return:
1. dict mapping function names to their ASTs.
2. dict mapping class names to their ASTs.
3. list of any other ASTs.
"""
result = GatherResult.create()
for ast_node in body:
ast_elt: ast.expr | ast.stmt = ast_node
while isinstance(ast_elt, ast.Expr):
ast_elt = ast_elt.value
if isinstance(ast_elt, (ast.FunctionDef, ast.AsyncFunctionDef)):
result.function_defs.append(((ast_elt.name,), ast_elt))
elif isinstance(ast_elt, (ast.Import, ast.ImportFrom)):
result.import_defs.append(
(tuple(name.asname or name.name for name in ast_elt.names), ast_elt)
)
elif isinstance(ast_elt, ast.ClassDef):
result.classes[ast_elt.name] = ast_elt
elif isinstance(ast_elt, ast.If):
result.unfixable.append(ast_elt.test)
result.inplace_merge(cls._gather_children(ast_elt.body, parent_node))
result.inplace_merge(cls._gather_children(ast_elt.orelse, parent_node))
elif isinstance(ast_elt, (ast.AsyncWith, ast.With)):
result.unfixable.extend(ast_elt.items)
result.inplace_merge(cls._gather_children(ast_elt.body, parent_node))
elif isinstance(ast_elt, ast.Try):
result.inplace_merge(cls._gather_children(ast_elt.body, parent_node))
result.inplace_merge(cls._gather_children(ast_elt.orelse, parent_node))
result.inplace_merge(
cls._gather_children(ast_elt.finalbody, parent_node)
)
for handler in ast_elt.handlers:
if handler.type is not None:
result.unfixable.append(handler.type)
result.inplace_merge(
cls._gather_children(handler.body, parent_node)
)
elif not isinstance(ast_elt, (ast.Constant, ast.Pass)):
if cls.is_constexpr_assign(ast_elt, parent_node):
assert isinstance(ast_elt, (ast.Assign, ast.AnnAssign))
targets = (
ast_elt.targets
if isinstance(ast_elt, ast.Assign)
else [ast_elt.target]
)
result.assign_defs.append(
(
tuple(cast(ast.Name, target).id for target in targets),
ast_elt,
)
)
else:
result.unfixable.append(ast_elt)
return result
def detect_autoreload(
self,
old_node: ast.Module | ast.ClassDef,
new_node: ast.Module | ast.ClassDef,
prefixes: list[str] | None = None,
) -> bool:
"""
Returns
-------
`True` if we can run our targeted autoreload algorithm safely.
`False` if we should instead use IPython's original autoreload implementation.
"""
if not self.enabled:
return False
prefixes = prefixes or []
old_result = self._gather_children(old_node.body, old_node)
new_result = self._gather_children(new_node.body, new_node)
old_defs_by_name: dict[str, ast.AST] = {
name: ast_def for names, ast_def in old_result.all_defs() for name in names
}
new_defs_by_name: dict[str, ast.AST] = {
name: ast_def for names, ast_def in new_result.all_defs() for name in names
}
if not compare_ast(old_result.unfixable, new_result.unfixable):
return False
cur = self._to_autoreload.traverse_prefixes(prefixes)
for names, new_ast_def in new_result.all_defs():
names_to_reload = []
for name in names:
if new_defs_by_name[name] is not new_ast_def:
continue
if name not in old_defs_by_name or not compare_ast(
new_ast_def, old_defs_by_name[name]
):
names_to_reload.append(name)
if names_to_reload:
cur.defs_to_reload.append((tuple(names), new_ast_def))
cur.defs_to_delete |= set(old_defs_by_name.keys()) - set(
new_defs_by_name.keys()
)
for name, new_ast_def_class in new_result.classes.items():
if name not in old_result.classes:
cur.new_nested_classes[name] = new_ast_def_class
elif not compare_ast(
new_ast_def_class, old_result.classes[name]
) and not self.detect_autoreload(
old_result.classes[name], new_ast_def_class, prefixes + [name]
):
return False
return True
def _check_dependents(self) -> bool:
"""
If a decorator function is modified, we should similarly reload the functions which are decorated by this
decorator. Iterate through the Dependency Graph to find such cases in the given AutoreloadTree.
"""
for node in self._check_dependents_inner():
self._add_node_to_autoreload_tree(node)
return True
def _add_node_to_autoreload_tree(self, node: DependencyNode) -> None:
"""
Given a node of the dependency graph, add decorator dependencies to the autoreload tree.
"""
if len(node.qualified_name) == 0:
return
cur = self._to_autoreload.traverse_prefixes(list(node.qualified_name[:-1]))
if node.abstract_syntax_tree is not None:
cur.defs_to_reload.append(
((node.qualified_name[-1],), node.abstract_syntax_tree)
)
def _check_dependents_inner(
self, prefixes: list[str] | None = None
) -> list[DependencyNode]:
prefixes = prefixes or []
cur = self._to_autoreload.traverse_prefixes(prefixes)
ans = []
for (func_name, *_), _ in cur.defs_to_reload:
node = tuple(prefixes + [func_name])
ans.extend(self._gen_dependents(node))
for class_name in cur.new_nested_classes:
ans.extend(self._check_dependents_inner(prefixes + [class_name]))
return ans
def _gen_dependents(self, qualname: tuple[str, ...]) -> list[DependencyNode]:
ans = []
if qualname not in self.dependency_graph:
return []
for elt in self.dependency_graph[qualname]:
ans.extend(self._gen_dependents(elt.qualified_name))
ans.append(elt)
return ans
def _patch_namespace_inner(
self, ns: ModuleType | type, prefixes: list[str] | None = None
) -> bool:
"""
This function patches module functions and methods. Specifically, only objects with their name in
self.to_autoreload will be considered for patching. If an object has been marked to be autoreloaded,
new_source_code gets executed in the old version's global environment. Then, replace the old function's
attributes with the new function's attributes.
"""
prefixes = prefixes or []
cur = self._to_autoreload.traverse_prefixes(prefixes)
namespace_to_check = ns
for prefix in prefixes:
namespace_to_check = namespace_to_check.__dict__[prefix]
seen_names: set[str] = set()
for names, new_ast_def in cur.defs_to_reload:
if len(names) == 1 and names[0] in seen_names:
continue
seen_names.update(names)
local_env: dict[str, Any] = {}
if (
isinstance(new_ast_def, (ast.FunctionDef, ast.AsyncFunctionDef))
and (name := names[0]) in namespace_to_check.__dict__
):
assert len(names) == 1
to_patch_to = namespace_to_check.__dict__[name]
if isinstance(to_patch_to, (staticmethod, classmethod)):
to_patch_to = to_patch_to.__func__
# exec new source code using old function's (obj) globals environment.
func_code = textwrap.dedent(ast.unparse(new_ast_def))
if is_method := (len(prefixes) > 0):
func_code = "class __autoreload_class__:\n" + textwrap.indent(
func_code, " "
)
global_env = ns.__dict__
if not isinstance(global_env, dict):
global_env = dict(global_env)
# Compile with correct filename to preserve in traceback
filename = (
getattr(to_patch_to, "__code__", None)
and to_patch_to.__code__.co_filename
or "<string>"
)
func_asts = [ast.parse(func_code)]
if len(cast(ast.FunctionDef, func_asts[0].body[0]).decorator_list) > 0:
without_decorator_list = pickle.loads(pickle.dumps(func_asts[0]))
cast(
ast.FunctionDef, without_decorator_list.body[0]
).decorator_list = []
func_asts.insert(0, without_decorator_list)
for func_ast in func_asts:
compiled_code = compile(
func_ast, filename, mode="exec", dont_inherit=True
)
exec(compiled_code, global_env, local_env) # type: ignore[arg-type]
# local_env contains the function exec'd from new version of function
if is_method:
to_patch_from = getattr(local_env["__autoreload_class__"], name)
else:
to_patch_from = local_env[name]
if isinstance(to_patch_from, (staticmethod, classmethod)):
to_patch_from = to_patch_from.__func__
if isinstance(to_patch_to, property) and isinstance(
to_patch_from, property
):
for attr in ("fget", "fset", "fdel"):
if (
getattr(to_patch_to, attr) is None
or getattr(to_patch_from, attr) is None
):
self.try_patch_attr(to_patch_to, to_patch_from, attr)
else:
self.patch_function(
getattr(to_patch_to, attr),
getattr(to_patch_from, attr),
is_method,
)
elif not isinstance(to_patch_to, property) and not isinstance(
to_patch_from, property
):
self.patch_function(to_patch_to, to_patch_from, is_method)
else:
raise ValueError(
"adding or removing property decorations not supported"
)
else:
exec(
ast.unparse(new_ast_def),
ns.__dict__ | namespace_to_check.__dict__,
local_env,
)
for name in names:
setattr(namespace_to_check, name, local_env[name])
cur.defs_to_reload.clear()
for name in cur.defs_to_delete:
try:
delattr(namespace_to_check, name)
except (AttributeError, TypeError, ValueError):
# give up on deleting the attribute, let the stale one dangle
pass
cur.defs_to_delete.clear()
for class_name, class_ast_node in cur.new_nested_classes.items():
local_env_class: dict[str, Any] = {}
exec(
ast.unparse(class_ast_node),
ns.__dict__ | namespace_to_check.__dict__,
local_env_class,
)
setattr(namespace_to_check, class_name, local_env_class[class_name])
cur.new_nested_classes.clear()
for class_name in cur.children.keys():
if not self._patch_namespace(ns, prefixes + [class_name]):
return False
cur.children.clear()
return True
def _patch_namespace(
self, ns: ModuleType | type, prefixes: list[str] | None = None
) -> bool:
"""
Wrapper for patching all elements in a namespace as specified by the to_autoreload member variable.
Returns `true` if patching was successful, and `false` if unsuccessful.
"""
try:
return self._patch_namespace_inner(ns, prefixes=prefixes)
except Exception:
return False
def maybe_reload_module(self, module: ModuleType) -> bool:
"""
Uses Deduperreload to try to update a module.
Returns `true` on success and `false` on failure.
"""
if not self.enabled:
return False
if not (modname := getattr(module, "__name__", None)):
return False
if (fname := get_module_file_name(module)) is None:
return False
with open(fname, "r") as f:
new_source_code = f.read()
patched_flag = False
if old_source_code := self.source_by_modname.get(modname):
# get old/new module ast
try:
old_module_ast = ast.parse(old_source_code)
new_module_ast = ast.parse(new_source_code)
except Exception:
return False
# detect if we are able to use our autoreload algorithm
ctx = contextlib.suppress()
with ctx:
self._build_dependency_graph(new_module_ast)
if (
self.detect_autoreload(old_module_ast, new_module_ast)
and self._check_dependents()
and self._patch_namespace(module)
):
patched_flag = True
self.source_by_modname[modname] = new_source_code
self._to_autoreload = AutoreloadTree()
return patched_flag
def _separate_name(
self,
decorator: ast.Attribute | ast.Name | ast.Call | ast.expr,
accept_calls: bool,
) -> list[str] | None:
"""
Generates a qualified name for a given decorator by finding its relative namespace.
"""
if isinstance(decorator, ast.Name):
return [decorator.id]
elif isinstance(decorator, ast.Call):
if accept_calls:
return self._separate_name(decorator.func, False)
else:
return None
if not isinstance(decorator, ast.Attribute):
return None
if pref := self._separate_name(decorator.value, False):
return pref + [decorator.attr]
else:
return None
def _gather_dependents(
self, body: list[ast.stmt], body_prefixes: list[str] | None = None
) -> bool:
body_prefixes = body_prefixes or []
for ast_node in body:
ast_elt: ast.expr | ast.stmt = ast_node
if isinstance(ast_elt, ast.ClassDef):
self._gather_dependents(ast_elt.body, body_prefixes + [ast_elt.name])
continue
if not isinstance(ast_elt, (ast.FunctionDef, ast.AsyncFunctionDef)):
continue
qualified_name = tuple(body_prefixes + [ast_elt.name])
cur_dependency_node = DependencyNode(qualified_name, ast_elt)
for decorator in ast_elt.decorator_list:
decorator_path = self._separate_name(decorator, True)
if not decorator_path:
continue
decorator_path_tuple = tuple(decorator_path)
self.dependency_graph.setdefault(decorator_path_tuple, []).append(
cur_dependency_node
)
return True
def _build_dependency_graph(self, new_ast: ast.Module | ast.ClassDef) -> bool:
"""
Wrapper function for generating dependency graph given some AST.
Returns `true` on success. Returns `false` on failure.
Currently, only returns `true` as we do not block on failure to build this graph.
"""
return self._gather_dependents(new_ast.body)
| DeduperReloader |
python | fluentpython__example-code | 12-inheritance/diamond.py | {
"start": 0,
"end": 60
} | class ____:
def ping(self):
print('ping:', self)
| A |
python | kubernetes-client__python | kubernetes/base/config/incluster_config_test.py | {
"start": 1327,
"end": 5971
} | class ____(unittest.TestCase):
def setUp(self):
self._temp_files = []
def tearDown(self):
for f in self._temp_files:
os.remove(f)
def _create_file_with_temp_content(self, content=""):
handler, name = tempfile.mkstemp()
self._temp_files.append(name)
os.write(handler, str.encode(content))
os.close(handler)
return name
def get_test_loader(self,
token_filename=None,
cert_filename=None,
environ=_TEST_ENVIRON):
if not token_filename:
token_filename = self._create_file_with_temp_content(_TEST_TOKEN)
if not cert_filename:
cert_filename = self._create_file_with_temp_content(_TEST_CERT)
return InClusterConfigLoader(token_filename=token_filename,
cert_filename=cert_filename,
try_refresh_token=True,
environ=environ)
def test_join_host_port(self):
self.assertEqual(_TEST_HOST_PORT,
_join_host_port(_TEST_HOST, _TEST_PORT))
self.assertEqual(_TEST_IPV6_HOST_PORT,
_join_host_port(_TEST_IPV6_HOST, _TEST_PORT))
def test_load_config(self):
cert_filename = self._create_file_with_temp_content(_TEST_CERT)
loader = self.get_test_loader(cert_filename=cert_filename)
loader._load_config()
self.assertEqual("https://" + _TEST_HOST_PORT, loader.host)
self.assertEqual(cert_filename, loader.ssl_ca_cert)
self.assertEqual('bearer ' + _TEST_TOKEN, loader.token)
def test_refresh_token(self):
loader = self.get_test_loader()
config = Configuration()
loader.load_and_set(config)
self.assertEqual('bearer ' + _TEST_TOKEN,
config.get_api_key_with_prefix('authorization'))
self.assertEqual('bearer ' + _TEST_TOKEN, loader.token)
self.assertIsNotNone(loader.token_expires_at)
old_token = loader.token
old_token_expires_at = loader.token_expires_at
loader._token_filename = self._create_file_with_temp_content(
_TEST_NEW_TOKEN)
self.assertEqual('bearer ' + _TEST_TOKEN,
config.get_api_key_with_prefix('authorization'))
loader.token_expires_at = datetime.datetime.now()
self.assertEqual('bearer ' + _TEST_NEW_TOKEN,
config.get_api_key_with_prefix('authorization'))
self.assertEqual('bearer ' + _TEST_NEW_TOKEN, loader.token)
self.assertGreater(loader.token_expires_at, old_token_expires_at)
def _should_fail_load(self, config_loader, reason):
try:
config_loader.load_and_set()
self.fail("Should fail because %s" % reason)
except ConfigException:
# expected
pass
def test_no_port(self):
loader = self.get_test_loader(
environ={SERVICE_HOST_ENV_NAME: _TEST_HOST})
self._should_fail_load(loader, "no port specified")
def test_empty_port(self):
loader = self.get_test_loader(environ={
SERVICE_HOST_ENV_NAME: _TEST_HOST,
SERVICE_PORT_ENV_NAME: ""
})
self._should_fail_load(loader, "empty port specified")
def test_no_host(self):
loader = self.get_test_loader(
environ={SERVICE_PORT_ENV_NAME: _TEST_PORT})
self._should_fail_load(loader, "no host specified")
def test_empty_host(self):
loader = self.get_test_loader(environ={
SERVICE_HOST_ENV_NAME: "",
SERVICE_PORT_ENV_NAME: _TEST_PORT
})
self._should_fail_load(loader, "empty host specified")
def test_no_cert_file(self):
loader = self.get_test_loader(cert_filename="not_exists_file_1123")
self._should_fail_load(loader, "cert file does not exist")
def test_empty_cert_file(self):
loader = self.get_test_loader(
cert_filename=self._create_file_with_temp_content())
self._should_fail_load(loader, "empty cert file provided")
def test_no_token_file(self):
loader = self.get_test_loader(token_filename="not_exists_file_1123")
self._should_fail_load(loader, "token file does not exist")
def test_empty_token_file(self):
loader = self.get_test_loader(
token_filename=self._create_file_with_temp_content())
self._should_fail_load(loader, "empty token file provided")
if __name__ == '__main__':
unittest.main()
| InClusterConfigTest |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 41831,
"end": 42884
} | class ____(JITTestMixin):
def test_emit_assembly(self):
"""Test TargetMachineRef.emit_assembly()"""
target_machine = self.target_machine(jit=True)
mod = self.module()
ee = self.jit(mod, target_machine) # noqa F841 # Keeps pointers alive
raw_asm = target_machine.emit_assembly(mod)
self.assertIn("sum", raw_asm)
target_machine.set_asm_verbosity(True)
raw_asm_verbose = target_machine.emit_assembly(mod)
self.assertIn("sum", raw_asm)
self.assertNotEqual(raw_asm, raw_asm_verbose)
def test_emit_object(self):
"""Test TargetMachineRef.emit_object()"""
target_machine = self.target_machine(jit=True)
mod = self.module()
ee = self.jit(mod, target_machine) # noqa F841 # Keeps pointers alive
code_object = target_machine.emit_object(mod)
self.assertIsInstance(code_object, bytes)
if sys.platform.startswith('linux'):
# Sanity check
self.assertIn(b"ELF", code_object[:10])
| JITWithTMTestMixin |
python | modin-project__modin | modin/experimental/core/io/sql/utils.py | {
"start": 7290,
"end": 7413
} | class ____(Exception):
"""Exception that should be raised if invalid arguments combination was found."""
| InvalidArguments |
python | django__django | tests/multiple_database/tests.py | {
"start": 77957,
"end": 79782
} | class ____(TestCase):
databases = {"default", "other"}
fixtures = ["multidb-common", "multidb"]
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_fixture_loading(self):
"Multi-db fixtures are loaded correctly"
# "Pro Django" exists on the default database, but not on other
# database
Book.objects.get(title="Pro Django")
Book.objects.using("default").get(title="Pro Django")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using("other").get(title="Pro Django")
# "Dive into Python" exists on the default database, but not on other
# database
Book.objects.using("other").get(title="Dive into Python")
with self.assertRaises(Book.DoesNotExist):
Book.objects.get(title="Dive into Python")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using("default").get(title="Dive into Python")
# "Definitive Guide" exists on the both databases
Book.objects.get(title="The Definitive Guide to Django")
Book.objects.using("default").get(title="The Definitive Guide to Django")
Book.objects.using("other").get(title="The Definitive Guide to Django")
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_pseudo_empty_fixtures(self):
"""
A fixture can contain entries, but lead to nothing in the database;
this shouldn't raise an error (#14068).
"""
new_io = StringIO()
management.call_command("loaddata", "pets", stdout=new_io, stderr=new_io)
command_output = new_io.getvalue().strip()
# No objects will actually be loaded
self.assertEqual(
command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)"
)
| FixtureTestCase |
python | mlflow__mlflow | mlflow/tracing/processor/otel_metrics_mixin.py | {
"start": 921,
"end": 4825
} | class ____:
"""
Mixin class that provides metrics recording capabilities for span processors.
This mixin is designed to be used with OpenTelemetry span processors to record
span-related metrics (e.g. duration) and metadata.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize the mixin and pass through to parent classes."""
super().__init__(*args, **kwargs)
self._duration_histogram = None
self._trace_manager = InMemoryTraceManager.get_instance()
def _setup_metrics_if_necessary(self) -> None:
"""
Set up OpenTelemetry metrics if not already configured previously.
"""
if self._duration_histogram is not None:
return
endpoint = _get_otlp_metrics_endpoint()
if not endpoint:
return
protocol = _get_otlp_metrics_protocol()
if protocol == "grpc":
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
OTLPMetricExporter,
)
elif protocol == "http/protobuf":
from opentelemetry.exporter.otlp.proto.http.metric_exporter import (
OTLPMetricExporter,
)
else:
_logger.warning(
f"Unsupported OTLP metrics protocol '{protocol}'. "
"Supported protocols are 'grpc' and 'http/protobuf'. "
"Metrics export will be skipped."
)
return
metric_exporter = OTLPMetricExporter(endpoint=endpoint)
reader = PeriodicExportingMetricReader(metric_exporter)
provider = MeterProvider(metric_readers=[reader])
metrics.set_meter_provider(provider)
meter = metrics.get_meter("mlflow.tracing")
self._duration_histogram = meter.create_histogram(
name="mlflow.trace.span.duration",
description="Duration of spans in milliseconds",
unit="ms",
)
def record_metrics_for_span(self, span: OTelReadableSpan) -> None:
"""
Record metrics for a completed span.
This method should be called at the beginning of the on_end() method
to record span duration and associated metadata.
Args:
span: The completed OpenTelemetry span to record metrics for.
"""
self._setup_metrics_if_necessary()
if self._duration_histogram is None:
return
span_type = span.attributes.get(SpanAttributeKey.SPAN_TYPE, SpanType.UNKNOWN)
try:
# Span attributes are JSON encoded by default; decode them for metric label readability
span_type = json.loads(span_type)
except (json.JSONDecodeError, TypeError):
pass
attributes = {
"root": span.parent is None,
"span_type": span_type,
"span_status": span.status.status_code.name if span.status else "UNSET",
"experiment_id": get_experiment_id_for_trace(span),
}
# Add trace tags and metadata if trace is available
# Get MLflow trace ID from OpenTelemetry trace ID
mlflow_trace_id = self._trace_manager.get_mlflow_trace_id_from_otel_id(
span.context.trace_id
)
if mlflow_trace_id is not None:
with self._trace_manager.get_trace(mlflow_trace_id) as trace:
if trace is not None:
for key, value in trace.info.tags.items():
attributes[f"tags.{key}"] = str(value)
if trace.info.trace_metadata:
for meta_key, meta_value in trace.info.trace_metadata.items():
attributes[f"metadata.{meta_key}"] = str(meta_value)
self._duration_histogram.record(
amount=(span.end_time - span.start_time) / 1e6, attributes=attributes
)
| OtelMetricsMixin |
python | google__pytype | pytype/pytd/optimize.py | {
"start": 23430,
"end": 29120
} | class ____(TypeParameterScope):
"""Remove all function type parameters in a union with a class type param.
For example, this will change
class A(typing.Generic(T)):
def append(self, Union[T, T2]) -> T2
to
class A(typing.Generic(T)):
def append(self, T) -> T
.
Use this visitor after using AbsorbMutableParameters.
As another example, the combination of AbsorbMutableParameters and
MergeTypeParameters transforms
class list(typing.Generic(T)):
def append(self, v: T2) -> NoneType:
self = Union[T, T2]
to
class list(typing.Generic(T')):
def append(self, V:T') -> NoneType
by creating a *new* template variable T' that propagates the
mutations to the outermost level (in this example, T' = Union[T, T2])
"""
def __init__(self):
super().__init__()
self.type_param_union = None
def _AppendNew(self, l1, l2):
"""Appends all items to l1 that are not in l2."""
# l1 and l2 are small (2-3 elements), so just use two loops.
for e2 in l2:
if not any(e1 is e2 for e1 in l1):
l1.append(e2)
def EnterSignature(self, sig):
# Necessary because TypeParameterScope also defines this function
super().EnterSignature(sig)
assert self.type_param_union is None
self.type_param_union = collections.defaultdict(list)
def LeaveSignature(self, node):
# Necessary because TypeParameterScope also defines this function
super().LeaveSignature(node)
self.type_param_union = None
def VisitUnionType(self, u):
type_params = [t for t in u.type_list if isinstance(t, pytd.TypeParameter)]
for t in type_params:
if self.IsFunctionTypeParameter(t):
self._AppendNew(self.type_param_union[t.name], type_params)
return u
def _AllContaining(self, type_param, seen=None):
"""Gets all type parameters that are in a union with the passed one."""
seen = seen or set()
result = [type_param]
for other in self.type_param_union[type_param.name]:
if other in seen:
continue # break cycles
seen.add(other)
self._AppendNew(result, self._AllContaining(other, seen) or [other])
return result
def _ReplaceByOuterIfNecessary(self, item, substitutions):
"""Potentially replace a function type param with a class type param.
Args:
item: A pytd.TemplateItem
substitutions: A dictionary to update with what we replaced.
Returns:
Either [item] or [].
"""
containing_union = self._AllContaining(item.type_param)
if not containing_union:
return [item]
class_type_parameters = [
type_param
for type_param in containing_union
if self.IsClassTypeParameter(type_param)
]
if class_type_parameters:
substitutions[item.type_param] = pytd_utils.JoinTypes(
class_type_parameters
)
return []
else:
# It's a function type parameter that appears in a union with other
# function type parameters.
return [item]
def VisitSignature(self, sig):
new_template = []
substitutions = {k: k for k in self.type_params_stack[-1]}
for item in sig.template:
new_template += self._ReplaceByOuterIfNecessary(item, substitutions)
if sig.template == new_template:
return sig # Nothing changed.
else:
return (
sig.Replace(template=tuple(new_template))
.Visit(visitors.ReplaceTypeParameters(substitutions))
.Visit(SimplifyUnions())
)
def Optimize(
node,
deps=None,
lossy=False,
use_abcs=False,
max_union=7,
remove_mutable=False,
can_do_lookup=True,
):
"""Optimize a PYTD tree.
Tries to shrink a PYTD tree by applying various optimizations.
Arguments:
node: A pytd node to be optimized. It won't be modified - this function will
return a new node.
deps: Definitions of all of the external types in node.
lossy: Allow optimizations that change the meaning of the pytd.
use_abcs: Use abstract base classes to represent unions like e.g.
"Union[float, int]" as "Real".
max_union: How many types we allow in a union before we simplify it to just
"object".
remove_mutable: Whether to simplify mutable parameters to normal parameters.
can_do_lookup: True: We're either allowed to try to resolve NamedType
instances in the AST, or the AST is already resolved. False: Skip any
optimizations that would require NamedTypes to be resolved.
Returns:
An optimized node.
"""
node = node.Visit(NormalizeGenericSelfTypes())
node = node.Visit(RemoveDuplicates())
node = node.Visit(SimplifyUnions())
node = node.Visit(CombineReturnsAndExceptions())
node = node.Visit(CombineContainers())
node = node.Visit(SimplifyContainers())
if deps:
superclasses = deps.Visit(visitors.ExtractSuperClassesByName())
superclasses.update(node.Visit(visitors.ExtractSuperClassesByName()))
if use_abcs:
superclasses.update(abc_hierarchy.GetSuperClasses())
hierarchy = SuperClassHierarchy(superclasses)
node = node.Visit(SimplifyUnionsWithSuperclasses(hierarchy))
if lossy:
node = node.Visit(FindCommonSuperClasses(hierarchy))
if max_union:
node = node.Visit(CollapseLongUnions(max_union))
node = node.Visit(AdjustReturnAndConstantGenericType())
if remove_mutable:
node = node.Visit(AbsorbMutableParameters())
node = node.Visit(CombineContainers())
node = node.Visit(MergeTypeParameters())
node = node.Visit(visitors.AdjustSelf())
node = node.Visit(SimplifyContainers())
if deps and can_do_lookup:
node = visitors.LookupClasses(node, deps, ignore_late_types=True)
return node
| MergeTypeParameters |
python | sphinx-doc__sphinx | sphinx/domains/_domains_container.py | {
"start": 997,
"end": 9590
} | class ____:
"""Container for domain instances.
This class is private, including its name, constructor, and all methods.
Any or all of these will change without notice or warning in any release.
The public interface is restricted to:
* the``domains.['<domain-name>']`` mapping interface
* the ``domains.<core-domain-name>`` attributes for core domains.
* the `.get()``, ``.keys()``, ``.items()``, and ``.values()`` methods.
Additionally, this class supports ``iter`` and ``len``,
and provides membership testing via the ``in`` operator.
"""
__slots__ = (
'_domain_instances',
'c_domain',
'changeset_domain',
'citation_domain',
'cpp_domain',
'index_domain',
'javascript_domain',
'math_domain',
'python_domain',
'restructuredtext_domain',
'standard_domain',
)
#: First-party domains in :mod:`sphinx.domains`
_core_domains: Final = frozenset({
'std',
# Language-specific domains
'c',
'cpp',
'js',
'py',
'rst',
# Other core domains
'changeset',
'citation',
'index',
'math',
})
@classmethod
def _from_environment(
cls, env: BuildEnvironment, /, *, registry: SphinxComponentRegistry
) -> Self:
create_domains = registry.create_domains
# Initialise domains
if domains := {domain.name: domain for domain in create_domains(env)}:
return cls(**domains) # type: ignore[arg-type]
return cls._from_environment_default(env=env)
@classmethod
def _from_environment_default(cls, *, env: BuildEnvironment) -> Self:
"""Return a default instance with every domain we require."""
from sphinx.domains.c import CDomain
from sphinx.domains.changeset import ChangeSetDomain
from sphinx.domains.citation import CitationDomain
from sphinx.domains.cpp import CPPDomain
from sphinx.domains.index import IndexDomain
from sphinx.domains.javascript import JavaScriptDomain
from sphinx.domains.math import MathDomain
from sphinx.domains.python import PythonDomain
from sphinx.domains.rst import ReSTDomain
from sphinx.domains.std import StandardDomain
return cls(
c=CDomain(env),
changeset=ChangeSetDomain(env),
citation=CitationDomain(env),
cpp=CPPDomain(env),
index=IndexDomain(env),
js=JavaScriptDomain(env),
math=MathDomain(env),
py=PythonDomain(env),
rst=ReSTDomain(env),
std=StandardDomain(env),
)
def __init__(
self,
*,
c: CDomain,
cpp: CPPDomain,
js: JavaScriptDomain,
py: PythonDomain,
rst: ReSTDomain,
std: StandardDomain,
changeset: ChangeSetDomain,
citation: CitationDomain,
index: IndexDomain,
math: MathDomain,
**domains: Domain,
) -> None:
# All domains, including core.
# Implemented as a dict for backwards compatibility.
self._domain_instances: Mapping[str, Domain] = {
'c': c,
'changeset': changeset,
'citation': citation,
'cpp': cpp,
'index': index,
'js': js,
'math': math,
'py': py,
'rst': rst,
'std': std,
**domains,
}
# Provide typed attributes for the core domains
self.standard_domain: StandardDomain = std
self.c_domain: CDomain = c
self.cpp_domain: CPPDomain = cpp
self.javascript_domain: JavaScriptDomain = js
self.python_domain: PythonDomain = py
self.restructuredtext_domain: ReSTDomain = rst
self.changeset_domain: ChangeSetDomain = changeset
self.citation_domain: CitationDomain = citation
self.index_domain: IndexDomain = index
self.math_domain: MathDomain = math
for domain_name, domain in self._domain_instances.items():
# invariant from ``_DomainsContainer._from_environment``
if domain_name != domain.name:
msg = f'Domain name mismatch in {domain!r}: {domain_name!r} != {domain.name!r}'
raise ValueError(msg)
def _setup(self) -> None:
for domain in self._domain_instances.values():
domain.setup()
def _process_doc(
self, env: BuildEnvironment, docname: str, document: nodes.document
) -> None:
for domain in self._domain_instances.values():
domain.process_doc(env, docname, document)
def _clear_doc(self, docname: str) -> None:
for domain in self._domain_instances.values():
domain.clear_doc(docname)
def _merge_domain_data(
self, docnames: Set[str], domain_data: dict[str, Any]
) -> None:
for domain_name, domain in self._domain_instances.items():
domain.merge_domaindata(docnames, domain_data[domain_name])
def _check_consistency(self) -> None:
for domain in self._domain_instances.values():
domain.check_consistency()
def __contains__(self, key: str) -> bool:
return key in self._domain_instances
def __eq__(self, other: object) -> bool:
if not isinstance(other, _DomainsContainer):
return NotImplemented
return self._domain_instances == other._domain_instances
def __hash__(self) -> int:
return hash(sorted(self._domain_instances.items()))
def __setattr__(self, key: str, value: object) -> None:
if key in self._core_domains:
msg = f'{self.__class__.__name__!r} object does not support assignment to {key!r}'
raise TypeError(msg)
super().__setattr__(key, value)
def __delattr__(self, key: str) -> None:
if key in self._core_domains:
msg = f'{self.__class__.__name__!r} object does not support deletion of {key!r}'
raise TypeError(msg)
super().__delattr__(key)
# Mapping interface: builtin domains
@overload
def __getitem__(self, key: Literal['c']) -> CDomain: ...
@overload
def __getitem__(self, key: Literal['cpp']) -> CPPDomain: ...
@overload
def __getitem__(self, key: Literal['changeset']) -> ChangeSetDomain: ...
@overload
def __getitem__(self, key: Literal['citation']) -> CitationDomain: ...
@overload
def __getitem__(self, key: Literal['index']) -> IndexDomain: ...
@overload
def __getitem__(self, key: Literal['js']) -> JavaScriptDomain: ...
@overload
def __getitem__(self, key: Literal['math']) -> MathDomain: ...
@overload
def __getitem__(self, key: Literal['py']) -> PythonDomain: ...
@overload
def __getitem__(self, key: Literal['rst']) -> ReSTDomain: ...
@overload
def __getitem__(self, key: Literal['std']) -> StandardDomain: ...
# Mapping interface: first-party domains
@overload
def __getitem__(self, key: Literal['duration']) -> DurationDomain: ...
@overload
def __getitem__(self, key: Literal['todo']) -> TodoDomain: ...
# Mapping interface: third-party domains
@overload
def __getitem__(self, key: str) -> Domain: ...
def __getitem__(self, key: str) -> Domain:
if domain := getattr(self, key, None):
return domain
return self._domain_instances[key]
def __setitem__(self, key: str, value: Domain) -> NoReturn:
msg = f'{self.__class__.__name__!r} object does not support item assignment'
raise TypeError(msg)
def __delitem__(self, key: str) -> NoReturn:
msg = f'{self.__class__.__name__!r} object does not support item deletion'
raise TypeError(msg)
def __iter__(self) -> Iterator[str]:
return iter(self._domain_instances.keys())
def __len__(self) -> int:
return len(self._domain_instances)
def get(self, key: str, default: Domain | None = None) -> Domain | None:
return self._domain_instances.get(key, default)
def keys(self) -> Iterable[str]:
return self._domain_instances.keys()
def items(self) -> Iterable[tuple[str, Domain]]:
return self._domain_instances.items()
def values(self) -> Iterable[Domain]:
return self._domain_instances.values()
def sorted(self) -> Iterable[Domain]:
for _domain_name, domain in sorted(self._domain_instances.items()):
yield domain
| _DomainsContainer |
python | google__pytype | pytype/tools/analyze_project/config_test.py | {
"start": 1076,
"end": 1931
} | class ____(unittest.TestCase):
"""Base for config tests."""
def _validate_file_contents(self, conf, path):
self.assertEqual(conf.exclude, set())
# output shouldn't be present since we haven't set it.
self.assertFalse(hasattr(conf, 'output'))
self.assertEqual(
file_utils.expand_paths(conf.pythonpath),
file_utils.expand_paths([
path,
(
('C:' if sys.platform == 'win32' else '')
+ file_utils.replace_separator('/foo/bar')
),
path_utils.join(path, file_utils.replace_separator('baz/quux')),
]),
)
self.assertEqual(conf.python_version, '3.7')
self.assertEqual(conf.disable, 'import-error,module-attr')
def _validate_empty_contents(self, conf):
for k in config.ITEMS:
self.assertFalse(hasattr(conf, k))
| TestBase |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 118302,
"end": 118713
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(OrgEnterpriseOwnerOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| OrgEnterpriseOwnerOrder |
python | PyCQA__pylint | tests/functional/n/non/non_init_parent_called.py | {
"start": 521,
"end": 909
} | class ____(BBBBMixin, non_init_parent_called.AAAA, non_init_parent_called.BBBB, nonexistent.AClass): # [no-member]
"""mix different things, some inferable some not"""
def __init__(self):
BBBBMixin.__init__(self)
non_init_parent_called.AAAA.__init__(self)
non_init_parent_called.BBBB.__init__(self) # [no-member]
nonexistent.AClass.__init__(self)
| CCC |
python | numpy__numpy | numpy/typing/tests/data/pass/arithmetic.py | {
"start": 403,
"end": 7766
} | class ____:
def __array__(self, dtype: np.typing.DTypeLike | None = None,
copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]:
ret = np.empty((), dtype=object)
ret[()] = self
return ret
def __sub__(self, value: Any) -> Object:
return self
def __rsub__(self, value: Any) -> Object:
return self
def __floordiv__(self, value: Any) -> Object:
return self
def __rfloordiv__(self, value: Any) -> Object:
return self
def __mul__(self, value: Any) -> Object:
return self
def __rmul__(self, value: Any) -> Object:
return self
def __pow__(self, value: Any) -> Object:
return self
def __rpow__(self, value: Any) -> Object:
return self
AR_b: npt.NDArray[np.bool] = np.array([True])
AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32)
AR_i: npt.NDArray[np.int64] = np.array([1])
AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i)
AR_f: npt.NDArray[np.float64] = np.array([1.0])
AR_c: npt.NDArray[np.complex128] = np.array([1j])
AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")])
AR_M: npt.NDArray[np.datetime64] = np.array([np.datetime64(1, "D")])
AR_O: npt.NDArray[np.object_] = np.array([Object()])
AR_LIKE_b = [True]
AR_LIKE_u = [np.uint32(1)]
AR_LIKE_i = [1]
AR_LIKE_f = [1.0]
AR_LIKE_c = [1j]
AR_LIKE_m = [np.timedelta64(1, "D")]
AR_LIKE_M = [np.datetime64(1, "D")]
AR_LIKE_O = [Object()]
# Array subtractions
AR_b - AR_LIKE_u
AR_b - AR_LIKE_i
AR_b - AR_LIKE_f
AR_b - AR_LIKE_c
AR_b - AR_LIKE_m
AR_b - AR_LIKE_O
AR_LIKE_u - AR_b
AR_LIKE_i - AR_b
AR_LIKE_f - AR_b
AR_LIKE_c - AR_b
AR_LIKE_m - AR_b
AR_LIKE_M - AR_b
AR_LIKE_O - AR_b
AR_u - AR_LIKE_b
AR_u - AR_LIKE_u
AR_u - AR_LIKE_i
AR_u - AR_LIKE_f
AR_u - AR_LIKE_c
AR_u - AR_LIKE_m
AR_u - AR_LIKE_O
AR_LIKE_b - AR_u
AR_LIKE_u - AR_u
AR_LIKE_i - AR_u
AR_LIKE_f - AR_u
AR_LIKE_c - AR_u
AR_LIKE_m - AR_u
AR_LIKE_M - AR_u
AR_LIKE_O - AR_u
AR_i - AR_LIKE_b
AR_i - AR_LIKE_u
AR_i - AR_LIKE_i
AR_i - AR_LIKE_f
AR_i - AR_LIKE_c
AR_i - AR_LIKE_m
AR_i - AR_LIKE_O
AR_LIKE_b - AR_i
AR_LIKE_u - AR_i
AR_LIKE_i - AR_i
AR_LIKE_f - AR_i
AR_LIKE_c - AR_i
AR_LIKE_m - AR_i
AR_LIKE_M - AR_i
AR_LIKE_O - AR_i
AR_f - AR_LIKE_b
AR_f - AR_LIKE_u
AR_f - AR_LIKE_i
AR_f - AR_LIKE_f
AR_f - AR_LIKE_c
AR_f - AR_LIKE_O
AR_LIKE_b - AR_f
AR_LIKE_u - AR_f
AR_LIKE_i - AR_f
AR_LIKE_f - AR_f
AR_LIKE_c - AR_f
AR_LIKE_O - AR_f
AR_c - AR_LIKE_b
AR_c - AR_LIKE_u
AR_c - AR_LIKE_i
AR_c - AR_LIKE_f
AR_c - AR_LIKE_c
AR_c - AR_LIKE_O
AR_LIKE_b - AR_c
AR_LIKE_u - AR_c
AR_LIKE_i - AR_c
AR_LIKE_f - AR_c
AR_LIKE_c - AR_c
AR_LIKE_O - AR_c
AR_m - AR_LIKE_b
AR_m - AR_LIKE_u
AR_m - AR_LIKE_i
AR_m - AR_LIKE_m
AR_LIKE_b - AR_m
AR_LIKE_u - AR_m
AR_LIKE_i - AR_m
AR_LIKE_m - AR_m
AR_LIKE_M - AR_m
AR_M - AR_LIKE_b
AR_M - AR_LIKE_u
AR_M - AR_LIKE_i
AR_M - AR_LIKE_m
AR_M - AR_LIKE_M
AR_LIKE_M - AR_M
AR_O - AR_LIKE_b
AR_O - AR_LIKE_u
AR_O - AR_LIKE_i
AR_O - AR_LIKE_f
AR_O - AR_LIKE_c
AR_O - AR_LIKE_O
AR_LIKE_b - AR_O
AR_LIKE_u - AR_O
AR_LIKE_i - AR_O
AR_LIKE_f - AR_O
AR_LIKE_c - AR_O
AR_LIKE_O - AR_O
AR_u += AR_b
AR_u += AR_u
AR_u += 1 # Allowed during runtime as long as the object is 0D and >=0
# Array floor division
AR_b // AR_LIKE_b
AR_b // AR_LIKE_u
AR_b // AR_LIKE_i
AR_b // AR_LIKE_f
AR_b // AR_LIKE_O
AR_LIKE_b // AR_b
AR_LIKE_u // AR_b
AR_LIKE_i // AR_b
AR_LIKE_f // AR_b
AR_LIKE_O // AR_b
AR_u // AR_LIKE_b
AR_u // AR_LIKE_u
AR_u // AR_LIKE_i
AR_u // AR_LIKE_f
AR_u // AR_LIKE_O
AR_LIKE_b // AR_u
AR_LIKE_u // AR_u
AR_LIKE_i // AR_u
AR_LIKE_f // AR_u
AR_LIKE_m // AR_u
AR_LIKE_O // AR_u
AR_i // AR_LIKE_b
AR_i // AR_LIKE_u
AR_i // AR_LIKE_i
AR_i // AR_LIKE_f
AR_i // AR_LIKE_O
AR_LIKE_b // AR_i
AR_LIKE_u // AR_i
AR_LIKE_i // AR_i
AR_LIKE_f // AR_i
AR_LIKE_m // AR_i
AR_LIKE_O // AR_i
AR_f // AR_LIKE_b
AR_f // AR_LIKE_u
AR_f // AR_LIKE_i
AR_f // AR_LIKE_f
AR_f // AR_LIKE_O
AR_LIKE_b // AR_f
AR_LIKE_u // AR_f
AR_LIKE_i // AR_f
AR_LIKE_f // AR_f
AR_LIKE_m // AR_f
AR_LIKE_O // AR_f
AR_m // AR_LIKE_u
AR_m // AR_LIKE_i
AR_m // AR_LIKE_f
AR_m // AR_LIKE_m
AR_LIKE_m // AR_m
AR_m /= f
AR_m //= f
AR_m /= AR_f
AR_m /= AR_LIKE_f
AR_m //= AR_f
AR_m //= AR_LIKE_f
AR_O // AR_LIKE_b
AR_O // AR_LIKE_u
AR_O // AR_LIKE_i
AR_O // AR_LIKE_f
AR_O // AR_LIKE_O
AR_LIKE_b // AR_O
AR_LIKE_u // AR_O
AR_LIKE_i // AR_O
AR_LIKE_f // AR_O
AR_LIKE_O // AR_O
# Inplace multiplication
AR_b *= AR_LIKE_b
AR_u *= AR_LIKE_b
AR_u *= AR_LIKE_u
AR_i *= AR_LIKE_b
AR_i *= AR_LIKE_u
AR_i *= AR_LIKE_i
AR_integer *= AR_LIKE_b
AR_integer *= AR_LIKE_u
AR_integer *= AR_LIKE_i
AR_f *= AR_LIKE_b
AR_f *= AR_LIKE_u
AR_f *= AR_LIKE_i
AR_f *= AR_LIKE_f
AR_c *= AR_LIKE_b
AR_c *= AR_LIKE_u
AR_c *= AR_LIKE_i
AR_c *= AR_LIKE_f
AR_c *= AR_LIKE_c
AR_m *= AR_LIKE_b
AR_m *= AR_LIKE_u
AR_m *= AR_LIKE_i
AR_m *= AR_LIKE_f
AR_O *= AR_LIKE_b
AR_O *= AR_LIKE_u
AR_O *= AR_LIKE_i
AR_O *= AR_LIKE_f
AR_O *= AR_LIKE_c
AR_O *= AR_LIKE_O
# Inplace power
AR_u **= AR_LIKE_b
AR_u **= AR_LIKE_u
AR_i **= AR_LIKE_b
AR_i **= AR_LIKE_u
AR_i **= AR_LIKE_i
AR_integer **= AR_LIKE_b
AR_integer **= AR_LIKE_u
AR_integer **= AR_LIKE_i
AR_f **= AR_LIKE_b
AR_f **= AR_LIKE_u
AR_f **= AR_LIKE_i
AR_f **= AR_LIKE_f
AR_c **= AR_LIKE_b
AR_c **= AR_LIKE_u
AR_c **= AR_LIKE_i
AR_c **= AR_LIKE_f
AR_c **= AR_LIKE_c
AR_O **= AR_LIKE_b
AR_O **= AR_LIKE_u
AR_O **= AR_LIKE_i
AR_O **= AR_LIKE_f
AR_O **= AR_LIKE_c
AR_O **= AR_LIKE_O
# unary ops
-c16
-c8
-f8
-f4
-i8
-i4
with pytest.warns(RuntimeWarning):
-u8
-u4
-td
-AR_f
+c16
+c8
+f8
+f4
+i8
+i4
+u8
+u4
+td
+AR_f
abs(c16)
abs(c8)
abs(f8)
abs(f4)
abs(i8)
abs(i4)
abs(u8)
abs(u4)
abs(td)
abs(b_)
abs(AR_f)
# Time structures
dt + td
dt + i
dt + i4
dt + i8
dt - dt
dt - i
dt - i4
dt - i8
td + td
td + i
td + i4
td + i8
td - td
td - i
td - i4
td - i8
td / f
td / f4
td / f8
td / td
td // td
td % td
# boolean
b_ / b
b_ / b_
b_ / i
b_ / i8
b_ / i4
b_ / u8
b_ / u4
b_ / f
b_ / f8
b_ / f4
b_ / c
b_ / c16
b_ / c8
b / b_
b_ / b_
i / b_
i8 / b_
i4 / b_
u8 / b_
u4 / b_
f / b_
f8 / b_
f4 / b_
c / b_
c16 / b_
c8 / b_
# Complex
c16 + c16
c16 + f8
c16 + i8
c16 + c8
c16 + f4
c16 + i4
c16 + b_
c16 + b
c16 + c
c16 + f
c16 + i
c16 + AR_f
c16 + c16
f8 + c16
i8 + c16
c8 + c16
f4 + c16
i4 + c16
b_ + c16
b + c16
c + c16
f + c16
i + c16
AR_f + c16
c8 + c16
c8 + f8
c8 + i8
c8 + c8
c8 + f4
c8 + i4
c8 + b_
c8 + b
c8 + c
c8 + f
c8 + i
c8 + AR_f
c16 + c8
f8 + c8
i8 + c8
c8 + c8
f4 + c8
i4 + c8
b_ + c8
b + c8
c + c8
f + c8
i + c8
AR_f + c8
# Float
f8 + f8
f8 + i8
f8 + f4
f8 + i4
f8 + b_
f8 + b
f8 + c
f8 + f
f8 + i
f8 + AR_f
f8 + f8
i8 + f8
f4 + f8
i4 + f8
b_ + f8
b + f8
c + f8
f + f8
i + f8
AR_f + f8
f4 + f8
f4 + i8
f4 + f4
f4 + i4
f4 + b_
f4 + b
f4 + c
f4 + f
f4 + i
f4 + AR_f
f8 + f4
i8 + f4
f4 + f4
i4 + f4
b_ + f4
b + f4
c + f4
f + f4
i + f4
AR_f + f4
# Int
i8 + i8
i8 + u8
i8 + i4
i8 + u4
i8 + b_
i8 + b
i8 + c
i8 + f
i8 + i
i8 + AR_f
u8 + u8
u8 + i4
u8 + u4
u8 + b_
u8 + b
u8 + c
u8 + f
u8 + i
u8 + AR_f
i8 + i8
u8 + i8
i4 + i8
u4 + i8
b_ + i8
b + i8
c + i8
f + i8
i + i8
AR_f + i8
u8 + u8
i4 + u8
u4 + u8
b_ + u8
b + u8
c + u8
f + u8
i + u8
AR_f + u8
i4 + i8
i4 + i4
i4 + i
i4 + b_
i4 + b
i4 + AR_f
u4 + i8
u4 + i4
u4 + u8
u4 + u4
u4 + i
u4 + b_
u4 + b
u4 + AR_f
i8 + i4
i4 + i4
i + i4
b_ + i4
b + i4
AR_f + i4
i8 + u4
i4 + u4
u8 + u4
u4 + u4
b_ + u4
b + u4
i + u4
AR_f + u4
| Object |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_work_queues.py | {
"start": 26301,
"end": 37544
} | class ____:
@pytest.fixture
async def work_queue_2(self, session):
work_queue = await models.work_queues.create_work_queue(
session=session,
work_queue=schemas.actions.WorkQueueCreate(name="wq-2"),
)
await session.commit()
return work_queue
@pytest.fixture
async def scheduled_flow_runs(self, session, deployment, work_queue, work_queue_2):
for i in range(3):
for wq in [work_queue, work_queue_2]:
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment.flow_id,
deployment_id=deployment.id,
work_queue_name=wq.name,
state=schemas.states.State(
type="SCHEDULED",
timestamp=datetime.now(timezone.utc) + timedelta(minutes=i),
state_details=dict(
scheduled_time=datetime.now(timezone.utc)
+ timedelta(minutes=i)
),
),
),
)
await session.commit()
@pytest.fixture
async def running_flow_runs(self, session, deployment, work_queue, work_queue_2):
for i in range(3):
for wq in [work_queue, work_queue_2]:
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment.flow_id,
deployment_id=deployment.id,
work_queue_name=wq.name,
state=schemas.states.State(
type="RUNNING" if i == 0 else "PENDING",
timestamp=datetime.now(timezone.utc)
- timedelta(seconds=10),
),
),
)
await session.commit()
async def test_get_runs_in_queue(
self,
hosted_api_client,
work_queue,
work_queue_2,
scheduled_flow_runs,
running_flow_runs,
):
response1 = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs"
)
assert response1.status_code == status.HTTP_200_OK
response2 = await hosted_api_client.post(
f"/work_queues/{work_queue_2.id}/get_runs"
)
assert response2.status_code == status.HTTP_200_OK
runs_wq1 = parse_obj_as(
List[schemas.responses.FlowRunResponse], response1.json()
)
runs_wq2 = parse_obj_as(
List[schemas.responses.FlowRunResponse], response2.json()
)
assert len(runs_wq1) == len(runs_wq2) == 3
assert all(r.work_queue_name == work_queue.name for r in runs_wq1)
assert all(r.work_queue_name == work_queue_2.name for r in runs_wq2)
assert set([r.id for r in runs_wq1]) != set([r.id for r in runs_wq2])
@pytest.mark.parametrize("limit", [2, 0])
async def test_get_runs_in_queue_limit(
self,
hosted_api_client,
work_queue,
scheduled_flow_runs,
running_flow_runs,
limit,
):
response1 = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs", json=dict(limit=limit)
)
runs_wq1 = parse_obj_as(
List[schemas.responses.FlowRunResponse], response1.json()
)
assert len(runs_wq1) == limit
async def test_get_runs_in_queue_scheduled_before(
self,
hosted_api_client,
work_queue,
scheduled_flow_runs,
running_flow_runs,
):
response1 = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs",
json=dict(scheduled_before=datetime.now(timezone.utc).isoformat()),
)
runs_wq1 = parse_obj_as(
List[schemas.responses.FlowRunResponse], response1.json()
)
assert len(runs_wq1) == 1
async def test_get_runs_in_queue_nonexistant(
self,
hosted_api_client,
work_queue,
scheduled_flow_runs,
running_flow_runs,
):
response1 = await hosted_api_client.post(f"/work_queues/{uuid4()}/get_runs")
assert response1.status_code == status.HTTP_404_NOT_FOUND
async def test_get_runs_in_queue_paused(
self,
hosted_api_client,
work_queue,
scheduled_flow_runs,
running_flow_runs,
):
await hosted_api_client.patch(
f"/work_queues/{work_queue.id}", json=dict(is_paused=True)
)
response1 = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs"
)
assert response1.json() == []
@pytest.mark.parametrize("concurrency_limit", [10, 5, 1])
async def test_get_runs_in_queue_concurrency_limit(
self,
hosted_api_client,
work_queue,
scheduled_flow_runs,
running_flow_runs,
concurrency_limit,
):
await hosted_api_client.patch(
f"/work_queues/{work_queue.id}",
json=dict(concurrency_limit=concurrency_limit),
)
response1 = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs"
)
assert len(response1.json()) == max(0, min(3, concurrency_limit - 3))
@pytest.mark.parametrize("limit", [10, 1])
async def test_get_runs_in_queue_concurrency_limit_and_limit(
self,
hosted_api_client,
work_queue,
scheduled_flow_runs,
running_flow_runs,
limit,
):
await hosted_api_client.patch(
f"/work_queues/{work_queue.id}",
json=dict(concurrency_limit=5),
)
response1 = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs",
json=dict(limit=limit),
)
assert len(response1.json()) == min(limit, 2)
async def test_read_work_queue_runs_updates_work_queue_last_polled_time(
self,
hosted_api_client,
work_queue,
session,
):
now = datetime.now(timezone.utc)
response = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs",
json=dict(),
)
assert response.status_code == status.HTTP_200_OK
async for attempt in retry_asserts(max_attempts=10, delay=0.5):
with attempt:
session.expunge_all()
updated_work_queue = await models.work_queues.read_work_queue(
session=session, work_queue_id=work_queue.id
)
assert updated_work_queue.last_polled is not None
assert updated_work_queue.last_polled > now
# The Prefect UI often calls this route to see which runs are enqueued.
# We do not want to record this as an actual poll event.
ui_response = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs",
json=dict(),
headers={"X-PREFECT-UI": "true"},
)
assert ui_response.status_code == status.HTTP_200_OK
session.expunge_all()
ui_updated_work_queue = await models.work_queues.read_work_queue(
session=session, work_queue_id=work_queue.id
)
assert ui_updated_work_queue.last_polled == updated_work_queue.last_polled
async def test_read_work_queue_runs_associated_deployments_return_status_of_ready(
self,
hosted_api_client,
deployment,
):
work_queue_id = deployment.work_queue_id
# ensure deployment currently has a not ready status
deployment_response = await hosted_api_client.get(
f"/deployments/{deployment.id}"
)
assert deployment_response.status_code == status.HTTP_200_OK
assert deployment_response.json()["status"] == "NOT_READY"
# trigger a poll of the work queue, which should update the deployment status
response = await hosted_api_client.post(
f"/work_queues/{work_queue_id}/get_runs",
json=dict(),
)
assert response.status_code == status.HTTP_200_OK
async for attempt in retry_asserts(max_attempts=10, delay=0.5):
with attempt:
# check that the deployment status is now ready
updated_deployment_response = await hosted_api_client.get(
f"/deployments/{deployment.id}"
)
assert updated_deployment_response.status_code == status.HTTP_200_OK
assert updated_deployment_response.json()["status"] == "READY"
async def test_read_work_queue_runs_updates_work_queue_status(
self,
hosted_api_client,
work_queue,
session,
):
# Verify the work queue is initially not ready
wq_response = await hosted_api_client.get(f"/work_queues/{work_queue.id}")
assert wq_response.status_code == status.HTTP_200_OK
assert wq_response.json()["status"] == "NOT_READY"
# Trigger a polling operation
response = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs",
)
assert response.status_code == status.HTTP_200_OK
async for attempt in retry_asserts(max_attempts=10, delay=0.5):
with attempt:
# Verify the work queue is now ready
wq_response = await hosted_api_client.get(
f"/work_queues/{work_queue.id}"
)
assert wq_response.status_code == status.HTTP_200_OK
assert wq_response.json()["status"] == "READY"
async def test_read_work_queue_runs_does_not_update_a_paused_work_queues_status(
self,
hosted_api_client,
work_queue,
session,
):
# Move the queue into a PAUSED state
new_data = WorkQueueUpdate(is_paused=True).model_dump(
mode="json", exclude_unset=True
)
response = await hosted_api_client.patch(
f"/work_queues/{work_queue.id}", json=new_data
)
assert response.status_code == status.HTTP_204_NO_CONTENT
# Verify the work queue is PAUSED
wq_response = await hosted_api_client.get(f"/work_queues/{work_queue.id}")
assert wq_response.status_code == status.HTTP_200_OK
assert wq_response.json()["status"] == "PAUSED"
assert wq_response.json()["is_paused"] is True
# Trigger a polling operation
response = await hosted_api_client.post(
f"/work_queues/{work_queue.id}/get_runs",
)
assert response.status_code == status.HTTP_200_OK
# Verify the work queue status is still PAUSED
wq_response = await hosted_api_client.get(f"/work_queues/{work_queue.id}")
assert wq_response.status_code == status.HTTP_200_OK
assert wq_response.json()["status"] == "PAUSED"
| TestGetRunsInWorkQueue |
python | django__django | django/contrib/messages/apps.py | {
"start": 435,
"end": 611
} | class ____(AppConfig):
name = "django.contrib.messages"
verbose_name = _("Messages")
def ready(self):
setting_changed.connect(update_level_tags)
| MessagesConfig |
python | Pylons__pyramid | src/pyramid/predicates.py | {
"start": 7836,
"end": 8395
} | class ____:
def __init__(self, val, config):
if is_nonstr_iter(val):
self.val = set(val)
else:
self.val = {val}
def text(self):
return 'effective_principals = %s' % sorted(list(self.val))
phash = text
def __call__(self, context, request):
req_principals = request.effective_principals
if is_nonstr_iter(req_principals):
rpset = set(req_principals)
if self.val.issubset(rpset):
return True
return False
| EffectivePrincipalsPredicate |
python | facebook__pyre-check | source/command/test/integration/fake_repository/commit_007_T30944862/a.py | {
"start": 204,
"end": 266
} | class ____:
def foo(self, x: int) -> None:
pass
| Base |
python | apache__avro | lang/py/avro/test/test_name.py | {
"start": 9783,
"end": 12789
} | class ____(unittest.TestCase):
"""Enable generating parse test cases over all the valid and invalid example schema."""
def __init__(self, test_schema_string):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("parse_invalid_name")
self.test_schema_string = test_schema_string
def parse_invalid_name(self) -> None:
"""Parsing a schema with invalid name should not error"""
schema_string = json.dumps(self.test_schema_string)
# Parse with validation to ensure that correct exception is raised when validation enabled.
with self.assertRaises(
(avro.errors.AvroException, avro.errors.SchemaParseException), msg=f"Invalid schema should not have parsed: {self.test_schema_string!s}"
):
avro.schema.parse(schema_string, validate_names=True)
# The actual test with validation disabled.
avro.schema.parse(schema_string, validate_names=False)
PROTOCOL_EXAMPLES = [
# In record
{
"namespace": "lightyear",
"protocol": "lightspeed",
"types": [
{"name": "current-speed", "type": "record", "fields": [{"name": "speed", "type": "int"}, {"name": "unit", "type": "string"}]},
{"name": "over_c", "type": "error", "fields": [{"name": "message", "type": "string"}]},
],
"messages": {
"speedmessage": {"request": [{"name": "current_speed", "type": "current-speed"}], "response": "current-speed", "errors": ["over_c"]}
},
},
# Error union
{
"namespace": "lightyear",
"protocol": "lightspeed",
"types": [
{"name": "current_speed", "type": "record", "fields": [{"name": "speed", "type": "int"}, {"name": "unit", "type": "string"}]},
{"name": "over-c", "type": "error", "fields": [{"name": "message", "type": "string"}]},
],
"messages": {
"speedmessage": {"request": [{"name": "current_speed", "type": "current_speed"}], "response": "current_speed", "errors": ["over-c"]}
},
},
{
"namespace": "lightyear",
"protocol": "lightspeed",
"types": [
{"name": "current_speed", "type": "record", "fields": [{"name": "speed", "type": "int"}, {"name": "unit", "type": "string"}]},
{"name": "over_c", "namespace": "error-speed", "type": "error", "fields": [{"name": "message", "type": "string"}]},
],
"messages": {
"speedmessage": {
"request": [{"name": "current_speed", "type": "current_speed"}],
"response": "current_speed",
"errors": ["error-speed.over_c"],
}
},
},
]
| ParseSchemaNameValidationDisabledTestCase |
python | plotly__plotly.py | plotly/graph_objs/sunburst/marker/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8554
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sunburst.marker.colorbar"
_path_str = "sunburst.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sunburst.marke
r.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sunburst.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | joke2k__faker | faker/providers/barcode/ja_JP/__init__.py | {
"start": 45,
"end": 1476
} | class ____(BarcodeProvider):
"""Implement barcode provider for ``ja_JP`` locale.
Japanese local EAN barcodes are called JAN-codes.
Sources:
- https://gs1.org/standards/id-keys/company-prefix
- https://www.dsri.jp/jan/about_jan.html
.. |JaJpProvider.localized_ean| replace::
:meth:`JaJpProvider.localized_ean() <faker.providers.barcode.ja_JP.Provider.localized_ean>`
.. |JaJpProvider.localized_ean8| replace::
:meth:`JaJpProvider.localized_ean8() <faker.providers.barcode.ja_JP.Provider.localized_ean8>`
.. |JaJpProvider.localized_ean13| replace::
:meth:`JaJpProvider.localized_ean13() <faker.providers.barcode.ja_JP.Provider.localized_ean13>`
"""
local_prefixes = (4, 5), (4, 9)
def jan(self, length: int = 13) -> str:
"""Generate a JAN barcode of the specified ``length``.
This method is an alias for |JaJpProvider.localized_ean|.
:sample:
:sample: length=8
:sample: length=13
"""
return self.localized_ean(length)
def jan8(self) -> str:
"""Generate a 8 digit JAN barcode.
This method is an alias for |JaJpProvider.localized_ean8|.
"""
return self.localized_ean8()
def jan13(self) -> str:
"""Generate a 13 digit JAN barcode.
This method is an alias for |JaJpProvider.localized_ean13|.
"""
return self.localized_ean13()
| Provider |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/definitions_tests/test_composition.py | {
"start": 9709,
"end": 23784
} | class ____(Exception):
pass
def test_recursion_with_exceptions():
called = {}
@dg.graph
def recurse():
@dg.graph
def outer():
try:
@dg.graph
def throws():
called["throws"] = True
raise Garbage()
throws()
except Garbage:
add_one(return_one())
outer()
assert recurse.execute_in_process().success
assert called["throws"] is True
def test_job_has_op_def():
@dg.graph
def inner():
return add_one(return_one())
@dg.graph
def outer():
add_one(inner())
@dg.job
def a_job():
outer()
assert a_job.has_node("add_one")
assert a_job.has_node("outer")
assert a_job.has_node("inner")
def test_mapping_args_ordering():
@dg.op
def take(a, b, c):
assert a == "a"
assert b == "b"
assert c == "c"
@dg.graph
def swizzle(b, a, c):
take(a, b, c)
@dg.graph
def swizzle_2(c, b, a):
swizzle(b, a=a, c=c)
@dg.graph
def ordered():
swizzle_2()
for mapping in swizzle.input_mappings:
assert mapping.graph_input_name == mapping.maps_to.input_name
for mapping in swizzle_2.input_mappings:
assert mapping.graph_input_name == mapping.maps_to.input_name
ordered.execute_in_process(
run_config={
"ops": {
"swizzle_2": {
"inputs": {
"a": {"value": "a"},
"b": {"value": "b"},
"c": {"value": "c"},
}
}
}
},
)
def test_unused_mapping():
with pytest.raises(dg.DagsterInvalidDefinitionError, match="unmapped input"):
@dg.graph
def unused_mapping(_):
return_one()
@dg.op
def single_input_op():
return
def test_collision_invocations():
with warnings.catch_warnings():
warnings.simplefilter("error")
@dg.job
def _():
single_input_op()
single_input_op()
single_input_op()
def test_alias_invoked(recwarn):
@dg.job
def _():
single_input_op.alias("foo")()
single_input_op.alias("bar")()
assert len(recwarn) == 0
def test_alias_not_invoked():
with pytest.warns(UserWarning, match="received an uninvoked op") as record:
@dg.job
def _my_job():
single_input_op.alias("foo")
single_input_op.alias("bar")
assert len(record) == 2 # This job should raise a warning for each aliasing of the solid.
def test_tag_invoked():
# See: https://docs.pytest.org/en/7.0.x/how-to/capture-warnings.html#additional-use-cases-of-warnings-in-tests
with warnings.catch_warnings():
warnings.simplefilter("error", category=UserWarning)
@dg.graph
def _my_graph():
single_input_op.tag({})()
_my_graph.execute_in_process()
def test_tag_not_invoked():
with pytest.warns(
UserWarning,
match="uninvoked op",
) as record:
@dg.job
def _my_job():
single_input_op.tag({})
single_input_op.tag({})
_my_job.execute_in_process()
user_warnings = [warning for warning in record if isinstance(warning.message, UserWarning)]
assert (
len(user_warnings) == 1
) # We should only raise one warning because solids have same name.
with pytest.warns(UserWarning, match="uninvoked op"):
@dg.job
def _my_job():
single_input_op.tag({"a": "b"})
_my_job.execute_in_process()
def test_with_hooks_invoked():
with warnings.catch_warnings():
warnings.simplefilter("error", category=UserWarning)
@dg.job
def _my_job():
single_input_op.with_hooks(set())()
_my_job.execute_in_process()
@event_list_hook(required_resource_keys=set())
def a_hook(_context, _):
return HookExecutionResult("a_hook")
def test_with_hooks_not_invoked():
with pytest.warns(
UserWarning,
match="uninvoked op",
) as record:
@dg.job
def _my_job():
single_input_op.with_hooks(set())
single_input_op.with_hooks(set())
_my_job.execute_in_process()
# Note not returning out of the pipe causes warning count to go up to 2
user_warnings = [warning for warning in record if isinstance(warning.message, UserWarning)]
assert (
len(user_warnings) == 1
) # We should only raise one warning because solids have same name.
with pytest.warns(
UserWarning,
match="uninvoked op",
):
@dg.job
def _my_job():
single_input_op.with_hooks({a_hook})
_my_job.execute_in_process()
def test_with_hooks_not_empty():
@dg.job
def _():
single_input_op.with_hooks({a_hook})
assert 1 == 1
def test_multiple_pending_invocations():
with pytest.warns(
UserWarning,
match="uninvoked op",
) as record:
@dg.job
def _my_job():
foo = single_input_op.alias("foo")
bar = single_input_op.alias("bar")
foo_tag = foo.tag({})
_bar_hook = bar.with_hooks({a_hook})
foo_tag()
assert (
len(record) == 1
) # ensure that one warning is thrown per solid_name / alias instead of per every PendingNodeInvocation.
def test_compose_nothing():
@dg.op(ins={"start": dg.In(dg.Nothing)})
def go():
pass
@dg.graph(ins={"start": dg.GraphIn()})
def _compose(start: Nothing): # type: ignore
go(start)
def test_multimap():
@dg.graph(out={"x": dg.GraphOut(), "y": dg.GraphOut()})
def multimap(foo):
x = echo.alias("echo_1")(foo)
y = echo.alias("echo_2")(foo)
return {"x": x, "y": y}
@dg.job
def multimap_pipe():
one = return_one()
multimap(one)
result = multimap_pipe.execute_in_process()
assert result.output_for_node("multimap.echo_1") == 1
assert result.output_for_node("multimap.echo_2") == 1
def test_reuse_inputs():
@dg.graph(ins={"one": dg.GraphIn(), "two": dg.GraphIn()})
def calculate(one, two):
adder(one, two)
adder.alias("adder_2")(one, two)
@dg.job
def calculate_job():
one = return_one()
two = return_two()
calculate(one, two)
result = calculate_job.execute_in_process()
assert result.output_for_node("calculate.adder") == 3
assert result.output_for_node("calculate.adder_2") == 3
def test_output_node_error():
with pytest.raises(dg.DagsterInvariantViolationError):
@dg.job
def _bad_destructure():
_a, _b = return_tuple()
with pytest.raises(dg.DagsterInvariantViolationError):
@dg.job
def _bad_index():
out = return_tuple()
add_one(out[0])
def test_job_composition_metadata():
@dg.op
def metadata_op(context):
return context.op.tags["key"]
@dg.job
def metadata_test_job():
metadata_op.tag({"key": "foo"}).alias("aliased_one")()
metadata_op.alias("aliased_two").tag({"key": "foo"}).tag({"key": "bar"})()
metadata_op.alias("aliased_three").tag({"key": "baz"})()
metadata_op.tag({"key": "quux"})()
res = metadata_test_job.execute_in_process()
assert res.output_for_node("aliased_one") == "foo"
assert res.output_for_node("aliased_two") == "bar"
assert res.output_for_node("aliased_three") == "baz"
assert res.output_for_node("metadata_op") == "quux"
def test_composition_metadata():
@dg.op
def metadata_op(context):
return context.op.tags["key"]
@dg.graph
def metadata_graph():
metadata_op.tag({"key": "foo"}).alias("aliased_one")()
metadata_op.alias("aliased_two").tag({"key": "foo"}).tag({"key": "bar"})()
metadata_op.alias("aliased_three").tag({"key": "baz"})()
metadata_op.tag({"key": "quux"})()
@dg.job
def metadata_test_job():
metadata_graph()
res = metadata_test_job.execute_in_process()
assert res.output_for_node("metadata_graph.aliased_one") == "foo"
assert res.output_for_node("metadata_graph.aliased_two") == "bar"
assert res.output_for_node("metadata_graph.aliased_three") == "baz"
assert res.output_for_node("metadata_graph.metadata_op") == "quux"
def test_uninvoked_op_fails():
with pytest.raises(dg.DagsterInvalidDefinitionError, match=r".*Did you forget parentheses?"):
@dg.job
def uninvoked_solid_job():
add_one(return_one)
uninvoked_solid_job.execute_in_process()
def test_uninvoked_aliased_op_fails():
with pytest.raises(dg.DagsterInvalidDefinitionError, match=r".*Did you forget parentheses?"):
@dg.job
def uninvoked_aliased_solid_job():
add_one(return_one.alias("something"))
uninvoked_aliased_solid_job.execute_in_process()
def test_alias_on_invoked_op_fails():
with pytest.raises(
dg.DagsterInvariantViolationError,
match=r".*Consider checking the location of parentheses.",
):
@dg.job
def alias_on_invoked_solid_job():
return_one().alias("something")
alias_on_invoked_solid_job.execute_in_process()
def test_tags():
@dg.op(tags={"def": "1"})
def emit(_):
return 1
@dg.job
def tag():
emit.tag({"invoke": "2"})()
plan = create_execution_plan(tag)
step = next(iter(plan.step_dict.values()))
assert step.tags == {"def": "1", "invoke": "2"}
def test_bad_alias():
with pytest.raises(dg.DagsterInvalidDefinitionError, match="not a valid name"):
echo.alias("uh oh")
with pytest.raises(dg.DagsterInvalidDefinitionError, match="not a valid name"):
echo.alias("uh[oh]")
def test_tag_subset():
@dg.op
def empty(_):
pass
@dg.op(tags={"def": "1"})
def emit(_):
return 1
@dg.job
def tag():
empty()
emit.tag({"invoke": "2"})()
plan = create_execution_plan(tag.get_subset(op_selection=["emit"]))
step = next(iter(plan.step_dict.values()))
assert step.tags == {"def": "1", "invoke": "2"}
def test_composition_order():
solid_to_tags = {}
@dg.success_hook
def test_hook(context):
solid_to_tags[context.op.name] = context.op.tags
@dg.op
def a_op(_):
pass
@dg.job
def a_job():
a_op.with_hooks(hook_defs={test_hook}).alias("hook_alias_tag").tag({"pos": 3})() # pyright: ignore[reportArgumentType]
a_op.with_hooks(hook_defs={test_hook}).tag({"pos": 2}).alias("hook_tag_alias")() # pyright: ignore[reportArgumentType]
a_op.alias("alias_tag_hook").tag({"pos": 2}).with_hooks(hook_defs={test_hook})() # pyright: ignore[reportArgumentType]
a_op.alias("alias_hook_tag").with_hooks(hook_defs={test_hook}).tag({"pos": 3})() # pyright: ignore[reportArgumentType]
a_op.tag({"pos": 1}).with_hooks(hook_defs={test_hook}).alias("tag_hook_alias")() # pyright: ignore[reportArgumentType]
a_op.tag({"pos": 1}).alias("tag_alias_hook").with_hooks(hook_defs={test_hook})() # pyright: ignore[reportArgumentType]
result = a_job.execute_in_process(raise_on_error=False)
assert result.success
assert solid_to_tags == {
"tag_hook_alias": {"pos": "1"},
"tag_alias_hook": {"pos": "1"},
"hook_tag_alias": {"pos": "2"},
"alias_tag_hook": {"pos": "2"},
"hook_alias_tag": {"pos": "3"},
"alias_hook_tag": {"pos": "3"},
}
def test_fan_in_scalars_fails():
@dg.op
def fan_in_op(_, xs):
return sum(xs)
with pytest.raises(
dg.DagsterInvalidDefinitionError,
match="Lists can only contain the output from previous op invocations or input mappings",
):
@dg.job
def _scalar_fan_in_job():
fan_in_op([1, 2, 3])
def test_with_hooks_on_invoked_op_fails():
@dg.op
def yield_1_op(_):
return 1
with pytest.raises(
dg.DagsterInvariantViolationError,
match="attempted to call hook method for InvokedNodeOutputHandle.",
):
@dg.job
def _bad_hooks_job():
yield_1_op().with_hooks({a_hook})
def test_iterating_over_dynamic_outputs_fails():
@dg.op
def dynamic_output_op(_):
yield dg.DynamicOutput(1, "1")
yield dg.DynamicOutput(2, "2")
@dg.op
def yield_input(_, x):
return x
with pytest.raises(
dg.DagsterInvariantViolationError,
match="Attempted to iterate over an InvokedNodeOutputHandle.",
):
@dg.job
def _iterating_over_dynamic_output_job():
for x in dynamic_output_op():
yield_input(x)
def test_indexing_into_dynamic_outputs_fails():
@dg.op
def dynamic_output_op(_):
yield dg.DynamicOutput(1, "1")
yield dg.DynamicOutput(2, "2")
@dg.op
def yield_input(_, x):
return x
with pytest.raises(
dg.DagsterInvariantViolationError,
match="Attempted to index in to an InvokedNodeOutputHandle.",
):
@dg.job
def _indexing_into_dynamic_output_job():
yield_input(dynamic_output_op()[0])
def test_aliasing_invoked_dynamic_output_fails():
@dg.op
def dynamic_output_op(_):
yield dg.DynamicOutput(1, "1")
yield dg.DynamicOutput(2, "2")
with pytest.raises(
dg.DagsterInvariantViolationError,
match="attempted to call alias method for InvokedNodeOutputHandle.",
):
@dg.job
def _alias_invoked_dynamic_output_job():
dynamic_output_op().alias("dynamic_output")
def test_compose_asset():
    """An asset can be invoked from within a graph and executed in process."""

    @dg.asset
    def foo():
        pass

    @dg.graph
    def compose():
        foo()

    outcome = compose.execute_in_process()
    assert outcome.success
    # The asset's node should have emitted events during execution.
    assert outcome.events_for_node("foo")
| Garbage |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 76186,
"end": 79675
} | class ____(GoogleCloudBaseOperator):
"""
Update a dataset for your Project in BigQuery.
Use ``fields`` to specify which fields of dataset to update. If a field
is listed in ``fields`` and is ``None`` in dataset, it will be deleted.
If no ``fields`` are provided then all fields of provided ``dataset_resource``
will be used.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryUpdateDatasetOperator`
:param dataset_id: The id of dataset. Don't need to provide,
if datasetId in dataset_reference.
:param dataset_resource: Dataset resource that will be provided with request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param fields: The properties of dataset to change (e.g. "friendly_name").
:param project_id: The name of the project where we want to create the dataset.
Don't need to provide, if projectId in dataset_reference.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
template_fields_renderers = {"dataset_resource": "json"}
ui_color = BigQueryUIColors.DATASET.value
operator_extra_links = (BigQueryDatasetLink(),)
def __init__(
self,
*,
dataset_resource: dict[str, Any],
fields: list[str] | None = None,
dataset_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.project_id = project_id
self.fields = fields
self.gcp_conn_id = gcp_conn_id
self.dataset_resource = dataset_resource
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
fields = self.fields or list(self.dataset_resource.keys())
dataset = bq_hook.update_dataset(
dataset_resource=self.dataset_resource,
project_id=self.project_id,
dataset_id=self.dataset_id,
fields=fields,
)
dataset_api_repr = dataset.to_api_repr()
BigQueryDatasetLink.persist(
context=context,
dataset_id=dataset_api_repr["datasetReference"]["datasetId"],
project_id=dataset_api_repr["datasetReference"]["projectId"],
)
return dataset_api_repr
| BigQueryUpdateDatasetOperator |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/providers.py | {
"start": 1023,
"end": 1186
} | class ____(BaseModel):
"""Provider Collection serializer for responses."""
providers: list[ProviderResponse]
total_entries: int
| ProviderCollectionResponse |
python | html5lib__html5lib-python | html5lib/tests/support.py | {
"start": 2264,
"end": 2499
} | class ____(dict):
def __init__(self, default, *args, **kwargs):
self.default = default
dict.__init__(self, *args, **kwargs)
def __getitem__(self, key):
return dict.get(self, key, self.default)
| DefaultDict |
python | sqlalchemy__sqlalchemy | test/orm/test_association.py | {
"start": 415,
"end": 7399
} | class ____(fixtures.MappedTest):
run_setup_classes = "once"
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"items",
metadata,
Column(
"item_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(40)),
)
Table(
"item_keywords",
metadata,
Column("item_id", Integer, ForeignKey("items.item_id")),
Column("keyword_id", Integer, ForeignKey("keywords.keyword_id")),
Column("data", String(40)),
)
Table(
"keywords",
metadata,
Column(
"keyword_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(40)),
)
@classmethod
def setup_classes(cls):
class Item(cls.Basic):
def __init__(self, name):
self.name = name
def __repr__(self):
return "Item id=%d name=%s keywordassoc=%r" % (
self.item_id,
self.name,
self.keywords,
)
class Keyword(cls.Basic):
def __init__(self, name):
self.name = name
def __repr__(self):
return "Keyword id=%d name=%s" % (self.keyword_id, self.name)
class KeywordAssociation(cls.Basic):
def __init__(self, keyword, data):
self.keyword = keyword
self.data = data
def __repr__(self):
return "KeywordAssociation itemid=%d keyword=%r data=%s" % (
self.item_id,
self.keyword,
self.data,
)
@classmethod
def setup_mappers(cls):
KeywordAssociation, Item, Keyword = (
cls.classes.KeywordAssociation,
cls.classes.Item,
cls.classes.Keyword,
)
items, item_keywords, keywords = cls.tables.get_all(
"items", "item_keywords", "keywords"
)
cls.mapper_registry.map_imperatively(Keyword, keywords)
cls.mapper_registry.map_imperatively(
KeywordAssociation,
item_keywords,
properties={"keyword": relationship(Keyword, lazy="joined")},
primary_key=[item_keywords.c.item_id, item_keywords.c.keyword_id],
)
cls.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
KeywordAssociation,
order_by=item_keywords.c.data,
cascade="all, delete-orphan",
)
},
)
def test_insert(self):
KeywordAssociation, Item, Keyword = (
self.classes.KeywordAssociation,
self.classes.Item,
self.classes.Keyword,
)
sess = fixture_session()
item1 = Item("item1")
item2 = Item("item2")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
item2.keywords.append(
KeywordAssociation(Keyword("green"), "green_assoc")
)
sess.add_all((item1, item2))
sess.flush()
saved = repr([item1, item2])
sess.expunge_all()
result = sess.query(Item).all()
loaded = repr(result)
eq_(saved, loaded)
def test_replace(self):
KeywordAssociation, Item, Keyword = (
self.classes.KeywordAssociation,
self.classes.Item,
self.classes.Keyword,
)
sess = fixture_session()
item1 = Item("item1")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
sess.add(item1)
sess.flush()
red_keyword = item1.keywords[1].keyword
del item1.keywords[1]
item1.keywords.append(KeywordAssociation(red_keyword, "new_red_assoc"))
sess.flush()
saved = repr([item1])
sess.expunge_all()
result = sess.query(Item).all()
loaded = repr(result)
eq_(saved, loaded)
def test_modify(self):
KeywordAssociation, Item, Keyword = (
self.classes.KeywordAssociation,
self.classes.Item,
self.classes.Keyword,
)
sess = fixture_session()
item1 = Item("item1")
item2 = Item("item2")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
item2.keywords.append(
KeywordAssociation(Keyword("green"), "green_assoc")
)
sess.add_all((item1, item2))
sess.flush()
red_keyword = item1.keywords[1].keyword
del item1.keywords[0]
del item1.keywords[0]
purple_keyword = Keyword("purple")
item1.keywords.append(KeywordAssociation(red_keyword, "new_red_assoc"))
item2.keywords.append(
KeywordAssociation(purple_keyword, "purple_item2_assoc")
)
item1.keywords.append(
KeywordAssociation(purple_keyword, "purple_item1_assoc")
)
item1.keywords.append(
KeywordAssociation(Keyword("yellow"), "yellow_assoc")
)
sess.flush()
saved = repr([item1, item2])
sess.expunge_all()
result = sess.query(Item).all()
loaded = repr(result)
eq_(saved, loaded)
def test_delete(self):
KeywordAssociation = self.classes.KeywordAssociation
Item = self.classes.Item
item_keywords = self.tables.item_keywords
Keyword = self.classes.Keyword
sess = fixture_session()
item1 = Item("item1")
item2 = Item("item2")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
item2.keywords.append(
KeywordAssociation(Keyword("green"), "green_assoc")
)
sess.add_all((item1, item2))
sess.flush()
eq_(
sess.connection().scalar(
select(func.count("*")).select_from(item_keywords)
),
3,
)
sess.delete(item1)
sess.delete(item2)
sess.flush()
eq_(
sess.connection().scalar(
select(func.count("*")).select_from(item_keywords)
),
0,
)
| AssociationTest |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/components/tableau_component.py | {
"start": 1222,
"end": 2214
} | class ____(Model, Resolvable):
"""Arguments for configuring a Tableau Cloud workspace connection."""
type: Literal["cloud"] = Field(
default="cloud",
description="Type of Tableau workspace. Must be 'cloud' for Tableau Cloud.",
)
connected_app_client_id: str = Field(
...,
description="Tableau connected app client ID for authentication.",
)
connected_app_secret_id: str = Field(
...,
description="Tableau connected app secret ID.",
)
connected_app_secret_value: str = Field(
...,
description="Tableau connected app secret value.",
)
username: str = Field(
...,
description="Tableau username for authentication.",
)
site_name: str = Field(
...,
description="Tableau site name.",
)
pod_name: str = Field(
default="10ax",
description="Tableau pod name (e.g. '10ax', '10ay'). Defaults to '10ax'.",
)
| TableauCloudWorkspaceArgs |
python | doocs__leetcode | solution/2700-2799/2781.Length of the Longest Valid Substring/Solution.py | {
"start": 0,
"end": 384
} | class ____:
def longestValidSubstring(self, word: str, forbidden: List[str]) -> int:
s = set(forbidden)
ans = i = 0
for j in range(len(word)):
for k in range(j, max(j - 10, i - 1), -1):
if word[k : j + 1] in s:
i = k + 1
break
ans = max(ans, j - i + 1)
return ans
| Solution |
python | pytorch__pytorch | test/test_jit.py | {
"start": 114529,
"end": 570134
} | class ____(JitTestCase):
# Tests that calling torch.jit.script repeated on function is allowed.
def test_repeated_script_on_function(self):
@torch.jit.script
@torch.jit.script
def fn(x):
return x
torch.jit.script(torch.jit.script(fn))
def test_pretty_print_function(self):
@torch.jit.script
def foo(x):
return torch.nn.functional.interpolate(x)
FileCheck().check("interpolate").run(foo.code)
def test_inlined_graph(self):
"""
Check that the `inlined_graph` property correctly returns an inlined
graph, both through function calls and method calls.
"""
@torch.jit.script
def foo(x):
return torch.add(x, x)
class MyNestedMod(torch.nn.Module):
def forward(self, x):
return torch.sub(x, x)
class MyMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.nested = MyNestedMod()
def forward(self, x):
x = self.nested(x) # sub
x = foo(x) # add
return torch.mul(x, x)
m = torch.jit.script(MyMod())
FileCheck().check("aten::sub") \
.check("aten::add") \
.check("aten::mul") \
.run(m.inlined_graph)
def test_static_method_on_module(self):
"""
Check that the `@staticmethod` annotation on a function on a module works.
"""
class MyCell(torch.nn.Module):
@staticmethod
def do_it(x, h):
new_h = torch.tanh(x + h)
return new_h, new_h
def forward(self, x, h):
return self.do_it(x, h)
my_cell = torch.jit.script(MyCell())
x = torch.rand(3, 4)
h = torch.rand(3, 4)
jitted_cell = my_cell(x, h)
non_jitted_cell = MyCell().do_it(x, h)
self.assertEqual(jitted_cell, non_jitted_cell)
def test_code_with_constants(self):
"""
Check that the `code_with_constants` property correctly returns graph CONSTANTS in the
CONSTANTS.cN format used in the output of the `code` property.
"""
@torch.jit.script
def foo(x=torch.ones(1)):
return x
class Moddy(torch.nn.Module):
def forward(self, x):
return foo()
m = torch.jit.script(Moddy())
src, CONSTANTS = m.code_with_constants
self.assertEqual(CONSTANTS.c0, torch.ones(1))
self.assertEqual(src, m.code)
def test_code_with_constants_restore(self):
"""
Check that the `code_with_constants` property correctly works on restoration after save() + load()
"""
@torch.jit.script
def foo(x=torch.ones(1)):
return x
class Moddy(torch.nn.Module):
def forward(self, x):
return foo()
m = torch.jit.script(Moddy())
src, CONSTANTS = m.code_with_constants
eic = self.getExportImportCopy(m)
src_eic, CONSTANTS_eic = eic.code_with_constants
self.assertEqual(src, src_eic)
self.assertEqual(CONSTANTS.c0, CONSTANTS_eic.c0)
def test_oneline_func(self):
def fn(x): return x # noqa: E704
self.checkScript(fn, (torch.ones(2, 2), ))
def test_request_bailout(self):
with enable_profiling_mode_for_profiling_tests():
def fct_loop(x):
for _ in range(3):
x = torch.cat((x, x), 0)
return x
x = torch.ones(2, 3, 4, dtype=torch.float32)
expected = fct_loop(x)
jitted = torch.jit.script(fct_loop)
# profile
jitted(x)
# optimize
jitted(x)
dstate = jitted.get_debug_state()
eplan = get_execution_plan(dstate)
num_bailouts = eplan.code.num_bailouts()
for i in range(num_bailouts):
eplan.code.request_bailout(i)
self.assertEqual(jitted(x), expected)
@unittest.skip("bailouts are being deprecated")
def test_dominated_bailout(self):
with enable_profiling_mode_for_profiling_tests():
# functional dominated guard
@torch.jit.script
def foo(x):
dim = x.dim()
if dim == 0:
y = int(x)
else:
y = x.size()[dim - 1]
return y
x = torch.zeros(2)
self.assertEqual(foo(x), 2)
self.assertEqual(foo(x), 2)
g = torch.jit.last_executed_optimized_graph()
g_s = str(g)
g_s = g_s[0:g_s.find("return")]
FileCheck().check_count("prim::BailOut[", 1, exactly=True).run(g_s)
# dominated guard of non-functional value
@torch.jit.script
def foo(x):
dim = x.dim()
x.add_(3)
if dim == 0:
return 0
else:
return x.size()[dim - 1]
x = torch.zeros(2)
self.assertEqual(foo(x), 2)
self.assertEqual(foo(x), 2)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("prim::BailOut[").check("aten::add_").check_next("prim::BailOut[").check("return").run(g)
with torch.enable_grad():
@torch.jit.ignore
def disable_grad():
torch.set_grad_enabled(False)
@torch.jit.ignore
def enable_grad():
torch.set_grad_enabled(True)
@torch.jit.script
def foo(x):
x = x + 1
dim = x.dim()
disable_grad()
if dim == 0:
y = int(x)
else:
y = x.size()[dim - 1]
enable_grad()
return y
x = torch.zeros(2, requires_grad=True)
self.assertEqual(foo(x), 2)
self.assertEqual(foo(x), 2)
g = torch.jit.last_executed_optimized_graph()
# there should still be a Bailout after disable_grad call
FileCheck().check("disable_grad").check("BailOut[").check("BailoutTemplate").run(g)
@skipIfTorchDynamo("Torchdynamo cannot correctly handle profiler.profile calls")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "skip if profiling isn't enabled")
def test_profiling_merge(self):
@torch.jit.script
def test_not_const(x):
if x.size(0) == 1:
return 1
else:
return 2
with enable_profiling_mode_for_profiling_tests():
with num_profiled_runs(2):
test_not_const(torch.rand([1, 2]))
test_not_const(torch.rand([2, 2]))
graph_str = torch.jit.last_executed_optimized_graph()
FileCheck().check("profiled_type=Float(*, 2, strides=[2, 1], requires_grad=0, device=cpu").run(graph_str)
FileCheck().check_not("profiled_type=Float(1, 2, strides=[2, 1], requires_grad=0, device=cpu").run(graph_str)
def test_nested_bailouts(self):
@torch.jit.script
def fct_loop(x):
for _ in range(3):
x = torch.cat((x, x), 0)
return x
x = torch.ones(2, 3, 4, dtype=torch.float32)
out = fct_loop(x)
jit_trace = torch.jit.trace(fct_loop, x)
out_trace = jit_trace(x)
def test_no_self_arg_ignore_function(self):
class MyModule(nn.Module):
@torch.jit.ignore # noqa: B902
def call_np(): # noqa: B902
# type: () -> int
return np.random.choice(2, p=[.95, .05])
def forward(self):
return self.call_np()
with self.assertRaisesRegex(Exception, "does not have a self argument"):
torch.jit.script(MyModule())
def test_loop_liveness(self):
with enable_profiling_mode_for_profiling_tests():
@torch.jit.script
def f(i):
# type: (int) -> Tensor
l = []
for n in [2, 1]:
l.append(torch.zeros(n, i))
return l[0]
f(2)
f(1)
def test_bailout_loop_carried_deps_name_clash(self):
with enable_profiling_mode_for_profiling_tests():
NUM_ITERATIONS = 10
@torch.jit.script
def fct_loop(z, size):
# type: (int, int) -> Tuple[Tensor, List[int]]
counters = torch.jit.annotate(List[int], [])
j = 0
y = torch.ones(2)
for i in range(size):
counters.append(i + j)
y = torch.cat((y, torch.ones(z)), 0)
j = j + 1
return y, counters
inputs = [1, 2, 3, 4]
expected = [x * 2 for x in range(NUM_ITERATIONS)]
for inp in inputs:
results = fct_loop(inp, NUM_ITERATIONS)
self.assertEqual(results[1], expected)
def test_bailout_loop_counter_transition(self):
with enable_profiling_mode_for_profiling_tests():
NUM_ITERATIONS = 10
@torch.jit.script
def fct_loop(z, size):
# type: (int, int) -> Tuple[Tensor, List[int]]
counters = torch.jit.annotate(List[int], [])
y = torch.ones(2)
for i in range(size):
counters.append(i)
y = torch.cat((y, torch.ones(z)), 0)
return y, counters
inputs = [1, 2, 3, 4]
expected = list(range(NUM_ITERATIONS))
for inp in inputs:
results = fct_loop(inp, NUM_ITERATIONS)
self.assertEqual(results[1], expected)
def test_ignored_method_binding(self):
class Bar(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x : int = 0
@torch.jit.export
def setx(self, x : int):
self.x = x
@torch.jit.export
def getx(self):
return self.x
@torch.jit.ignore
def ignored_getx(self):
return self.x
b = Bar()
b.setx(123)
sb = torch.jit.script(b)
self.assertEqual(sb.getx(), 123)
self.assertEqual(sb.ignored_getx(), 123)
sb.setx(456)
self.assertEqual(sb.getx(), 456)
self.assertEqual(sb.ignored_getx(), 456)
def test_set_attribute_through_optional(self):
class A(torch.nn.Module):
__annotations__ = {"x": Optional[torch.Tensor]}
def __init__(self) -> None:
super().__init__()
self.x = None
@torch.jit.ignore
def foo(self):
if self.x is None:
self.x = torch.tensor([3])
return self.x
def forward(self, x):
a = self.foo()
return x + 1
m = torch.jit.script(A())
self.assertEqual(m.x, None)
m(torch.rand(1))
self.assertEqual(m.x, torch.tensor([3]))
def test_mutate_constant(self):
class M(torch.jit.ScriptModule):
__constants__ = ["foo"]
def __init__(self, foo):
super().__init__()
self.foo = foo
m = M(5)
# m has a constant attribute, but we can't
# assign to it
with self.assertRaises(RuntimeError):
m.foo = 6
def test_class_attribute(self):
class M(torch.jit.ScriptModule):
FOO = 0
def __init__(self) -> None:
super().__init__()
self.foo = self.FOO
m = M()
self.assertEqual(m.foo, M.FOO)
def test_class_attribute_in_script(self):
class M(torch.jit.ScriptModule):
FOO = 0
@torch.jit.script_method
def forward(self):
return self.FOO
with self.assertRaises(RuntimeError):
M()
def test_not_initialized_err(self):
class M(torch.jit.ScriptModule):
def __init__(self) -> None:
self.foo = torch.rand(2, 3)
with self.assertRaises(RuntimeError):
M()
def test_attribute_in_init(self):
class M(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.foo = torch.jit.Attribute(0.1, float)
# we should be able to use self.foo as a float here
assert 0.0 < self.foo
M()
def test_scriptable_fn_as_attr(self):
class M(torch.nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
m = M(torch.sigmoid)
inp = torch.rand(2, 3)
self.checkModule(m, (inp, ))
def test_sequence_parsing(self):
tests = [
("return [x, x,]", True),
("return [x x]", "expected ]"),
("return x, x,", True),
("return bar(x, x,)", True),
("return bar()", "Argument x not provided"),
("for a, b, in x, x,:\n pass", "List of iterables"),
("a, b, = x, x,\n return a + b", True)
]
for exp, result in tests:
cu = torch.jit.CompilationUnit()
full = f"""
def bar(x, y):
return x + y
def foo(x):
{exp}
"""
if isinstance(result, str):
with self.assertRaisesRegex(RuntimeError, result):
cu.define(full)
else:
cu.define(full)
def test_namedtuple_python(self):
global MyTuple, MyMod # see [local resolution in python]
MyTuple = namedtuple('MyTuple', ['a'])
@torch.jit.unused
def fn():
# type: () -> MyTuple
return MyTuple(1)
# Only check compilation
@torch.jit.script
def fn2():
# type: () -> MyTuple
return fn()
FileCheck().check("NamedTuple").run(fn2.graph)
class MyMod(torch.nn.Module):
@torch.jit.unused
def fn(self):
# type: () -> MyTuple
return MyTuple(1)
def forward(self, x):
if 1 == 1:
return MyTuple(torch.rand(2, 3))
else:
return self.fn()
# shouldn't throw a type error
torch.jit.script(MyMod())
def test_unused_decorator(self):
class MyMod(torch.nn.Module):
@torch.jit.unused
@torch.no_grad()
def fn(self, x):
# type: (Tensor) -> int
return next(x) # invalid, but should be ignored
def forward(self, x):
return self.fn(x)
torch.jit.script(MyMod())
@_inline_everything
def test_lazy_script(self):
def untraceable(x):
if x.ndim > 2:
print("hello")
else:
print("goodbye")
return x + 2
# Non-working example
def fn(x):
return untraceable(x)
with self.capture_stdout():
traced_bad = torch.jit.trace(fn, [torch.ones(2, 2)])
FileCheck().check_not("goodbye").check_not("hello").run(traced_bad.graph)
# Working example
untraceable = torch.jit.script_if_tracing(untraceable)
def fn2(x):
return untraceable(x)
with self.capture_stdout():
traced = torch.jit.trace(fn, [torch.ones(2, 2)])
FileCheck().check("goodbye").run(traced.graph)
def foo(x: int):
return x + 1
@torch.jit.script_if_tracing
def fee(x: int = 2):
return foo(1) + x
# test directly compiling function
fee_compiled = torch.jit.script(fee)
self.assertEqual(fee_compiled(), fee())
# test compiling it within another function
@torch.jit.script
def hum():
return fee(x=3)
self.assertEqual(hum(), 5)
def test_big_int_literals(self):
def ok():
# signed 64 bit max
a = 9223372036854775807
return a
def toobig():
a = 9223372036854775808
return a
def waytoobig():
a = 99999999999999999999
return a
self.checkScript(ok, [])
with self.assertRaisesRegex(RuntimeError, "out of range"):
torch.jit.script(toobig)
with self.assertRaisesRegex(RuntimeError, "out of range"):
torch.jit.script(waytoobig)
def test_hex_literals(self):
def test1():
return 0xaaaaaa
def test2():
return 0xaaaaaa
def test3():
return -0xaaaaaa
self.checkScript(test1, [])
self.checkScript(test2, [])
self.checkScript(test3, [])
def ok():
a = 0x7FFFFFFFFFFFFFFF
return a
def toobig():
a = 0xFFFFFFFFFFFFFFFF
return a
def waytoobig():
a = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
return a
self.checkScript(ok, [])
with self.assertRaisesRegex(RuntimeError, "out of range"):
torch.jit.script(toobig)
with self.assertRaisesRegex(RuntimeError, "out of range"):
torch.jit.script(waytoobig)
def test_big_float_literals(self):
def ok():
# Python interprets this as inf
a = 1.2E400
return a
def check(fn):
self.assertTrue(fn() == ok())
# checkScript doesn't work since assertEqual doesn't consider
# `inf` == `inf`
check(torch.jit.script(ok))
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(ok)))
check(cu.ok)
def _test_device_type(self, dest):
def fn(x):
# type: (Device) -> Tuple[str, Optional[int]]
return x.type, x.index
device = torch.ones(2).to(dest).device
self.checkScript(fn, [device])
def test_device_type(self):
self._test_device_type('cpu')
@unittest.skipIf(not RUN_CUDA, "Requires CUDA")
def test_device_type_cuda(self):
self._test_device_type('cuda')
def test_string_device_implicit_conversion(self):
@torch.jit.script
def fn(x: torch.device):
return x
self.assertEqual(fn("cpu"), torch.device("cpu"))
with self.assertRaisesRegex(RuntimeError, "Expected one of"):
fn("invalid_device")
def test_eval_python(self):
def _test(m):
self.assertTrue(m(torch.ones(2, 2)))
self.assertTrue(m.training)
self.assertTrue(m._c.getattr('training'))
m.eval()
self.assertFalse(m.training)
self.assertFalse(m._c.getattr('training'))
self.assertFalse(m(torch.ones(2, 2)))
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
loaded = torch.jit.load(buffer)
self.assertFalse(loaded.training)
self.assertFalse(loaded._c.getattr('training'))
class M(nn.Module):
def forward(self, x):
return self.training
class OldM(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return self.training
_test(torch.jit.script(M()))
_test(OldM())
def test_inherit_method(self):
class A(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x + self.bar(x)
class B(A):
@torch.jit.script_method
def bar(self, x):
return x * x
with self.assertRaisesRegex(RuntimeError, 'attribute'):
A() # cannot use because bar is not defined
v = torch.rand(3, 4)
b = B()
self.assertEqual(b(v), v + v * v)
class C(torch.jit.ScriptModule):
@torch.jit.script_method
def bar(self, x):
return x
class D(C, B):
def __init__(self) -> None:
super().__init__()
self.assertEqual(D()(v), v + v)
def test_tensor_subclasses(self):
def check_subclass(x, tensor):
template = dedent("""
def func(input: {}) -> {}:
return torch.zeros((input.shape[0], 1), dtype=input.dtype)
""")
self._check_code(template.format(x, x), "func", [tensor])
check_subclass("torch.LongTensor", torch.LongTensor([[1, 2], [3, 4]]))
check_subclass("torch.DoubleTensor", torch.DoubleTensor([[1.2, 2.3], [3.4, 4.5]]))
check_subclass("torch.IntTensor", torch.IntTensor([[1, 2], [3, 4]]))
check_subclass("torch.BoolTensor", torch.BoolTensor([[False, True], [True, False]]))
def check_subclass_warn(input: torch.LongTensor) -> torch.LongTensor:
return torch.zeros((input.shape[0], 1), dtype=input.dtype)
with warnings.catch_warnings(record=True) as warns:
scripted = torch.jit.script(check_subclass_warn)
FileCheck().check("TorchScript will treat type annotations of Tensor").run(str(warns[0]))
def test_first_class_module(self):
class Foo(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.foo = nn.Parameter(torch.rand(3, 4))
@torch.jit.script_method
def forward(self, input):
self.foo = input
return self.foo
foo = Foo()
input = torch.rand(3, 4)
foo.forward(input)
self.assertEqual(input, foo.foo)
@_tmp_donotuse_dont_inline_everything
def test_first_class_calls(self):
@torch.jit.script
class Foo:
def __init__(self, x):
self.bar = x
def stuff(self, x):
return self.bar + x
@torch.jit.script
def foo(x):
return x * x + Foo(x).stuff(2 * x)
@torch.jit.script
def bar(x):
return foo(x) * foo(x)
x = torch.rand(3, 4)
self.assertEqual(bar(x), (x * x + 3 * x) * (x * x + 3 * x))
def test_static_methods(self):
class M(nn.Module):
@staticmethod
def my_method(x):
return x + 100
def forward(self, x):
return x + M.my_method(x)
class N(nn.Module):
@staticmethod
def my_method(x):
return x * 100
def forward(self, x):
return x - M.my_method(x) + N.my_method(x)
self.checkModule(M(), (torch.ones(2, 2),))
self.checkModule(N(), (torch.ones(2, 2),))
def test_invalid_prefix_annotation(self):
with self.assertRaisesRegex(RuntimeError, "annotation prefix in line"):
with self.capture_stdout() as captured:
@torch.jit.script
def invalid_prefix_annotation1(a):
#type: (Int) -> Int # noqa: E265
return a + 2
with self.assertRaisesRegex(RuntimeError, "annotation prefix in line"):
with self.capture_stdout() as captured:
@torch.jit.script
def invalid_prefix_annotation2(a):
#type : (Int) -> Int # noqa: E265
return a + 2
with self.assertRaisesRegex(RuntimeError, "annotation prefix in line"):
with self.capture_stdout() as captured:
@torch.jit.script
def invalid_prefix_annotation3(a):
# type: (Int) -> Int
return a + 2
def test_builtin_function_attributes(self):
class Add(nn.Module):
def __init__(self) -> None:
super().__init__()
self.add = torch.add
def forward(self, input):
return self.add(input, input)
self.checkModule(Add(), [torch.randn(2, 2)])
def test_pybind_type_comparisons(self):
@torch.jit.script
def f():
return None
node = list(f.graph.nodes())[0]
t = node.outputsAt(0).type()
self.assertIsNotNone(t)
@unittest.skipIf(IS_WINDOWS, 'TODO: need to fix the test case')
def test_unmatched_type_annotation(self):
message1 = re.escape("Number of type annotations (2) did not match the number of function parameters (1):")
message2 = 'def invalid2\\(a\\):\n\\s*~+\\.*\\s+<--- HERE\n\\s+# type: \\(Int, Int\\) -> Int\n\\s+return a \\+ 2'
message3 = 'def invalid4\\(a\\):\n\\s*~+\\.*\\s+<--- HERE\n\\s+# type: \\(Int, Int\\) -> Int\n\\s+return a \\+ 2'
with self.assertRaisesRegex(RuntimeError, message1):
@torch.jit.script
def invalid1(a):
# type: (Int, Int) -> Int
return a + 2
with self.assertRaisesRegex(RuntimeError, message2):
@torch.jit.script
def invalid2(a):
# type: (Int, Int) -> Int
return a + 2
with self.assertRaisesRegex(RuntimeError, message1):
def invalid3(a):
# type: (Int, Int) -> Int
return a + 2
torch.jit.script(invalid3)
with self.assertRaisesRegex(RuntimeError, message3):
def invalid4(a):
# type: (Int, Int) -> Int
return a + 2
torch.jit.script(invalid4)
def test_calls_in_type_annotations(self):
with self.assertRaisesRegex(RuntimeError, "Type annotation should not contain calls"):
def spooky(a):
# type: print("Hello") -> Tensor # noqa: F723
return a + 2
print(torch.__file__)
torch.jit.annotations.get_signature(spooky, None, 1, True)
def test_is_optional(self):
ann = Union[List[int], List[float]]
torch._jit_internal.is_optional(ann)
def test_interpreter_fuzz(self):
import builtins
# This test generates random tree-like programs to fuzz test
# that the interpreter does not have a bug in its stack manipulation
# code. An assert in that code ensures individual operators are
# not reordered.
templates = [
"torch.rand(3, 4)",
"({} + {})",
"-{}",
"({} * {})",
"torch.tanh({})",
"VAR {}",
]
def gen_code():
src_lines = ['def f():']
exprs = []
n_variables = 0
def get_expr(idx):
elem = exprs[idx]
exprs[idx] = exprs[-1]
exprs.pop()
return elem
def select_expr_or_var():
idx = random.randrange(0, len(exprs) + n_variables)
if idx < len(exprs):
return get_expr(idx)
else:
return f'v{idx - len(exprs)}'
for _ in range(50):
n = None
while n is None or n > len(exprs) + n_variables:
template = random.choice(templates)
n = template.count('{}')
if 'VAR' in template:
src_lines.append(f' v{n_variables} = {select_expr_or_var()}')
n_variables += 1
else:
exprs.append(template.format(*(select_expr_or_var() for _ in range(n))))
src_lines.append(' return ({})\n'.format(''.join(f'v{i},' for i in range(n_variables))))
return '\n'.join(src_lines)
for _ in range(100):
g = {'torch': torch}
code = gen_code()
builtins.exec(code, g, None)
cu = torch.jit.CompilationUnit(code)
with freeze_rng_state():
o1 = g['f']()
with freeze_rng_state():
o2 = cu.f()
self.assertEqual(o1, o2)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_cpp_module_iterator(self):
a = nn.Module()
a.name = 'a'
a.p = nn.Parameter(torch.rand(3, 4))
a.foo = nn.Module()
a.foo.name = 'foo'
a.foo.b = nn.Buffer(torch.rand(1, 1))
a.foo.bar = nn.Module()
a.foo.bar.name = 'bar'
a.foo.bar.an_int = 4
a.another = nn.Module()
a.another.name = 'another'
sa = torch.jit.script(a)
result = torch._C._jit_debug_module_iterators(sa._c)
def replace(e):
if e is a.p:
return 'P'
elif e is a.foo.b:
return 'B'
elif isinstance(e, torch._C.ScriptModule):
return e.getattr('name')
return e
for v in result.values():
for i in range(len(v)):
if isinstance(v[i], tuple):
n, v2 = v[i]
v[i] = (n, replace(v2))
else:
v[i] = replace(v[i])
# module type creation is not deterministic, so we have to sort
# the result
v.sort()
expected = {'buffers': [],
'buffers_r': ['B'],
'children': ['another', 'foo'],
'modules': ['a', 'another', 'bar', 'foo'],
'named_attributes': [('_is_full_backward_hook', None),
('another', 'another'),
('foo', 'foo'),
('name', 'a'),
('p', 'P'),
('training', True)],
'named_attributes_r': [('_is_full_backward_hook', None),
('another', 'another'),
('another._is_full_backward_hook', None),
('another.name', 'another'),
('another.training', True),
('foo', 'foo'),
('foo._is_full_backward_hook', None),
('foo.b', 'B'),
('foo.bar', 'bar'),
('foo.bar._is_full_backward_hook', None),
('foo.bar.an_int', 4),
('foo.bar.name', 'bar'),
('foo.bar.training', True),
('foo.name', 'foo'),
('foo.training', True),
('name', 'a'),
('p', 'P'),
('training', True)],
'named_buffers': [],
'named_buffers_r': [('foo.b', 'B')],
'named_children': [('another', 'another'), ('foo', 'foo')],
'named_modules': [('', 'a'),
('another', 'another'),
('foo', 'foo'),
('foo.bar', 'bar')],
'named_parameters': [('p', 'P')],
'named_parameters_r': [('p', 'P')],
'parameters': ['P'],
'parameters_r': ['P']}
self.assertEqual(expected, result)
def test_parameter_order(self):
    """Parameters of a scripted module iterate in registration order.

    Registers one parameter per ASCII letter, with values encoding the
    registration index, and checks eager and scripted iteration agree.
    Fix: removed two leftover debug ``print`` calls that spammed test
    output and served no assertion purpose.
    """
    m = nn.Module()
    for i, name in enumerate(string.ascii_letters):
        # value == registration index, so order mismatches are visible
        setattr(m, name, nn.Parameter(torch.tensor([float(i)])))
    ms = torch.jit.script(m)
    self.assertEqual(list(m.parameters()), list(ms.parameters()))
def test_python_op_builtins(self):
@torch.jit.unused
def fn(x):
# type: (List[int]) -> int
return sum(x)
@torch.jit.script
def script_fn(x):
# type: (List[int]) -> int
return fn(x)
def test_submodule_twice(self):
@torch.jit.script
def foo(x):
return x * x
class What(torch.jit.ScriptModule):
def __init__(self, x):
super().__init__()
self.foo = x
a = What(foo)
c = What(foo)
def test_training_param(self):
    """``self.training`` is usable (twice) inside a script method, toggles
    with ``train(False)``, and is not serialized into the state dict."""
    class What(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            # type: (int) -> int
            if self.training:
                r = x
            else:
                r = x + 4
            # check double use of training
            if self.training:
                r = r + 1
            return r

    w = What()
    self.assertEqual(4, w(3))      # training mode: 3 + 1
    w.train(False)
    self.assertEqual(7, w(3))      # eval mode: 3 + 4
    self.assertFalse("training" in w.state_dict())
def test_class_as_attribute(self):
    """A TorchScript class instance held as a module attribute survives
    an export/import round trip."""
    @torch.jit.script
    class Foo321:
        def __init__(self) -> None:
            self.x = 3

    class FooBar1234(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.f = Foo321()

        def forward(self, x):
            return x + self.f.x

    scripted = torch.jit.script(FooBar1234())
    eic = self.getExportImportCopy(scripted)
    x = torch.rand(3, 4)
    self.assertEqual(scripted(x), eic(x))
def test_module_str(self):
    """str() of a scripted module's C++ handle identifies it as a
    ScriptObject and names the originating Python class."""
    class Foo(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    representation = str(torch.jit.script(Foo())._c)
    self.assertTrue(representation.startswith('ScriptObject'))
    self.assertTrue('__torch__.' in representation)
    self.assertTrue('.Foo' in representation)
def test_jitter_bug(self):
    """Regression test: a list whose type depends on a branch
    (literal vs. parameter) compiles and is callable from another
    scripted function."""
    @torch.jit.script
    def fn2(input, kernel_size):
        # type: (Tensor, List[int]) -> Tensor
        _stride = [2] if kernel_size[0] > 1 else kernel_size
        print(_stride, kernel_size)
        return input

    @torch.jit.script
    def fn(input):
        # type: (Tensor) -> Tensor
        return fn2(input, [1])
def test_parser_kwargonly(self):
    """A bare '*' in a string-compiled function makes following params
    keyword-only: the schema records it, and positional use fails."""
    cu = torch.jit.CompilationUnit('''
def foo(x, *, y) -> Tuple[Tensor, Tensor]:
    return x, x
def bar(x):
    return foo(x, y=x)
''')
    self.assertTrue('*' in str(cu.foo.schema))
    # Passing y positionally must be rejected at compile time.
    with self.assertRaisesRegex(RuntimeError, "not provided"):
        torch.jit.CompilationUnit('''
def foo(x, *, y) -> Tuple[Tensor, Tensor]:
    return x, x
def bar(x):
    return foo(x, x)
''')
def test_annoying_doubles(self):
    """Tricky float constants (pi, 0.1, +/-inf, a denormal, nan) round-trip
    bit-exactly through torch.jit.save / torch.jit.load."""
    mod = types.ModuleType("temp")
    mod.inf = float("inf")
    mod.ninf = float("-inf")
    mod.nan = float("nan")

    with torch._jit_internal._disable_emit_hooks():
        class Foo(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self):
                # includes a denormal double to stress serialization precision
                return math.pi, 0.1, mod.inf, mod.ninf, 2.225073858507201e-308, mod.nan

        foo = Foo()
        buffer = io.BytesIO()
        torch.jit.save(foo, buffer)
        buffer.seek(0)
        foo_loaded = torch.jit.load(buffer)

        r = foo()
        r2 = foo_loaded()
        # use precise assert, we are checking floating point details
        self.assertTrue(r[:-1] == r2[:-1])
        # nan != nan, so the last element is checked separately
        self.assertTrue(math.isnan(r[-1]) and math.isnan(r2[-1]))
def test_type_annotate(self):
    """torch.jit.annotate works for Tensor, List[int], float and
    Optional[Tensor] (None) annotations."""
    def foo(a):
        return torch.jit.annotate(torch.Tensor, a)

    self.checkScript(foo, (torch.rand(3),))

    def bar():
        a = torch.jit.annotate(List[int], [])
        for _ in range(10):
            a.append(4)
        return a

    self.checkScript(bar, ())

    def baz(a):
        return torch.jit.annotate(float, a)

    self.checkScript(baz, (torch.rand(()),))

    # test annotate none types
    def annotate_none():
        return torch.jit.annotate(Optional[torch.Tensor], None)

    self.checkScript(annotate_none, ())
def test_robust_op_resolution(self):
    """Builtin ops are resolved by the function object itself, not by the
    (deliberately misleading) local variable name bound to it."""
    neg = torch.add  # misleading name to make sure we resolve by function

    def wrapped(x):
        return neg(x, x)

    sample_args = (torch.rand(3),)
    self.checkScript(wrapped, sample_args)
def test_nested_aug_assign(self):
    """Augmented assignment on nested module attributes dispatches to
    __iadd__ (in-place) or __add__ (out-of-place), and list += / *= on an
    attribute works; results match eager execution.  A class with neither
    operator raises a compile error."""
    @torch.jit.script
    class SomeClass:
        def __init__(self) -> None:
            self.num = 99

        def __iadd__(self, x):
            # type: (int)
            self.num += x
            return self

        def __eq__(self, other):
            # type: (SomeClass) -> bool
            return self.num == other.num

    @torch.jit.script
    class SomeOutOfPlaceClass:
        def __init__(self) -> None:
            self.num = 99

        def __add__(self, x):
            # type: (int)
            self.num = x
            return self

        def __eq__(self, other):
            # type: (SomeClass) -> bool
            return self.num == other.num

    class Child(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.x = 2
            self.o = SomeClass()
            self.oop = SomeOutOfPlaceClass()
            self.list = [1, 2, 3]

    class A(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.child = Child()

        def forward(self):
            # every statement below is an aug-assign on a *nested* attribute
            self.child.x += 1
            self.child.o += 5
            self.child.oop += 5
            some_list = [1, 2]
            self.child.list += some_list
            self.child.list *= 2
            return self.child.x, self.child.o, self.child.list, self.child.oop

    a = A()
    sa = torch.jit.script(A())
    eager_result = a()
    script_result = sa()
    self.assertEqual(eager_result, script_result)
    self.assertEqual(a.child.x, sa.child.x)
    self.assertEqual(a.child.o, sa.child.o)
    self.assertEqual(a.child.list, sa.child.list)

    @torch.jit.script
    class SomeNonAddableClass:
        def __init__(self) -> None:
            self.num = 99

        def __eq__(self, other):
            # type: (SomeClass) -> bool
            return self.num == other.num

    # with self.assertRaisesRegex(RuntimeError, "")
    class A(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.x = SomeNonAddableClass()

        def forward(self):
            self.x += SomeNonAddableClass()
            return self.x

    # no __iadd__ and no __add__: aug-assign must be a compile error
    with self.assertRaisesRegex(RuntimeError, "Cannot emit inplace op"):
        torch.jit.script(A())
def test_var_aug_assign(self):
    """Augmented assignment on local variables: __iadd__ keeps identity,
    __add__ may rebind, lists and tensors stay the same object; a class
    with neither operator is a compile error."""
    @torch.jit.script
    class SomeNonAddableClass:
        def __init__(self) -> None:
            self.num = 99

        def __eq__(self, other):
            # type: (SomeNonAddableClass) -> bool
            return self.num == other.num

    with self.assertRaisesRegex(RuntimeError, "Cannot emit inplace op"):
        @torch.jit.script
        def fn():
            a = SomeNonAddableClass()
            a += SomeNonAddableClass()
            return a

    @torch.jit.script
    class SomeClass:
        def __init__(self) -> None:
            self.num = 99

        def __iadd__(self, x):
            # type: (int)
            self.num += x
            return self

        def __eq__(self, other):
            # type: (SomeClass) -> bool
            return self.num == other.num

    @torch.jit.script
    class SomeOutOfPlaceClass:
        def __init__(self) -> None:
            self.num = 99

        def __add__(self, x):
            # type: (int)
            self.num = x
            return self

        def __eq__(self, other):
            # type: (SomeClass) -> bool
            return self.num == other.num

    def fn2():
        a = SomeClass()
        a_copy = a
        a += 20
        assert a is a_copy      # __iadd__ must preserve identity
        b = SomeOutOfPlaceClass()
        b_copy = b
        b += 99
        assert b is b_copy      # this __add__ returns self, so identity holds
        c = [1, 2, 3]
        c_copy = c
        c *= 2
        assert c is c_copy      # list aug-assign mutates in place
        c += [4, 5, 6]
        d = torch.ones(2, 2)
        d_copy = d
        d += torch.ones(2, 2)
        assert d is d_copy      # tensor += is in-place
        return a, b, c, d

    self.checkScript(fn2, [])
def test_nested_list_construct(self):
    """Concatenation of nested list literals with differing inner lengths
    scripts correctly."""
    def build_nested():
        return [[4]] + [[4, 5]]

    self.checkScript(build_nested, ())
def test_file_line_error(self):
    """A compile error on an unknown op reports the correct file and line."""
    def foobar(xyz):
        return torch.blargh(xyz)  # intentionally nonexistent op

    _, lineno = inspect.getsourcelines(foobar)
    # error should point at the `return torch.blargh` line (def line + 1)
    with self.assertRaisesRegex(RuntimeError, f'test_jit.py", line {lineno + 1}'):
        scripted = torch.jit.script(foobar)
def test_file_line_error_class_defn(self):
    """A compile error inside a class method reports the correct line
    relative to the class definition."""
    class FooBar:
        def baz(self, xyz):
            return torch.blargh(xyz)  # intentionally nonexistent op

    _, lineno = inspect.getsourcelines(FooBar)
    # error should point at the method body (class line + 2)
    with self.assertRaisesRegex(RuntimeError, f'test_jit.py", line {lineno + 2}'):
        torch.jit.script(FooBar)
def test_file_line_graph(self):
    """Graph dumps carry file:line:col source annotations for each node."""
    def foobar(xyz):
        return torch.neg(xyz)

    scripted = torch.jit.script(foobar)

    _, lineno = inspect.getsourcelines(foobar)
    # the neg call sits on def line + 1, column 19 in this file's layout
    fc = FileCheck().check(f'test_jit.py:{lineno + 1}:19')
    fc.run(scripted.graph)
    fc.run(str(scripted.graph))
def test_file_line_save_load(self):
    """Source line info survives a full save/load round trip (not just the
    in-memory export/import shortcut)."""
    class Scripted(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, xyz):
            return torch.neg(xyz)

    scripted = Scripted()

    # NB: not using getExportImportCopy because that takes a different
    # code path that calls CompilationUnit._import rather than
    # going through the full save/load pathway
    buffer = scripted.save_to_buffer()
    bytesio = io.BytesIO(buffer)
    scripted = torch.jit.load(bytesio)

    _, lineno = inspect.getsourcelines(Scripted)
    fc = FileCheck().check(f':{lineno + 3}')
    fc.run(scripted.graph)
    fc.run(str(scripted.graph))
def test_file_line_string(self):
    """String-compiled functions report '<string>' with line:col info."""
    scripted = torch.jit.CompilationUnit('''
def foo(xyz):
    return torch.neg(xyz)
''')

    # line 3 (after the leading blank line and the def), column 11
    fc = FileCheck().check('<string>:3:11')
    fc.run(scripted.foo.graph)
    fc.run(str(scripted.foo.graph))
@skipIfCrossRef
def test_file_line_trace(self):
    """Traced (not scripted) graphs also carry file:line source info."""
    def foobar(xyz):
        return torch.neg(xyz)

    scripted = torch.jit.trace(foobar, (torch.rand(3, 4)))

    _, lineno = inspect.getsourcelines(foobar)
    fc = FileCheck().check(f'test_jit.py:{lineno + 1}:0')
    fc.run(scripted.graph)
    fc.run(str(scripted.graph))
def test_serialized_source_ranges(self):
    """Runtime errors in an export/import copy still point at the original
    source file and line."""
    class FooTest(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, w):
            return torch.mm(x, w.t())

    ft = FooTest()
    loaded = self.getExportImportCopy(ft)
    _, lineno = inspect.getsourcelines(FooTest)

    # shape mismatch triggers an error attributed to the mm call (class + 3)
    with self.assertRaisesRegex(RuntimeError, f'test_jit.py", line {lineno + 3}'):
        loaded(torch.rand(3, 4), torch.rand(30, 40))
def test_serialized_source_ranges_graph(self):
    """Graph dumps of an export/import copy retain original file:line info."""
    class FooTest3(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, w):
            return torch.mm(x, w.t())

    ft = FooTest3()
    loaded = self.getExportImportCopy(ft)
    _, lineno = inspect.getsourcelines(FooTest3)

    fc = FileCheck().check(f'test_jit.py:{lineno + 3}')
    fc.run(loaded.graph)
def test_serialized_source_ranges2(self):
    """A scripted `raise` in an export/import copy reports the original
    file and line via torch.jit.Error."""
    class FooTest2(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self):
            raise RuntimeError('foo')

    _, lineno = inspect.getsourcelines(FooTest2)

    with self.assertRaisesRegex(torch.jit.Error, f'test_jit.py", line {lineno + 3}'):
        ft = FooTest2()
        loaded = self.getExportImportCopy(ft)
        loaded()
def test_serialized_source_ranges_dont_jitter(self):
    """Debug (source-range) records must be byte-stable across repeated
    save/load round trips.

    Fix: the inner helper previously serialized the outer ``ft3`` instead
    of its ``mod`` argument, so all three record sets came from the exact
    same module and the jitter comparison was vacuous.  It now saves the
    module that was passed in.
    """
    class FooTest3(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, lim):
            first = 1
            second = 1
            i = 1
            somenum = 5
            dontmutateme = 3
            third = 0
            while bool(i < lim):
                third = first + second
                first = second
                second = third
                j = 0
                while j < 10:
                    somenum = somenum * 2
                    j = j + 1
                i = i + j
                i = i + dontmutateme

            st = second + third
            fs = first + second
            return third, st, fs

    ft3 = FooTest3()

    def debug_records_from_mod(self, mod):
        # Serialize the module passed in (NOT ft3 from the enclosing scope)
        # so each round trip is actually exercised.
        buffer = io.BytesIO()
        torch.jit.save(mod, buffer)
        buffer.seek(0)
        archive = zipfile.ZipFile(buffer)
        files = filter(lambda x: x.startswith('archive/code/'), archive.namelist())
        debug_files = list(filter(lambda f: f.endswith('.debug_pkl'), files))
        self.assertEqual(len(debug_files), 1)
        debug_file = archive.open(debug_files[0])
        return pickle.load(debug_file), buffer

    records1, buffer = debug_records_from_mod(self, ft3)

    buffer.seek(0)
    loaded = torch.jit.load(buffer)
    records2, buffer = debug_records_from_mod(self, loaded)

    buffer.seek(0)
    loaded2 = torch.jit.load(buffer)
    records3, _ = debug_records_from_mod(self, loaded2)

    self.assertEqual(records1, records2)
    self.assertEqual(records2, records3)
def test_serialized_source_ranges_no_dups(self):
    """Adjacent entries in a serialized debug_pkl must not contain
    duplicate source ranges.

    Fix: the inner helper previously serialized the outer ``ft3`` instead
    of its ``mod`` argument; it now saves the module passed in.
    """
    class FooTest3(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, lim):
            first = 1
            second = 1
            i = 1
            somenum = 5
            dontmutateme = 3
            third = 0
            while bool(i < lim):
                third = first + second
                first = second
                second = third
                j = 0
                while j < 10:
                    somenum = somenum * 2
                    j = j + 1
                i = i + j
                i = i + dontmutateme

            st = second + third
            fs = first + second
            return third, st, fs

    ft3 = FooTest3()

    def debug_records_from_mod(mod):
        # Serialize the module passed in (NOT ft3 from the enclosing scope).
        buffer = io.BytesIO()
        torch.jit.save(mod, buffer)
        buffer.seek(0)
        archive = zipfile.ZipFile(buffer)
        files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
        debug_files = filter(lambda f: f.endswith('.debug_pkl'), files)
        debug_files = (archive.open(f) for f in debug_files)
        debug_files = (pickle.load(f) for f in debug_files)
        # each record is (offset, source_range_tag, source_range); index 2
        # of the pickled payload holds the record list
        debug_files = (f[2] for f in debug_files)
        return list(debug_files)

    debug_files = debug_records_from_mod(ft3)
    for debug_file in debug_files:
        for i in range(len(debug_file) - 1):
            # only the source_range fields matter for the duplicate check
            offset, source_range_tag, source_range = debug_file[i]
            offset2, source_range_tag2, source_range2 = debug_file[i + 1]
            self.assertNotEqual(source_range, source_range2)
def test_circular_dependency(self):
    """
    Export/import of nested containers (Sequential wrapping ModuleList)
    must not hit circular type dependencies.
    https://github.com/pytorch/pytorch/issues/25871
    """
    class A(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return x

    class B(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.foo = torch.nn.ModuleList([A()])

        @torch.jit.script_method
        def forward(self, x):
            for f in self.foo:
                x = f(x)
            return x

    class C(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.foo = torch.nn.Sequential(B())

        @torch.jit.script_method
        def forward(self, x):
            for f in self.foo:
                x = f(x)
            return x
    self.getExportImportCopy(C())
def test_serialize_long_lines(self):
    """Serialization must not reorder expressions when splitting long
    lines: long_arg_name[1] has to stay before the argmax call."""
    class OrderModuleLong(torch.nn.Module):
        def forward(self, long_arg_name: List[torch.Tensor]):
            return [(long_arg_name[1],), (long_arg_name[0].argmax(),)]
    src = str(torch.jit.script(OrderModuleLong()).code)
    # make long_arg_name[1] does not get reordered after the argmax
    FileCheck().check("long_arg_name[1]").check("argmax").run(src)
def test_tensor_shape(self):
    """Tensor.shape is scriptable and matches eager execution."""
    def get_shape(t):
        return t.shape

    sample = torch.empty(34, 56, 78)
    self.checkScript(get_shape, (sample,))
def test_block_input_grad_in_loop(self):
    """A loop-carried value that becomes grad-requiring inside the loop
    must be marked requires_grad on the loop block's input."""
    x = torch.randn(3, 3, requires_grad=False)
    y = torch.randn(3, 3, requires_grad=True)

    def grad_in_loop(x, y):
        for i in range(100):
            x = y @ x
        return x

    scripted = torch.jit.script(grad_in_loop)
    outer = scripted.graph_for(x, y)
    loop = outer.findNode("prim::Loop")
    loop_block = next(loop.blocks())
    param_node = loop_block.paramNode()
    # output 0 is the trip count; output 1 is the carried x value
    x_value = list(param_node.outputs())[1]
    self.assertTrue(x_value.requires_grad())
def test_tensor_grad(self):
    """Tensor.requires_grad and Tensor.grad are scriptable and match eager
    for both grad-requiring and plain tensors."""
    x = torch.randn(3, 4, requires_grad=True)
    y = torch.randn(3, 4, requires_grad=False)

    def f_requires_grad(x):
        return x.requires_grad

    self.checkScript(f_requires_grad, (x,))
    self.checkScript(f_requires_grad, (y,))

    def f_grad(x):
        return x.grad

    # populate x.grad so the scripted accessor sees a real tensor
    x.sum().backward()
    self.checkScript(f_grad, (x,))
    self.checkScript(f_grad, (y,))
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "shape analysis is only enabled in Legacy")
def test_prim_grad_undefined(self):
    """Shape analysis leaves the undefinedness of prim::grad's output
    unknown (None) for a tensor with no grad populated."""
    x = torch.ones(2)

    def f_grad(x):
        return x.grad

    scripted = self.checkScript(f_grad, (x,))
    g = scripted.graph_for(x)

    prim_grad_node = g.findNode("prim::grad")
    self.assertTrue(next(prim_grad_node.outputs()).type().undefined() is None)
def test_tensor_data(self):
    """Tensor.data detaches: the scripted result matches eager and never
    requires grad, for grad-requiring and plain inputs alike.

    Fix: the final assertion previously re-checked ``scripted_x`` instead
    of ``scripted_y`` (copy-paste bug), leaving the no-grad path's
    requires_grad untested.
    """
    x = torch.randn(3, 4, requires_grad=True)
    y = torch.randn(4, 5)

    def f_data(x):
        return x.data

    scripted_f_data = torch.jit.script(f_data)

    scripted_x = scripted_f_data(x)
    self.assertEqual(scripted_x, f_data(x))
    self.assertEqual(scripted_x.requires_grad, False)

    scripted_y = scripted_f_data(y)
    self.assertEqual(scripted_y, f_data(y))
    self.assertEqual(scripted_y.requires_grad, False)
def test_tensor_dtype(self):
    """Tensor.dtype comparisons against torch dtype constants work in
    script and distinguish uint8 / long / float32 correctly."""
    x_byte = torch.empty(34, 56, 78, dtype=torch.uint8)
    x_long = torch.empty(34, 56, 78, dtype=torch.long)
    x_float32 = torch.empty(34, 56, 78, dtype=torch.float32)

    @torch.jit.script
    def byte(x):
        return x.dtype == torch.uint8

    @torch.jit.script
    def long(x):
        return x.dtype == torch.long

    @torch.jit.script
    def float32(x):
        return x.dtype == torch.float32

    # each predicate is true exactly for its own dtype
    self.assertTrue(byte(x_byte))
    self.assertFalse(byte(x_long))
    self.assertFalse(byte(x_float32))
    self.assertFalse(long(x_byte))
    self.assertTrue(long(x_long))
    self.assertFalse(long(x_float32))
    self.assertFalse(float32(x_byte))
    self.assertFalse(float32(x_long))
    self.assertTrue(float32(x_float32))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_device(self):
    """Tensor.device equality comparisons are scriptable (cpu vs cuda)."""
    cpu = torch.empty(34, 56, 78, device='cpu')
    gpu = torch.empty(34, 56, 78, device='cuda')

    @torch.jit.script
    def same_device(x, y):
        return x.device == y.device

    self.assertTrue(same_device(cpu, cpu))
    self.assertTrue(same_device(gpu, gpu))
    self.assertFalse(same_device(cpu, gpu))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_to_device(self):
    """Round-trip .to(device=...) with a string and a torch.device arg."""
    def roundtrip(x):
        return x.to(device="cuda").to(device=torch.device("cpu"))

    sample = torch.ones(3, 4)
    self.checkScript(roundtrip, (sample,))
def test_tensor_to_cpu(self):
    """Tensor.cpu() is scriptable and preserves the device of the result."""
    def to_cpu(x):
        return x.cpu()

    x = torch.ones(3, 4)
    script_fn = torch.jit.script(to_cpu)
    self.assertEqual(to_cpu(x).device, script_fn(x).device)
    self.checkScript(to_cpu, (x,))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_to_cuda(self):
    """Tensor.cuda() is scriptable and preserves the device of the result."""
    def to_cuda(x):
        return x.cuda()

    x = torch.ones(3, 4)
    script_fn = torch.jit.script(to_cuda)
    self.assertEqual(to_cuda(x).device, script_fn(x).device)
    self.checkScript(to_cuda, (x,))
def test_generic_list_errors(self):
    """Concatenating nested lists whose element types were already matched
    to a different type is a compile error."""
    with self.assertRaisesRegex(RuntimeError, "previously matched to type"):
        @torch.jit.script
        def mismatched(x):
            return [[x]] + [[1]]
def test_script_cu(self):
    """Smoke test: CompilationUnit compiles a trivial identity function."""
    cu = torch.jit.CompilationUnit('''
def foo(a):
    b = a
    return b
''')
    a = Variable(torch.rand(1))
    self.assertEqual(a, cu.foo(a))
# because the compilation unit ingests python strings
# to use an escape sequence escape the backslash (\\n = \n)
def test_string_cu(self):
    """String literals with escapes and line continuations survive
    compilation through a CompilationUnit."""
    cu = torch.jit.CompilationUnit('''
def foo(a):
    print(a, """a\\n\tb\\n""", 2, "a\
a")
    return a
''')
    # the line continuation yields "aa"; the escaped \n stays literal
    FileCheck().check("aa").check("a\\n\\tb\\n").run(str(cu.foo.graph))
def test_function_compilation_caching(self):
    """Scripting the same function twice reuses the compiled graph; a new
    function with the same qualified name is compiled fresh; caching holds
    only a weak reference to the Python function.

    Fix: the refcount check used ``assertTrue(a, b)``, where the second
    argument is a *message*, so it always passed.  It now uses
    ``assertEqual`` as the comment intends.
    """
    def fun():
        return 1 + 2
    fun_compiled = torch.jit.script(fun)
    # python wrapper around the script function is a different pointer,
    # but the underlying script function graph is the same
    self.assertIs(fun_compiled.graph, torch.jit.script(fun).graph)

    def fun():
        return 3 + 4
    num_ref_counts = sys.getrefcount(fun)

    # caching doesn't get tripped up by same qualname
    fun_compiled_2 = torch.jit.script(fun)
    self.assertIsNot(fun_compiled, fun_compiled_2)
    self.assertEqual(fun_compiled_2(), 7)

    # caching doesn't increase refcounts to function (holds weak reference)
    self.assertEqual(sys.getrefcount(fun), num_ref_counts)
def test_string_ops(self):
    """String concatenation and (in)equality comparisons in TorchScript."""
    def string_ops():
        a = "a" + "b"
        return a + a, "ab" == "b", "ab" != "b", "ab" == "ab", "ab" != "ab"

    self.checkScript(string_ops, ())
def test_string_sorted(self):
    """sorted() on List[str] lowers to a single aten::sorted node and
    matches eager results."""
    def foo(strs: List[str]):
        return sorted(strs)

    # graph should be exactly: one aten::sorted, then return
    FileCheck() \
        .check("graph") \
        .check_next("str[] = aten::sorted") \
        .check_next("return") \
        .run(str(torch.jit.script(foo).graph))

    inputs = ["str3", "str2", "str1"]
    self.checkScript(foo, (inputs,))
def test_string_sort(self):
    """In-place list.sort() on List[str] matches eager."""
    def sort_strings(strs: List[str]):
        strs.sort()
        return strs

    sample = ["str3", "str2", "str1"]
    self.checkScript(sort_strings, (sample,))
def test_tuple_sorted(self):
    """sorted() over a list of int tuples matches eager."""
    def sorted_tuples(tups: List[Tuple[int, int]]):
        return sorted(tups)

    sample = [(1, 2), (0, 2), (1, 3)]
    self.checkScript(sorted_tuples, (sample,))
def test_tuple_sort(self):
    """In-place list.sort() on tuples matches eager."""
    def sort_tuples(tups: List[Tuple[int, int]]):
        tups.sort()
        return tups

    sample = [(1, 2), (0, 2), (1, 3)]
    self.checkScript(sort_tuples, (sample,))
def test_tuple_sort_reverse(self):
    """list.sort(reverse=True) on tuples matches eager."""
    def sort_tuples_desc(tups: List[Tuple[int, int]]):
        tups.sort(reverse=True)
        return tups

    sample = [(1, 2), (0, 2), (1, 3)]
    self.checkScript(sort_tuples_desc, (sample,))
def test_tuple_unsortable_element_type(self):
    """Sorting tuples whose elements (dicts) have no ordering raises a
    runtime error with a highlight on the sort call."""
    @torch.jit.script
    def foo():
        tups = [({1: 2}, {2: 3})]
        tups.sort()
        return tups

    with self.assertRaisesRegexWithHighlight(RuntimeError, "are not sortable", "tups.sort"):
        foo()
def test_tuple_unsortable_diff_type(self):
    """Sorting a heterogeneous List[Any] of differently-typed tuples
    raises at runtime with a highlight on the sort call."""
    @torch.jit.script
    def foo(inputs: List[Any]):
        inputs.sort()
        return inputs

    inputs = [(1, 2), ("foo", "bar")]
    with self.assertRaisesRegexWithHighlight(RuntimeError, "Only values of same type can be compared", "inputs.sort"):
        foo(inputs)
def test_tuple_nested_sort(self):
    """In-place sort of tuples containing nested (int, str) tuples
    matches eager."""
    def sort_nested(vals: List[Tuple[int, Tuple[int, str]]]):
        vals.sort()
        return vals

    sample = [(1, (2, "foo")), (1, (2, "bar")), (1, (0, "bar"))]
    self.checkScript(sort_nested, (sample,))
def test_tuple_unsortable_nested_diff_type(self):
    """Sorting tuples whose *nested* tuples differ in element type raises
    at runtime with a highlight on the sort call."""
    @torch.jit.script
    def foo(inputs: List[Any]):
        inputs.sort()
        return inputs

    inputs = [(1, (2, 3)), (2, ("foo", "bar"))]
    with self.assertRaisesRegexWithHighlight(RuntimeError, "Only values of same type can be compared", "inputs.sort"):
        foo(inputs)
def test_string_new_line(self):
    """A raw newline inside a single-quoted string literal is a lexer
    error in string-compiled source."""
    with self.assertRaisesRegex(RuntimeError, "expected a valid token*"):
        torch.jit.CompilationUnit('''
def test_while(a):
    print("
a")
    return a
''')
def test_string_single_escape(self):
    """A lone backslash escaping the closing quote is a lexer error in
    string-compiled source."""
    with self.assertRaisesRegex(RuntimeError, "expected a valid token*"):
        torch.jit.CompilationUnit('''
def test_while(a):
    print("\\")
    return a
''')
def test_script_annotation(self):
    """@torch.jit.script on a plain function produces eager-equal results."""
    @torch.jit.script
    def foo(a):
        return a + a + a

    value = Variable(torch.rand(2))
    self.assertEqual(value + value + value, foo(value))
def test_torch_pow(self):
    """pow() works in script across tensor, int and float operand
    combinations, including nested calls and .item() scalars."""
    def pow_tensors(a, b):
        return pow(a, b)

    def pow_nested(a, b, c, d):
        return pow(pow(c + a, b), d)

    def pow_int_float(a: int, b: float):
        # type: (int, float) -> float
        return pow(a, b)

    def pow_const():
        # type: () -> float
        return pow(2, -2)

    def pow_items(x, y):
        return pow(x.item(), y.item())

    def pow_ints(a: int, b: int):
        # type: (int, int) -> float
        return pow(a, b)

    a = torch.rand(1)
    b = torch.rand(1)
    c = torch.rand(1)
    d = torch.rand(1)
    self.checkScript(pow_tensors, (a, b))
    self.checkScript(pow_nested, (a, b, c, d))
    self.checkScript(pow_int_float, (4, -0.5))
    self.checkScript(pow_const, ())
    self.checkScript(pow_ints, (2, 4))

    inputs = [torch.tensor(2), torch.tensor(-2), torch.tensor(.5), torch.tensor(.2)]
    # skip negative bases up front (fractional exponents would be nan)
    for x in inputs:
        if x < 0:
            continue
        for y in inputs:
            self.checkScript(pow_items, (x, y))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_pow_scalar_backward_cuda(self):
    # see that scalar exponent works with cuda base (#19253)
    # Checks both Tensor ** float and float ** Tensor backward on CUDA,
    # for float and double dtypes, under the profiling executor.
    with enable_profiling_mode_for_profiling_tests():
        for dtype in [torch.float, torch.double]:
            @torch.jit.script
            def func(a, b):
                # type: (Tensor, float) -> Tensor
                return (a * 2) ** b

            a = torch.rand(1, requires_grad=True, device='cuda', dtype=dtype)
            func(a, 1, profile_and_replay=True).backward()

            @torch.jit.script
            def func(a, b):
                # type: (float, Tensor) -> Tensor
                return a ** (b * 2 + 1)

            a = torch.rand(1, requires_grad=True, device='cuda', dtype=dtype)
            # profiling an untaken branch to ensure we don't replay to avoid autodiff issues
            func(2, a, profile_and_replay=True).backward()
def _check_code(self, code_str, fn_name, inputs):
    """Compile `code_str` both with Python exec and with a CompilationUnit
    and assert the function `fn_name` agrees on `inputs`.

    NOTE: the CompilationUnit side always calls `cu.func`, so the string
    must define a function literally named `func`; `fn_name` selects the
    Python-side reference.
    """
    scope = {}
    exec(code_str, globals(), scope)
    cu = torch.jit.CompilationUnit(code_str)
    self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))
@unittest.skipIf(not RUN_CUDA, 'no CUDA')
def test_scriptmodule_releases_tensors_cuda(self):
    """Running a scripted function (forward and backward) must not leak
    CUDA tensors; warm-up runs precede the leak-checked runs."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def fn(x, y):
            return x.sigmoid() * y.tanh()

        def test(backward=False):
            x = torch.randn(3, 3, dtype=torch.double, device='cuda', requires_grad=True)
            y = torch.randn(3, 3, dtype=torch.double, device='cuda', requires_grad=True)
            out = fn(x, y, profile_and_replay=True)
            if backward:
                out.sum().backward()

        with self.assertLeaksNoCudaTensors():
            test()
            test()
            test()

        if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
            with self.assertLeaksNoCudaTensors():
                test(backward=True)
                test(backward=True)
                test(backward=True)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
def test_index(self):
    """Exhaustive indexing/slicing coverage for tensors, List[int] and str:
    basic and multi-dim slices, None (new-dim) indexing, dynamic indices,
    positive/negative strides, extreme step values, and the error cases
    for step overflow and zero step."""
    def consec(size, start=0):
        numel = torch.tensor(size).prod().item()
        return torch.arange(numel).view(size)

    def consec_list(size):
        return list(range(size))

    def random_string(size):
        letters = string.ascii_lowercase
        return "".join(random.choice(letters) for i in range(size))

    # Each helper templates the indexing expression into a tiny function
    # and compares exec'd-Python vs CompilationUnit results (_check_code).
    def check_indexing(indexing, tensor):
        template = dedent("""
        def func(x):
            return x{}
        """)

        self._check_code(template.format(indexing), "func", [tensor])

    def check_dynamic_indexing(indexing, tensor, value1, value2):
        value1 = torch.tensor(value1)
        value2 = torch.tensor(value2)

        template = dedent("""
        def func(x, value1, value2):
            i = int(value1)
            j = int(value2)
            return x{}
        """)

        self._check_code(template.format(indexing), "func", [tensor, value1, value2])

    # Torchscript assumes type Tensor by default, so we need this explicit
    # declaration.
    def check_indexing_list_int(indexing, list):
        template = dedent("""
        def func(x):
            # type: (List[int]) -> Any
            return x{}
        """)

        self._check_code(template.format(indexing), "func", [list])

    def check_indexing_str(indexing, str):
        template = dedent("""
        def func(x):
            # type: (str) -> Any
            return x{}
        """)

        self._check_code(template.format(indexing), "func", [str])

    # basic slices
    check_indexing('[0]', consec((3, 3)))
    check_indexing('[1]', consec((3, 3), 10))
    check_indexing('[2]', consec((3, 3), 19))
    check_indexing('[2]', consec((3,)))
    check_indexing('[-1]', consec((3, 3), 19))
    check_indexing('[0:2]', consec((3, 3, 3)))
    check_indexing('[1:-1]', consec((3, 3, 3)))
    check_indexing('[-3:-1]', consec((6, 3)))
    check_indexing('[1:]', consec((3, 3)))
    check_indexing('[:1]', consec((3, 3)))
    check_indexing('[:]', consec((3, 2)))

    # multi-dim: indexes
    check_indexing('[0, 1]', consec((3, 3)))
    check_indexing('[0, 1]', consec((3, 3, 2)))
    check_indexing('[1, 0, 2]', consec((3, 3, 3)))
    check_indexing('[2, -1]', consec((3, 3)))

    # multi-dim: mixed slicing and indexing
    check_indexing('[0, 1:2]', consec((3, 3)))
    check_indexing('[0, :1]', consec((3, 3, 2)))
    check_indexing('[1, 2:]', consec((3, 3, 3)))
    check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
    check_indexing('[1:, -1, 0]', consec((3, 3, 3, 3)))
    check_indexing('[-1, 2:, 1:2]', consec((3, 3, 3, 3)))
    check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
    check_indexing('[-1, :, 0, 2]', consec((3, 3, 3, 3)))

    # zero-sized slices
    check_indexing('[0:0]', consec((2, 2)))
    check_indexing('[0:0, 1]', consec((3, 3)))

    # trivial expression usage
    check_indexing('[1+1]', consec((3, 3)))
    check_indexing('[1:(0 + 2)]', consec((3, 3, 3)))

    # None for new dimensions
    check_indexing('[None, 0]', consec((3, 3)))
    check_indexing('[1, None]', consec((3, 3), 10))
    check_indexing('[None, None, 2]', consec((3, 3), 19))
    check_indexing('[None, 2, None]', consec((3,)))
    check_indexing('[0:2, None]', consec((3, 3, 3)))
    check_indexing('[None, 1:-1]', consec((3, 3, 3)))
    check_indexing('[None, -3:-1, None]', consec((6, 3)))
    check_indexing('[-1, None, 2:, None, 1:2]', consec((3, 3, 3, 3)))
    check_indexing('[None, -1, None, 2:, None, 1:2, None]', consec((3, 3, 3, 3)))

    # dynamic expression usage
    check_dynamic_indexing("[i + j]", consec((3, 3)), 0, 1)
    check_dynamic_indexing("[i:j, i]", consec((3, 3, 2)), 0, 2)

    # positive striding
    check_indexing_list_int('[0]', consec_list(6))
    check_indexing_list_int('[1]', consec_list(7))
    check_indexing_list_int('[2]', consec_list(8))
    check_indexing_list_int('[2]', consec_list(9))
    check_indexing_list_int('[-1]', consec_list(10))
    check_indexing_list_int('[0:2]', consec_list(11))
    check_indexing_list_int('[1:-1]', consec_list(12))
    check_indexing_list_int('[-3:-1]', consec_list(13))
    check_indexing_list_int('[1:]', consec_list(15))
    check_indexing_list_int('[:1]', consec_list(16))
    check_indexing_list_int('[:]', consec_list(17))
    check_indexing_list_int('[::]', consec_list(0))
    check_indexing_list_int('[1000::]', consec_list(0))
    check_indexing_list_int('[:1000:]', consec_list(0))

    # negative striding
    check_indexing_list_int('[::-1]', consec_list(7))
    check_indexing_list_int('[:3:-1]', consec_list(7))
    check_indexing_list_int('[3::-1]', consec_list(7))
    check_indexing_list_int('[1000::-1]', consec_list(7))
    check_indexing_list_int('[3:0:-1]', consec_list(7))
    check_indexing_list_int('[3:-1000:-1]', consec_list(7))
    check_indexing_list_int('[0:0:-1]', consec_list(7))
    check_indexing_list_int('[0:-1000:-1]', consec_list(7))

    # only step is specified
    check_indexing_list_int('[::-1]', consec_list(0))
    check_indexing_list_int('[::-1]', consec_list(7))
    check_indexing_list_int('[::-2]', consec_list(7))
    check_indexing_list_int('[::2]', consec_list(7))
    check_indexing_list_int('[::42]', consec_list(7))
    check_indexing_list_int('[::-42]', consec_list(7))
    check_indexing_list_int('[::42]', consec_list(0))
    check_indexing_list_int('[::-42]', consec_list(0))
    check_indexing_list_int('[::9223372036854775807]', consec_list(42))
    check_indexing_list_int('[::-9223372036854775807]', consec_list(42))
    with self.assertRaisesRegex(RuntimeError, "out of bounds"):
        check_indexing_list_int('[::-9223372036854775808]', consec_list(42))
    with self.assertRaisesRegex(RuntimeError, "should have non-zero step"):
        check_indexing_list_int('[::0]', consec_list(42))

    # striding strings
    check_indexing_str('[0]', random_string(6))
    check_indexing_str('[1]', random_string(7))
    check_indexing_str('[2]', random_string(8))
    check_indexing_str('[2]', random_string(9))
    check_indexing_str('[-1]', random_string(10))
    check_indexing_str('[0:2]', random_string(11))
    check_indexing_str('[1:-1]', random_string(12))
    check_indexing_str('[-3:-1]', random_string(13))
    check_indexing_str('[1:]', random_string(15))
    check_indexing_str('[:1]', random_string(16))
    check_indexing_str('[:]', random_string(17))
    check_indexing_str('[::]', random_string(0))
    check_indexing_str('[1000::]', random_string(0))
    check_indexing_str('[:1000:]', random_string(0))

    check_indexing_str('[::-1]', random_string(7))
    check_indexing_str('[:3:-1]', random_string(7))
    check_indexing_str('[3::-1]', random_string(7))
    check_indexing_str('[1000::-1]', random_string(7))
    check_indexing_str('[3:0:-1]', random_string(7))
    check_indexing_str('[3:-1000:-1]', random_string(7))
    check_indexing_str('[0:0:-1]', random_string(7))
    check_indexing_str('[0:-1000:-1]', random_string(7))

    check_indexing_str('[::-1]', random_string(0))
    check_indexing_str('[::-1]', random_string(7))
    check_indexing_str('[::-2]', random_string(7))
    check_indexing_str('[::2]', random_string(7))
    check_indexing_str('[::42]', random_string(7))
    check_indexing_str('[::-42]', random_string(7))
    check_indexing_str('[::42]', random_string(0))
    check_indexing_str('[::-42]', random_string(0))
    check_indexing_str('[::9223372036854775807]', random_string(42))
    check_indexing_str('[::-9223372036854775807]', random_string(42))
    with self.assertRaisesRegex(RuntimeError, "out of bounds"):
        check_indexing_str('[::-9223372036854775808]', random_string(42))
    with self.assertRaisesRegex(RuntimeError, "should have non-zero step"):
        check_indexing_str('[::0]', random_string(42))
def test_module_copy_with_attributes(self):
    """A ScriptModule with list/int/dict jit.Attribute members supports
    __copy__ without error."""
    class Vocabulary(torch.jit.ScriptModule):
        def __init__(self, vocab_list):
            super().__init__()
            self._vocab = torch.jit.Attribute(vocab_list, List[str])
            self.some_idx = torch.jit.Attribute(2, int)
            self.idx = torch.jit.Attribute(
                {word: i for i, word in enumerate(vocab_list)}, Dict[str, int]
            )

        @torch.jit.script_method
        def lookup_indices_1d(self, values):
            # type: (List[str]) -> List[int]
            result = torch.jit.annotate(List[int], [])
            # Direct list iteration not supported
            for i in range(len(values)):
                value = values[i]
                result.append(self.idx.get(value, self.some_idx))
            return result

        @torch.jit.script_method
        def forward(self, values):
            # type: (List[List[str]]) -> List[List[int]]
            result = torch.jit.annotate(List[List[int]], [])
            # Direct list iteration not supported
            for i in range(len(values)):
                result.append(self.lookup_indices_1d(values[i]))
            return result

    v = Vocabulary(list('uabcdefg'))
    v.__copy__()
def test_tuple_to_opt_list(self):
@torch.jit.script
def foo(x):
# type: (Optional[List[int]]) -> int
return 1
@torch.jit.script
def tuple_call():
return foo((1, 2))
def test_keyword(self):
    """Keyword arguments to builtin ops (torch.sum(dim=...)) work in
    script and match eager."""
    @torch.jit.script
    def func(x):
        return torch.sum(x, dim=0)

    inp = torch.rand(10, dtype=torch.float, requires_grad=True)
    scripted_out = func(inp)
    eager_out = torch.sum(inp, dim=0)
    self.assertEqual(scripted_out, eager_out)
def test_constant_pooling_none(self):
    """None constants used in both branches of an if are pooled into a
    single prim::Constant node."""
    @torch.jit.script
    def typed_nones(a=None, b=None, c=None):
        # type: (Optional[int], Optional[bool], Optional[Tensor]) -> Tuple[Optional[int], Optional[bool], Optional[Tensor]]
        return a, b, c

    @torch.jit.script
    def test(a):
        # type: (bool) -> None
        if a:
            print(typed_nones())
        else:
            print(typed_nones())

    graph_str = str(test.graph)
    self.assertTrue(graph_str.count("NoneType = prim::Constant") == 1)
def test_constant_pooling_same_identity(self):
    """Two escaping values that are the *same object* may be pooled into
    one constant without changing observable behavior."""
    def foo():
        a = torch.tensor([4])
        b = (a,)
        index = len(a) - 1
        c = b[index]
        d = b[index]
        return c, d

    foo_script = torch.jit.script(foo)
    self.run_pass('constant_propagation', foo_script.graph)
    self.run_pass('constant_pooling', foo_script.graph)
    # even though the c & d escape scope, we are still able
    # pool them into one constant because they are the same object
    FileCheck().check_count("prim::Constant", 1, exactly=True).run(foo_script.graph)
    self.assertEqual(foo(), foo_script())
def test_constant_pooling_introduce_aliasing(self):
    """Distinct tensor constants must NOT be pooled: that would make the
    two returned tensors alias each other, which is observable."""
    @torch.jit.script
    def foo():
        a = torch.tensor(1)
        b = torch.tensor(1)
        return a, b

    self.run_pass('constant_propagation', foo.graph)
    self.run_pass('constant_pooling', foo.graph)
    # dont pool constants bc it would introduce observable alias relationship changing
    a, b = foo()
    self.assertIsNot(a, b)
def test_literal(self):
    """Tuple literals: packing/unpacking, nested tuples, and reassignment
    of a tuple-typed variable inside a loop."""
    def func1(a, b):
        c = a, b
        d, e = c
        return d + e

    def func2(a, b):
        c = a, (a, b)
        d, e = c
        f, g = e
        return d + f + g

    def func3(a, b):
        # type: (float, float) -> float
        c = 0., (0., 0.)
        x = True
        while x:
            x = False
            c = a, (a, b)
        d, e = c
        f, g = e
        return d + f + g

    a = torch.rand(1, requires_grad=True)
    b = torch.rand(1, requires_grad=True)
    self.checkScript(func1, (a, b), optimize=True)
    self.checkScript(func2, (a, b), optimize=True)
    self.checkScript(func3, (a.item(), b.item()), optimize=True)
def test_expand(self):
    """Broadcasting add in script: forward matches eager and gradients reduce correctly."""
    @torch.jit.script
    def func(x, y):
        return x + y

    x = torch.rand(2, 3, dtype=torch.float, requires_grad=True)
    y = torch.rand(3, dtype=torch.float, requires_grad=True)
    out = func(x, y)
    self.assertEqual(func(x, y), x + y)
    grad = torch.randn(2, 3, dtype=torch.float)
    out.backward(grad)
    self.assertEqual(x.grad, grad)
    # y was broadcast over dim 0, so its grad is summed over that dim
    self.assertEqual(y.grad, grad.sum(dim=0))
def test_sum(self):
    """Shape propagation through sum with list vs scalar `dim` argument."""
    @torch.jit.script
    def func(x):
        return x.sum(dim=[4])

    @torch.jit.script
    def func2(x):
        return x.sum(dim=4)

    # test that shape analysis is written correctly for sum with OptionalIntArrayRef[1] dim argument
    self.run_pass('constant_propagation', func.graph)
    self.run_pass('constant_propagation', func2.graph)
    g = _propagate_shapes(func.graph, (torch.zeros(1, 1, 1, 1, 4),), False)
    g2 = _propagate_shapes(func2.graph, (torch.zeros(1, 1, 1, 1, 4),), False)
def test_cat(self):
    """Scripted torch.cat: output parity with eager, autodiff grouping, and gradients."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(x):
            return torch.cat((x, x), dim=0)

        x = torch.rand(10, dtype=torch.float, requires_grad=True)
        self.assertEqual(func(x, profile_and_replay=True), torch.cat((x, x), dim=0))

        @torch.jit.script
        def func2(x, y):
            # y is a tensor dim argument rather than an int literal
            return torch.cat((x, x), y)

        with disable_autodiff_subgraph_inlining():
            # (0, 2) exercises the empty-tensor edge case
            for sizes in ((2, 2), (0, 2)):
                x = torch.rand(sizes).requires_grad_()
                y = torch.tensor(1)
                output = func2(x, y, profile_and_replay=True)
                output_ref = torch.cat((x, x), y)
                self.assertEqual(output, output_ref)

                if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
                    self.assertAutodiffNode(func2.graph_for(x, y), True, ['aten::cat'], [])

                grad = torch.autograd.grad(output.sum(), x)
                grad_ref = torch.autograd.grad(output_ref.sum(), x)
                self.assertEqual(grad, grad_ref)
def test_cat_lifts(self):
    """torch.cat list arguments are lifted into a ListConstruct in the graph."""
    @torch.jit.script
    def foo(x):
        return torch.cat([x, x], dim=1)

    @torch.jit.script
    def foo2(x):
        # empty list input
        return torch.cat([], dim=1)

    @torch.jit.script
    def foo3(x):
        # single-element list input
        return torch.cat([x], dim=1)

    for g in [foo.graph, foo2.graph, foo3.graph]:
        FileCheck().check("int =").check("ListConstruct").check("aten::cat").run(str(g))
def test_stack(self):
    """Scripted torch.stack: output parity, autodiff grouping, and gradients."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(x):
            return torch.stack((x, x), dim=1)

        x = torch.rand(10, 10)
        self.assertEqual(func(x, profile_and_replay=True), torch.stack((x, x), dim=1))

        @torch.jit.script
        def func2(x, y):
            return torch.stack((x, y), dim=0)

        with disable_autodiff_subgraph_inlining():
            x = torch.randn([2, 2]).requires_grad_()
            y = torch.randn([2, 2]).requires_grad_()

            output = func2(x, y, profile_and_replay=True)
            output_ref = torch.stack((x, y), 0)
            self.assertEqual(output, output_ref)

            if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
                self.assertAutodiffNode(func2.graph_for(x, y), True, ['aten::stack'], [])

            grads = torch.autograd.grad(output.sum(), (x, y))
            grads_ref = torch.autograd.grad(output_ref.sum(), (x, y))
            self.assertEqual(grads, grads_ref)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY,
                 "Profiling executor will be using different heuristics for constructing differentiable graphs")
def test_unbind(self):
    """Scripted torch.unbind: list-of-tensor output parity and gradients (legacy executor only)."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(x, y):
            # type: (Tensor, int) -> List[Tensor]
            return torch.unbind(x, y)

        with disable_autodiff_subgraph_inlining():
            x = torch.rand([2, 2]).requires_grad_()
            y = 0
            outputs = func(x, y, profile_and_replay=True)
            outputs_ref = torch.unbind(x, dim=y)
            self.assertEqual(outputs, outputs_ref)

            self.assertAutodiffNode(func.graph_for(x, y), True, [], [])

            grad = torch.autograd.grad(_sum_of_list(outputs), x)
            grad_ref = torch.autograd.grad(_sum_of_list(outputs_ref), x)
            self.assertEqual(grad, grad_ref)
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING,
                 "Profiling executor fails to recognize that tensors in a list require gradients")
def test_meshgrid(self):
    """Scripted torch.meshgrid over a List[Tensor]: output parity and gradients."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(a):
            # type: (List[Tensor]) -> List[Tensor]
            return torch.meshgrid(a)

        with disable_autodiff_subgraph_inlining():
            a = torch.tensor([1.0, 2, 3]).requires_grad_()
            b = torch.tensor([1.0, 2, 3, 4]).requires_grad_()
            inputs = [a, b]

            outputs_ref = torch.meshgrid(inputs)
            outputs = func(inputs, profile_and_replay=True)
            self.assertEqual(outputs, outputs_ref)

            if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
                self.assertAutodiffNode(func.graph_for(inputs), True, [], [])

            grads = torch.autograd.grad(_sum_of_list(outputs), inputs)
            grads_ref = torch.autograd.grad(_sum_of_list(outputs_ref), inputs)
            self.assertEqual(grads, grads_ref)
def test_tensor_len(self):
    """len() of a tensor (size of dim 0) works in script."""
    def func(x):
        return len(x)

    self.checkScript(func, [torch.ones(4, 5, 6)])
def test_func_call(self):
    """Calling user-defined helper functions from a scripted function."""
    def add(a, b):
        return a + b

    def mul(a, x):
        return a * x

    def func(alpha, beta, x, y):
        return add(mul(alpha, x), mul(beta, y))

    alpha = torch.rand(1, dtype=torch.float, requires_grad=True)
    beta = torch.rand(1, dtype=torch.float, requires_grad=True)
    x = torch.rand(3, dtype=torch.float, requires_grad=True)
    y = torch.rand(3, dtype=torch.float, requires_grad=True)

    # NOTE: cannot optimize yet because broadcasts are not inserted before the fuser runs
    self.checkScript(func, [alpha, beta, x, y], optimize=False)
@unittest.skip("bailouts are being deprecated")
def test_profiling_graph_executor(self):
    """Profiling executor inserts prim::profile nodes, then bailouts on shape change."""
    @torch.jit.script
    def def_in_one_branch(x, z):
        # type: (Tensor, bool) -> float
        y = x
        if z is False:
            y = x + 1
        return y.sum()

    a = torch.rand(2, 3)

    with enable_profiling_mode_for_profiling_tests():
        # check prim::profile are inserted
        profiled_graph_str = str(def_in_one_branch.graph_for(a, True))
        FileCheck().check_count("prim::profile", 4).run(profiled_graph_str)
        # this call is optimized for
        # the given shape of (2, 3)
        def_in_one_branch(a, False)
        # change shape to (3)
        # so we go down a bailout path
        a = torch.ones(3)
        # check prim::BailOuts are inserted
        bailout_graph_str = str(def_in_one_branch.graph_for(a, True))
        FileCheck().check_count("prim::BailOut", 3).run(bailout_graph_str)
        # this triggers all 3 bailouts
        self.assertEqual(def_in_one_branch(a, False), 6.0)
        # this triggers 2 bailouts
        self.assertEqual(def_in_one_branch(a, True), 3.0)
@unittest.skip("bailouts are being deprecated")
def test_maxpool_guard_elimination(self):
    """Redundant shape guards around max_pool1d collapse to a single BailOut."""
    @torch.jit.script
    def my_maxpool(x):
        return F.max_pool1d(x, kernel_size=[1]) + torch.ones([32, 32, 32])

    a = torch.rand(32, 32, 32)

    with enable_profiling_mode_for_profiling_tests():
        my_maxpool(a)
        bailout_graph_str = str(my_maxpool.graph_for(a))
        FileCheck().check_count("prim::BailOut", 1).run(bailout_graph_str)
@unittest.skip("bailouts are being deprecated")
def test_slice_guard_elimination(self):
    """Guards for identical slice expressions are deduplicated to one BailOut."""
    @torch.jit.script
    def my_slice(x):
        return x[0:16:2] + x[0:16:2]

    a = torch.rand(32, 4)

    with enable_profiling_mode_for_profiling_tests():
        my_slice(a)
        bailout_graph_str = str(my_slice.graph_for(a))
        FileCheck().check_count("prim::BailOut", 1).run(bailout_graph_str)
@unittest.skip("bailouts are being deprecated")
def test_unsqueeze_guard_elimination(self):
    """Guard elimination for repeated unsqueeze: exactly two BailOuts remain."""
    @torch.jit.script
    def my_unsqueeze(x):
        return torch.unsqueeze(x, 0) + torch.unsqueeze(x, 0)

    a = torch.rand(32, 4)

    with enable_profiling_mode_for_profiling_tests():
        my_unsqueeze(a)
        bailout_graph_str = str(my_unsqueeze.graph_for(a))
        FileCheck().check_count("prim::BailOut", 2).run(bailout_graph_str)
def test_resize_input_ops(self):
    """Shape analysis erases sizes for tensors that may alias a resized tensor."""
    # resize_ and resize_as resize the input tensor. because our shape analysis
    # is flow invariant, we set any Tensor that can alias a resized Tensor
    # to the base Tensor Type, without size information.

    # testing that value which is an input of a graph gets handled
    def out_op_graph_input():
        @torch.jit.script
        def test(x, y, z):
            torch.mul(x, y, out=z)
            return z

        graph = _propagate_shapes(test.graph,
                                  (torch.zeros(2, 1), torch.zeros(1, 2), torch.zeros(1, 1, 1)), False)
        self.assertTrue(next(graph.outputs()).type() == TensorType.get())
    out_op_graph_input()

    def test_resize():
        @torch.jit.script
        def test(x):
            after_resize_alias = torch.zeros([2])
            for _ in range(5):
                b = x + 1
                f = [1]
                before_resize_alias = b.sub_(1)
                # for i in range(10):
                f.append(1)
                b.resize_(f)
                after_resize_alias = b.add_(1)
            return after_resize_alias

        self.run_pass('constant_propagation', test.graph)
        g = _propagate_shapes(test.graph, (torch.zeros(1, 1),), False)
        resize_node = g.findNode("aten::resize_")
        # first input and output of b.resize_ is b
        self.assertTrue(next(resize_node.inputs()).type() == TensorType.get())
        self.assertTrue(next(resize_node.outputs()).type() == TensorType.get())

        # correctly propagates to b alias set
        before_resize = g.findNode("aten::sub_")
        self.assertTrue(next(before_resize.outputs()).type() == TensorType.get())

        after_resize = g.findNode("aten::add_")
        self.assertTrue(next(after_resize.outputs()).type() == TensorType.get())
    test_resize()

    def test_resize_as():
        @torch.jit.script
        def test(x):
            b = torch.zeros([2, 2])
            b.resize_as_(x)
            return b

        g = test.graph
        self.run_pass('constant_propagation', g)
        g = _propagate_shapes(test.graph, (torch.zeros(1, 1),), False)

        # x doesn't alias a resized op so it shouldn't be set to base Tensor type
        self.assertTrue(next(g.inputs()).type() != TensorType.get())
        # return is resized
        self.assertTrue(next(g.outputs()).type() == TensorType.get())
    test_resize_as()
def test_uninitialized(self):
    """Using a prim::Uninitialized value at runtime raises a type error."""
    graph_str = """graph():
      %1 : int = prim::Uninitialized()
      %2 : int = prim::Constant[value=1]()
      %3 : int = aten::add(%1, %2)
      return (%3)
    """
    g = parse_ir(graph_str)
    m = self.createFunctionFromGraph(g)
    # the graph must survive an export/import round trip
    self.getExportImportCopy(m)
    with self.assertRaisesRegex(RuntimeError, "expected int"):
        m()
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't use requires_grad information")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING, "Peeling is now disabled")
def test_requires_grad_loop(self):
    """requires_grad analysis across a loop whose carried value changes grad-ness."""
    @torch.jit.script
    def test(x, y, z):
        # type: (Tensor, Tensor, int) -> Tensor
        for _ in range(z):
            x = y
        return x

    # x requires grad, y does not
    # testing that requires grad analysis correctly exits, with its input
    # to the loop (x) requiring grad and its output to the loop not requiring grad
    # and the output of the node conservatively setting grad to true
    inps = (torch.tensor(1.0, requires_grad=True), torch.tensor(1), 10)
    test(*inps, profile_and_replay=True)

    graph = test.graph_for(*inps)
    loop = graph.findNode("prim::Loop")
    loop_body = next(loop.blocks())
    loop_inputs = list(loop_body.inputs())
    loop_outputs = list(loop_body.outputs())

    if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
        # TODO: simplify this test as it's very sensitive
        # the optimized graph will have 3 loops
        # the original loop is peeled
        # peeled loop also gets unrolled
        index_of_x_in_peeled_unrolled_loop = -2
        self.assertTrue(loop_inputs[index_of_x_in_peeled_unrolled_loop].requires_grad())
        bailouts_in_outer_block = graph.findAllNodes("prim::BailOut", False)
        last_bailout_index_on_loops_output = -1
        self.assertFalse(bailouts_in_outer_block[last_bailout_index_on_loops_output].output().requires_grad())
    else:
        self.assertTrue(loop_inputs[1].requires_grad())
        self.assertTrue(loop.output().requires_grad())
        self.assertFalse(loop_outputs[1].requires_grad())
def test_view_shape_prop(self):
    """view(size=[-1]) in a CompilationUnit produces the flattened shape."""
    cu = torch.jit.CompilationUnit('''
    def test_view_shape_prop(a):
        return a.view(size=[-1])
    ''')
    inputs = [torch.zeros(10, 10)]
    outputs = torch.zeros(100)

    real_outs = cu.test_view_shape_prop(*inputs)
    self.assertEqual(real_outs, outputs)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_view_listconstruct_shape_prop(self):
    """Shape propagation through view() whose sizes come from a ListConstruct."""
    def fn(x):
        B = x.size(0)
        C = x.size(1)
        T = x.size(2)
        return x.view(T, B, C)

    x = torch.randn(3, 1, 5, requires_grad=True)
    fn = torch.jit.script(fn)
    graph = _propagate_shapes(fn.graph, (x,), False)
    # dtype must survive the view even with dynamic sizes
    self.assertTrue(next(graph.outputs()).type().scalarType() == 'Float')
def test_shape_prop_promotion(self):
    """float + double promotes the propagated output type to Double."""
    @torch.jit.script
    def fn(x, y):
        return x + y

    x, y = torch.rand(3, 4, dtype=torch.float), torch.rand(3, 4, dtype=torch.double)
    graph = _propagate_shapes(fn.graph, (x, y), False)
    FileCheck().check('Double(*, *, device=cpu) = aten::add').run(graph)
def test_shape_prop_promote_scalar_arg(self):
    """Python float scalar + long tensor promotes to the default float dtype."""
    @torch.jit.script
    def fn(x):
        return math.pi + x

    x = torch.zeros(3, 4, dtype=torch.long)
    graph = _propagate_shapes(fn.graph, (x,), False)
    default = torch.get_default_dtype()
    if default == torch.float:
        FileCheck().check('Float(*, *, requires_grad=0, device=cpu) = aten::add').run(graph)
    else:
        FileCheck().check('Double(*, *, requires_grad=0, device=cpu) = aten::add').run(graph)
def test_integral_shape_inference(self):
    """Integer tensor multiplication keeps the integral dtype in script."""
    cu = torch.jit.CompilationUnit('''
    def test_integral_shape_inference(a):
        return a * a
    ''')
    inputs = [torch.ones(10, 10, dtype=torch.long)]
    outputs = torch.ones(10, 10, dtype=torch.long)

    self.assertEqual(cu.test_integral_shape_inference(*inputs), outputs)
@unittest.skipIf(RUN_CUDA, 'This tests the CPU fuser')
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_batchnorm_fuser_cpu(self):
    """CPU fuser emits single-precision math (sqrtf) for a fused batchnorm-like graph."""
    code = '''
    graph(%3 : Tensor,
          %7 : Tensor,
          %12 : Float(*, *),
          %13 : Tensor,
          %25 : Tensor):
      %23 : int = prim::Constant[value=1]()
      %22 : float = prim::Constant[value=1e-05]()
      %26 : Tensor = aten::sqrt(%25)
      %24 : Tensor = aten::add(%26, %22, %23)
      %20 : Tensor = aten::reciprocal(%24)
      %norm_invstd : Tensor = aten::mul(%20, %23)
      %15 : Tensor = aten::sub(%12, %13, %23)
      %11 : Tensor = aten::mul(%15, %norm_invstd)
      %8 : Tensor = aten::mul(%11, %7)
      %5 : Tensor = aten::add(%8, %3, %23)
      %1 : Float(*, *) = aten::relu(%5)
      return (%1)
    '''
    graph = parse_ir(code)
    inputs = 5 * [torch.rand(26, 2048, dtype=torch.float)]
    code = torch._C._jit_fuser_get_fused_kernel_code(graph, inputs)
    FileCheck().check('sqrtf').run(code)
@slowTest
@unittest.skipIf(RUN_CUDA, 'This tests the CPU fuser')
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_fuser_double_float_codegen(self):
    """CPU fuser picks the double ('fn(') vs float ('fnf(') C intrinsic per dtype."""
    fns = ['log', 'log10', 'log1p', 'log2', 'lgamma', 'exp', 'expm1', 'erf',
           'erfc', 'cos', 'acos', 'cosh', 'sin', 'asin', 'sinh', 'tan',
           'atan', 'tanh', 'sqrt', 'ceil', 'floor', 'round', 'trunc',
           'frac']

    def lookup_c_equivalent_fn(aten_fn):
        # all ops above share the aten name with their C math equivalent
        return aten_fn

    def test_dispatch(op, expects, dtype, binary=False):
        if dtype == torch.double:
            dtype_str = 'Double'
        elif dtype == torch.float:
            dtype_str = 'Float'
        else:
            raise RuntimeError('Unknown dtype')

        if binary:
            code = f'''
            graph(%3 : Tensor, %4 : Tensor):
              %2 : {dtype_str}(*, *) = aten::{op}(%3, %4)
              %1 : {dtype_str}(*, *) = aten::relu(%2)
              return (%1)
            '''
        else:
            code = f'''
            graph(%3 : Tensor):
              %2 : {dtype_str}(*, *) = aten::{op}(%3)
              %1 : {dtype_str}(*, *) = aten::relu(%2)
              return (%1)
            '''

        graph = parse_ir(code)
        inputs = (2 if binary else 1) * [torch.rand(26, 2048, dtype=dtype)]
        code = torch._C._jit_fuser_get_fused_kernel_code(graph, inputs)
        FileCheck().check(expects).run(code)

    for fn in fns:
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + '(', torch.double)
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + 'f(', torch.float)

    # 'min', 'max' were previously tested but are now replaced with ternary expressions
    # instead of fmin() and fmax()
    binary_fns = ['pow']
    for fn in binary_fns:
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + '(', torch.double, binary=True)
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + 'f(', torch.float, binary=True)
@unittest.skipIf(RUN_CUDA, 'This tests the CPU fuser')
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_fuser_double_literal_precision(self):
    """Double literals keep full precision in generated fuser kernel code."""
    code = '''
    graph(%2 : Float(*, *)):
      %4 : int = prim::Constant[value=1]()
      %3 : float = prim::Constant[value=1.282549830161864]()
      %5 : Float(*, *) = aten::add(%2, %3, %4)
      %1 : Float(*, *) = aten::relu(%5)
      return (%1)
    '''
    graph = parse_ir(code)
    code = torch._C._jit_fuser_get_fused_kernel_code(graph, [torch.rand(3, 4)])
    FileCheck().check('1.282549830161864').run(code)
def test_fuser_multiple_blocks(self):
    """Fusion across a while loop with several independent cat chains."""
    cu = torch.jit.CompilationUnit('''
    def test_fuser_multiple_blocks(this, that, theother, meme):
        i = 0
        while i < 20:
            this = torch.cat([this, meme], dim=0)
            that = torch.cat([that, meme], dim=0)
            theother = torch.cat([theother, meme], dim=0)
            i = i + 1
        return this, that, theother
    ''')

    inputs = [torch.ones(0, 10, 10)] * 3
    inputs += [torch.ones(1, 10, 10)]
    outputs = [torch.ones(20, 10, 10)] * 3

    self.assertEqual(cu.test_fuser_multiple_blocks(*inputs), outputs)
@unittest.skip("RuntimeError: VariableType::ID() not implemented")
def test_cast(self):
    """int() cast of a float tensor in script (currently skipped upstream)."""
    script = '''
    def to_int(x):
        return int(x)
    '''
    x = Variable(torch.FloatTensor([1.1, 2.3]), requires_grad=True)
    out = Variable(torch.IntTensor([1, 2]), requires_grad=True)
    self.checkScript(script, [x], optimize=True, outputs=[out], func='to_int')
def test_str_cast(self):
    """str() of a tuple in script matches Python's repr formatting."""
    @torch.jit.script
    def to_str(x):
        # type: (int) -> str
        return str((x, x))

    self.assertEqual("(1, 1)", to_str(1))
def test_int_cast(self):
    """int(str) in script: decimal strings parse, hex/binary literals raise."""
    @torch.jit.script
    def to_int(x):
        # type: (str) -> int
        return int(x)

    self.assertEqual(5, to_int('5'))
    self.assertEqual(-5, to_int('-5'))
    self.assertEqual(2147483647, to_int('2147483647'))
    self.assertEqual(-2147483648, to_int('-2147483648'))

    with self.assertRaisesRegex(RuntimeError, "invalid literal for int()"):
        to_int('0x20')

    with self.assertRaisesRegex(RuntimeError, "invalid literal for int()"):
        to_int('0b0001')
def test_python_frontend(self):
    """Frontend AST construction for a function exercising many statement kinds."""
    def fn(x, y, z):
        q = None
        q = x + y - z.sigmoid()
        print(q)
        w = -z
        if not x and not y and z:
            m = x if not z else y
        while x < y > z:
            q = x
        assert 1 == 1, "hello"
        return x

    ast = torch.jit.frontend.get_jit_def(fn, fn.__name__)
    # compared against a checked-in expect file
    self.assertExpected(str(ast))
def test_python_frontend_source_range(self):
    """AST nodes carry source ranges pointing at the originating Python line."""
    def fn():
        raise Exception("hello")  # noqa: TRY002
    ast = torch.jit.frontend.get_jit_def(fn, fn.__name__)
    FileCheck().check("SourceRange at:") \
               .check("def fn():") \
               .check("~~~~~~~~~") \
               .check('raise Exception("hello")') \
               .check('~~~~~~~~~~~~~~~~~ <--- HERE') \
               .run(str(ast.range()))
def test_python_frontend_py3(self):
    """Frontend AST for a bare raise statement (py3 syntax), against an expect file."""
    def fn():
        raise Exception("hello")  # noqa: TRY002
    ast = torch.jit.frontend.get_jit_def(fn, fn.__name__)
    self.assertExpected(str(ast))
def _make_scalar_vars(self, arr, dtype):
return [torch.tensor(val, dtype=dtype) for val in arr]
def test_string_print(self):
    """print() with adjacent string literal concatenation and mixed arguments."""
    def func(a):
        print(a, "a" 'b' '''c''' """d""", 2, 1.5)
        return a

    inputs = self._make_scalar_vars([1], torch.int64)
    self.checkScript(func, inputs, capture_output=True)
def test_while(self):
    """Basic while loop with two carried variables."""
    def func(a, b, max):
        while bool(a < max):
            a = a + 1
            b = b + 1
        c = a + b
        return c

    inputs = self._make_scalar_vars([1, 1, 10], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_fibb(self):
    """Nested while loops with several loop-carried variables (fibonacci-ish)."""
    def func(lim):
        first = 1
        second = 1
        i = 1
        somenum = 5
        dontmutateme = 3
        third = 0
        while bool(i < lim):
            third = first + second
            first = second
            second = third
            j = 0
            while j < 10:
                somenum = somenum * 2
                j = j + 1
            i = i + j
            i = i + dontmutateme

        st = second + third
        fs = first + second
        return third, st, fs

    inputs = self._make_scalar_vars([10], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_fibb_totally_better(self):
    """Fibonacci via a for-range loop over ints."""
    def fib(x):
        # type: (int) -> int
        prev = 1
        v = 1
        for _ in range(x):
            save = v
            v = v + prev
            prev = save
        return v

    self.checkScript(fib, (10,))
def test_if(self):
    """if/else with variables assigned in only one branch."""
    def func(a, b):
        # type: (int, int) -> int
        d = 3
        if bool(a > 10):
            a = 3 + d
        else:
            b = 3 + d
            d = 4
        c = a + b
        return c

    inputs = self._make_scalar_vars([1, -1], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_if_for_in_range(self):
    """if/else nested inside a for-range loop, returning a branch-mutated value."""
    def func(a, b):
        # type: (int, int) -> int
        d = 3
        for _ in range(20):
            if bool(a > 10):
                a = 3 + d
            else:
                b = 3 + d
                d = 4
            c = a + b
        return d

    inputs = self._make_scalar_vars([1, -1], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_if_noelse(self):
    """if without an else branch."""
    def func(a, b):
        if bool(a > 10):
            a = 3 + b
        c = a + b
        return c

    inputs = self._make_scalar_vars([-1, 1], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_if_is_none_dispatch(self):
    """`is None` tests constant-fold when nullability is statically known.

    When both operands of an `is None` check are statically always-None or
    never-None, the compiler dispatches straight to the live branch, so only
    one prim::Constant survives in the graph. When one operand is maybe-None
    (Optional), a real if statement is emitted and all three return constants
    remain. `and`/`or`/`not` over such checks fold away entirely (no `if`).
    """

    @torch.jit.script
    def test_lhs_none_rhs_none():
        # LHS, RHS both alwaysNone, dispatch always_none_branch
        # only emit one prim::Constant
        if None is None:
            return 1
        elif None is not None:
            return 2
        else:
            return 3

    self.assertTrue(str(test_lhs_none_rhs_none.graph).count(': int = prim::Constant') == 1)

    @torch.jit.script
    def test_lhs_opt_rhs_none(lhs=None):
        # type: (Optional[Tensor]) -> int
        # LHS maybeNone: emit normal if stmt that contains 3 constants
        if lhs is not None:
            return 2
        elif lhs is None:
            return 1
        else:
            return 3

    self.assertTrue(str(test_lhs_opt_rhs_none.graph).count(': int = prim::Constant') == 3)

    @torch.jit.script
    def test_lhs_none_rhs_opt(rhs=None):
        # type: (Optional[Tensor]) -> int
        # RHS maybeNone, emit normal if stmt that contains 3 constants
        if None is rhs:
            return 1
        elif None is not rhs:
            return 2
        else:
            return 3

    # BUG FIX: this assertion previously re-inspected test_lhs_opt_rhs_none
    # (copy-paste), leaving test_lhs_none_rhs_opt unchecked; it must inspect
    # the graph of the function defined directly above.
    self.assertTrue(str(test_lhs_none_rhs_opt.graph).count(': int = prim::Constant') == 3)

    @torch.jit.script
    def test_lhs_never_rhs_none(lhs):
        # LHS neverNone, RHS alwaysNone dispatch never_none_branch
        # only emit one prim::Constant
        if lhs is None:
            return 1
        elif lhs is not None:
            return 2
        else:
            return 3

    self.assertTrue(str(test_lhs_never_rhs_none.graph).count(': int = prim::Constant') == 1)

    @torch.jit.script
    def test_lhs_none_rhs_never(rhs):
        # LHS alwaysNone, RHS neverNone dispatch never_none_branch
        # only emit one prim::Constant
        if None is rhs:
            return 1
        elif None is not rhs:
            return 2
        else:
            return 3

    self.assertTrue(str(test_lhs_none_rhs_never.graph).count(': int = prim::Constant') == 1)

    @torch.jit.script
    def test_bool_arith_and(lhs):
        # statically-contradictory condition folds: no `if` in the graph
        if lhs is None and lhs is not None:
            return 1
        else:
            return 2
    self.assertEqual(test_bool_arith_and(torch.zeros(3)), 2)
    self.assertTrue(str(test_bool_arith_and.graph).count('if') == 0)

    @torch.jit.script
    def test_bool_arith_or(lhs):
        # statically-tautological condition folds: no `if` in the graph
        if lhs is None or lhs is not None:
            return 1
        else:
            return 2
    self.assertEqual(test_bool_arith_or(torch.zeros(3)), 1)
    self.assertTrue(str(test_bool_arith_or.graph).count('if') == 0)

    @torch.jit.script
    def test_bool_arith_not(lhs):
        # `lhs` is a Tensor parameter, hence never None: condition folds
        if lhs is not None:
            return 1
        else:
            return 2
    self.assertEqual(test_bool_arith_not(torch.zeros(3)), 1)
    self.assertTrue(str(test_bool_arith_not.graph).count('if') == 0)
def test_conditional_casting(self):
    """Implicit bool conversion in `if`/`not` for tensors, ints, floats; tuples must fail."""
    def test_bool_cast_tensor(x):
        if x:
            return 1
        else:
            return 0

    for make_one_dim in [True, False]:
        for inp_val in [0.1, 0.0, -0.0, -0.1, -1, 0, 1]:
            # also cover 1-element tensors, which are bool-convertible too
            inp_val = [inp_val] if make_one_dim else inp_val
            self.checkScript(test_bool_cast_tensor, (torch.tensor(inp_val),))

    self.checkScriptRaisesRegex(test_bool_cast_tensor, (torch.tensor([1, 1]),), Exception,
                                "Boolean value of Tensor with more than one value")

    def test_not_cast(x):
        if not x:
            return 1
        else:
            return 0

    self.checkScript(test_not_cast, (torch.tensor(1),))
    self.checkScript(test_not_cast, (torch.tensor(0),))

    with self.assertRaisesRegex(RuntimeError, r"Could not cast value of type Tuple\[Tensor, Tensor\]"):  # noqa: W605
        @torch.jit.script
        def test_mult(x, y):
            return not (x, y)

    def test_cast_int(x):
        # type: (int) -> int
        if x:
            return 1
        else:
            return 0
    self.checkScript(test_cast_int, (1,))
    self.checkScript(test_cast_int, (0,))
    self.checkScript(test_cast_int, (-1,))

    def test_cast_float(x):
        # type: (float) -> int
        if x:
            return 1
        else:
            return 0
    self.checkScript(test_cast_float, (1.,))
    self.checkScript(test_cast_float, (0.,))
    self.checkScript(test_cast_float, (-1.,))

    with self.assertRaisesRegex(RuntimeError, r"Could not cast value of type Tuple\[int, int\] to bool"):  # noqa: W605
        @torch.jit.script
        def test_bad_conditional(x):
            if (1, 2):  # noqa: F634
                return
            else:
                return 0
def test_while_nonexistent_value(self):
    """Using an undefined name in a while body is a compile-time error."""
    with self.assertRaisesRegex(RuntimeError, "undefined value x"):
        torch.jit.CompilationUnit('''
        def test_while(a, b):
            while bool(a < 10):
                a = a + x
                b = b + 1
            return a + b
        ''')
def test_while_nonexistent_cond_value(self):
    """Undefined name in a while condition is a compile-time error; plus Optional refinement cases."""
    with self.assertRaisesRegex(RuntimeError, "undefined value x"):
        torch.jit.CompilationUnit('''
        def test_while(a, b):
            while a < x:
                a = a + 1
                b = b + 1
            return a + b
        ''')

    # NOTE(review): the nesting of the scripted functions below was reconstructed —
    # they look like an Optional-refinement test group; confirm they belong to
    # this method rather than a separate one in the original file.

    @torch.jit.script
    def test_ternary(x):
        # type: (Optional[int]) -> int
        x = x if x is not None else 2
        return x

    @torch.jit.script
    def test_not_none(x):
        # type: (Optional[int]) -> None
        if x is not None:
            print(x + 1)

    @torch.jit.script
    def test_and(x, y):
        # type: (Optional[int], Optional[int]) -> None
        if x is not None and y is not None:
            print(x + y)

    @torch.jit.script
    def test_not(x, y):
        # type: (Optional[int], Optional[int]) -> None
        if not (x is not None and y is not None):
            pass
        else:
            print(x + y)

    @torch.jit.script
    def test_bool_expression(x):
        # type: (Optional[int]) -> None
        if x is not None and x < 2:
            print(x + 1)

    @torch.jit.script
    def test_nested_bool_expression(x, y):
        # type: (Optional[int], Optional[int]) -> int
        if x is not None and x < 2 and y is not None:
            x = x + y
        else:
            x = 5
        return x + 2

    @torch.jit.script
    def test_or(x, y):
        # type: (Optional[int], Optional[int]) -> None
        if y is None or x is None:
            pass
        else:
            print(x + y)

    # backwards compatibility
    @torch.jit.script
    def test_manual_unwrap_opt(x):
        # type: (Optional[int]) -> int
        if x is None:
            x = 1
        else:
            x = torch.jit._unwrap_optional(x)
        return x  # noqa: T484

    with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
        @torch.jit.script
        def or_error(x, y):
            # type: (Optional[int], Optional[int]) -> None
            # `or` does not refine inside the body, so x + y must not typecheck
            if x is None or y is None:
                print(x + y)  # noqa: T484

    with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
        @torch.jit.script
        def and_error(x, y):
            # type: (Optional[int], Optional[int]) -> None
            if x is None and y is None:
                pass
            else:
                print(x + y)  # noqa: T484

    with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
        @torch.jit.script
        def named_var(x):
            # type: (Optional[int]) -> None
            # refinement does not flow through a named boolean variable
            x_none = x is not None
            if x_none:
                print(x + 1)  # noqa: T484

    with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
        @torch.jit.script
        def named_var_and(x, y):
            # type: (Optional[int], Optional[int]) -> None
            x_none = x is not None
            if y is not None and x_none:
                print(x + y)  # noqa: T484
def test_assertion_optional_refinement(self):
    """assert refines Optional[int] to int for the remainder of the function."""
    @torch.jit.script
    def test(x, y):
        # type: (Optional[int], Optional[int]) -> int
        assert x is not None and y is not None
        return x + y

    self.assertEqual(test(2, 2), 4)
    with self.assertRaisesRegex(Exception, ""):
        test(1, None)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "the current version of Profiler doesn't profile/specialize Optionals")
def test_optional_tensor(self):
    """Executor specializes Optional[Tensor] inputs to None vs Tensor."""
    @torch.jit.script
    def fn(x, y):
        # type: (Optional[Tensor], int) -> int
        if x is None:
            return y
        else:
            return 0

    res = fn(None, 1)
    self.assertEqual(res, 1)
    g = torch.jit.last_executed_optimized_graph()
    first_input = next(g.inputs())
    # check if input is disconnected
    self.assertEqual(first_input.type().kind(), 'OptionalType')
    self.assertEqual(first_input.uses(), [])
    t = torch.ones(1)
    res = fn(t, 1)
    self.assertEqual(res, 0)
    g = torch.jit.last_executed_optimized_graph()
    # with a real tensor the input specializes to TensorType
    self.assertEqual(next(g.inputs()).type().kind(), 'TensorType')

    @torch.jit.script
    def fn(x, y, b):
        # type: (Optional[Tensor], Tensor, bool) -> Tensor
        if b:
            res = y
        else:
            res = torch.jit._unwrap_optional(x)
        return res

    t2 = torch.zeros(1)
    res = fn(t, t2, True)
    self.assertEqual(res, t2)
    with self.assertRaisesRegex(RuntimeError, "Unwrapping null optional"):
        res = fn(None, t2, False)
    res = fn(None, t2, True)
    g = torch.jit.last_executed_optimized_graph()
    self.assertIn(next(g.outputs()).type().str(), ("Tensor", "Tensor(requires_grad=1)"))
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "the current version of Profiler doesn't profile/specialize Optionals")
def test_optional_list(self):
    """Executor specializes Optional[List[int]] inputs to None vs list."""
    @torch.jit.script
    def fn(x, y):
        # type: (Optional[List[int]], int) -> int
        if x is None:
            return y
        else:
            res = 0
            for d in x:
                res += d
            return res

    res = fn(None, 1)
    self.assertEqual(res, 1)
    g = torch.jit.last_executed_optimized_graph()
    first_input = next(g.inputs())
    # check if input is disconnected
    self.assertEqual(first_input.type().kind(), 'OptionalType')
    self.assertEqual(first_input.uses(), [])
    l = [2, 3]
    res = fn(l, 1)
    self.assertEqual(res, 5)
    g = torch.jit.last_executed_optimized_graph()
    # with a real list the input specializes to ListType
    self.assertEqual(next(g.inputs()).type().kind(), 'ListType')

    @torch.jit.script
    def fn(x, y, b):
        # type: (Optional[List[int]], List[int], bool) -> List[int]
        if b:
            l = torch.jit._unwrap_optional(x)
        else:
            l = y
        return l

    l2 = [0, 1]
    res = fn(l, l2, True)
    self.assertEqual(res, l)
    with self.assertRaisesRegex(RuntimeError, "Unwrapping null optional"):
        res = fn(None, l2, True)
    res = fn(None, l2, False)
    g = torch.jit.last_executed_optimized_graph()
    self.assertEqual(next(g.outputs()).type().str(), "int[]")
def test_alias_covariant_type_containers(self):
    """Covariant container types unify across branches / against declared types."""
    @torch.jit.script
    def foo(x):
        # type: (bool)
        if x:
            a = (None,)
        else:
            a = ([],)
        return a

    @torch.jit.script
    def foo2(x, li):
        # type: (bool, Tuple[Optional[List[Tensor]]])
        if x:
            li = (None,)
        return li
def test_while_write_outer_then_read(self):
    """A variable written in a while body is visible after the loop."""
    def func(a, b):
        while bool(a < 10):
            a = a + 1
            b = a + 1
        return a + b

    inputs = self._make_scalar_vars([42, 1337], torch.int64)
    self.checkScript(func, inputs, optimize=True)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_while_nest_if(self):
    """if/else nested inside a while loop."""
    def func(a, b):
        # type: (int, int) -> int
        c = 0
        while a < 10:
            a = a + 1
            b = b + 1
            if a > b:
                c = -a
            else:
                c = -b
        return c + 1

    inputs = self._make_scalar_vars([-1234, 4321], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_divmod(self):
    """divmod() over int/float argument combinations, including division by zero."""
    def func_int(a, b):
        # type: (int, int) -> Tuple[int, int]
        return divmod(a, b)

    def func_float(a, b):
        # type: (float, float) -> Tuple[float, float]
        return divmod(a, b)

    def func_int_float(a, b):
        # type: (int, float) -> Tuple[float, float]
        return divmod(a, b)

    def func_float_int(a, b):
        # type: (float, int) -> Tuple[float, float]
        return divmod(a, b)

    def divmod_test_iterator(func, num, den):
        for i in num:
            for j in den:
                self.checkScript(func, (i, j), frames_up=2)

    num_int = [1024, -1024]
    den_int = [10, -10]
    num_float = [5.3, -5.3]
    den_float = [2.0, -2.0]
    divmod_test_iterator(func_int, num_int, den_int)
    divmod_test_iterator(func_float, num_float, den_float)
    divmod_test_iterator(func_int_float, num_int, den_float)
    divmod_test_iterator(func_float_int, num_float, den_int)

    with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError: integer division or modulo by zero"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_int)))
        cu.func_int(1024, 0)
    with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError: float divmod()"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_float)))
        cu.func_float(5.3, 0.0)
    with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError: float divmod()"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_int_float)))
        cu.func_int_float(1024, 0.0)
    with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError: float divmod()"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_float_int)))
        cu.func_float_int(5.3, 0)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
def test_math_ops(self):
    """Cross-check TorchScript's math.* builtins against CPython's math module.

    For each op a tiny wrapper function is generated, compiled with
    TorchScript AND exec()'d in plain Python, then both are run over a grid
    of float/int inputs including inf, nan, and values outside 32-bit range.
    """
    def checkMathWrap(func_name, num_args=1, is_float=True, **args):
        # With is_float=True (the default) the op is exercised for both
        # float and int argument types; otherwise only the requested one.
        if is_float:
            checkMath(func_name, num_args, True, **args)
            checkMath(func_name, num_args, False, **args)
        else:
            checkMath(func_name, num_args, is_float, **args)

    inf = float("inf")
    NaN = float("nan")
    mx_int = 2**31 - 1
    mn_int = -2**31
    # Mixed float/int literals on purpose: int operands must promote.
    float_vals = ([inf, NaN, 0.0, 1.0, 2.2, -1.0, -0.0, -2.2, -inf, 1, 0, 2] +
                  [10.0 ** i for i in range(5)] + [-(10.0 ** i) for i in range(5)])
    int_vals = list(range(-5, 5, 1)) + [mx_int + 5, mx_int * 2, mn_int - 5, mn_int * 2]

    def checkMath(func_name, num_args, is_float=True, ret_type="float", debug=False, vals=None, args_type=None):
        # Generate `def func(a, b): return math.<op>(...)`, compile with
        # TorchScript and exec() in Python, then compare over `vals`.
        funcs_template = dedent('''
        def func(a, b):
            # type: {args_type} -> {ret_type}
            return math.{func}({args})
        ''')
        if num_args == 1:
            args = "a"
        elif num_args == 2:
            args = "a, b"
        else:
            raise RuntimeError("Test doesn't support more than 2 arguments")
        if args_type is None:
            args_type = "(float, float)" if is_float else "(int, int)"
        funcs_str = funcs_template.format(func=func_name, args=args, args_type=args_type, ret_type=ret_type)
        scope = {}
        execWrapper(funcs_str, globals(), scope)
        cu = torch.jit.CompilationUnit(funcs_str)
        f_script = cu.func
        f = scope['func']
        if vals is None:
            # Default input grid: every ordered pair from the type's value list.
            vals = float_vals if is_float else int_vals
            vals = [(i, j) for i in vals for j in vals]
        for a, b in vals:
            res_python = None
            res_script = None
            try:
                res_python = f(a, b)
            except Exception as e:
                res_python = e
            try:
                res_script = f_script(a, b)
            except Exception as e:
                res_script = e
            if debug:
                print("in: ", a, b)
                print("out: ", res_python, res_script)
            # We can't use assertEqual because of a couple of differences:
            # 1. nan == nan should return true
            # 2. When python functions throw an exception, we usually want to silently ignore them.
            # (ie: We want to return `nan` for math.sqrt(-5))
            if res_python != res_script:
                if isinstance(res_python, Exception):
                    continue
                if type(res_python) is type(res_script):
                    if isinstance(res_python, tuple) and (math.isnan(res_python[0]) == math.isnan(res_script[0])):
                        continue
                    if isinstance(res_python, float) and math.isnan(res_python) and math.isnan(res_script):
                        continue
                msg = (f"Failed on {func_name} with inputs {a} {b}. Python: {res_python}, Script: {res_script}")
                # math.pow() behavior has changed in 3.11, see https://docs.python.org/3/library/math.html#math.pow
                if sys.version_info >= (3, 11) and func_name == "pow" and a == 0.0 and b == -math.inf:
                    self.assertTrue(res_python == math.inf and type(res_script) is RuntimeError)
                else:
                    self.assertEqual(res_python, res_script, msg=msg, atol=(1e-4) * max(abs(res_python), res_script), rtol=0)

    unary_float_ops = ["log", "log1p", "log10", "exp", "sqrt", "gamma", "lgamma", "erf",
                       "erfc", "expm1", "fabs", "acos", "asin", "atan", "cos", "sin", "tan",
                       "asinh", "atanh", "acosh", "sinh", "cosh", "tanh", "degrees", "radians"]
    binary_float_ops = ["atan2", "fmod", "copysign"]
    for op in unary_float_ops:
        checkMathWrap(op, 1)
    for op in binary_float_ops:
        checkMathWrap(op, 2)

    # Ops with non-float return types or restricted input domains.
    checkMath("modf", 1, ret_type="Tuple[float, float]")
    checkMath("frexp", 1, ret_type="Tuple[float, int]")
    checkMath("isnan", 1, ret_type="bool")
    checkMath("isinf", 1, ret_type="bool")
    checkMath("ldexp", 2, is_float=False, ret_type="float", args_type="(float, int)",
              vals=[(i, j) for i in float_vals for j in range(-10, 10)])
    checkMath("pow", 2, is_float=False, ret_type="float")
    checkMath("pow", 2, is_float=True, ret_type="float")
    checkMathWrap("floor", ret_type="int")
    checkMathWrap("ceil", ret_type="int")
    checkMathWrap("gcd", 2, is_float=False, ret_type="int")
    checkMath("isfinite", 1, ret_type="bool")
    checkMathWrap("remainder", 2)
    checkMathWrap("factorial", 1, is_float=False, ret_type="int", vals=[(i, 0) for i in range(-2, 10)])
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_if_nest_while(self):
    """A while loop nested inside an if compiles and runs correctly."""
    def func(x, y):
        # type: (int, int) -> int
        result = 0
        if x > y:
            while x > y:
                y = y + 1
                result = -y
        return result

    self.checkScript(func, self._make_scalar_vars([4321, 1234], torch.int64))
def test_script_optional_none(self):
    """Optional[T] handling: reassignment over None, None arguments,
    and None default parameter values (tensor and non-tensor)."""
    def none_stmt(x):
        output = None
        output = x
        return output

    def none_args(x):
        # type: (Optional[Tensor]) -> Optional[Tensor]
        return None

    self.checkScript(none_stmt, [torch.arange(0, 2)], optimize=True)
    self.checkScript(none_args, [None], optimize=True)

    # Undefined tensor (None) as a default parameter value.
    def optional_tensor_default(x=None):
        # type: (Optional[Tensor]) -> Tensor
        res = torch.zeros(1, dtype=torch.int8)
        if x is None:
            res = res + 1
        else:
            res = x
        return res

    scripted = torch.jit.script(optional_tensor_default)
    self.assertEqual(optional_tensor_default(), scripted())
    self.assertEqual(optional_tensor_default(torch.zeros(1)), scripted(torch.zeros(1)))

    # Typical (non-tensor) None as a default parameter value.
    def optional_float_default(x=None):
        # type: (Optional[float]) -> float
        res = 2.0
        if x is None:
            res = res + 1.0
        else:
            res = x
        return res

    scripted = torch.jit.script(optional_float_default)
    self.assertEqual(optional_float_default(), scripted())
    self.assertEqual(optional_float_default(1.0), scripted(1.0))
def test_script_clamp_none(self):
    """torch.clamp accepts an explicit None for either bound when scripted."""
    def clamp_max_none(x):
        return torch.clamp(x, min=2, max=None)

    def clamp_max(x):
        return torch.clamp(x, max=2)

    def clamp_min_none(x):
        return torch.clamp(x, min=None, max=2)

    def clamp_min(x):
        return torch.clamp(x, min=2)

    args = [torch.arange(0, 3)]
    for fn in (clamp_max_none, clamp_max, clamp_min_none, clamp_min):
        self.checkScript(fn, args, optimize=True)
def test_script_bool_constant(self):
    """A bool literal assigned to a local and returned scripts correctly."""
    def bool_const():
        flag = True
        return flag

    self.checkScript(bool_const, [])
def test_ternary(self):
    """A conditional expression picks the correct branch under scripting."""
    def func(a, b):
        c = 3
        c = a + b if bool(a > 3) else b
        return c

    # One input set per branch of the ternary.
    for pair in ([5, 2], [1, 0]):
        self.checkScript(func, self._make_scalar_vars(pair, torch.int64), optimize=True)
def test_ternary_module_type_hint(self):
    """Ternaries whose branches have unrelated types need an Any/Optional hint."""
    class ReturnAnnotated(torch.nn.Module):
        def forward(self) -> Any:
            return 'out' if self.training else {}

    class LocalAnnotated(torch.nn.Module):
        def forward(self) -> Any:
            out: Any = 'out' if self.training else {}
            return out

    class OptionalAnnotated(torch.nn.Module):
        def forward(self) -> Optional[int]:
            return None if self.training else 1

    for module_cls in (ReturnAnnotated, LocalAnnotated, OptionalAnnotated):
        # Both branches must be exercised: training and eval mode.
        self.checkModule(module_cls().train(), ())
        self.checkModule(module_cls().eval(), ())
def test_ternary_static_if(self):
    """A Final bool condition makes the ternary statically resolvable.

    Because `flag` is annotated Final, only the taken branch is compiled,
    so the dead `{}` branch never has to type-check against the Tensor
    return annotation.
    """
    # Condition is statically True: the tensor branch is taken.
    class TrueBranch(torch.nn.Module):
        flag: torch.jit.Final[bool]

        def __init__(self) -> None:
            super().__init__()
            self.flag = True

        def forward(self) -> torch.Tensor:
            return torch.ones(3) if self.flag else {}

    # Condition is statically False: the tensor branch is taken again.
    class FalseBranch(torch.nn.Module):
        flag: torch.jit.Final[bool]

        def __init__(self) -> None:
            super().__init__()
            self.flag = False

        def forward(self) -> torch.Tensor:
            return {} if self.flag else torch.ones(3)

    for module_cls in (TrueBranch, FalseBranch):
        eager = module_cls()
        scripted = torch.jit.script(eager)
        self.assertEqual(eager.forward(), scripted.forward())
def test_ternary_right_associative(self):
    """Chained ternaries nest to the right, exactly as in Python."""
    def plus_123(x: int):
        return x + 1 if x == 1 else x + 2 if x == 2 else x + 3

    # One input per arm of the chained conditional.
    for arg in (1, 2, 3):
        self.checkScript(plus_123, (arg,))
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_print(self):
    """print() with mixed tensor/scalar/list arguments scripts correctly."""
    def func(x, y):
        q = (x + y).sigmoid()
        print(q, 1, 2, [1, 2], [1.0, 2.0])
        w = -q
        return w * w

    first = torch.arange(4., requires_grad=True)
    second = torch.arange(0., 8, 2, requires_grad=True)
    self.checkScript(func, [first, second], optimize=True, capture_output=True)
def test_format(self):
    """str.format with and without placeholders works under scripting."""
    def func(x):
        print("{}, I'm a {}".format("Hello", "test"))
        print("format blank".format())
        print("stuff before {}".format("hi"))
        print("{} stuff after".format("hi"))
        return x + 1

    arg = torch.arange(4., requires_grad=True)
    self.checkScript(func, [arg], optimize=True, capture_output=True)
def test_logical_short_circuit(self):
    """Short-circuitable boolean expressions must not emit control flow.

    `False and ...` / `True or ...` can be folded at compile time, so no
    prim::If appears in the graph and the out-of-range t[1] on the
    short-circuited side is never evaluated.  When the indexing sits on
    the evaluated side instead, the runtime error must surface.
    """
    @torch.jit.script
    def testNoThrows(t):
        c1 = 1
        if (False and bool(t[1])) or (True or bool(t[1])):  # noqa: SIM222,SIM223
            c1 = 0
        return c1

    # Fully constant-folded condition: no prim::If node in the graph.
    FileCheck().check_not("prim::If").run(testNoThrows.graph)
    # t[1] is out of range for a size-0 tensor, but is never evaluated.
    self.assertEqual(0, testNoThrows(torch.randn(0)))
    self.assertEqual(0, testNoThrows(torch.randn([2, 3])))

    @torch.jit.script
    def throwsOr(t):
        c0 = False or bool(t[1])
        print(c0)

    @torch.jit.script
    def throwsAnd(t):
        c0 = True and bool(t[1])
        print(c0)

    t = torch.randn(0)
    # bool(t[1]) is on the evaluated side here, so indexing must raise.
    with self.assertRaisesRegex(RuntimeError, "index 1 out of range for tensor of size"):
        throwsOr(t)
    with self.assertRaisesRegex(RuntimeError, "index 1 out of range for tensor of size"):
        throwsAnd(t)
def test_type_cast(self):
    """Scalar constructors (int/float/bool) act as casts in TorchScript.

    A one-line casting function is generated per (from_type, to_type) pair
    and checkScript compares the scripted result against eager Python.
    """
    template = dedent('''
    def func(v):
        # type: ({from_type}) -> {to_type}
        return {to_type}(v)
    ''')

    # Fix: the original helper accepted a `raises=False` parameter that was
    # never used, silently ignoring any caller's intent to check the error
    # path.  No call site passed it, so it is removed.
    def check_cast(from_type, to_type, value):
        code = template.format(from_type=from_type, to_type=to_type)
        self.checkScript(code, (value,))

    check_cast('int', 'float', 1)
    check_cast('int', 'bool', 1)
    check_cast('int', 'bool', 0)
    check_cast('float', 'int', 1.)
    check_cast('float', 'bool', 1.)
    check_cast('float', 'bool', 0.)
    check_cast('bool', 'int', True)
    check_cast('bool', 'float', True)
def test_multiple_assignment(self):
    """Tuple-unpacking the result of a called function works when scripted."""
    def outer_func(x):
        return x * 2, x + 2

    @torch.jit.script
    def func(x):
        y, z = outer_func(x)
        return y + z

    inp = torch.arange(4)
    self.assertEqual(func(inp), inp * 2 + inp + 2)
def test_literals(self):
    """A list literal passed as a keyword argument (size=[...]) parses."""
    def func(a):
        return a.view(size=[1, 2, 3])

    self.checkScript(func, [torch.randn(6)], optimize=True)
def test_return(self):
    """Zero, empty, single, and multiple return statements all script;
    a declared return type with a path that falls off the end is an error."""
    def no_return(a):
        a + 1

    def void_return(a):
        return

    def one_return(a):
        return a + 1.

    def multiple_returns(a):
        return a * 1., a * 2., a * 3.

    inputs = [torch.randn(1, dtype=torch.float)]
    for fn in (no_return, void_return, one_return, multiple_returns):
        self.checkScript(fn, inputs, optimize=True)

    # Annotated to return a Tensor but never returns one: compile error.
    with self.assertRaisesRegex(RuntimeError, "does not return along all paths"):
        torch.jit.CompilationUnit('''
        def no_return_bad_annotation(a):
            # type: (Tensor) -> Tensor
            a + 1
        ''')
def test_error(self):
    """Runtime shape errors inside scripted fns are reported by the
    TorchScript interpreter, not at compile time."""
    @torch.jit.script
    def foo(a):
        return a.t()

    s = Variable(torch.rand(5, 5, 5))
    # XXX: this should stay quiet in stay propagation and only fail in the interpreter
    with self.assertRaisesRegex(RuntimeError, "failed in the TorchScript interpreter"):
        # t() on a 3-D tensor fails at runtime
        foo(s)

    @torch.jit.script
    def bar(c, b):
        return c + b

    with self.assertRaisesRegex(RuntimeError, "failed in the TorchScript interpreter"):
        # sizes 10 vs 9 cannot broadcast
        bar(Variable(torch.rand(10), requires_grad=True), Variable(torch.rand(9), requires_grad=True))
def test_error_stacktrace(self):
    """The interpreter's error message contains the scripted call chain
    (bar -> foo -> baz) as a traceback."""
    @torch.jit.script
    def baz(c, b):
        return c + b

    @torch.jit.script
    def foo(c, b):
        return baz(c, b)

    @torch.jit.script
    def bar(c, b):
        return foo(c, b)

    with self.assertRaises(RuntimeError) as cm:
        # size mismatch (10 vs 9) raises inside baz, three calls deep
        bar(torch.rand(10), torch.rand(9))
    FileCheck().check("The following operation failed in the TorchScript interpreter") \
        .check("Traceback") \
        .check("in foo").check("in baz").run(str(cm.exception))
def test_error_stacktrace_interface(self):
    """Like test_error_stacktrace, but the failing call chain is reached
    through a scripted class held behind a torch.jit.interface."""
    @torch.jit.script
    def baz(c, b):
        return c + b

    @torch.jit.script
    def foo(c, b):
        return baz(c, b)

    @torch.jit.script
    def bar(c, b):
        return foo(c, b)

    @torch.jit.script
    class Bar:
        def one(self, x, y):
            return bar(x, y)

    @torch.jit.interface
    class IFace:
        def one(self, x, y):
            # type: (Tensor, Tensor) -> Tensor
            pass

    make_global(IFace)

    @torch.jit.script
    def as_interface(x):
        # type: (IFace) -> IFace
        return x

    f = as_interface(Bar())

    with self.assertRaises(RuntimeError) as cm:
        x = f.one(torch.rand(10), torch.rand(9))
        # NOTE(review): the line below is unreachable when the call above
        # raises as expected — looks like a copy-paste leftover from
        # test_error_stacktrace; confirm before removing.
        bar(torch.rand(10), torch.rand(9))
    FileCheck().check("The following operation failed in the TorchScript interpreter") \
        .check("Traceback") \
        .check("in foo").check("in baz").run(str(cm.exception))
def test_operator_precedence(self):
    """A mixed arithmetic/shift/bitwise expression groups as in Python."""
    def double(x):
        # type: (int) -> int
        return 2 * x

    def complicated_arithmetic_operation():
        # TODO we need to test exponent operator '**' and bitwise not
        # operator '~' once they are properly supported.
        values = [0, 1, 2, 3]
        result = values[1:3][0] + double(4) + (-3 + 8) * 6 // 2 % 4 << 2 + 1 >> 1 | 23 & 16 + 3 ^ 4
        return result

    self.checkScript(complicated_arithmetic_operation, ())
def test_in_operator_with_two_strings(self):
    """`in` applied to two strings checks substring containment."""
    def contains() -> bool:
        return "a" in "abcd"

    self.checkScript(contains, ())
def test_bitwise_ops(self):
    """&, ^, |, <<, >>, and ~ over ints, bools, and tensors."""
    def int_test():
        return 2 & 3, 2 ^ 3, 2 | 3, 2 << 3, 2 >> 3

    self.checkScript(int_test, ())

    def bool_test(x, y):
        # type: (bool, bool) -> Tuple[bool, bool, bool]
        return x & y, x ^ y, x | y

    for args in ((True, False), (True, True)):
        self.checkScript(bool_test, args)

    def tensor_test(x, y):
        return x & y, x ^ y, x | y

    def tensor_with_int_test(x, y):
        # type: (Tensor, int) -> Tuple[Tensor, Tensor]
        return x << y, x >> y

    lhs = torch.tensor(2)
    rhs = torch.tensor(3)
    self.checkScript(tensor_test, (lhs, rhs))
    self.checkScript(tensor_with_int_test, (lhs, 2))

    def not_test(x):
        return ~x

    self.checkScript(not_test, (torch.tensor([2, 4]),))
def test_all(self):
    """all() over uint8 tensors and over List[bool]/List[int]/List[float]."""
    @torch.jit.script
    def test_all_tensor(x):
        return all(x)

    tensor_cases = [
        ([1, 0, 3], False),
        ([3.14, 3, 99], True),
        ([True, True], True),
        ([True, False], False),
    ]
    for data, expected in tensor_cases:
        self.assertEqual(bool(test_all_tensor(torch.tensor(data, dtype=torch.uint8))), expected)

    @torch.jit.script
    def test_all_bool_list(x):
        # type: (List[bool]) -> bool
        return all(x)

    bool_cases = [
        ([True, True], True),
        ([True, 1], True),
        ([True, False], False),
        ([True, 0], False),
        ([False, 0], False),
        ([], True),  # vacuous truth on an empty list
    ]
    for data, expected in bool_cases:
        self.assertEqual(test_all_bool_list(data), expected)

    @torch.jit.script
    def test_all_int_list(x):
        # type: (List[int]) -> bool
        return all(x)

    self.assertTrue(test_all_int_list([3, 6]))
    self.assertFalse(test_all_int_list([2, 0]))

    @torch.jit.script
    def test_all_float_list(x):
        # type: (List[float]) -> bool
        return all(x)

    self.assertTrue(test_all_float_list([3.14, 8.1]))
    self.assertFalse(test_all_float_list([3.14, 0, 8.9]))
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
def test_number_math(self):
    """Scalar arithmetic/comparison ops and min/max must match eager Python.

    Generates one function per (op, scalar1, scalar2) combination, runs it
    both scripted and exec()'d, and compares.  A second pass routes the
    scalars through torch.tensor(...).item() to exercise Scalar overloads.
    """
    ops_template = dedent('''
    def func():
        return {scalar1} {op} {scalar2}
    ''')
    ops = ['+', '-', '*', '%', '<', '<=', '>', '>=', '==', '!=', '//']
    funcs_template = dedent('''
    def func():
        return {func}({scalar1}, {scalar2})
    ''')
    funcs = ['min', 'max']
    scalars = ['7', '2', '3', '-3', '3.14', '0.125', '-0.5', '2.0', '-2.0']
    scalar_pairs = [(scalar1, scalar2) for scalar1 in scalars for scalar2 in scalars]

    def run_test(code):
        # Compile `code` with TorchScript and exec() it in Python; both
        # zero-arg functions must return the same value.
        scope = {}
        execWrapper(code, globals(), scope)
        cu = torch.jit.CompilationUnit(code)
        self.assertEqual(cu.func(), scope['func']())

    for scalar1, scalar2 in scalar_pairs:
        for op in ops:
            code = ops_template.format(op=op, scalar1=scalar1, scalar2=scalar2)
            run_test(code)
        for func in funcs:
            code = funcs_template.format(func=func, scalar1=scalar1, scalar2=scalar2)
            run_test(code)

    # test Scalar overloads
    for scalar1, scalar2 in scalar_pairs:
        item1 = 'torch.tensor(' + scalar1 + ').item()'
        item2 = 'torch.tensor(' + scalar2 + ').item()'
        for op in ops:
            code = ops_template.format(op=op, scalar1=item1, scalar2=scalar2)
            run_test(code)
            code = ops_template.format(op=op, scalar1=scalar1, scalar2=item2)
            run_test(code)
            code = ops_template.format(op=op, scalar1=item1, scalar2=item2)
            run_test(code)
        for func in funcs:
            code = funcs_template.format(func=func, scalar1=item1, scalar2=scalar2)
            run_test(code)
            code = funcs_template.format(func=func, scalar1=scalar1, scalar2=item2)
            run_test(code)
            code = funcs_template.format(func=func, scalar1=item1, scalar2=item2)
            run_test(code)
def test_number_abs(self):
    """abs() on float, int, and Tensor operands."""
    def float_abs(x):
        # type: (float) -> float
        return abs(x)

    def int_abs(x):
        # type: (int) -> int
        return abs(x)

    def tensor_abs(x):
        return abs(x)

    for val in (-3.14, 3.14):
        self.checkScript(float_abs, (val,))
    for val in (-10, 10):
        self.checkScript(int_abs, (val,))
    for data in ([-5, -10, -20], [5, 10, 20], [-5, 10, -20]):
        self.checkScript(tensor_abs, (torch.tensor(data),))
def test_number_div(self):
    """Scalar division under both __future__ and legacy semantics."""
    self.assertEqual(div_int_future(), torch.jit.script(div_int_future)())
    for fn in (div_float_future, div_int_nofuture, div_float_nofuture):
        self.checkScript(fn, ())
# Testing bitwise shorthand aug assignment
def test_bool_augassign_bitwise_or(self):
    """|= on bools behaves as logical-or when scripted."""
    def func(a: bool, b: bool) -> bool:
        a |= b
        return a

    # Same case order as spelling them out: (T,F), (T,T), (F,F), (F,T).
    for lhs in (True, False):
        for rhs in (False, True):
            self.checkScript(func, (lhs, rhs), optimize=True)
def test_bool_augassign_bitwise_and(self):
    """&= on bools behaves as logical-and when scripted."""
    def func(a: bool, b: bool) -> bool:
        a &= b
        return a

    # Same case order as spelling them out: (T,F), (T,T), (F,F), (F,T).
    for lhs in (True, False):
        for rhs in (False, True):
            self.checkScript(func, (lhs, rhs), optimize=True)
def test_bool_augassign_bitwise_xor(self):
    """^= on bools behaves as logical-xor when scripted."""
    def func(a: bool, b: bool) -> bool:
        a ^= b
        return a

    # Same case order as spelling them out: (T,F), (T,T), (F,F), (F,T).
    for lhs in (True, False):
        for rhs in (False, True):
            self.checkScript(func, (lhs, rhs), optimize=True)
def test_number_augassign_bitwise_lshift(self):
    """<<= on an int scripts correctly."""
    def func() -> int:
        value = 8
        value <<= 2
        return value

    self.checkScript(func, (), optimize=True)
def test_number_augassign_bitwise_rshift(self):
    """>>= on an int scripts correctly."""
    def func() -> int:
        value = 8
        value >>= 2
        return value

    self.checkScript(func, (), optimize=True)
def test_number_augassign_bitwise_pow(self):
    """**= on an int scripts correctly.

    NOTE: the return annotation is float, not int — presumably because
    int ** int promotes to float under scripting; confirm against the
    TorchScript builtin table.
    """
    def func() -> float:
        value = 8
        value **= 2
        return value

    self.checkScript(func, (), optimize=True)
def test_number_augassign(self):
    """+= on an int scripts correctly."""
    def func():
        total = 1
        total += 2
        return total

    self.checkScript(func, (), optimize=True)
def test_nested_select_assign(self):
    """Assignments through nested submodule attribute chains take effect."""
    class Innermost(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.abc = 11

        def forward(self, x):
            return self.abc

    class Middle(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.a = 11
            self.nested = Innermost()

        def forward(self, x):
            return self.a

    class Outer(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.sub = Middle()
            self.hi = 1

        def forward(self):
            # Mutate own, child, and grandchild attributes, then read back.
            self.hi = 5
            self.sub.a = 1
            self.sub.nested.abc = 5
            return self.sub.a * 20 + self.sub.nested.abc * 3 + self.hi

    self.checkModule(Outer(), ())
def test_number_neg(self):
    """Unary minus on int and float literals."""
    def neg_int():
        # int -> int
        return -8

    def neg_float():
        # float -> float
        return -3.14

    for fn in (neg_int, neg_float):
        self.checkScript(fn, (), optimize=True)
def test_compare_two_bool_inputs(self):
    """== and != on two bool arguments match eager Python for all cases."""
    def compare_eq(a: bool, b: bool):
        return a == b

    def compare_ne(a: bool, b: bool):
        return a != b

    for fn in (compare_eq, compare_ne):
        scripted = torch.jit.script(fn)
        for lhs, rhs in product([True, False], [True, False]):
            self.assertEqual(scripted(lhs, rhs), fn(lhs, rhs))
def _test_tensor_number_math(self, device='cpu'):
    """Compare scripted vs eager tensor-scalar binary ops on `device`.

    Generates `t <op> const` / `const <op> t` for every combination of op,
    tensor dtype (float/double/long), int/float constant, and operand
    order, asserting both value and dtype agree with eager execution.
    """
    template = dedent('''
    def func(t):
        return {lhs} {op} {rhs}
    ''')

    def test(op, tensor, const, swap_args, template=template):
        args = ('t', const)
        if swap_args:
            args = (const, 't')
        code = template.format(lhs=args[0], rhs=args[1], op=op)
        scope = {}
        execWrapper(code, globals(), scope)
        cu = torch.jit.CompilationUnit(code)
        message = f'with code `{args[0]} {op} {args[1]}` and t={tensor}'
        res1 = cu.func(tensor)
        res2 = scope['func'](tensor)
        self.assertEqual(res1, res2, msg=message + "\nres1=" + str(res1) + "\nres2=" + str(res2))
        self.assertEqual(res1.dtype, res2.dtype, msg=message + "\nres1=" + str(res1) + "\nres2=" + str(res2))

    var_int = [2, -2]
    var_float = [1.4321, -1.2]
    ops = ['+', '-', '*', '%', '<', '<=', '>', '>=', '==', '!=', '/']
    float_tensor = torch.randn(5, 5, device=device)
    double_tensor = torch.randn(5, 5, dtype=torch.double, device=device)
    long_tensor = torch.randint(-5, 5, (5, 5), dtype=torch.long, device=device)
    # Avoid division/modulo by zero in the long tensor.
    long_tensor[long_tensor == 0] = 2
    tensors = [float_tensor, double_tensor, long_tensor]
    consts = var_int + var_float

    for op, tensor, const, swap_args in product(ops, tensors, consts, [True, False]):
        # FIXME: things like 2 / long_tensor are not implemented correctly
        # Look in torch/_tensor.py to see how pytorch implements it.
        if op == '/' and tensor.data_ptr() == long_tensor.data_ptr():
            continue
        # % operator does not take: const % tensor
        if op == '%' and swap_args is True:
            continue
        test(op, tensor, const, swap_args)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
def test_tensor_number_math(self):
    """Run the tensor/scalar arithmetic matrix on the default (CPU) device."""
    self._test_tensor_number_math()
def test_torch_tensor_bad_input(self):
    """Invalid torch.tensor list literals produce the expected errors:
    None elements and untyped empty lists fail, ragged nesting fails at
    runtime."""
    with self.assertRaisesRegex(RuntimeError, "must be of ints, floats, "
                                              "or bools, got None"):
        @torch.jit.script
        def test():
            return torch.tensor([None])
        test()

    with self.assertRaisesRegex(RuntimeError, r"Empty lists default to List\[Tensor\]"):
        @torch.jit.script
        def tmp():
            return torch.tensor([])
        tmp()

    @torch.jit.script
    def foo():
        return torch.tensor([[2, 2], [1]])

    with self.assertRaisesRegex(RuntimeError, "Expected sequence of length"):
        # ragged nested list: row lengths 2 and 1
        foo()
@suppress_warnings
def test_torch_tensor_as_tensor_empty_list(self):
    """torch.tensor / torch.as_tensor on (nested) annotated empty int lists."""
    tensor_template = dedent('''
    def func():
        empty_list = torch.jit.annotate(List[int], [])
        ten1 = torch.{tensor_op}({input})
        return ten1
    ''')
    op_names = ['tensor', 'as_tensor']
    list_exprs = ['empty_list', '[empty_list, empty_list]', '[[[empty_list]]]']
    for op_name, list_expr in product(op_names, list_exprs):
        code = tensor_template.format(tensor_op=op_name, input=list_expr)
        scope = {}
        exec(code, globals(), scope)
        cu = torch.jit.CompilationUnit(code)
        scripted_result = cu.func()
        eager_result = scope['func']()
        if list_expr == 'empty_list':
            # torchscript returns int tensor, python returns float tensor
            self.assertNotEqual(scripted_result.dtype, eager_result.dtype)
        self.assertEqual(scripted_result, eager_result, exact_dtype=False)
        self.assertEqual(scripted_result.device, eager_result.device)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "Simple Executor doesn't have any shapes to propagate")
def test_tensor_as_tensor_shape_prop(self):
    """Shape analysis infers dtype/rank of torch.tensor / torch.as_tensor
    from the literal argument; dynamic dtype arguments stay unrefined."""
    tensor_template = dedent('''
    def func():
        return torch.{tensor_op}({input})
    ''')
    ops = ['tensor', 'as_tensor']
    inputs = ['[1]', '[False]', '[2.5]', '0.5', '1', 'False', '[[1]]', 'torch.jit.annotate(List[List[int]], [])']
    # One expected graph type per input literal above (same order).
    expected_shape = ["Long(*, device=cpu)", "Bool(*, device=cpu)",
                      "Float(*, device=cpu)", "Float(device=cpu)",
                      "Long(device=cpu)", "Bool(device=cpu)", "Long(*, *, device=cpu)"]
    for op in ops:
        for inp, expect in zip(inputs, expected_shape):
            code = tensor_template.format(tensor_op=op, input=inp)
            scope = {}
            exec(code, globals(), scope)
            cu = torch.jit.CompilationUnit(code)
            torch._C._jit_pass_complete_shape_analysis(cu.func.graph, (), False)
            FileCheck().check(expect).check(f"aten::{op}").run(cu.func.graph)

    @torch.jit.script
    def test_dtype(inp_dtype: torch.dtype):
        # First tensor has a static dtype; second depends on the argument.
        a = torch.tensor(1.0, dtype=torch.float, requires_grad=True)
        return a, torch.tensor(1.0, dtype=inp_dtype)

    if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
        g = test_dtype.graph_for(5, profile_and_replay=True)
        # both should have completed shapes
        FileCheck().check("Tensor = aten::tensor").check("Float(device=cpu) = prim::BailOut") \
            .check("Tensor = aten::tensor").check("Half(device=cpu) = prim::BailOut").run(g)
    else:
        g = test_dtype.graph_for(5)
        # first should have type set second should not
        FileCheck().check("Float(requires_grad=1, device=cpu) = aten::tensor") \
            .check("Tensor(requires_grad=0) = aten::tensor").run(g)

    @torch.jit.script
    def test_as_tensor_tensor_input(input):
        a = torch.as_tensor(input, dtype=input.dtype)
        return a, torch.as_tensor(input, dtype=torch.float)

    if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
        g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4), profile_and_replay=True)
        FileCheck().check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut") \
            .check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut").run(g)
    else:
        g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4))
        FileCheck().check("Tensor = aten::as_tensor").check("Float(*, *, requires_grad=0, device=cpu) = aten::as_tensor").run(g)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "testing legacy behavior")
def test_tensor_requires_grad(self):
    """requires_grad from torch.tensor(...) is reflected on graph outputs,
    whether the flag is dynamic (an argument) or a literal."""
    @torch.jit.script
    def test(b):
        # type: (bool) -> Tuple[Tensor, Tensor, Tensor]
        a = torch.tensor(1., requires_grad=b)
        b = torch.tensor(1., requires_grad=True)
        c = torch.tensor(1., requires_grad=False)
        return a, b, c

    g = test.graph_for(True)
    out = next(g.outputs())
    # The single tuple output's inputs are the three tensors above.
    out_inp = list(out.node().inputs())
    self.assertTrue(out_inp[0].requires_grad())   # b == True at this call
    self.assertTrue(out_inp[1].requires_grad())   # literal True
    self.assertFalse(out_inp[2].requires_grad())  # literal False
def test_grad_from_script(self):
    """Autograd through a scripted function matches eager autograd."""
    def test():
        a = torch.tensor(2.5, requires_grad=True)
        b = a * 2
        return a, b

    eager_leaf, eager_out = test()
    eager_out.backward()

    script_leaf, script_out = torch.jit.script(test)()
    script_out.backward()
    self.assertEqual(eager_leaf.grad, script_leaf.grad)
def test_torch_tensor_as_tensor(self):
    """torch.tensor / torch.as_tensor over a matrix of list literals,
    dtypes, and devices must match eager in value, dtype, and device."""
    tensor_template = dedent('''
    def func():
        li = {list_create}
        ten1 = torch.{tensor_op}(li {options})
        return ten1
    ''')
    lists = ["2.5", "4", "True", "False", "[2]", "[-.5]", "[False, True, False]", "[2, 2]", "(1, 1)",
             "torch.jit.annotate(List[List[int]], [])",
             "torch.jit.annotate(List[int], [])", "[2.5, 2.5]", "[[2], [2]]", "[[-.5], [2.2]]", "[[False], [True]]"]
    dtypes = ["", ", dtype=torch.float", ", dtype=torch.double", ", dtype=torch.half",
              ", dtype=torch.uint8", ", dtype=torch.int8", ", dtype=torch.short",
              ", dtype=torch.int", ", dtype=torch.long", ", dtype=torch.cfloat",
              ", dtype=torch.cdouble"]
    ops = ['tensor', 'as_tensor']
    devices = ['', ", device='cpu'"]
    if RUN_CUDA:
        devices.append(", device='cuda'")
    option_pairs = [dtype + device for dtype in dtypes for device in devices]
    for op in ops:
        for li in lists:
            for option in option_pairs:
                # tensor from empty list is type float in python and annotated type in torchscript
                if "annotate" in li and "dtype" not in option:
                    continue
                # Skip unsigned tensor initialization for signed values on 3.10
                if "torch.uint8" in option and "-" in li:
                    continue
                code = tensor_template.format(list_create=li, tensor_op=op, options=option)
                scope = {}
                exec(code, globals(), scope)
                cu = torch.jit.CompilationUnit(code)
                t1 = cu.func()
                t2 = scope['func']()
                if t1.dtype == torch.float16:  # equality NYI for half tensor
                    self.assertTrue(str(t1) == str(t2))
                else:
                    self.assertEqual(t1, t2)
                self.assertEqual(t1.dtype, t2.dtype)
                self.assertEqual(t1.device, t2.device)

    # as_tensor with complex, float, and int target dtypes on a tensor input.
    def test_as_tensor_tensor_input(input):
        # type: (Tensor) -> Tuple[Tensor, Tensor, Tensor]
        return torch.as_tensor(input, dtype=torch.cfloat), torch.as_tensor(input, dtype=torch.float), \
            torch.as_tensor(input, dtype=torch.int32)

    inp = torch.randn(3, 4, dtype=torch.cfloat)
    self.checkScript(test_as_tensor_tensor_input, (inp,))
def test_torch_tensor_dtype(self):
    """torch.tensor(float_scalar) must follow the ambient default dtype
    (double/float/half) both eagerly and under scripting."""
    def foo(s: float):
        return torch.tensor(s), torch.tensor([s, s])

    # need to clear function cache so we re run shape analysis
    with set_default_dtype(torch.double):
        self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
        if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
            FileCheck().check("Double").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
    with set_default_dtype(torch.float):
        # Drop the cached compilation so the new default dtype is observed.
        del torch.jit._state._jit_caching_layer[foo]
        self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
        if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
            FileCheck().check("Float").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
    with set_default_dtype(torch.half):
        del torch.jit._state._jit_caching_layer[foo]
        self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
        if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
            FileCheck().check("Half").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
def test_shape_analysis_grad_property(self):
    """Shape analysis must leave requires_grad undetermined (None), not set it."""
    @torch.jit.script
    def fn(x):
        return torch.sub(x, torch.tanh(x))

    torch._C._jit_pass_complete_shape_analysis(fn.graph, (torch.tensor([0.39]),), False)
    # requires_grad property shouldn't be accidentally set by shape analysis
    self.assertTrue(fn.graph.findNode("aten::sub").output().requiresGrad() is None)
def test_empty_like_memory_format_bc(self):
    """memory_format=None is accepted for backward compatibility.

    NOTE(review): despite the test name this calls torch.zeros_like, not
    torch.empty_like — presumably because empty_like output is
    uninitialized and cannot be compared; confirm.
    """
    def f(x):
        # type: (Tensor) -> Tensor
        return torch.zeros_like(x, memory_format=None)

    inp = torch.rand(3, 4)
    self.assertEqual(torch.jit.script(f)(inp), f(inp))
def test_multiline_string_dedents(self):
    """Multiline string literals with unusual internal indentation must
    survive scripting byte-for-byte (scripted foo() equals eager foo())."""
    def foo() -> None:
        # The exact leading whitespace inside these literals is the point
        # of the test; do not reformat them.
        multiline_string_dedent_1 = """
This is a string dedent """
        multiline_string_dedent_2 = """ This is a
string dedent """
        multiline_string_dedent_3 = """
This is a string
dedent """
        multiline_string_dedent_4 = """ This is a string dedent """

    scripted_foo = torch.jit.script(foo)
    self.assertEqual(scripted_foo(), foo())
def test_class_with_comment_at_lower_indentation(self):
    """Scripting must tolerate a comment indented less than the code
    around it (the mis-indented comment below is deliberate)."""
    class Foo(torch.nn.Module):
        def forward(self, x):
            x = torch.neg(x)
    # This comment is at the wrong indent
            return x

    torch.jit.script(Foo())
# adapted from test in test_torch
def test_tensor_to(self):
    """Exercise Tensor.to() overloads under scripting: dtype/device/other-
    tensor targets, copy and non_blocking flags, aliasing behavior when no
    conversion is needed, and autograd through the converted result."""
    template = dedent('''
    def func(t):
        cuda = "{cuda}"
        device = "{device}"
        non_blocking = {non_blocking}
        return {to_str}
    ''')

    def s(t, to_str, non_blocking=None, device=None, cuda=None):
        # Compile `to_str` (which may reference the cuda/device/non_blocking
        # locals of the template) and run it on t.
        device = device if device is not None else str(t.device)
        non_blocking = non_blocking if non_blocking is not None else False
        cuda = "cuda" if cuda is None else cuda
        code = template.format(to_str=to_str, device=device, non_blocking=non_blocking, cuda=cuda)
        scope = {}
        cu = torch.jit.CompilationUnit(code)
        return cu.func(t, profile_and_replay=True)

    def test_copy_behavior(t, non_blocking=False):
        # No-op conversions alias the input; copy=True forces a new tensor.
        self.assertIs(t, s(t, 't.to(t, non_blocking=non_blocking)', non_blocking))
        self.assertIs(t, s(t, 't.to(t.dtype, non_blocking=non_blocking)', non_blocking))
        self.assertIs(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking)', non_blocking))
        self.assertIsNot(t, s(t, 't.to(t, non_blocking=non_blocking, copy=True)', non_blocking))
        self.assertIsNot(t, s(t, 't.to(t.dtype, non_blocking=non_blocking, copy=True)', non_blocking))
        self.assertIsNot(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking, copy=True)', non_blocking))

        # Equivalent spellings of the tensor's own device must also alias.
        devices = [t.device]
        if t.device.type == 'cuda':
            if t.device.index == -1:
                devices.append(f'cuda:{torch.cuda.current_device()}')
            elif t.device.index == torch.cuda.current_device():
                devices.append('cuda')
        for device in devices:
            self.assertIs(t, s(t, 't.to(device, non_blocking=non_blocking)', non_blocking, device))
            self.assertIs(t, s(t, 't.to(device, t.dtype, non_blocking=non_blocking)', non_blocking, device))
            self.assertIsNot(t, s(t, 't.to(device, non_blocking=non_blocking, copy=True)', non_blocking, device))
            self.assertIsNot(t, s(t, 't.to(device, t.dtype, non_blocking=non_blocking, copy=True)',
                                  non_blocking, device))

    t = torch.tensor(5)
    test_copy_behavior(t)

    # Device is preserved; dtype conversions produce the requested dtype.
    self.assertEqual(t.device, s(t, "t.to('cpu')").device)
    self.assertEqual(t.device, s(t, "t.to('cpu', dtype=torch.float32)").device)
    self.assertIs(torch.float32, s(t, "t.to('cpu', dtype=torch.float32)").dtype)
    self.assertEqual(t.device, s(t, "t.to(torch.float32)").device)
    self.assertIs(torch.float32, s(t, "t.to(dtype=torch.float32)").dtype)
    # No-op conversions share storage unless copy=True.
    self.assertEqual(t.data_ptr(), s(t, "t.to('cpu')").data_ptr())
    self.assertEqual(t.data_ptr(), s(t, "t.to(dtype=t.dtype, device=t.device, copy=False)").data_ptr())
    self.assertEqual(t.data_ptr(), s(t, "t.to('cpu', copy=False)").data_ptr())
    self.assertNotEqual(t.data_ptr(), s(t, "t.to('cpu', copy=True)").data_ptr())

    a = torch.tensor(5)
    if torch.cuda.is_available():
        for non_blocking in [True, False]:
            for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                b = torch.tensor(5., device=cuda)
                test_copy_behavior(b, non_blocking)
                self.assertEqual(b.device, s(b, "t.to(cuda, non_blocking=non_blocking).device", cuda=cuda))
                self.assertEqual(a.device, s(b, "t.to('cpu', non_blocking=non_blocking).device"))
                self.assertEqual(b.device, s(b, "t.to(cuda, non_blocking=non_blocking).device", cuda=cuda))
                self.assertIs(torch.int32, s(b, "t.to('cpu', dtype=torch.int32, non_blocking=non_blocking)").dtype)
                self.assertEqual(a.device, s(b, "t.to('cpu', dtype=torch.int32, non_blocking=non_blocking)").device)
                self.assertIs(torch.int32, s(b, "t.to(dtype=torch.int32)").dtype)
                self.assertEqual(b.device, s(b, "t.to(dtype=torch.int32)").device)

    # Test AD: aten::to(Tensor self, int dtype, bool non_blocking, bool copy) -> Tensor
    t = torch.tensor(5).float().requires_grad_()
    out_ref = t.to(torch.float32)
    out = s(t, "t.to(torch.float32)")
    self.assertEqual(out_ref, out)

    grad_ref = torch.autograd.grad(out_ref.sum(), t)
    grad = torch.autograd.grad(out.sum(), t)
    self.assertEqual(grad_ref, grad)

    # Test AD: aten::to(Tensor self, Device? device, int? dtype, bool non_blocking, bool copy) -> Tensor
    out_ref = t.to('cpu')
    out = s(t, "t.to('cpu')")
    self.assertEqual(out_ref, out)

    grad_ref = torch.autograd.grad(out_ref.sum(), t)
    grad = torch.autograd.grad(out.sum(), t)
    self.assertEqual(grad_ref, grad)

    # Test AD: aten::to(Tensor self, Tensor other, bool non_blocking, bool copy) -> Tensor
    @torch.jit.script
    def func2(t, t_ref):
        return t.to(t_ref)

    with disable_autodiff_subgraph_inlining():
        t_ref = torch.tensor(4).double()
        out_ref = t.to(t_ref)
        out = func2(t, t_ref)
        grad_ref = torch.autograd.grad(out_ref.sum(), t)
        grad = torch.autograd.grad(out.sum(), t)
        self.assertEqual(grad_ref, grad)
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_tensor_number_math_cuda(self):
    """Re-run the shared tensor/number math checks on a CUDA device."""
    self._test_tensor_number_math(device='cuda')
def test_not(self):
    """Scripted `not` applied to a bool built from a tensor comparison."""
    # test not operator in python
    # TODO: add more tests when bool conversions ready
    def test_not_op(a):
        return not bool(a > 1)

    self.checkScript(test_not_op, (torch.tensor(2), ), optimize=True)
def test_is_isnot(self):
    """`is` / `is not` over a product of literal operands, scripted vs eager."""
    # test is and is not operator in python
    template = dedent('''
    def func():
        # type: () -> bool
        return {lhs} {op} {rhs}
    ''')

    def test(op, args):
        # Build the snippet, run it both through plain exec and through the
        # JIT, and require identical results.
        code = template.format(lhs=args[0], rhs=args[1], op=op)
        scope = {}
        execWrapper(code, globals(), scope)
        cu = torch.jit.CompilationUnit(code)
        self.assertEqual(
            cu.func(),
            scope['func'](),
            msg=f"Failed with op: {op}, lhs: {args[0]}, rhs: {args[1]}"
        )

    ops = ['is', 'is not']
    type_literals = [True, False, None, [1, 1], 1, 2, .5, 1.5]

    # do literals product to try any types combinations
    for op, lhs, rhs in product(ops, type_literals, type_literals):
        test(op, [lhs, rhs])
def test_isinstance_refinement(self):
    """isinstance() narrows Optional/Any types inside script functions."""
    @torch.jit.script
    def foo(a):
        # type: (Optional[int]) -> int
        if isinstance(a, int):
            return a + 3
        else:
            return 4

    self.assertEqual(foo(4), 7)
    self.assertEqual(foo(None), 4)

    @torch.jit.script
    def foo2(a, b):
        # type: (Optional[int], Optional[int]) -> int
        if not isinstance(a, int) or not isinstance(b, int):
            return 0
        else:
            return a + b

    self.assertEqual(foo2(3, 4), 7)
    self.assertEqual(foo2(None, 4), 0)
    self.assertEqual(foo2(4, None), 0)

    @torch.jit.script
    def any_refinement(a, b):
        # type: (Any, Any) -> int
        if isinstance(a, int) and isinstance(b, int):
            return a + b
        return 0

    self.assertEqual(any_refinement(3, 4), 7)
    self.assertEqual(any_refinement(3, "hi"), 0)

    @torch.jit.script
    def any_refinement2(a):
        # type: (Any) -> Tensor
        if isinstance(a, Tensor):
            return a
        return torch.tensor(3)

    self.assertEqual(any_refinement2(3), torch.tensor(3))
    self.assertEqual(any_refinement2(torch.tensor(5)), torch.tensor(5))
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "bug persists in deprecated executor")
def test_unspecialized_any_binding(self):
    """Values bound as Any must not carry a specialized tensor type."""
    # any binding will infer the type, if it infers
    # a specialized tensor type `x` Dict type will fail isinstance check
    @torch.jit.script
    def foo(x: Any):
        assert isinstance(x, Dict[str, torch.Tensor])

    foo({"1": torch.tensor(3)})
    with self.assertRaises(Exception):
        foo(2)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
def test_isinstance(self):
    """isinstance() against many type literals, scripted vs eager exec."""
    # test isinstance operator for static type checking
    template = dedent('''
    def func(x):
        # type: ({type_hint}) -> bool
        return isinstance(x, {typ})
    ''')

    def test(inp, typ, type_hint):
        code = template.format(typ=typ, type_hint=type_hint)
        scope = {}
        execWrapper(code, globals(), scope)
        cu = torch.jit.CompilationUnit(code)
        self.assertEqual(
            cu.func(inp),
            scope['func'](inp),
            msg=f"Failed with typ: {typ}"
        )

    inputs = [True, 1, 1.0, torch.tensor(1), [1, 2], (1.0,), [1, 2], 1]
    type_literals = ['bool', 'int', 'float', 'torch.Tensor', 'list', 'tuple',
                     '(list, tuple)', '(int, float, bool)']
    type_annotations = ['bool', 'int', 'float', 'Tensor', 'List[int]', 'Tuple[float]',
                        'List[int]', 'int']

    # do zipping to try different types
    for inp, typ, type_hint in zip(inputs, type_literals, type_annotations):
        test(inp, typ, type_hint)

    # test optional isinstance check
    @torch.jit.script
    def opt_func(x):
        # type: (Optional[int]) -> bool
        return isinstance(x, int)

    self.assertTrue(opt_func(3))
    self.assertFalse(opt_func(None))
def test_dropout_eval(self):
    """Scripted vs eager modules agree on dropout in eval AND train modes
    (train mode is compared under a frozen RNG state)."""
    class ScriptedConv2d(torch.jit.ScriptModule):
        def __init__(self, in_channels, out_channels, **kwargs):
            super().__init__()
            self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
            self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

        @torch.jit.script_method
        def forward(self, x):
            x = self.conv(x)
            x = self.bn(x)
            return F.relu(x, inplace=True)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.Conv2d_1a_3x3 = ScriptedConv2d(3, 32, kernel_size=3, stride=2)

        @torch.jit.script_method
        def forward(self, x):
            x = self.Conv2d_1a_3x3(x)
            return F.dropout(x, training=self.training)

    # Eager twins of the two scripted modules above.
    class EagerConv2d(torch.nn.Module):
        def __init__(self, in_channels, out_channels, **kwargs):
            super().__init__()
            self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
            self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

        def forward(self, x):
            x = self.conv(x)
            x = self.bn(x)
            return F.relu(x, inplace=True)

    class EagerMod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.Conv2d_1a_3x3 = EagerConv2d(3, 32, kernel_size=3, stride=2)

        def forward(self, x):
            x = self.Conv2d_1a_3x3(x)
            return F.dropout(x, training=self.training)

    script_input = torch.rand(4, 3, 299, 299)
    eager_input = script_input.clone()

    # eval mode: dropout is a no-op, outputs must match exactly
    with freeze_rng_state():
        script_mod = ScriptMod()
        script_mod.eval()
        script_output = script_mod(script_input)

    with freeze_rng_state():
        eager_mod = EagerMod()
        eager_mod.eval()
        eager_output = eager_mod(eager_input)

    self.assertEqual(script_output, eager_output)

    # train mode: identical RNG state must give identical dropout masks
    with freeze_rng_state():
        script_mod = ScriptMod()
        script_mod.train()
        script_output = script_mod(script_input)

    with freeze_rng_state():
        eager_mod = EagerMod()
        eager_mod.train()
        eager_output = eager_mod(eager_input)

    self.assertEqual(script_output, eager_output)
def test_nested_breaks(self):
    """break/continue across nested loops; the exit-transform's bool flags
    must not leak into loop-carried outputs."""
    def no_bool_loop_outputs(g):
        # testing that the "did exit" transform values are not loop block
        # outputs (and thus not affecting one loop from another)
        loops = g.findAllNodes("prim::Loop")
        for loop in loops:
            for out in loop.outputs():
                self.assertTrue(out.type() != BoolType.get())

    def test(y):
        # type: (int)
        ret = 0
        tensor = torch.tensor(0)
        while int(tensor.add_(1)) < 4:
            if y == 1:
                continue
            for _ in range(y):
                continue
                ret += 1  # intentionally dead: after continue
            ret += 1
        return ret, int(tensor)

    self.assertEqual(torch.jit.script(test)(1), test(1))
    self.assertEqual(torch.jit.script(test)(2), test(2))
    no_bool_loop_outputs(torch.jit.script(test).graph)

    def foo():
        y = torch.tensor(0)
        z = 0
        while int(y.add_(1)) < 20:
            if int(y) < 10:
                for i in range(6):
                    if i == 3:
                        continue
                    else:
                        if i > 3:
                            break
                    z += 2
                if int(y) == 18:
                    break
                if int(y) == 15:
                    continue
            z += 1
        return int(y), z

    no_bool_loop_outputs(torch.jit.script(foo).graph)
    self.checkScript(foo, ())

    def test_nested_two():
        i = 0
        k = 0
        while i < 5:
            for j in range(5):
                k += 1
                if j == 3:
                    continue
            i += 1
            k += 1
            if i == 4:
                break
        return i, k

    self.checkScript(test_nested_two, ())
    no_bool_loop_outputs(torch.jit.script(test_nested_two).graph)
def test_breaks_continues(self):
    """A battery of break/continue shapes checked against eager execution,
    including dead code after break and variables deleted by an early exit."""
    def foo_continue(cond):
        # type: (int)
        j = 1
        for i in range(5):
            if i == cond:
                continue
            j += 1
        return j

    def foo_break(cond):
        # type: (int)
        j = 1
        for i in range(5):
            if i == cond:
                break
            j += 1
        return j

    for i in range(1, 4):
        self.checkScript(foo_continue, (i,))
        self.checkScript(foo_break, (i,))

    def test_refine_outside_loop():
        # Optional refinement interacting with continue inside a while loop.
        if 1 == 1:
            x = None
        else:
            x = 1
        i = 0
        j = 0
        while (x is None or torch.jit._unwrap_optional(x) > 3):
            if i < 3:
                if i < 3:
                    x = torch.jit.annotate(Optional[int], None)
                    i += 1
                    continue
                x = 1
            else:
                x = 1 if x is None else x
            x = x + 1
            j = x + x
        return x, j

    self.checkScript(test_refine_outside_loop, ())

    def assign_after_break(y):
        # type: (int)
        x = 0
        for i in range(y):
            x = y * 2 + i
            break
            x = 4  # intentionally dead: after break
        return x

    self.checkScript(assign_after_break, (1,))
    self.checkScript(assign_after_break, (2,))
    self.checkScript(assign_after_break, (3,))

    def assign_after_break_nested(y):
        # type: (int)
        x = 0
        for _ in range(y):
            if y == 1:
                x = 5
                break
                assert 1 == 2  # intentionally dead
            else:
                x = x + 1
                break
                assert 1 == 2  # intentionally dead
            x = -30  # dead: both branches break
            assert 1 == 2
        return x

    self.checkScript(assign_after_break_nested, (1,))
    self.checkScript(assign_after_break_nested, (2,))
    self.checkScript(assign_after_break_nested, (3,))

    def may_break(y):
        # type: (int)
        x = 0
        for _ in range(y):
            if y == 1:
                x = 5
            else:
                x = x + 1
                break
            x = -30
        return x

    self.checkScript(may_break, (1,))
    self.checkScript(may_break, (2,))
    self.checkScript(may_break, (3,))

    def test(x, y):
        # type: (int, int)
        a = 1
        while (x > 0):
            if y == 3:
                for i in range(y):
                    a += (1 % (i + 1))
                    x -= 1
            if x == 3:
                a = x * 3
                break
            if x < 3:
                if x == 1:
                    a -= 2
                    x -= 1
                    break
                a -= 1
            x -= 3
        return a, x

    self.checkScript(test, (10, 3))
    self.checkScript(test, (10, 2))
    self.checkScript(test, (3, 2))
    self.checkScript(test, (5, 3))
    self.checkScript(test, (2, 3))

    def test_delete_after_break(x):
        # type: (int)
        a = 1
        b = 1
        for i in range(x):
            a = i * 3
            break
            b = i * 5  # intentionally dead: after break
        return a, b

    self.checkScript(test_delete_after_break, (0,))
    self.checkScript(test_delete_after_break, (1,))

    def test_will_break_after_guard(x):
        # type: (int)
        a = 1
        for i in range(x):
            if i == 4:
                a = 3
                break
            a -= 1
            break
            assert 1 == 2  # intentionally dead
            a -= -100
        return a

    self.checkScript(test_will_break_after_guard, (0,))
    self.checkScript(test_will_break_after_guard, (2,))
    self.checkScript(test_will_break_after_guard, (4,))

    def test_varexit(cond):
        # type: (int)
        m = 0
        for _ in range(3):
            if cond == 2:
                if cond == 2:
                    m = 2
                    break
                k = 1
            else:
                k = 2
            m += k
        return m

    # use of k tests the pathway where we have to insert uninitialized
    self.checkScript(test_varexit, (3,))
    self.checkScript(test_varexit, (2,))

    def test_break_true():
        i = 0
        while True:
            i += 1
            if i == 3:
                break
        while False:
            i += 1
        return i

    self.checkScript(test_break_true, ())
def test_break_continue_error(self):
    """break outside a loop, or inside a nested function / unrolled loop
    over mixed types, is rejected at compile time."""
    with self.assertRaisesRegex(RuntimeError, "Syntax"):
        cu = torch.jit.CompilationUnit('''
        def other_func(a):
            break
        ''')

    with self.assertRaisesRegex(RuntimeError, "Syntax"):
        cu = torch.jit.CompilationUnit('''
        def other_func(a):
            for i in range(5):
                def foo():
                    break
        ''')

    with self.assertRaisesRegex(RuntimeError, "do not support break or continue inside"):
        @torch.jit.script
        def foo(x):
            i = 0
            # heterogeneous tuple forces loop unrolling, where break is unsupported
            for a in (1, "2", 1.5):
                b = a
                if x:
                    break
            return b
def test_python_call(self):
    """A CompilationUnit can call back into a captured python function."""
    def pyfunc(a):
        return a * 3.0

    cu = torch.jit.CompilationUnit('''
    def other_func(a):
        return a + a

    def test_call_python(a):
        b = pyfunc(a)
        b = other_func(b)
        i = 0
        step = 1
        while i < 10:
            b = pyfunc(b)
            if bool(b > 3.0):
                b = pyfunc(b)
                i = 11
        return b
    ''')
    inputs = self._make_scalar_vars([1], torch.float)
    # 1 -> pyfunc 3 -> other_func 6 -> pyfunc 18 -> pyfunc 54
    outputs = self._make_scalar_vars([54], torch.float)

    self.assertEqual(cu.test_call_python(*inputs), outputs[0])
def test_python_call_failure(self):
    """Referencing an undefined python function is a compile error."""
    with self.assertRaisesRegex(RuntimeError, "undefined value pyfunc2"):
        def pyfunc(a):
            return a * 3.0

        cu = torch.jit.CompilationUnit('''
        def other_func(a):
            return a + a

        def test_call_python(a):
            b = pyfunc(a)
            b = other_func(b)
            i = 0
            step = 1
            while i < 10:
                b = pyfunc2(b)
                if b > 3.0:
                    b = pyfunc(b)
                    i = 11
            return b
        ''')
        # NOTE: everything below is unreachable — the CompilationUnit above
        # is expected to raise before these lines run.
        inputs = self._make_scalar_vars([1], torch.float)
        outputs = self._make_scalar_vars([54], torch.float)

        self.assertEqual(cu.test_call_python(*inputs), outputs)
def test_type_call_in_script(self):
    """Calling builtin type() on a tensor is unsupported in script."""
    @torch.jit.script
    def fn(x):
        return type(x)

    with self.assertRaisesRegex(RuntimeError, "value of type _TensorMeta"):
        fn(torch.tensor(.5))
def test_python_call_annotation(self):
    """@torch.jit.script functions may call plain python functions."""
    def pyfunc(a):
        return a * 3.0

    @torch.jit.script
    def foo(a):
        return pyfunc(a) + pyfunc(a)

    inputs = self._make_scalar_vars([1], torch.float)
    outputs = self._make_scalar_vars([6], torch.float)
    self.assertEqual(foo(*inputs), outputs[0])
def test_python_call_annoytation_failure(self):
    """Undefined python name in a decorated script function raises.

    NOTE(review): "annoytation" is a typo for "annotation"; the name is kept
    unchanged because renaming a test method changes which tests run.
    """
    with self.assertRaisesRegex(RuntimeError, "undefined value pyfunc2"):
        def pyfunc(a):
            return a * 3.0

        @torch.jit.script
        def foo(a):
            return pyfunc2(a) + pyfunc(a)  # noqa: F821

        # unreachable: scripting above is expected to raise
        inputs = self._make_scalar_vars([1], torch.float)
        outputs = self._make_scalar_vars([6], torch.float)

        self.assertEqual(foo(*inputs), outputs[0])
def test_desugar_module(self):
    """Fully-qualified and aliased functional calls desugar identically."""
    import torch.nn.functional as F

    def fn(x, slope):
        a = torch.abs(x)
        b = torch.nn.functional.prelu(x, slope)
        c = F.prelu(x, slope)
        return a, b, c

    x = torch.arange(-3., 4)
    slope = torch.tensor([0.5])
    self.checkScript(fn, [x, slope], optimize=True)
def test_script_docstring(self):
    """Only the first string literal becomes the scripted fn's __doc__;
    later bare strings are treated as statements, not docs."""
    @torch.jit.script
    def with_docstring(x):
        """test str"""
        y = x
        """y is the same as x"""
        return y

    self.assertEqual(with_docstring.__doc__, 'test str')
def test_script_method_docstring(self):
    """Same docstring rule as above, for a script *method*."""
    class A(torch.jit.ScriptModule):
        @torch.jit.script_method
        def with_docstring(self, x):
            """test str"""
            y = x
            """y is the same as x"""
            return y

    a = A()
    self.assertEqual(a.with_docstring.__doc__, 'test str')
def test_script_module(self):
    """End-to-end ScriptModule features: script submodules, traced-through
    python submodules, parameters, define()-from-string, and script methods."""
    class M1(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(2))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class PModule(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.a = nn.Parameter(torch.randn(2, 3))

        def forward(self, a):
            return self.a.mm(a)

    class M2(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            # test submodule
            self.sub = M1()
            self.sub2 = PModule()
            # test parameters
            self.weight = nn.Parameter(torch.randn(2, 3))
            self.bias = nn.Parameter(torch.randn(2))
            # test defining a method from a string
            self.define("""
                def hi(self, a):
                    return self.weight.mm(a)
            """)

        # test script methods
        @torch.jit.script_method
        def doit(self, input):
            # test use of parameter
            return self.weight.mm(input)

        @torch.jit.script_method
        def doit2(self, input):
            return self.weight.mm(input)

        @torch.jit.script_method
        def forward(self, input):
            a = self.doit(input)
            b = self.doit2(input)
            c = self.hi(input)
            d = self.sub2(input)
            return a + b + self.bias + self.sub(a) + c + d

    with torch.jit.optimized_execution(False):
        m2 = M2()
        input = torch.randn(3, 2)
        a = m2.weight.mm(input)
        b = m2.weight.mm(input)
        c = m2.weight.mm(input)
        d = m2.sub2.a.mm(input)
        ref = a + b + m2.bias + m2.sub.weight + a + c + d
        self.assertEqual(ref, m2.forward(input))
        # zero every parameter; result must become all-zeros too
        m2.weight = nn.Parameter(torch.zeros_like(m2.weight))
        m2.bias = nn.Parameter(torch.zeros_like(m2.bias))
        m2.sub.weight = nn.Parameter(torch.zeros_like(m2.sub.weight))
        m2.sub2.a.data.zero_()
        self.assertEqual(torch.zeros(2, 2), m2.forward(torch.randn(3, 2)))
def test_irparser(self):
    """parse_ir round-trips a textual graph that FileCheck then matches."""
    graph_str = """graph(%0 : Double(5, 5)):
      # CHECK: aten::relu
      %1 : Double(5, 5) = aten::relu(%0)
      return (%1)
    """
    FileCheck().run(graph_str, parse_ir(graph_str))
def test_parse_tensor_constants(self):
    """parse_ir with parse_tensor_constants=True reparses graphs that embed
    tensor constants; without the flag, parsing must fail."""
    def foo():
        return torch.zeros([4, 4])

    foo_s = torch.jit.script(foo)
    # constant-prop folds the zeros() call into a tensor constant
    torch._C._jit_pass_constant_propagation(foo_s.graph)

    g = str(foo_s.graph)
    g_parsed = parse_ir(g, parse_tensor_constants=True)
    self.assertEqual(str(canonical(g_parsed)), str(canonical(foo_s.graph)))
    func = torch._C._create_function_from_graph("forward", g_parsed)

    out_parsed = func()
    out_func = foo()
    # not checking data, just dtype, size etc
    out_parsed[:] = 0
    out_func[:] = 0
    self.assertEqual(out_func, out_parsed)

    with self.assertRaises(RuntimeError):
        parse_ir(g, parse_tensor_constants=False)
def test_parse_scalar_tensor_constants(self):
    """Scalar tensor constants of several dtypes parse back with the right
    dtype and value; a malformed constant payload raises."""
    for dtype_str, dtype, value in [
        ("Float", torch.float32, 1234.5),
        ("Double", torch.float64, 1234.5),
        ("BFloat16", torch.bfloat16, 123.5),
        ("Int", torch.int32, 12345),
        ("Long", torch.int64, 12345),
        ("Short", torch.int16, 12345),
    ]:
        g_str = f"""
        graph():
            %1 : {dtype_str}(requires_grad=0, device=cpu) = prim::Constant[value={{{value}}}]()
            return (%1)
        """
        jit_graph = parse_ir(g_str, parse_tensor_constants=True)
        node = next(
            n
            for n in jit_graph.nodes()
            if isinstance(n.output().type(), torch.TensorType)
        )
        assert isinstance(node.output().type(), torch.TensorType)
        t = node.t("value")
        assert isinstance(t, torch.Tensor)
        self.assertEqual(t.dtype, dtype)
        self.assertEqual(t.item(), value)

    with self.assertRaises(RuntimeError):
        g_str = """
        graph():
            %1 : Long(requires_grad=0, device=cpu) = prim::Constant[value={invalid}]()
            return (%1)
        """
        jit_graph = parse_ir(g_str, parse_tensor_constants=True)
def test_parse_nested_names(self):
    """Dotted value names like %hi.submod.value.5 survive an IR round trip
    both structurally and at execution time."""
    g_str = """
    graph(%x.1 : Tensor):
        %3 : int = prim::Constant[value=1]()
        %2 : int = prim::Constant[value=2]()
        %hi.submod.value.5 : Tensor = aten::add(%x.1, %2, %3)
        return (%hi.submod.value.5)
    """
    g = parse_ir(g_str)
    round_trip_g = parse_ir(str(g))
    self.assertEqual(canonical(g), canonical(round_trip_g))

    func1 = torch._C._create_function_from_graph("forward", g)
    func2 = torch._C._create_function_from_graph("forward", round_trip_g)
    self.assertEqual(func1(torch.ones([2])), func2(torch.ones([2])))
def test_is_after_use(self):
    """Use.isAfter orders a graph input's uses by position, including uses
    nested in if-branches."""
    def sorted_input_use(g):
        uses = list(next(g.inputs()).uses())
        return sorted(uses, key=functools.cmp_to_key(type(uses[0]).isAfter))

    @torch.jit.script
    def foo(x):
        a = x + 1
        return (x, x, a)

    uses_sorted = sorted_input_use(foo.graph)
    # sorts last use to the end
    self.assertFalse(uses_sorted[0].isAfter(uses_sorted[1]))
    self.assertTrue(uses_sorted[0].user.kind() == "aten::add")
    self.assertEqual(uses_sorted[1].offset, 0)

    @torch.jit.script
    def foo(x, cond: bool):
        if cond:
            return x + 3
        else:
            return x - 3

    uses_sorted = sorted_input_use(foo.graph)
    self.assertTrue(uses_sorted[0].user.kind() == "aten::add")
    self.assertTrue(uses_sorted[1].user.kind() == "aten::sub")

    @torch.jit.script
    def foo(x, cond: bool, cond2: bool):
        if cond:
            return x + 3
        elif cond2 :
            return x - 3

        return x / 3

    graph1 = foo.graph

    # same control flow written with an explicit nested else
    @torch.jit.script
    def foo(x, cond: bool, cond2: bool):
        if cond:
            return x + 3
        else:
            if cond2 :
                return x - 3
            return x / 3

    graph2 = foo.graph

    for graph in [graph1, graph2]:
        uses_sorted = sorted_input_use(graph)
        self.assertTrue(uses_sorted[0].user.kind() == "aten::add")
        self.assertTrue(uses_sorted[1].user.kind() == "aten::sub")
        self.assertTrue(uses_sorted[2].user.kind() == "aten::div")
def test_canonicalize_control_outputs(self):
    """If/Loop node outputs are canonicalized to first-textual-use order."""
    def test_all_outputs(g):
        ifs = g.findAllNodes("prim::If")
        loops = g.findAllNodes("prim::Loop")

        def contained_blocks(node):
            # each If contributes 2 blocks, each Loop contributes 1
            return len(node.findAllNodes("prim::If")) * 2 + len(node.findAllNodes("prim::Loop"))
        for node in ifs + loops:
            outs = list(node.outputs())
            out_name = [x.debugName() for x in outs]
            if len(out_name) == 0:
                continue
            fc = FileCheck()
            # find the last output, then all subsequent uses
            fc.check(out_name[-1] + " : ")
            # skip past node body
            for _ in range(contained_blocks(node)):
                fc.check("->")
            if (node.kind() == "prim::If"):
                fc.check("->").check("->").check("\n")
            else:
                fc.check("->").check("\n")
            # the canonical order is the same order as the first use
            # appears in text
            for name in out_name:
                fc.check(name)
            fc.run(g)

    @torch.jit.script
    def test(x):
        # type: (bool) -> Tuple[int, int]
        b = 2
        a = 1
        if x:
            a = 1
            b = 2
            x = False
        if x:
            b = a
        else:
            a = b
        return a, b

    test_all_outputs(test.graph)

    @torch.jit.script
    def test2(x):
        # type: (bool) -> Tuple[int, int]
        b = 2
        a = 1
        if x:
            a = 1
            b = 2
            x = False
        if x:
            print(a)
        else:
            if x:
                print(b)
        return a, b

    test_all_outputs(test2.graph)

    @torch.jit.script
    def test_loop(x, iter):
        # type: (bool, int) -> (None)
        a = 1
        b = 2
        c = 3
        for _ in range(iter):
            a = 4
            b = 5
            c = 6
            x = True
        print(c)
        if x:
            print(a, b)

    test_all_outputs(test_loop.graph)

    @torch.jit.script
    def loop_unused(iter):
        # type: (int) -> (None)
        a = 1
        b = 2
        c = 3
        for _ in range(iter):
            c = c + 1
            b = b + 1
            a = a + 1
            print(a, b)
        print(c)

    # c is used, then unused should be ordered by alphabetical
    FileCheck().check(r"%c : int, %a : int, %b : int").run(loop_unused.graph)
def test_filecheck(self):
    """Exercise FileCheck's check / check_count / check_same / check_next /
    check_dag / check_not APIs, including their failure messages.

    BUG FIX: ``test_check_not`` was defined but never invoked (unlike every
    sibling helper, which is called immediately after its definition), so its
    assertions never ran; the missing call is added at the end.
    """
    def test_check():
        file = "232"
        FileCheck().check("2").check("3").check("2").run(file)
        FileCheck().check("232").run(file)

        with self.assertRaisesRegex(RuntimeError, 'Expected to find "22"'):
            FileCheck().check("22").run(file)
        with self.assertRaisesRegex(RuntimeError, "CHECK: 3"):
            FileCheck().check("3").check("3").run(file)

    test_check()

    def test_check_count():
        file = "22222"
        FileCheck().check_count("2", 5).run(file)
        FileCheck().check_count("22", 2).run(file)
        FileCheck().check_count("222", 1).run(file)

        with self.assertRaisesRegex(RuntimeError, 'Expected to not find'):
            FileCheck().check_count("2", 4, exactly=True).run(file)
        with self.assertRaisesRegex(RuntimeError, 'Expected to find "22"'):
            FileCheck().check_count("22", 3).run(file)
        with self.assertRaisesRegex(RuntimeError, "CHECK-COUNT-6: 2"):
            FileCheck().check_count("2", 6).run(file)

    test_check_count()

    def test_check_same():
        file = "22\n33"
        FileCheck().check_same("22").run(file)

        with self.assertRaisesRegex(RuntimeError, "Expected to not find"):
            FileCheck().check_same("33").run(file)

        file = "22 1 3"

        FileCheck().check("2").check_same("3").run(file)
        FileCheck().check_count("2", 2).check_same("3").run(file)

    test_check_same()

    def test_check_next():
        file = "\n1\n2\n3"
        FileCheck().check("1").check_next("2").check_next("3").run(file)
        FileCheck().check_next("1").check_next("2").check_next("3").run(file)

        with self.assertRaisesRegex(RuntimeError, "Expected to find"):
            FileCheck().check("1").check_next("2").run("12")
        with self.assertRaisesRegex(RuntimeError, "Expected to not find"):
            FileCheck().check("1").check_next("2").run("1\n\n2")

    test_check_next()

    def test_check_dag():
        fc = FileCheck().check_dag("1").check_dag("2").check_not("2")
        fc.run("12")
        fc.run("21")

        fc = FileCheck()
        fc.check_not("3").check_dag("1").check_dag("2").check_not("3")
        fc.run("1 3 2")
        fc.run("2 3 1")

        fc = FileCheck().check_dag("1").check_dag("2").check("3")
        with self.assertRaisesRegex(RuntimeError, 'Expected to find "3" but did not find it'):
            fc.run("1 3 2")

    test_check_dag()

    def test_check_not():
        FileCheck().check_not("2").check("1").run("12")
        FileCheck().check("2").check_not("2").run("12")

        with self.assertRaisesRegex(RuntimeError, 'Expected to not find "2"'):
            FileCheck().check_not("2").check("1").run("21")

        with self.assertRaisesRegex(RuntimeError, 'Expected to not find "1"'):
            FileCheck().check("2").check_not("1").run("21")

        # checks with distinct range matchings
        fb = FileCheck().check_count("2", 2).check_count("2", 2).check_not("2")
        with self.assertRaisesRegex(RuntimeError, 'Expected to not find "2"'):
            fb.run("22 2 22")

        fb = FileCheck().check_count("2", 2).check_not("1").check_count("2", 2)
        with self.assertRaisesRegex(RuntimeError, 'Expected to not find "1"'):
            fb.run("22 1 22")

    test_check_not()
def _dtype_to_jit_name(self, dtype):
if dtype == torch.float32:
return "Float"
if dtype == torch.float64:
return "Double"
if dtype == torch.int64:
return "Long"
if dtype == torch.int32:
return "Int"
if dtype == torch.bool:
return "Bool"
raise RuntimeError('dtype not handled')
def _dtype_to_expect(self, dtype, dim=0):
param = ', '.join(['*'] * dim + ['device=cpu'])
param = '(' + param + ')'
jit_type = self._dtype_to_jit_name(dtype)
if dim >= 0:
return jit_type + param
# special case representing wrapped number
else:
return jit_type.lower()
def _test_dtype_op_shape(self, ops, args, input_dims=1):
    """Check shape-analysis dtype results for ops taking a `dtype` kwarg.

    For every (op, output dtype, input dtype) combination, builds a
    ``torch.tensor(...).op(*args, dtype=...)`` snippet, runs complete shape
    analysis on its scripted graph, and FileChecks the propagated output
    type against the eagerly computed result.
    """
    if input_dims < 1:
        raise RuntimeError("input dims must be at least 1")
    dtypes = [torch.float32, torch.float64, torch.int64, torch.int32]
    str_args = ', '.join([str(arg) for arg in args]) + (', ' if len(args) else '')
    tensor_data = ('[' * input_dims) + '1, 2, 3' + (input_dims * ']')
    template = dedent('''
    def func():
        return {return_line}
    ''')

    for op in ops:
        for dtype in (dtypes + [None]):
            for tensor_type in dtypes:
                # a couple of ops aren't implemented for non-floating types
                if not tensor_type.is_floating_point or (dtype is not None and not dtype.is_floating_point):
                    if op in ['mean', 'softmax', 'log_softmax']:
                        continue
                return_line = f"torch.tensor({tensor_data}, dtype={tensor_type}).{op}({str_args}dtype={dtype})"
                # uncomment for debugging a failed test:
                # print("testing {}".format(return_line))
                code = template.format(return_line=return_line)
                scope = {}
                exec(code, globals(), scope)
                cu = torch.jit.CompilationUnit(code)
                graph = cu.func.graph
                torch._C._jit_pass_complete_shape_analysis(graph, (), False)
                # eager reference result for the expected dtype/rank
                input_array = [1, 2, 3]
                for _ in range(1, input_dims):
                    input_array = [input_array]
                t = torch.tensor(input_array, dtype=tensor_type)
                attr = getattr(t, op)
                kwargs = {'dtype': dtype}
                result = attr(*args, **kwargs)
                expect = self._dtype_to_expect(result.dtype, result.dim())
                FileCheck().check("aten::tensor").check(expect).run(graph)
def test_dtype_op_shape(self):
    """dtype propagation for reduction ops taking an explicit dtype.

    FIX: the ``args=[0, False]`` case was invoked twice back-to-back — an
    accidental duplication; the redundant call is removed. Coverage is
    unchanged: no args, and (dim, keepdim) with both keepdim values.
    """
    ops = ['prod']
    self._test_dtype_op_shape(ops, args=[])
    self._test_dtype_op_shape(ops, args=[0, False])
    self._test_dtype_op_shape(ops, args=[0, True])
def test_dtype_op_shape2(self):
    """dtype propagation for ops whose first positional arg is a dim index."""
    ops = ['cumprod', 'cumsum', 'softmax', 'log_softmax']
    self._test_dtype_op_shape(ops, args=[0])

    self._test_dtype_op_shape(ops, args=[1], input_dims=4)
def _test_binary_op_shape(self, ops, input_dims=1):
    """Check shape-analysis results for binary ops over mixed operands.

    Operands are drawn from tensors of several dtypes plus the python
    scalars 1 and 1.5; the propagated output type in the scripted graph is
    compared against the eagerly computed result. Combinations that the ops
    don't support (bool sub/div, mixed-type div) are skipped.
    """
    dtypes = [torch.float32, torch.float64, torch.int64, torch.int32, torch.bool]
    if input_dims == 0:
        shape = '1'
    else:
        shape = '[' + ('1,' * 4) + ']'
        for _ in range(1, input_dims):
            shape = '[' + ",".join([shape] * 4) + ']'

    template = dedent('''
    def func():
        arg1 = {}
        arg2 = {}
        return torch.{}(arg1, arg2)
    ''')

    args = []
    for dtype in dtypes:
        args = args + [f"torch.tensor({shape}, dtype={dtype})"]
    args = args + [1, 1.5]

    def isBool(arg):
        return type(arg) is bool or (type(arg) is str and "torch.bool" in arg)

    for op in ops:
        for first_arg in args:
            for second_arg in args:
                # subtract not supported for bool
                if (op == 'sub' or op == 'div') and (isBool(first_arg) or isBool(second_arg)):
                    continue
                # div is not implemented correctly for mixed-type or int params
                if (op == 'div' and (type(first_arg) is not type(second_arg) or
                                     isinstance(first_arg, int) or
                                     (isinstance(first_arg, str) and 'int' in first_arg))):
                    continue
                return_line = f"torch.{op}({first_arg}, {second_arg})"
                # uncomment for debugging a failed test:
                # print("testing {}".format(return_line))
                code = template.format(first_arg, second_arg, op)
                scope = {}
                exec(code, globals(), scope)
                non_jit_result = scope['func']()

                cu = torch.jit.CompilationUnit(code)
                graph = cu.func.graph
                torch._C._jit_pass_complete_shape_analysis(graph, (), False)
                # use dim=-1 to represent a python/jit scalar.
                dim = -1 if type(first_arg) is not str and type(second_arg) is not str else non_jit_result.dim()
                dtype = non_jit_result.dtype
                # jit only supports int/float scalars.
                if dim < 0:
                    if dtype == torch.int64:
                        dtype = torch.int32
                    if dtype == torch.float64:
                        dtype = torch.float32
                expect = self._dtype_to_expect(dtype, dim)
                jit_output = next(graph.outputs())
                check = FileCheck()
                check.check(expect).run(str(jit_output))
def test_binary_op_shape(self):
    """Binary-op shape propagation for scalar (0-dim) and rank-3 operands."""
    for num_dims in (0, 3):
        self._test_binary_op_shape(['mul', 'div', 'add', 'sub'], num_dims)
def test_no_dtype_shape(self):
    """When an op's dtype depends on a runtime scalar pulled out via
    ``item()``, shape analysis must fall back to the unrefined TensorType.

    BUG FIX: the second check previously re-read ``next(g.outputs())``
    (foo's graph) instead of ``g2`` (foo2's graph), so foo2 was never
    actually verified. The local is also renamed from ``type``, which
    shadowed the builtin.
    """

    @torch.jit.script
    def foo(x):
        scalar_number = x.item()
        return x.add(scalar_number)

    @torch.jit.script
    def foo2(x):
        scalar_number = x.item()
        return torch.tensor(1).add(scalar_number)

    t = torch.tensor(5)
    g = foo.graph_for(t)
    out_value = next(g.outputs())
    self.assertTrue(out_value.type() == torch._C.TensorType.get())
    g2 = foo2.graph_for(t)
    out_value = next(g2.outputs())
    self.assertTrue(out_value.type() == torch._C.TensorType.get())
def test_filecheck_parse(self):
    """FileCheck directives parsed out of a checks *file* (the annotated
    string form), rather than built via the fluent API."""
    def test_check():
        file = """
            # CHECK: 2
            # CHECK: 3
            # CHECK: 2
            232
            """
        FileCheck().run(checks_file=file, test_file=file)
        file = """
            # CHECK: 232
            232
            """
        FileCheck().run(file, "232")
        with self.assertRaisesRegex(RuntimeError, 'Expected to find "232"'):
            FileCheck().run(file, "22")
        with self.assertRaisesRegex(RuntimeError, 'Expected to find "22"'):
            FileCheck().run("# CHECK: 22", "23")

    test_check()

    def test_check_count():
        file = "22222"
        FileCheck().run("# CHECK-COUNT-5: 2", file)
        FileCheck().run("# CHECK-COUNT-EXACTLY-5: 2", file)
        FileCheck().run("# CHECK-COUNT-2: 22", file)
        FileCheck().run("# CHECK-COUNT-1: 222", file)

        with self.assertRaisesRegex(RuntimeError, 'Expected to not find'):
            FileCheck().run("# CHECK-COUNT-EXACTLY-2: 2", file)

    test_check_count()

    def test_check_same():
        file = "22\n33"
        FileCheck().run("# CHECK-SAME: 22", file)

        with self.assertRaisesRegex(RuntimeError, "Expected to not find"):
            FileCheck().run("# CHECK-SAME: 33", file)

        file = "22 1 3"

        FileCheck().run("# CHECK: 2\n # CHECK-SAME: 3", file)
        FileCheck().run("# CHECK-COUNT-2: 2\n # CHECK-SAME: 3", file)

    test_check_same()

    def test_bad_input():
        with self.assertRaisesRegex(RuntimeError, "Check for bad input"):
            FileCheck().run("", "1")

        with self.assertRaisesRegex(RuntimeError, "Could not parse check"):
            FileCheck().run("# CHECK1", "")

    test_bad_input()
def test_script_module_call_noscript(self):
    """@torch.jit.ignore'd python methods are callable from script methods
    and observe later mutations of python attributes."""
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.value = 1

        @torch.jit.ignore
        def foo(self):
            return torch.ones(2, 2) + self.value

        @torch.jit.script_method
        def forward(self, input):
            return input + self.foo()

    with torch.jit.optimized_execution(False):
        m = M()
        input = torch.randn(2, 2)
        o = m(input)
        self.assertEqual(o, input + torch.ones(2, 2) + 1)
        # check that we can change python attributes
        # and that those changes are picked up in script methods
        m.value = 2
        o = m(input)
        self.assertEqual(o, input + torch.ones(2, 2) + 2)
def test_script_module_nochange_submodule(self):
    """Submodules of a ScriptModule cannot be re-assigned after scripting."""
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.sub = nn.Linear(5, 5)

        @torch.jit.script_method
        def forward(self, input):
            return self.sub(input)

    with torch.jit.optimized_execution(False):
        m = M()
        input = torch.randn(1, 5, 5)
        o = m(input)
        self.assertEqual(o, m.sub(input))
        with self.assertRaisesRegex(RuntimeError, "Cannot re-assign"):
            m.sub = nn.Linear(5, 5)
def test_module_apis(self):
    """named_modules / named_children / modules / children work identically
    in a scripted module and its eager counterpart."""
    class Sub(torch.nn.Module):
        def forward(self, thing):
            return thing - 2

    class Double(torch.nn.Module):
        def forward(self, thing):
            return thing * 2

    class MyMod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mod = (Sub())
            self.mod2 = (Sub())
            self.mod3 = nn.Sequential(nn.Sequential(Sub()))
            self.mod4 = nn.Sequential(Sub(), Double())

        @torch.jit.export
        def method(self, x, x1, y, y1):
            mod_names = ""
            for name, mod in self.named_modules():
                mod_names = mod_names + " " + name
                x = mod(x)

            children_names = ""
            for name, mod in self.named_children():
                children_names = children_names + " " + name
                x1 = mod(x1)

            for mod in self.modules():
                y = mod(y)

            for mod in self.children():
                y1 = mod(y1)

            return mod_names, children_names, x, x1, y, y1

        def forward(self, x):
            return x + 2

    mod = torch.jit.script(MyMod())
    inps = tuple([torch.tensor(i) for i in range(1, 5)])
    self.assertEqual(mod.method(*inps), MyMod().method(*inps))
def test_script_module_const(self):
    """__constants__ attributes are readable from script methods."""
    class M(torch.jit.ScriptModule):

        __constants__ = ['b', 'i', 'c', 's']

        def __init__(self) -> None:
            super().__init__()
            self.b = False
            self.i = 1
            self.c = 3.5
            self.s = ["hello"]

        @torch.jit.script_method
        def forward(self):
            return self.b, self.i, self.c

    with torch.jit.optimized_execution(False):
        m = M()
        o0, o1, o2 = m()
        # False compares equal to 0
        self.assertEqual(o0, 0)
        self.assertEqual(o1, 1)
        self.assertEqual(o2, 3.5)
def test_script_module_fail_exist(self):
    """Referencing a nonexistent attribute in a script method raises at
    module construction (compilation) time."""
    class M(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return x + self.whatisgoingon

    with self.assertRaisesRegex(RuntimeError, "Module 'M' has no attribute"):
        M()
@unittest.skip("[module dedupe] currently NoneType refinement on optional attributes doesn't work.")
def test_script_module_none_exist_fail(self):
    """A None-valued attribute should be dropped, so accessing it from a
    script method raises. Skipped: see the skip reason above."""
    class M(torch.jit.ScriptModule):
        def __init__(self, my_optional):
            super().__init__()
            self.my_optional = my_optional

        @torch.jit.script_method
        def forward(self, x):
            if self.my_optional is not None:
                return torch.neg(x) + self.my_optional
            return torch.neg(x)

    with self.assertRaisesRegex(RuntimeError, "has no attribute 'my_optional'"):
        x = torch.rand(3, 4)
        fb = M(None)
        fb(x)
def test_script_module_invalid_consts(self):
    """Values that cannot be script constants (modules, type objects,
    dict-containing tuples, numpy scalars) produce helpful TypeErrors."""
    class Foo(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self) -> None:
            super().__init__()
            self.invalid = [nn.Linear(3, 4)]

    with self.assertRaisesRegex(
            TypeError,
            "Linear' object in attribute 'Foo.invalid' is not a valid constant"):
        Foo()

    class Foo2(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self) -> None:
            super().__init__()
            self.invalid = int

    with self.assertRaisesRegex(TypeError, "not a valid constant"):
        Foo2()

    class Foo3(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self) -> None:
            super().__init__()
            self.invalid = (3, 4, {})

    with self.assertRaisesRegex(TypeError, "not a valid constant"):
        Foo3()

    class Foo4(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self) -> None:
            super().__init__()
            self.invalid = np.int64(5)

    # verify that we capture human understandable class name
    with self.assertRaisesRegex(TypeError, "numpy.int64"):
        Foo4()
def test_script_module_param_buffer_mutation(self):
    """A buffer mutated inside forward only changes while the module is in training mode."""
    # TODO: add param mutation test case after JIT support it
    class ModuleBufferMutate(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.running_var = nn.Buffer(torch.tensor(0, dtype=torch.long))
        @torch.jit.script_method
        def forward(self):
            # increment only in training mode, mirroring e.g. batch-norm stats
            if self.training:
                self.running_var += 1
            return self.running_var
    with torch.jit.optimized_execution(False):
        m = ModuleBufferMutate()
        self.assertEqual(m(), 1)
        # after eval() the buffer must no longer be incremented
        m.eval()
        self.assertEqual(m(), 1)
def test_script_module_for(self):
    """A constant list attribute can be iterated with a for loop inside a script method."""
    class M(torch.jit.ScriptModule):
        __constants__ = ['b']
        def __init__(self) -> None:
            super().__init__()
            self.b = [1, 2, 3, 4]
        @torch.jit.script_method
        def forward(self):
            sum = 0
            for i in self.b:
                sum += i
            return sum
    with torch.jit.optimized_execution(False):
        m = M()
        # 1 + 2 + 3 + 4
        self.assertEqual(m(), 10)
def test_override_magic(self):
    """An exported __len__ override is honored by scripting, for plain Modules and Sequential."""
    class OverrideMagic(nn.Module):
        @torch.jit.export
        def __len__(self):
            return 10
    mod = OverrideMagic()
    self.assertEqual(len(mod), len(torch.jit.script(mod)))
    class OverrideMagicSeq(nn.Sequential):
        @torch.jit.export
        def __len__(self):
            # overrides Sequential's default length (number of submodules)
            return 10
    mod = OverrideMagicSeq()
    self.assertEqual(len(mod), len(torch.jit.script(mod)))
    self.assertTrue(torch.jit.script(mod))
def test_script_module_for2(self):
    """Iterating an nn.ModuleList of scripted submodules in forward matches eager
    iteration; iterating the scripted container itself from Python is rejected."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(2))
        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.mods = nn.ModuleList([Sub() for i in range(10)])
        @torch.jit.script_method
        def forward(self, v):
            for m in self.mods:
                v = m(v)
            return v
    with torch.jit.optimized_execution(False):
        i = torch.empty(2)
        m = M()
        o = m(i)
        # replicate the chain of submodule applications in eager mode
        v = i
        for sub in m.mods:
            v = sub(v)
        self.assertEqual(o, v)
    with self.assertRaisesRegex(Exception, "object is not iterable"):
        print(list(m))
def test_attr_qscheme_script(self):
    """A torch.qscheme-valued attribute can be stored and compared inside a scripted forward."""
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.qscheme = torch.per_tensor_affine
        def forward(self):
            # comparison against a different qscheme must evaluate to False here
            if self.qscheme == torch.per_tensor_symmetric:
                return 3
            else:
                return 4
    f = Foo()
    scripted = torch.jit.script(f)
    self.assertEqual(f(), scripted())
def test_script_module_const_submodule_fail(self):
    """Submodules stored in a plain Python list (not a ModuleList) are invisible
    to the compiler, so using the attribute in forward fails."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(2))
        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            # plain list of modules: not registered, not scriptable
            self.mods = [Sub() for _ in range(10)]
        @torch.jit.script_method
        def forward(self):
            for _ in self.mods:
                print(1)
            return 4
    with self.assertRaisesRegex(RuntimeError, "has no attribute 'mods'"):
        M()
class DerivedStateModule(torch.jit.ScriptModule):
    """ScriptModule whose 'derived' buffer is recomputed from 'param' via the
    _pack/_unpack serialization hooks; pack_called/unpack_called flags record
    that each hook ran (used by the pack/unpack tests below)."""
    def __init__(self) -> None:
        super(TestScript.DerivedStateModule, self).__init__()
        self.param = torch.nn.Parameter(torch.ones(3, 4, dtype=torch.float))
        # derived state: recomputable from param, so it can be dropped on save
        self.derived = nn.Buffer(torch.neg(self.param).detach().clone())
        # This is a flag so we can test that the pack method was called
        self.pack_called = nn.Buffer(torch.zeros(1, dtype=torch.long))
        # This is a flag so we can test that the unpack method was called
        self.unpack_called = nn.Buffer(torch.zeros(1, dtype=torch.long))
    @torch.jit.script_method
    def _pack(self):
        self.pack_called.set_(torch.ones(1, dtype=torch.long))
        # replace the derived buffer with a throwaway value before serialization
        self.derived.set_(torch.rand(1).detach())
    @torch.jit.script_method
    def _unpack(self):
        self.unpack_called.set_(torch.ones(1, dtype=torch.long))
        # restore the derived buffer from param after (de)serialization
        self.derived.set_(torch.neg(self.param).detach())
    @torch.jit.script_method
    def forward(self, x):
        return x + self.derived
def test_pack_unpack_state(self):
    """_pack runs before serialization and _unpack after, leaving both the saved
    copy and the original module in a usable, re-derived state."""
    sm = TestScript.DerivedStateModule()
    x = torch.rand(3, 4)
    torch.testing.assert_close(sm(x), x + torch.neg(torch.ones(3, 4, dtype=torch.float)))
    # Test save path
    self.assertFalse(sm.pack_called.item())
    self.assertFalse(sm.unpack_called.item())
    imported = self.getExportImportCopyWithPacking(sm)
    # ensure pack was called before serialization
    self.assertTrue(sm.pack_called.item())
    # ensure unpack was called after serialization so as to leave the module in an initialized state
    self.assertTrue(sm.unpack_called.item())
    torch.testing.assert_close(sm.derived, torch.neg(sm.param))
    # Test load paths
    self.assertTrue(imported.unpack_called.item())
    torch.testing.assert_close(imported(x), x + torch.neg(torch.ones(3, 4, dtype=torch.float)))
@unittest.skipIf(not TEST_MKL, "PyTorch is built without MKL support")
@unittest.skipIf(True, "Skipping while landing PR stack")
def test_torch_functional(self):
    """Scripted versions of assorted torch functional ops (stft/istft, lu_unpack,
    cdist, norm, unique, unique_consecutive) match their eager results.
    Currently skipped unconditionally (see decorator)."""
    def stft(input, n_fft):
        # type: (Tensor, int) -> Tensor
        return torch.stft(input, n_fft, return_complex=True)
    inps = (torch.randn(10), 7)
    self.assertEqual(stft(*inps), torch.jit.script(stft)(*inps))
    def istft(input, n_fft):
        # type: (Tensor, int) -> Tensor
        return torch.istft(input, n_fft)
    inps2 = (stft(*inps), inps[1])
    self.assertEqual(istft(*inps2), torch.jit.script(istft)(*inps2))
    def lu_unpack(x):
        A_LU, pivots = torch.linalg.lu_factor(x)
        return torch.lu_unpack(A_LU, pivots)
    # cover unbatched and several batched shapes
    for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3)):
        a = torch.randn(*shape)
        self.checkScript(lu_unpack, (a,))
    def cdist_fn():
        a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]])
        b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]])
        return torch.cdist(a, b, compute_mode="use_mm_for_euclid_dist")
    self.checkScript(cdist_fn, ())
    def norm():
        c = torch.tensor([[1, 2, 3], [-1, 1, 4]], dtype=torch.float)
        return torch.norm(c, p="fro"), torch.norm(c, p="nuc"), torch.norm(c), torch.norm(c, p=.5)
    self.checkScript(norm, ())
    def torch_unique(dim: Optional[int]):
        ten = torch.unique(torch.tensor([[1, 3], [2, 3]], dtype=torch.long))
        a = torch.unique(ten, dim=dim)
        b = torch.unique(ten, return_counts=True, dim=dim)
        c = torch.unique(ten, return_inverse=True, dim=dim)
        d = torch.unique(ten, return_counts=True, return_inverse=True, dim=dim)
        return a, b, c, d
    self.checkScript(torch_unique, (None,))
    self.checkScript(torch_unique, (0,))
    def torch_unique_consecutive(dim: Optional[int]):
        ten = torch.unique(torch.tensor([[1, 3], [3, 2], [3, 2], [2, 3]], dtype=torch.long))
        a = torch.unique_consecutive(ten, dim=dim)
        b = torch.unique_consecutive(ten, return_counts=True, dim=dim)
        c = torch.unique_consecutive(ten, return_inverse=True, dim=dim)
        d = torch.unique_consecutive(ten, return_counts=True, return_inverse=True, dim=dim)
        return a, b, c, d
    self.checkScript(torch_unique_consecutive, (None,))
    self.checkScript(torch_unique_consecutive, (0,))
def test_torch_functional_tensordot_int(self):
    """torch.tensordot with an int 'dims' scripts correctly; out-of-range dims
    produce the documented error messages."""
    def tensordot_dims_int(a: torch.Tensor, b: torch.Tensor, dims: int):
        return torch.tensordot(a, b, dims=dims)
    a = torch.arange(120.).reshape(2, 3, 4, 5)
    b = torch.arange(840.).reshape(4, 5, 6, 7)
    dims = 2
    self.checkScript(tensordot_dims_int, (a, b, dims))
    # invalid values: negative, and larger than either operand's ndim
    for dims in [-1, 5]:
        try:
            tensordot_dims_int(a, b, dims)
        except RuntimeError as error:
            if dims < 0:
                self.assertEqual(str(error), "tensordot expects dims >= 0, but got dims=" + str(dims))
            if dims > min(a.dim(), b.dim()):
                self.assertEqual(str(error), "tensordot expects dims < ndim_a or ndim_b, but got dims=" + str(dims))
def test_torch_functional_tensordot_tensor(self):
    """torch.tensordot scripts correctly when 'dims' is given as a Tensor
    (scalar-like and explicit index-pair forms)."""
    def tensordot_dims_tensor(a: torch.Tensor, b: torch.Tensor, dims: torch.Tensor):
        return torch.tensordot(a, b, dims=dims)
    a = torch.arange(120.).reshape(2, 3, 4, 5)
    b = torch.arange(840.).reshape(4, 5, 6, 7)
    dims = torch.tensor([2])
    self.checkScript(tensordot_dims_tensor, (a, b, dims))
    a = torch.arange(60.).reshape(3, 4, 5)
    b = torch.arange(24.).reshape(4, 3, 2)
    # 2 x N tensor: rows list the contracted dims of a and b respectively
    dims = torch.tensor([[1, 0], [0, 1]], dtype=torch.long)
    self.checkScript(tensordot_dims_tensor, (a, b, dims))
def test_torch_functional_tensordot_list(self):
    """torch.tensordot scripts correctly when 'dims' is a List[List[int]]."""
    def tensordot_dims_list(a: torch.Tensor, b: torch.Tensor, dims: List[List[int]]):
        return torch.tensordot(a, b, dims=dims)
    a = torch.arange(60.).reshape(3, 4, 5)
    b = torch.arange(24.).reshape(4, 3, 2)
    dims = [[1, 0], [0, 1]]
    self.checkScript(tensordot_dims_list, (a, b, dims))
def test_torch_functional_tensordot_tuple(self):
    """torch.tensordot scripts correctly when 'dims' is a Tuple of int lists."""
    def tensordot_dims_tuple(a: torch.Tensor, b: torch.Tensor, dims: Tuple[List[int], List[int]]):
        return torch.tensordot(a, b, dims=dims)
    a = torch.arange(60.).reshape(3, 4, 5)
    b = torch.arange(24.).reshape(4, 3, 2)
    dims = ([1, 0], [0, 1])
    self.checkScript(tensordot_dims_tuple, (a, b, dims))
def test_missing_getstate(self):
    """Scripting a module that exports __setstate__ without a matching
    __getstate__ is rejected with an error mentioning 'getstate'."""
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.x = 1
        def forward(self, x):
            return x * self.x
        @torch.jit.export
        def __setstate__(self, state):
            self.x = state[0]
            self.training = state[1]
    with self.assertRaisesRegex(RuntimeError, "getstate"):
        scripted = torch.jit.script(Foo())
def test_inlining_cleanup(self):
    """After inlining, the data-dependent branch inside F.linear is constant-folded
    away, leaving no prim::If node in the graph."""
    def foo(x):
        return F.linear(x, x)
    @torch.jit.script
    def fee(x):
        return foo(x)
    # inlining optimizations should have cleaned up linear if statement
    self.run_pass("inline", fee.graph)
    FileCheck().check_not("prim::If").run(fee.graph)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_pack_unpack_nested(self):
    """_pack/_unpack hooks applied recursively over a three-level module tree:
    packing zeroes every buffer, unpacking restores them, and forward reflects
    the current state at each level."""
    class SubSubMod(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.buf = nn.Buffer(torch.ones(3, 4) * 3)
        @torch.jit.script_method
        def _pack(self):
            self.buf.set_(torch.zeros(1))
        @torch.jit.script_method
        def _unpack(self):
            self.buf.set_(torch.ones(3, 4) * 3)
        @torch.jit.script_method
        def forward(self, x):
            return x + self.buf
    class SubMod(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.buf = nn.Buffer(torch.ones(3, 4) * 2)
            self.ssm = SubSubMod()
        @torch.jit.script_method
        def _pack(self):
            self.buf.set_(torch.zeros(1))
        @torch.jit.script_method
        def _unpack(self):
            self.buf.set_(torch.ones(3, 4) * 2)
        @torch.jit.script_method
        def forward(self, x):
            return self.ssm(x + self.buf)
    class Mod(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.submod = SubMod()
            self.buf = nn.Buffer(torch.ones(3, 4) * 1)
        @torch.jit.script_method
        def _pack(self):
            self.buf.set_(torch.zeros(1))
        @torch.jit.script_method
        def _unpack(self):
            self.buf.set_(torch.ones(3, 4))
        @torch.jit.script_method
        def forward(self, x):
            return self.submod(x + self.buf)
    m = Mod()
    # buffers sum to 1 + 2 + 3 = 6 per element
    torch.testing.assert_close(m(torch.zeros(3, 4)), torch.ones(3, 4) * 6)
    # Module.apply visits every submodule, so all three levels pack/unpack
    m.apply(lambda s: s._pack())
    torch.testing.assert_close(m(torch.zeros(3, 4)), torch.zeros(3, 4))
    m.apply(lambda s: s._unpack())
    torch.testing.assert_close(m(torch.zeros(3, 4)), torch.ones(3, 4) * 6)
def test_torch_any(self):
    """torch.any scripts correctly, both the full-reduction form and the
    dim-wise form (including negative dims and empty tensors)."""
    def fn(x):
        return torch.any(x)
    def fn1(x, dim: int):
        return torch.any(x, dim)
    self.checkScript(fn, (torch.randn(3, 4), ))
    self.checkScript(fn, (torch.empty(3), ))
    self.checkScript(fn, (torch.empty(1), ))
    self.checkScript(fn, (torch.ones(3, 4),))
    self.checkScript(fn, (torch.zeros(5, 7, 1),))
    self.checkScript(fn1, (torch.empty(3, 4), -2))
    self.checkScript(fn1, (torch.randn(3, 8), 1))
    self.checkScript(fn1, (torch.zeros(3, 6, 9), -3))
    self.checkScript(fn1, (torch.empty(5), 0))
def test_any(self):
    """The builtin any() scripts correctly over int, float, bool and str lists,
    including empty lists (which are falsy)."""
    def fn(x: List[int]):
        return any(x)
    def fn1(x: List[float]):
        return any(x)
    def fn2(x: List[bool]):
        return any(x)
    def fn3(x: List[str]):
        return any(x)
    self.checkScript(fn, ([0, 0, 0, 0], ))
    self.checkScript(fn, ([0, 3, 0], ))
    self.checkScript(fn, ([], ))
    self.checkScript(fn1, ([1.0, 2.0, 3.0], ))
    self.checkScript(fn1, ([0.0, 0.0, 0.0], ))
    self.checkScript(fn1, ([0, 0, 0], ))
    self.checkScript(fn1, ([], ))
    self.checkScript(fn2, ([True, False, False], ))
    self.checkScript(fn2, ([False, False, False], ))
    self.checkScript(fn2, ([True, True, True, True], ))
    self.checkScript(fn2, ([], ))
    self.checkScript(fn3, (["", "", ""], ))
    self.checkScript(fn3, (["", "", "", "-1"], ))
    self.checkScript(fn3, ([], ))
def test_script_module_not_tuple(self):
    """Iterating a non-iterable constant attribute (an int) in a script method
    fails at compile time with a clear message."""
    class M(torch.jit.ScriptModule):
        __constants__ = ['mods']
        def __init__(self) -> None:
            super().__init__()
            # an int constant, deliberately not a module container
            self.mods = 1
        @torch.jit.script_method
        def forward(self, v):
            for m in self.mods:
                print(m)
            return v
    with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):
        M()
def test_attr_module_constants(self):
    """An nn.Sequential assigned as a plain attribute is scriptable via an
    explicit .forward call and survives export/import."""
    class M2(torch.jit.ScriptModule):
        def __init__(self, mod_list):
            super().__init__()
            self.mods = mod_list
        @torch.jit.script_method
        def forward(self, x):
            return self.mods.forward(x)
    with torch.jit.optimized_execution(False):
        m = M2(nn.Sequential(nn.ReLU()))
        self.assertExportImportModule(m, (torch.randn(2, 2),))
def test_script_sequential_for(self):
    """An nn.Sequential attribute works both iterated in a for loop and called
    directly inside script methods, matching eager iteration of its children."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(2))
        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.mods = nn.Sequential(Sub(), Sub(), Sub())
        @torch.jit.script_method
        def forward(self, v):
            # manual iteration over the Sequential's children
            for m in self.mods:
                v = m(v)
            return v
        @torch.jit.script_method
        def forward2(self, v):
            # direct call: Sequential applies its children itself
            return self.mods(v)
    with torch.jit.optimized_execution(False):
        i = torch.empty(2)
        m = M()
        o = m(i)
        # reproduce the chain in eager mode for comparison
        v = i
        for sub in m.mods._modules.values():
            v = sub(v)
        self.assertEqual(o, v)
        o2 = m.forward2(i)
        self.assertEqual(o2, v)
def test_script_sequential_sliced_iteration(self):
    """Slices of an nn.Sequential ([1:3], [2:]) can be iterated inside a
    scripted forward, alongside direct indexing ([0])."""
    class seq_mod(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.layers = [nn.ReLU(), nn.ReLU(), nn.ReLU()]
            self.layers = nn.Sequential(*self.layers)
        def forward(self, input):
            x = self.layers[0].forward(input)
            for layer in self.layers[1:3]:
                x = layer.forward(x)
            for layer in self.layers[2:]:
                x = layer.forward(x)
            return x
    seq = seq_mod()
    self.checkModule(seq, [torch.tensor([-2, 1, -1, 2])])
def test_script_sequential_orderdict(self):
    """An nn.Sequential built from an OrderedDict keeps its named children,
    so their parameters appear under the given names in state_dict."""
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.mods = nn.Sequential(OrderedDict([
                ("conv", nn.Conv2d(1, 20, 5)),
                ("relu", nn.ReLU())
            ]))
        @torch.jit.script_method
        def forward(self, input):
            return self.mods(input)
    m = M()
    self.assertTrue('mods.conv.weight' in m.state_dict())
def test_script_sequential_multi_output_fail(self):
    """A Sequential whose middle member returns a tuple cannot feed the next
    member (which expects a Tensor); compilation fails mentioning the tuple type."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(2))
        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing
    class ReturnMulti(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            # returns a 3-tuple, incompatible with the next Sub's Tensor input
            return x, x, x
    class HaveSequential(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.someseq = nn.Sequential(
                Sub(),
                ReturnMulti(),
                Sub()
            )
        @torch.jit.script_method
        def forward(self, x):
            return self.someseq(x)
    with self.assertRaisesRegex(RuntimeError, "(Tensor, Tensor, Tensor)"):
        with torch.jit.optimized_execution(False):
            hs = HaveSequential()
            i = torch.empty(2)
            hs(i)
@_tmp_donotuse_dont_inline_everything
def test_script_sequential_in_mod_list(self):
    """A Sequential nested inside a ModuleList is called as a unit: with inlining
    disabled the graph shows exactly two prim::CallMethod sites (one per
    top-level member) and no Python fallbacks."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(2))
        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.mods = nn.ModuleList([Sub(), nn.Sequential(Sub(), nn.Sequential(Sub(), Sub()), Sub())])
        @torch.jit.script_method
        def forward(self, v):
            for mod in self.mods:
                v = mod(v)
            return v
    m = M()
    graph = str(m.graph)
    # one call per top-level ModuleList entry
    self.assertTrue(graph.count("prim::CallMethod") == 2)
    self.assertTrue("python" not in graph)
@_tmp_donotuse_dont_inline_everything
def test_script_nested_mod_list(self):
    """Nested ModuleList/Sequential containers unroll to one prim::CallMethod
    per leaf submodule (4 leaves here), with no Python fallbacks."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(2))
        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing
    class M(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.mods = nn.ModuleList([nn.ModuleList([Sub()]), nn.Sequential(Sub()), nn.ModuleList([Sub(), Sub()])])
        @torch.jit.script_method
        def forward(self, v):
            for mod in self.mods:
                for m in mod:
                    v = m(v)
            return v
    m = M()
    graph = str(m.graph)
    # 1 + 1 + 2 leaf submodules -> 4 call sites
    self.assertTrue(graph.count("prim::CallMethod") == 4)
    self.assertTrue("python" not in graph)
def test_constant_as_attr(self):
    """An int declared in __constants__ can be used as a keyword argument
    (dim=) inside a script method."""
    class M(torch.jit.ScriptModule):
        __constants__ = ['dim']
        def __init__(self) -> None:
            super().__init__()
            self.dim = 1
        @torch.jit.script_method
        def forward(self, v):
            return torch.cat([v, v, v], dim=self.dim)
    v = torch.zeros(1, 1)
    with torch.jit.optimized_execution(False):
        self.assertEqual(torch.cat([v, v, v], dim=1), M()(v))
class StarTestSumStarred(torch.nn.Module):
    """Varargs module summing all positional tensor inputs (helper for the
    star-expression tests below)."""
    def __init__(self) -> None:
        super(TestScript.StarTestSumStarred, self).__init__()
    def forward(self, *inputs):
        output = inputs[0]
        for i in range(1, len(inputs)):
            output += inputs[i]
        return output
class StarTestReturnThree(torch.nn.Module):
    """Module returning its input three times as a tuple (helper for the
    star-expression tests below)."""
    def __init__(self) -> None:
        super(TestScript.StarTestReturnThree, self).__init__()
    def forward(self, rep):
        return rep, rep, rep
def test_script_star_expr(self):
    """A traced module's tuple output can be splatted (*tup) into another traced
    varargs module from a script method."""
    class M2(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.m = torch.jit.trace(TestScript.StarTestSumStarred(),
                                     (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)))
            self.g = torch.jit.trace(TestScript.StarTestReturnThree(), torch.ones(4, 3))
        @torch.jit.script_method
        def forward(self, rep):
            tup = self.g(rep)
            return self.m(*tup)
    m = M2()
    self.assertEqual(m(torch.zeros(4, 3)), 3 * torch.zeros(4, 3))
def test_script_star_expr_string(self):
    """Same as test_script_star_expr, but the forward is compiled from a source
    string via self.define instead of a decorated method."""
    class M2(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.m = torch.jit.trace(TestScript.StarTestSumStarred(),
                                     (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)))
            self.g = torch.jit.trace(TestScript.StarTestReturnThree(), torch.ones(4, 3))
            self.define('''
        def forward(self, rep):
            tup = self.g(rep)
            return self.m(*tup)
            ''')
    m = M2()
    self.assertEqual(m(torch.zeros(4, 3)), 3 * torch.zeros(4, 3))
class StarTestSumAndReturnThree(torch.nn.Module):
    """Varargs module that sums its inputs and returns the sum three times
    (helper for the star-assignment tests below)."""
    def __init__(self) -> None:
        super(TestScript.StarTestSumAndReturnThree, self).__init__()
    def forward(self, *inputs):
        # note: += is in-place, which matters for the _force_outplace tests
        output = inputs[0]
        for i in range(1, len(inputs)):
            output += inputs[i]
        return output, output, output
def test_script_star_assign(self):
    """Starred unpacking 'head, *tail = ...' of a traced module's tuple output
    works in a string-defined script method."""
    class M2(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.g = torch.jit.trace(TestScript.StarTestSumAndReturnThree(), torch.ones(4, 3))
            self.define('''
        def forward(self, rep):
            head, *tail = self.g(rep)
            return head
            ''')
    m = M2()
    self.assertEqual(m(torch.zeros(4, 3)), 3 * torch.zeros(4, 3))
def test_script_module_star_assign2(self):
    """Starred unpacking '*head, tail = ...' works; traced with
    _force_outplace=True so the sum is computed out-of-place (1+1+1 = 3)."""
    class M2(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.g = torch.jit.trace(
                TestScript.StarTestSumAndReturnThree(),
                (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)),
                _force_outplace=True)
            self.define('''
        def forward(self, rep):
            *head, tail = self.g(rep, rep, rep)
            return tail
            ''')
    m = M2()
    self.assertEqual(m(torch.ones(4, 3)), 3 * torch.ones(4, 3))
def test_script_module_star_assign2_inplace(self):
    """Same as test_script_module_star_assign2 but traced in-place
    (_force_outplace=False); aliasing of the single input makes the result 4x."""
    class M2(torch.jit.ScriptModule):
        def __init__(self) -> None:
            super().__init__()
            self.g = torch.jit.trace(
                TestScript.StarTestSumAndReturnThree(),
                (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)),
                _force_outplace=False)
            self.define('''
        def forward(self, rep):
            *head, tail = self.g(rep, rep, rep)
            return tail
            ''')
    m = M2()
    # since forward() makes three aliases to the input `rep` before passing
    # it to StarTestSumAndReturnThree(), in-place behavior will be different
    # than the above out of place.
    self.assertEqual(m(torch.ones(4, 3)), 4 * torch.ones(4, 3))
def test_script_module_star_assign_fail_pythonop(self):
    """Starred unpacking of an ignored (opaque) Python function's result fails:
    its return type is unknown, so it cannot be treated as a tuple."""
    with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"):
        class M2(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                @torch.jit.ignore
                def myfunc():
                    return torch.zeros(1, 2, 3), torch.zeros(1, 2, 3)
                self.define('''
            def forward(self, rep):
                a, *b = myfunc()
                return a
                ''')
        m = M2()
        m(torch.zeros(4, 3))
def test_script_module_star_assign_fail_builtin(self):
    """Starred unpacking of a builtin op returning a single Tensor fails:
    a Tensor is not a tuple."""
    with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"):
        class M2(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.define('''
            def forward(self, rep):
                a, *b = torch.neg(rep)
                return a
                ''')
        m = M2()
        m(torch.zeros(4, 3))
def test_script_pack_padded_sequence(self):
    """pack_padded_sequence/pad_packed_sequence round-trip scripts to the same
    result as eager, both as a free function and inside a module."""
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    def pack_padded_pad_packed_script(x, seq_lens):
        x = pack_padded_sequence(x, seq_lens)
        x, lengths = pad_packed_sequence(x)
        return x, lengths
    T, B, C = 3, 5, 7
    x = torch.ones((T, B, C))
    seq_lens = torch.tensor([3, 3, 2, 2, 1])
    # set padding value so we can test equivalence
    for b in range(B):
        if seq_lens[b] < T:
            x[seq_lens[b]:, b, :] = 0
    eager_seq, eager_lengths = pack_padded_pad_packed_script(x, seq_lens)
    with torch._jit_internal._disable_emit_hooks():
        scripted_pack_padded_seq = torch.jit.script(pack_padded_pad_packed_script)
    script_seq, script_lengths = scripted_pack_padded_seq(x, seq_lens)
    self.assertEqual(eager_seq, script_seq)
    self.assertEqual(eager_lengths, script_lengths)
    class ExperimentalLSTM(torch.nn.Module):
        def __init__(self, input_dim, hidden_dim):
            super().__init__()
        def forward(self, input):
            # type: (Tensor)
            # enforce_sorted=False exercises the sorting/unsorting path
            packed = pack_padded_sequence(
                input=input, lengths=torch.tensor([1, 2]), enforce_sorted=False
            )
            output, lengths = pad_packed_sequence(
                sequence=packed, total_length=2
            )
            # lengths is flipped, so is output
            return output[0]
    lstm = ExperimentalLSTM(input_dim=2, hidden_dim=2)
    with torch._jit_internal._disable_emit_hooks():
        self.checkModule(lstm, [torch.ones(2, 2)])
def test_script_pad_sequence_pack_sequence(self):
    """pad_sequence (all argument combinations incl. padding_side) and
    pack_sequence/pad_packed_sequence round-trips script correctly."""
    from torch.nn.utils.rnn import pad_sequence, pack_sequence, pad_packed_sequence
    def pad_sequence_func(tensor_list, batch_first=False, padding_value=0.0, padding_side="right"):
        # type: (List[Tensor], bool, float, str) -> Tensor
        return pad_sequence(tensor_list, batch_first, padding_value, padding_side)
    def pack_sequence_func(tensor_list, enforce_sorted=True):
        # type: (List[Tensor], bool) -> Tensor
        return pad_packed_sequence(pack_sequence(tensor_list, enforce_sorted))[0]
    ones3 = torch.ones(3, 5)
    ones4 = torch.ones(4, 5)
    ones5 = torch.ones(5, 5)
    tensor1 = torch.tensor([1, 2, 3])
    tensor2 = torch.tensor([4, 5])
    tensor3 = torch.tensor([6])
    with torch._jit_internal._disable_emit_hooks():
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5],))
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5], True))
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5], True, 2.5))
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5], True, 2.5, "left"))
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5], False, 2.5, "left"))
        self.checkScript(pack_sequence_func,
                         ([tensor1, tensor2, tensor3],))
        self.checkScript(pack_sequence_func,
                         ([tensor1, tensor2, tensor3], False))
def test_script_get_tracing_state(self):
    """torch._C._get_tracing_state() is usable in scripted code and is falsy
    there (scripting is not tracing)."""
    def test_if_tracing(x):
        if torch._C._get_tracing_state():
            return x + 1
        else:
            return x - 1
    inp = torch.randn(3, 3)
    self.checkScript(test_if_tracing, (inp,))
def test_script_is_tracing(self):
    """torch.jit.is_tracing() is usable in scripted code and returns False
    there (scripting is not tracing)."""
    def test_is_tracing(x):
        if torch.jit.is_tracing():
            return x + 1
        else:
            return x - 1
    inp = torch.randn(3, 3)
    self.checkScript(test_is_tracing, (inp,))
def test_is_scripting(self):
    """torch.jit.is_scripting() is False in eager mode and True once scripted."""
    def foo():
        return torch.jit.is_scripting()
    self.assertFalse(foo())
    scripted = torch.jit.script(foo)
    self.assertTrue(scripted())
def test_comment_ignore_indent(self):
    """A comment at an unconventional indentation level must not break the
    script frontend's source parsing."""
    class Model(torch.nn.Module):
        def __init__(self) -> None:
    # useless comment that is not indented correctly  # noqa: E115
            super().__init__()
        def forward(self):
            return 5
    # should compile without an error
    self.checkModule(Model(), ())
def test_script_outputs(self):
    """Unpacking errors are reported at compile time: a Tensor is not a tuple,
    and a 3-tuple cannot be unpacked into 2 names."""
    with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"):
        @torch.jit.script
        def foo(a):
            c, d = a + a
            return c + d
    @torch.jit.script
    def return3():
        return 1, 2, 3
    with self.assertRaisesRegex(RuntimeError, "too many values to unpack"):
        @torch.jit.script
        def bind2():
            a, b = return3()
            print(a)
            print(b)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_script_get_device_cuda(self):
    """Tensor.get_device() scripts correctly and returns 0 for a tensor on cuda:0."""
    @torch.jit.script
    def foo(a):
        return a.get_device()
    v = torch.randn(1, device='cuda')
    self.assertEqual(foo(v), 0)
def test_script_chunk(self):
    """torch.chunk's statically-known output count can be tuple-unpacked in script."""
    @torch.jit.script
    def foo(a):
        b, c = torch.chunk(a, dim=0, chunks=2)
        return b
    v = torch.rand(10, 3)
    self.assertEqual(torch.chunk(v, dim=0, chunks=2)[0], foo(v))
def test_script_copy(self):
    """A scripted module with an Optional[Tensor] attribute supports
    copy.copy and copy.deepcopy without error."""
    class M(torch.nn.Module):
        __annotations__ = {
            "val": Optional[torch.Tensor]
        }
        def __init__(self) -> None:
            super().__init__()
            self.val = None
        def some_method(self):
            return 3
        def forward(self, x):
            # type: (Tensor) -> Tensor
            self.val = x + self.some_method()
            return x
    m = torch.jit.script(M())
    # test copy
    copy.copy(m)
    copy.deepcopy(m)
def test_script_forward_method_replacement(self):
    """An instance-level bound 'forward' (types.MethodType) overrides the class's
    forward, and scripting honors the replacement."""
    # We want to support the use case of attaching a different `forward` method
    class LowLevelModule(torch.nn.Module):
        def forward(self, input: torch.Tensor):
            # Generic forward dispatch
            return self.forward_pytorch(input) * 2
    class TestModule(LowLevelModule):
        def __init__(self) -> None:
            super().__init__()
            # Replace the forward method
            self.forward = types.MethodType(LowLevelModule.forward, self)
        def forward_pytorch(self, input: torch.Tensor):
            return torch.tensor(123)
        def forward(self, input: torch.Tensor):
            # Should not use this forward method
            raise AssertionError("This method should not be used")
            return self.forward_pytorch(input)
    m = TestModule()
    # 123 * 2 via the replaced forward, both eager and scripted
    self.assertEqual(m(torch.tensor(1)), torch.tensor(246))
    m_scripted = torch.jit.script(m)
    self.assertEqual(m_scripted(torch.tensor(1)), torch.tensor(246))
def test_python_call_non_tensor(self):
    """A scripted function can call into a Python function that takes and
    returns mixed Tensor/int/tuple values."""
    def foo(a, b, c):
        # type: (Tensor, int, Tuple[Tensor, int]) -> Tuple[int, Tensor]
        d, e = c
        return b + e, a + d
    @torch.jit.script
    def bar():
        x = torch.ones(3, 4)
        a, b = foo(x, 3, (x, 3))
        return a, b
    self.assertEqual((6, torch.ones(3, 4) + 1), bar())
def test_python_call_non_tensor_wrong(self):
    """An ignored Python function whose runtime return value violates its
    declared Tensor annotation raises at call time."""
    with self.assertRaisesRegex(RuntimeError, r"but instead got value of type tuple"):
        @torch.jit.ignore
        def foo():
            # type: () -> Tensor
            return ((3, 4),)  # noqa: T484
        @torch.jit.script
        def bar():
            return foo()
        bar()
def test_if_different_type(self):
    """A variable used after an if must have one type across branches; branch-local
    variables of differing types are fine as long as they are not used afterwards."""
    with self.assertRaisesRegex(RuntimeError, "c0 is set to type "
                                "int in the true branch and type "
                                "float in the false branch"):
        @torch.jit.script
        def diff_type_used():
            if 1 == 2:
                c0 = 1
            else:
                c0 = 1.0
            return c0
    with self.assertRaisesRegex(RuntimeError, "Variable 'c0' previously had type float"):
        @torch.jit.script
        def diff_existing_type(x):
            c0 = 1.0
            if 1 == 2:
                c0 = 1
                print(x)
            return x
    # divergent types are allowed when the variable stays branch-local
    @torch.jit.script
    def diff_type_unused():
        if 1 == 1:
            c0 = 1
            print(c0)
        else:
            c0 = 1.0
            print(c0)
        return 1
def test_if_not_defined_error(self):
    """Using a variable after an if requires it to be defined in both branches;
    the error names the branch that misses the definition."""
    with self.assertRaisesRegex(RuntimeError, "c0 is not defined in the false branch"):
        @torch.jit.script
        def test():
            if 1 == 1:
                c0 = 1
            return c0
    with self.assertRaisesRegex(RuntimeError, "c0 is not defined in the true branch"):
        @torch.jit.script
        def test2():
            if 1 == 1:
                pass
            else:
                c0 = 1
            return c0
def test_if_list_cat(self):
    """Shape propagation tolerates tensor lists of different lengths coming
    out of the two branches of an if feeding torch.cat."""
    # testing that different length lists don't throw error on cat in shape prop
    @torch.jit.script
    def test_list(x):
        if bool(x.sum() < 1):
            c = [x, x]
        else:
            c = [x, x, x]
        return torch.cat(c)
    b = torch.zeros(2, 4)
    _propagate_shapes(test_list.graph, (b,), False)
def test_if_supertype(self):
    """When branches assign tensors of different dtypes to the same name, the
    if outputs unify to a dynamic (unspecialized) tensor type; the first output
    keeps its concrete Float type because both branches agree on it."""
    @torch.jit.script
    def tensor_unifying(x, y, z):
        # testing dynamic is appropriately set for y and z
        if bool(x):
            x, y, z = x + 1, y, z  # noqa: PLW0127
        else:
            x, y, z = x + 1, x, y
        return x, y, z
    a = torch.zeros(2, 2, dtype=torch.float)
    b = torch.zeros(2, 4, dtype=torch.long)
    c = torch.zeros(2, 4, dtype=torch.float)
    graph = _propagate_shapes(tensor_unifying.graph, (a, b, c), False)
    if_outputs = list(graph.findNode("prim::If").outputs())
    self.assertTrue(if_outputs[0].type().str() == "Float(*, *, requires_grad=0, device=cpu)")
    self.assertTrue(if_outputs[1].type().str() == "Tensor(*, *, requires_grad=0, device=cpu)")
    self.assertTrue(if_outputs[2].type().str() == "Tensor(*, *, requires_grad=0, device=cpu)")
def test_list_unify(self):
    """List types from if branches unify only when element types are compatible:
    int[] vs None[] is rejected, while tensor lists unify to a common tensor type
    that survives export/import."""
    # allowing a unififed int?[] would cause a runtime error b/c
    # the index operation expects int?[] to be a generic list,
    # but in the true branch the IValue will be a int list
    with self.assertRaisesRegex(RuntimeError, "int[] in the true branch and type None[]"):
        @torch.jit.script
        def list_optional_fails(x):
            # type: (bool) -> Optional[int]
            if x:
                y = [1]
            else:
                y = [None]  # noqa: T484
            return y[0]
    @torch.jit.script
    def list_tensors(x):
        # type: (bool) -> Tuple[Tensor, List[Tensor]]
        if x:
            a = torch.zeros([1, 1])
            y = [a]
        else:
            a = torch.zeros([1, 2])
            y = [a]
        return a, y
    self.run_pass('constant_propagation', list_tensors.graph)
    m = self.createFunctionFromGraph(list_tensors.graph)
    # testing that tensor type of lists is unified
    self.getExportImportCopy(m)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@_inline_everything
def test_import_constants_not_specialized(self):
    """A tensor baked into a traced graph is shape-specialized in the original
    module but loses the specialization when reloaded as a constant."""
    class Mod(torch.nn.Module):
        def forward(self, x):
            return torch.cat(2 * [x], dim=0)
    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self, mod):
            super().__init__()
            x = torch.zeros(1, 3)
            # close over x so it becomes a constant in the traced graph
            mod_fn = lambda : mod(x)  # noqa: E731
            self.mod = torch.jit.trace(mod_fn, ())
        @torch.jit.script_method
        def forward(self):
            return self.mod()
    cm = ScriptMod(Mod())
    # specialized tensor in graph
    FileCheck().check("Float(1, 3, strides=[3, 1], requires_grad=0, device=cpu)").run(cm.forward.graph)
    buffer = io.BytesIO()
    torch.jit.save(cm, buffer)
    buffer.seek(0)
    # when tensor is loaded as constant it isn't specialized
    cm_load = torch.jit.load(buffer)
    FileCheck().check_not("Float(1, 3)").run(cm_load.forward.graph)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_type_annotations_repeated_list(self):
    """BroadcastingListN[T] annotations accept a scalar, list or tuple and
    broadcast scalars to length N; malformed constructors are rejected."""
    @torch.jit.script
    def float_fn(x, y):
        # type: (float, BroadcastingList3[float]) -> List[float]
        return y
    # scalar input broadcasts to [1.0, 1.0, 1.0]
    self.assertEqual(float_fn(2.0, 1.0), float_fn(2.0, [1.0, 1.0, 1.0]))
    self.assertEqual(float_fn(2.0, 1.0), float_fn(2.0, (1.0, 1.0, 1.0)))
    @torch.jit.script
    def float_fn_call():
        print(float_fn(1.0, 1.0))
        print(float_fn(1.0, (1.0, 1.0, 1.0)))
    @torch.jit.script
    def int_fn(x):
        # type: (BroadcastingList3[int]) -> List[int]
        return x
    self.assertEqual(int_fn(1), int_fn([1, 1, 1]))
    self.assertEqual(int_fn(1), int_fn((1, 1, 1)))
    @torch.jit.script
    def int_fn_call():
        print(int_fn(1))
        print(int_fn((1, 1, 1)))
    with self.assertRaisesRegex(RuntimeError, "must be a positive integer:"):
        @torch.jit.script  # noqa: T484
        def fn(x):
            # type: (BroadcastingListx[int]) -> List[int]  # noqa: T484
            return x
    # using CU so that flake8 error on int[2] is not raised (noqa not working)
    with self.assertRaisesRegex(RuntimeError, "Unknown type constructor"):
        cu = torch.jit.CompilationUnit('''
            def nested(x, y):
                # type: (int, Tuple[int, int[2]]) -> List[int]
                return x  # noqa: T484
        ''')
    @torch.jit.script
    def f(x: BroadcastingList2[int]):
        return x
    # broadcast result elements are plain Python ints
    out = f(1)
    self.assertTrue(isinstance(out[0], int))
    self.assertEqual(out, [1, 1])
def test_ntuple_builtins(self):
    """The _single/_pair/_triple/_quadruple helpers from nn.modules.utils
    script correctly for both int and float inputs."""
    from torch.nn.modules.utils import _single, _pair, _triple, _quadruple
    def test_ints():
        return _single(1), _pair(2), _triple(3), _quadruple(4)
    def test_floats():
        return _single(1), _pair(2.1), _triple(3.1), _quadruple(4.1)
    self.checkScript(test_ints, ())
    self.checkScript(test_floats, ())
def test_embedding_renorm_grad_error(self):
    """F.embedding with max_norm renorms the weight in place; autograd must then
    raise on backward through a detached alias of that weight.

    Exercises both the eager helper and its scripted counterpart.
    """
    # Testing that the builtin call to embedding_renorm_ correctly throws
    # Error when .backward() is called on its input
    def embedding_norm(input, embedding_matrix, max_norm):
        F.embedding(input, embedding_matrix, max_norm=0.01)
    @torch.jit.script
    def embedding_norm_script(input, embedding_matrix, max_norm):
        # type: (Tensor, Tensor, float) -> None
        F.embedding(input, embedding_matrix, max_norm=0.01)
    # Fix: the loop variable was previously unused ("for _ in ..."), so both
    # iterations ran identical inline code and the scripted variant was never
    # exercised. Call each function under test to perform the in-place renorm.
    for fn in [embedding_norm, embedding_norm_script]:
        input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
        embedding_matrix = torch.randn(10, 3)
        var1 = torch.randn(10, 3, requires_grad=True)
        var2 = var1.detach().requires_grad_()
        output1 = var1 * embedding_matrix
        output2 = var2 * embedding_matrix
        # backward before the in-place renorm succeeds
        output1.sum().backward()
        # renorms embedding_matrix in place, invalidating the value saved for
        # output2's backward
        fn(input, embedding_matrix, 0.01)
        with self.assertRaisesRegex(RuntimeError, "modified"):
            output2.sum().backward()
def test_type_annotations(self):
    """Tuple-return annotations drive unpacking checks at compile time: too few
    or too many target names error; exact unpacking and whole-tuple binding work."""
    def fn(x, y):
        # type: (Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
        return x, x * 2, x * 3
    with self.assertRaisesRegex(RuntimeError, r"need 4 values .* found only 3"):
        @torch.jit.script
        def script_fn(x):
            x, y, z, w = fn(x, x)
    with self.assertRaisesRegex(RuntimeError, r"too many values .* need 2 but found 3"):
        @torch.jit.script
        def script_fn2(x):
            x, y = fn(x, x)
    def fn_unpack(x):
        y, z, w = fn(x, x)
        return y
    def fn_index(x):
        q = fn(x, x)
        return x
    def fn_string(str, strpair):
        # type: (str, Tuple[str, str]) -> Tuple[str, int, str, str]
        str1, str2 = strpair
        return str, 2, str1, str2
    x = torch.ones(2, 2)
    self.checkScript(fn_unpack, (x,), optimize=True)
    self.checkScript(fn_index, (x,), optimize=True)
    self.checkScript(fn_string, ("1", ("3", "4")), optimize=True)
    def test_type_annotations_varargs(self):
        """Calling an @torch.jit.ignore'd *args function with varying arg counts scripts correctly."""
        @torch.jit.ignore
        def fn_varargs(x, *args):
            return args[0] if args else x
        def fn1(x, y, z):
            return fn_varargs(x)
        def fn2(x, y, z):
            return fn_varargs(x, y)
        def fn3(x, y, z):
            return fn_varargs(x, y, z)
        x, y, z = (torch.randn(2, 2) for _ in range(3))
        self.checkScript(fn1, (x, y, z), optimize=True)
        self.checkScript(fn2, (x, y, z), optimize=True)
        self.checkScript(fn3, (x, y, z), optimize=True)
    def test_type_annotation_py3(self):
        """Python-3 style annotations on an ignored function are enforced at script call sites."""
        code = dedent("""
            import torch
            from torch import Tensor
            from typing import Tuple
            def fn(x : torch.Tensor, y : Tensor, z) -> Tuple[Tensor, Tensor, Tensor]:
                return (x, y + z, z)
        """)
        # Load fn from a real file so its annotations come from py3 syntax,
        # then mark it ignore so the scripted callers must trust the signature.
        with tempfile.TemporaryDirectory() as tmp_dir:
            script_path = os.path.join(tmp_dir, 'script.py')
            with open(script_path, 'w') as f:
                f.write(code)
            fn = get_fn('test_type_annotation_py3', script_path)
            fn = torch.jit.ignore(fn)
            # Wrong argument type is rejected.
            with self.assertRaisesRegex(RuntimeError, r"Expected a value of type 'Tensor' for argument"
                                                      r" 'x' but instead found type 'Tuple\[Tensor,"):
                @torch.jit.script
                def bad_fn(x):
                    x, y = fn((x, x), x, x)
                    return y
            # Wrong unpacking arity (too few names) is rejected.
            with self.assertRaisesRegex(RuntimeError, r"too many values .* need 2 but found 3"):
                @torch.jit.script
                def bad_fn2(x):
                    x, y = fn(x, x, x)
                    return y
            # Wrong unpacking arity (too many names) is rejected.
            with self.assertRaisesRegex(RuntimeError, r"need 4 values .* found only 3"):
                @torch.jit.script
                def bad_fn3(x):
                    x, y, z, w = fn(x, x, x)
                    return y
            def good_fn(x):
                y, z, w = fn(x, x, x)
                return y, z, w
            self.checkScript(good_fn, (torch.ones(2, 2),), optimize=True)
    def test_type_annotation_module(self):
        """Annotated signatures of ignored methods are checked when called from script methods."""
        class BaseModule(torch.jit.ScriptModule):
            @torch.jit.ignore
            def foo(self, x):
                # type: (Tensor) -> Tensor
                return x + 1
            @torch.jit.ignore
            def bar(self, x, y):
                # type: (Tensor, Tensor) -> Tuple[Tensor, Tensor]
                return x + y, y
            @torch.jit.ignore
            def baz(self, x, y):
                return x
        class ModuleTooMany(BaseModule):
            @torch.jit.script_method
            def method(self, x):
                return self.foo(x, x)
        class ModuleTooFew(BaseModule):
            @torch.jit.script_method
            def method(self, x):
                return self.bar(x)
        class ModuleTooManyAssign(BaseModule):
            @torch.jit.script_method
            def method(self, x):
                y, z, w = self.bar(x, x)
                return x
        class ModuleDefault(BaseModule):
            @torch.jit.script_method
            def method(self, x):
                # baz has no annotation, so y has no declared default/type info.
                y = self.baz(x)
                return x
        # Each misuse is rejected at module construction (compilation) time.
        with self.assertRaisesRegex(RuntimeError, "Expected at most 2 arguments but found 3"):
            ModuleTooMany()
        with self.assertRaisesRegex(RuntimeError, "Argument y not provided"):
            ModuleTooFew()
        with self.assertRaisesRegex(RuntimeError, "need 3 values .* found only 2"):
            ModuleTooManyAssign()
        with self.assertRaisesRegex(RuntimeError, "Argument y not provided."):
            ModuleDefault()
    def test_type_inferred_from_empty_annotation(self):
        """
        Test that the type inferred from an empty or missing annotation is Torch.Tensor with `inferred=true`
        """
        @torch.jit.script
        def fn(x):
            return x
        graph = fn.graph
        n = next(graph.inputs())
        self.assertTrue(n.type() == torch._C.TensorType.getInferred())
        # Because the Tensor type was only inferred, passing a str mentions
        # "Inferred" in the type-mismatch error.
        with self.assertRaisesRegex(RuntimeError, "Inferred 'x' to be of type 'Tensor"):
            fn("1")
    def test_script_define_order(self):
        """A script method may call another script method defined later in the class body."""
        class M(torch.jit.ScriptModule):
            @torch.jit.script_method
            def call_foo(self, input):
                return self.foo(input)
            @torch.jit.script_method
            def foo(self, input):
                return input + 1
        m = M()
        self.assertEqual(2, m.call_foo(torch.ones((), dtype=torch.int64)))
    def test_script_define_order_recursive_fail(self):
        """Mutually recursive script methods are rejected at compilation time."""
        class M(torch.jit.ScriptModule):
            @torch.jit.script_method
            def call_foo(self, input):
                return self.foo(input)
            @torch.jit.script_method
            def foo(self, input):
                self.call_foo(input)
        with self.assertRaisesRegex(RuntimeError, 'called recursively'):
            M()
    def test_script_kwargs_fn_call(self):
        """Keyword arguments may be passed out of positional order between script methods."""
        class M(torch.jit.ScriptModule):
            @torch.jit.script_method
            def call_foo(self, input):
                return self.foo(input=input, bar=1)
            @torch.jit.script_method
            def foo(self, bar, input):
                # type: (int, Tensor) -> Tensor
                return input + bar
        m = M()
        self.assertEqual(2, m.call_foo(torch.ones((), dtype=torch.int64)))
    def test_if_define(self):
        """Variables defined in both / one / neither branch of an if are scoped correctly in script."""
        @torch.jit.script
        def foo(a):
            # b defined in both branches.
            if bool(a == 0):
                b = 1
            else:
                b = 0
            return b + 1
        @torch.jit.script
        def foo2(a):
            # b defined before, conditionally overwritten.
            b = 0
            if bool(a == 0):
                b = 1
            return b + 1
        @torch.jit.script
        def foo3(a):
            # c defined only in one branch but never used afterwards.
            b = 1
            if bool(a == 0):
                c = 4
            else:
                b = 0
            return b + 1
        a = torch.ones(1, dtype=torch.long)
        b = torch.zeros(1, dtype=torch.long)
        self.assertEqual(1, foo(a))
        self.assertEqual(2, foo(b))
        self.assertEqual(1, foo2(a))
        self.assertEqual(2, foo2(b))
        self.assertEqual(1, foo3(a))
        self.assertEqual(2, foo3(b))
    def test_script_module_export_submodule(self):
        """Export/import round-trip of a ScriptModule with a submodule, params, and define()'d method."""
        class M1(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.weight = nn.Parameter(torch.randn(2))
            @torch.jit.script_method
            def forward(self, thing):
                return self.weight + thing
        class M2(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                # test submodule
                self.sub = M1()
                self.weight = nn.Parameter(torch.randn(2, 3))
                self.bias = nn.Parameter(torch.randn(2))
                self.define("""
                    def hi(self, a):
                        return self.weight.mm(a)
                """)
            @torch.jit.script_method
            def doit(self, input):
                return self.weight.mm(input)
            @torch.jit.script_method
            def doit2(self, input):
                return self.weight.mm(input)
            @torch.jit.script_method
            def doit3(self, input):
                return input + torch.ones([1], dtype=torch.double)
            @torch.jit.script_method
            def forward(self, input):
                a = self.doit(input)
                b = self.doit2(input)
                c = self.hi(input)
                return a + b + self.bias + c
        with torch.jit.optimized_execution(False):
            m_orig = M2()
            m_import = self.getExportImportCopy(m_orig)
            input = torch.randn(3, 2)
            # Imported module must reproduce the original's outputs.
            self.assertEqual(m_orig.doit(input), m_import.doit(input))
            self.assertEqual(m_orig.hi(input), m_import.hi(input))
            self.assertEqual(m_orig.doit3(input), m_import.doit3(input))
            self.assertEqual(m_orig.forward(input), m_import.forward(input))
    @slowTest
    def test_compile_module_with_constant(self):
        """A module marked __constants__ may hold None or a submodule; the None branch folds away."""
        class Double(nn.Module):
            def __init__(self, downsample=None):
                super().__init__()
            def forward(self, input):
                return input * 2
        class Mod(nn.Module):
            __constants__ = ['downsample']
            def __init__(self, downsample=None):
                super().__init__()
                self.downsample = downsample
            def forward(self, input):
                if self.downsample is not None:
                    return self.downsample(input)
                return input
        none_mod = torch.jit.script(Mod(None))
        double_mod = torch.jit.script(Mod(Double()))
        self.assertEqual(none_mod(torch.tensor(1)), torch.tensor(1))
        self.assertEqual(double_mod(torch.tensor(1)), torch.tensor(1) * 2)
    def test_device_kwarg(self):
        """torch.device constructed with the type= keyword scripts correctly."""
        from torch import device
        def f():
            return device(type='cuda'), torch.device(type='cpu')
        self.checkScript(f, ())
    def test_script_module_export_tensor_type(self):
        """Export/import preserves parameter dtype and does not resize the underlying storage."""
        class M(torch.jit.ScriptModule):
            def __init__(self, type):
                super().__init__()
                self.param = torch.nn.Parameter(torch.zeros((5, 5), dtype=type).random_())
            @torch.jit.script_method
            def foo(self):
                return self.param
        with torch.jit.optimized_execution(False):
            for type in [torch.float, torch.double]:
                m_orig = M(type)
                m_import = self.getExportImportCopy(m_orig)
                # check to make sure the storage wasn't resized
                self.assertTrue(m_orig.param.storage().size() == 25)
                self.assertEqual(m_orig.foo(), m_import.foo())
                self.assertTrue(m_orig.foo().dtype == m_import.foo().dtype)
    @unittest.skipIf(not RUN_CUDA, "testing cuda tensors require CUDA")
    def test_script_module_export_tensor_cuda(self):
        """Export/import preserves CUDA device placement, dtype, and storage size of parameters."""
        class M(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.zeros((5, 5), device='cuda:0').random_())
            @torch.jit.script_method
            def foo(self):
                return self.param
        m_orig = M()
        m_import = self.getExportImportCopy(m_orig)
        # check to make sure the storage wasn't resized
        self.assertTrue(m_orig.param.storage().size() == 25)
        self.assertTrue(m_import.foo().device == torch.device('cuda:0'))
        self.assertEqual(m_orig.foo(), m_import.foo())
        self.assertTrue(m_orig.foo().dtype == m_import.foo().dtype)
    def test_script_module_export_blocks(self):
        """Export/import round-trip of a module whose forward contains an if (nested IR blocks)."""
        class M(torch.jit.ScriptModule):
            def __init__(self, n, m):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(n, m))
            @torch.jit.script_method
            def forward(self, input):
                if bool(input.sum() > 0):
                    output = self.weight.mv(input)
                else:
                    output = self.weight + input
                return output
        m_orig = M(200, 200)
        m_import = self.getExportImportCopy(m_orig)
        t = torch.rand(200)
        self.assertEqual(m_orig(t), m_import(t))
    def test_script_module_export_shared_storage(self):
        """Parameters that view the same storage still share it after export/import; others stay distinct."""
        class M(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.param1 = torch.nn.Parameter(torch.rand(5, 5))
                # param2 is a view into param1's storage.
                self.param2 = torch.nn.Parameter(self.param1[3])
                self.param3 = torch.nn.Parameter(torch.rand(5, 5))
                # param4 is a sliced view of a larger temporary tensor.
                self.param4 = torch.nn.Parameter(torch.rand(11, 5)[1:6])
            @torch.jit.script_method
            def foo(self):
                return self.param1 + self.param2 + self.param3 + self.param4
        with torch.jit.optimized_execution(False):
            m_orig = M()
            m_import = self.getExportImportCopy(m_orig)
            self.assertEqual(m_orig.foo(), m_import.foo())
            # Sharing is preserved for param1/param2; param3 remains separate.
            self.assertTrue(m_import.param1.storage().data_ptr() == m_import.param2.storage().data_ptr())
            self.assertTrue(m_import.param1.storage().data_ptr() != m_import.param3.storage().data_ptr())
    def test_sequential_intermediary_types(self):
        """nn.Sequential whose intermediate value is a dict (non-Tensor) scripts correctly."""
        class A(torch.nn.Module):
            def forward(self, x):
                return x + 3
        class B(torch.nn.Module):
            def forward(self, x):
                return {"1": x}
        class C(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.foo = torch.nn.Sequential(A(), B())
            def forward(self, x):
                return self.foo(x)
        self.checkModule(C(), (torch.tensor(1),))
    def test_ellipsis_const_mid(self):
        """Indexing with the Ellipsis constant in a middle position scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[2, Ellipsis, 0:4, 4:8].size()
        dummy = torch.zeros(8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_ellipsis_const_mid_select(self):
        """Ellipsis constant mid-index mixed with integer selects scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[2, Ellipsis, 4, 4, 4:8, 2].size()
        dummy = torch.zeros(8, 8, 8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_ellipsis_const_start(self):
        """Ellipsis constant as the first index scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[Ellipsis, 0:4, 4:8].size()
        dummy = torch.zeros(8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_ellipsis_const_end(self):
        """Ellipsis constant as the last index scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[0:4, 2, Ellipsis].size()
        dummy = torch.zeros(8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_ellipsis_mid(self):
        """`...` literal in a middle index position scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[2, ..., 0:4, 4:8].size()
        dummy = torch.zeros(8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_ellipsis_mid_select(self):
        """`...` literal mid-index mixed with integer selects scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[2, ..., 4, 4, 4:8, 2].size()
        dummy = torch.zeros(8, 8, 8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_ellipsis_start(self):
        """`...` literal as the first index scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[..., 0:4, 4:8].size()
        dummy = torch.zeros(8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_ellipsis_end(self):
        """`...` literal as the last index scripts correctly."""
        def ellipsize(x):
            # type: (Tensor) -> List[int]
            return x[0:4, 2, ...].size()
        dummy = torch.zeros(8, 8, 8, 8, 8)
        self.checkScript(ellipsize, (dummy,), optimize=True)
    def test_torch_manual_seed(self):
        """torch.manual_seed is scripted (appears in the graph) and yields the same RNG stream as eager."""
        with freeze_rng_state():
            def test():
                torch.manual_seed(2)
                return torch.rand(1)
            script = torch.jit.script(test)
            self.assertEqual(test(), script())
            graph = script.graph_for()
            FileCheck().check("aten::manual_seed").run(graph)
    @skipIfTorchDynamo("Not a TorchDynamo suitable test")
    def test_index_select_shape_prop(self):
        """Complete shape analysis propagates the (2, 4) output shape through index_select."""
        @torch.jit.script
        def foo(x, y):
            return torch.index_select(x, index=y, dim=1)
        a = torch.zeros(2, 2)
        b = torch.zeros(4, dtype=torch.long)
        torch._C._jit_pass_complete_shape_analysis(foo.graph, (a, b), False)
        FileCheck().check("Float(2, 4, strides=[4, 1], requires_grad=0, device=cpu)").run(str(foo.graph))
    def test_shape_analysis_loop(self):
        """Regression test: loop-carried shapes that change per iteration must not break shape analysis."""
        def foo(a, b, x):
            c = a
            # on the first iteration of the loop it appears that
            # c should have a expand to the size of b
            # but on the second+ iterations, there is no broadcast and the
            # sizes are different.
            # previously this would cause the compiler to (1) enter an infinite
            # loop trying to compute the shape, and (2) insert invalid
            # broadcasts.
            # this test ensure we don't regress on these issues
            for _ in range(2):
                a = c + b
                c = x
                b = x
            return a
        self.checkScript(foo, (torch.zeros(1), torch.zeros(4), torch.zeros(5)), optimize=False)
    def test_intlist_args(self):
        """An int-list argument may be given positionally, as an int kwarg, or as a list kwarg."""
        def func_1(x):
            return torch.nn.functional.adaptive_avg_pool1d(x, 1)
        def func_2(x):
            return torch.nn.functional.adaptive_avg_pool1d(x, output_size=1)
        def func_3(x):
            return torch.nn.functional.adaptive_avg_pool1d(x, output_size=[1])
        x = torch.randn(8, 8, 8)
        self.checkScript(func_1, [x], optimize=True)
        self.checkScript(func_2, [x], optimize=True)
        self.checkScript(func_3, [x], optimize=True)
    def test_wrong_implicit_expand(self):
        """A trace captured with broadcasting shapes must still compute correctly on equal shapes."""
        @_trace(torch.zeros(3), torch.zeros(1))
        def foo(a, b):
            return a + b
        # Traced with (3,)+(1,) broadcast; calling with (4,)+(4,) must not
        # bake in a wrong expand.
        a = torch.rand(4)
        b = torch.rand(4)
        self.assertEqual(a + b, foo(a, b))
    def test_builtin_args_fails(self):
        """Bad builtin-op argument usage produces the expected schema-matching compile errors."""
        # Missing required positional 'self'.
        with self.assertRaisesRegex(RuntimeError, 'Argument self not provided'):
            @torch.jit.script
            def f1(a):
                torch.sum(foo=4)
        # Same argument given positionally and by keyword.
        with self.assertRaisesRegex(RuntimeError, 'specified twice'):
            @torch.jit.script
            def f2(a):
                torch.sum(a, self=a)
        # Missing required argument when only a keyword is given.
        with self.assertRaisesRegex(RuntimeError, 'not provided'):
            @torch.jit.script
            def f3(a):
                torch.sum(dim=4)
        # Wrong argument type: Tensor where List[Tensor] is expected.
        with self.assertRaisesRegex(RuntimeError, 'for argument \'tensors\' but instead found type \'Tensor'):
            @torch.jit.script
            def f4(a):
                torch.cat(a)
        # Wrong element type in the list argument.
        with self.assertRaisesRegex(RuntimeError, r'argument \'tensors\' but instead found type \'List\[int\]'):
            @torch.jit.script
            def f5(a):
                torch.cat([3])
        # Heterogeneous list cannot satisfy List[int].
        with self.assertRaisesRegex(RuntimeError, r'Expected a value of'
                                    r' type \'List\[int\]\' for argument'
                                    r' \'size\' but instead found type '
                                    r'\'List\[Union\[List\[int\], int\]\]'):
            @torch.jit.script
            def f6(a):
                a.expand(size=[3, [4]])
    def test_builtin_args(self):
        """Builtin ops accept defaulted, out-of-order keyword, and mixed const/non-const arguments."""
        def t0(a):
            # default arg dim
            return torch.cat([a, a])
        self.checkScript(t0, (torch.zeros(1, 1),))
        def t1(a):
            # keywords out of order
            return torch.cat(dim=1, tensors=[a, a])
        self.checkScript(t1, (torch.zeros(1, 1, 2),))
        def t2(a):
            # mix const/non-const attributes
            if 1 == 1:
                b = 1
            else:
                b = 0
            return torch.sum(a, dim=b, keepdim=False)
        self.checkScript(t2, (torch.zeros(1, 1, 2),))
    def test_parser_type_annotations(self):
        """Python-3 style annotations in CompilationUnit source parse into the expected schema."""
        cu = torch.jit.CompilationUnit('''
            def foo(x : Tensor, y : Tuple[Tuple[Tensor, Tensor], Tensor]) -> Tuple[Tensor, Tensor]:
                return x, x
        ''')
        self.assertExpected(str(cu.foo.schema))
    def test_parser_type_annotations_comment(self):
        """MyPy-style `# type:` comment annotations parse into the expected schema."""
        cu = torch.jit.CompilationUnit('''
            def foo(x, y):
                # type: (Tensor, Tuple[Tuple[Tensor, Tensor], Tensor]) -> Tuple[Tensor, Tensor]
                return x, x
        ''')
        self.assertExpected(str(cu.foo.schema))
    def test_parser_type_annotations_unknown_type(self):
        """An undefined type name in an annotation is a parse-time error."""
        with self.assertRaisesRegex(RuntimeError, "Unknown type name 'Foo'"):
            cu = torch.jit.CompilationUnit('''
                def foo(x : Tensor, y : Tuple[Tuple[Foo, Tensor], Tensor]) -> Tuple[Tensor, Tensor]:
                    return x, x
            ''')
    def test_parser_type_annotations_subscript_non_ident(self):
        """Subscripting a non-identifier type expression is a parse-time error."""
        with self.assertRaisesRegex(RuntimeError, r'Subscripted type must be a type identifier'):
            cu = torch.jit.CompilationUnit('''
                def foo(x : Tensor, y : Tuple[Tensor, Tensor][Tensor]) -> Tuple[Tensor, Tensor]:
                    return x, x
            ''')
    def test_parser_type_annotations_subscript_tensor(self):
        """Tensor is not a generic type constructor; subscripting it is a parse-time error."""
        with self.assertRaisesRegex(RuntimeError, r'Unknown type constructor Tensor'):
            cu = torch.jit.CompilationUnit('''
                def foo(x : Tensor, y : Tensor[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:
                    return x, x
            ''')
    def test_parser_type_annotations_incompatible_expression(self):
        """A runtime expression (e.g. `3 + 4`) inside a type annotation is a parse-time error."""
        with self.assertRaisesRegex(RuntimeError, r'Expression of type \+ cannot be used in a type expression'):
            cu = torch.jit.CompilationUnit('''
                def foo(x : Tensor, y : Tuple[3 + 4, Tensor]) -> Tuple[Tensor, Tensor]:
                    return x, x
            ''')
    def test_gather_dynamic_index(self):
        """Indexing a tensor with both a constant and a computed index scripts correctly."""
        def t(x):
            gather1 = x[0]
            idx = 0 + 1
            gather2 = x[idx]
            return gather1 + gather2
        self.checkScript(t, (torch.zeros(3, 2, 3),))
    def test_torch_ignore_conversion_to_none(self):
        """An @torch.jit.ignore'd method returning None works with and without a `-> None` annotation."""
        class A(torch.nn.Module):
            @torch.jit.ignore
            def ignored(self, a: int) -> None:
                l: int = len([2 for i in range(a) if i > 2])
                return
            def forward(self) -> int:
                a: int = 4
                b: int = 5
                self.ignored(a)
                return a + b
        class B(torch.nn.Module):
            @torch.jit.ignore
            def ignored(self, a: int):
                # Same as A.ignored but without the explicit -> None annotation.
                l: int = len([2 for i in range(a) if i > 2])
                return
            def forward(self) -> int:
                a: int = 4
                b: int = 5
                self.ignored(a)
                return a + b
        modelA = torch.jit.script(A())
        self.assertEqual(modelA(), 9)
        modelB = torch.jit.script(B())
        self.assertEqual(modelB(), 9)
    def test_addmm_grad(self):
        """ This test checks several things:
            1. An expand node was inserted before the addmm operating on the
               bias term.
            2. The fused form of addmm appears in the ultimate graph that's
               executed.
            3. A sum op was emitted for accumulating gradients along the 0th
               (expanded) dimension of the bias term.
            4. The correct symbolic representation for the backward pass of the
               mm operator was emitted (x.t() -> mm)
            TODO: we should actually check these conditions once we have a way
            to dump the GraphExecutor state. Namely the processed forward graph
            and the backward graph.
        """
        @torch.jit.script
        def addmm_grad_test(b, x, w):
            return torch.addmm(b, x, w)
        # Initialize param and input values
        w_init = torch.rand(2, 5)
        b_init = torch.rand(5)
        x = torch.rand(3, 2)
        # Clone trainable params
        b = b_init.clone()
        b.requires_grad_()
        w = w_init.clone()
        w.requires_grad_()
        # Test symbolic differentiation
        y = addmm_grad_test(b, x, w)
        y.sum().backward()
        # clone params for autograd reference
        b_ref = b_init.clone()
        b_ref.requires_grad_()
        w_ref = w_init.clone()
        w_ref.requires_grad_()
        y_ref = torch.addmm(b_ref, x, w_ref)
        y_ref.sum().backward()
        # Scripted backward must match eager autograd exactly.
        self.assertEqual(w.grad, w_ref.grad)
        self.assertEqual(b.grad, b_ref.grad)
    @unittest.skipIf(not RUN_CUDA, "running tests on cuda to verify cudnn fix")
    def test_batch_norm_inference_backward_cuda(self):
        """Backward through eval-mode BatchNorm2d on CUDA matches eager results and running stats."""
        with enable_profiling_mode_for_profiling_tests():
            class MyBatchNorm(torch.nn.Module):
                def __init__(self, num_features, affine, track_running_stats):
                    super().__init__()
                    self.bn = torch.nn.BatchNorm2d(
                        num_features, 1e-5, affine=affine, track_running_stats=track_running_stats).float()
                def forward(self, x: torch.Tensor):
                    o = self.bn(x)
                    o = torch.nn.functional.relu(o)
                    return o
            batch = 4
            c = 2
            hw = 3
            # Initialize param and input values
            x_init = torch.randn(batch, c, hw, hw, dtype=torch.float).cuda()
            grad = torch.randn(batch, c, hw, hw, dtype=torch.float).cuda()
            training = False
            affine = True
            track_running_stats = True
            module = torch.jit.script(MyBatchNorm(c, affine, track_running_stats)).cuda()
            ref_module = MyBatchNorm(c, affine, track_running_stats).cuda()
            module.eval()
            ref_module.eval()
            jit_module = torch.jit.script(module)
            ref_module.load_state_dict(module.state_dict())
            x = x_init.detach().clone()
            x.requires_grad_()
            x_ref = x_init.detach().clone()
            x_ref.requires_grad_()
            # Test symbolic differentiation
            # Run Forward and Backward thrice to trigger autodiff graph
            for _ in range(3):
                y = jit_module(x)
                y.backward(grad)
            # Reset gradients and running stats so the measured run starts clean.
            x.grad.zero_()
            module.bn.running_mean.zero_()
            module.bn.running_var.fill_(1.0)
            ref_module.bn.running_mean.zero_()
            ref_module.bn.running_var.fill_(1.0)
            # run jitted module
            y = jit_module(x)
            y.backward(grad)
            # reference computation
            y_ref = ref_module(x_ref)
            y_ref.backward(grad)
            self.assertEqual(y_ref, y)
            self.assertEqual(x.grad, x_ref.grad)
            # Eval mode must not update running statistics.
            self.assertEqual(module.bn.running_mean, ref_module.bn.running_mean)
            self.assertEqual(module.bn.running_var, ref_module.bn.running_var)
    def test_zeros(self):
        """torch.zeros scripts with dtype/device/layout kwargs (device from a module constant)."""
        class M(torch.jit.ScriptModule):
            __constants__ = ['d']
            def __init__(self) -> None:
                super().__init__()
                self.d = torch.device('cpu')
            @torch.jit.script_method
            def create(self):
                return torch.zeros([1, 1, 2], dtype=torch.float, device=self.d, layout=torch.strided)
        r = M().create()
        self.assertEqual(r.dtype, torch.float)
        self.assertEqual(torch.zeros([1, 1, 2], dtype=torch.float), r)
        # Also accepts a size tuple positionally in a plain function.
        def fn():
            return torch.zeros((1, 2, 3))
        self.checkScript(fn, ())
    def test_vararg_zeros(self):
        """torch.zeros called with varargs sizes plus a dtype kwarg scripts correctly."""
        def foo():
            return torch.zeros(3, 4, 5, dtype=torch.int)
        self.checkScript(foo, ())
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "the original version of test_rand")
    def test_rand(self):
        """Legacy executor: shape analysis types torch.rand/randint outputs with the right dtype."""
        def test_rand():
            a = torch.rand([3, 4])
            return a + 1.0 - a
        self.checkScript(test_rand, ())
        fn = torch.jit.script(test_rand)
        out = fn()
        self.assertEqual(out.dtype, torch.get_default_dtype())
        g = fn.graph_for()
        # Testing shape analysis correctly setting type
        if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
            FileCheck().check("Double(*, *, requires_grad=0, device=cpu)") \
                       .check_not("Float(*, *, requires_grad=0, device=cpu)").run(g)
        @torch.jit.script
        def randint():
            return torch.randint(0, 5, [1, 2])
        out = randint()
        self.assertEqual(out.dtype, torch.int64)
        if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
            FileCheck().check("Long(*, *, requires_grad=0, device=cpu)") \
                       .check_not("Float(*, *, requires_grad=0, device=cpu)") \
                       .check_not("Double(*, *, requires_grad=0, device=cpu)") \
                       .run(randint.graph_for())
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "skip if profiling isn't enabled")
    def test_autodiff_complex(self):
        """Scripted complex-valued computation has the same grad_fn presence as eager across profiling runs."""
        def foo(x: torch.Tensor, y: torch.Tensor, W: torch.Tensor):
            return torch.exp(torch.mm(torch.complex(x, y), W.cfloat()))
        @torch.jit.script
        def jitted_foo(x: torch.Tensor, y: torch.Tensor, W: torch.Tensor):
            return torch.exp(torch.mm(torch.complex(x, y), W.cfloat()))
        x = torch.randn(128, 16, dtype=torch.float32, device='cuda:0')
        y = torch.randn(128, 16, dtype=torch.float32, device='cuda:0')
        W = torch.randn(16, 1, dtype=torch.float32, device='cuda:0', requires_grad=True)
        # Scale W down to keep exp() from overflowing.
        W.data /= 4
        with enable_profiling_mode_for_profiling_tests():
            # Four iterations cover profiling + optimized executions.
            for _ in range(4):
                self.assertTrue((foo(x, y, W).grad_fn is None) == (jitted_foo(x, y, W).grad_fn is None))
    def test_linear_grad(self):
        """Scripted F.linear gradients match eager, with and without a bias (Optional[Tensor]=None)."""
        with enable_profiling_mode_for_profiling_tests():
            def t(x: torch.Tensor, w: torch.Tensor, b: Optional[torch.Tensor]):
                return torch.nn.functional.linear(x, w, b)
            x_init = torch.randn(4, 2)
            w_init = torch.randn(3, 2)
            b_init = torch.randn(3)
            grad = torch.randn(4, 3)
            with disable_autodiff_subgraph_inlining():
                # script module
                jit_t = torch.jit.script(t)
                x = x_init.detach().requires_grad_()
                w = w_init.detach().requires_grad_()
                b = b_init.detach().requires_grad_()
                x_ref = x_init.detach().requires_grad_()
                w_ref = w_init.detach().requires_grad_()
                b_ref = b_init.detach().requires_grad_()
                # profiling/optimization runs
                jit_o = jit_t(x, w, b)
                jit_o.backward(grad)
                jit_o = jit_t(x, w, b)
                jit_o.backward(grad)
                # Reset grads accumulated during warm-up before the measured run.
                x.grad.zero_()
                w.grad.zero_()
                b.grad.zero_()
                jit_o = jit_t(x, w, b)
                jit_o.backward(grad)
                o = t(x_ref, w_ref, b_ref)
                o.backward(grad)
                self.assertEqual(jit_o, o)
                self.assertEqual(x.grad, x_ref.grad)
                self.assertEqual(w.grad, w_ref.grad)
                self.assertEqual(b.grad, b_ref.grad)
                # Repeat without a bias (b=None path of Optional[Tensor]).
                x.grad.zero_()
                w.grad.zero_()
                x_ref.grad.zero_()
                w_ref.grad.zero_()
                jit_o = jit_t(x, w, None)
                jit_o.backward(grad)
                o = t(x_ref, w_ref, None)
                o.backward(grad)
                self.assertEqual(jit_o, o)
                self.assertEqual(x.grad, x_ref.grad)
                self.assertEqual(w.grad, w_ref.grad)
    @skipIfTorchDynamo("TorchDynamo doesn't support profile")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "the profiling version of test_rand")
    def test_rand_profiling(self):
        """Profiling executor: profiled graphs record concrete shapes/dtypes for rand and randint."""
        def test_rand():
            a = torch.rand([3, 4])
            return a + 1.0 - a
        # Testing shape analysis correctly setting type
        with enable_profiling_mode_for_profiling_tests():
            with num_profiled_runs(1):
                fn = torch.jit.script(test_rand)
                out = fn()
                graph_str = torch.jit.last_executed_optimized_graph()
                self.assertEqual(out.dtype, torch.float)
                FileCheck().check("Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)") \
                           .check_not("Double(3, 4, strides=[4, 1], requires_grad=0, device=cpu)").run(graph_str)
            # fn = self.checkScript(test_rand, ())
            # out = fn()
            # self.assertEqual(out.dtype, torch.float)
        @torch.jit.script
        def randint():
            return torch.randint(0, 5, [1, 2])
        with enable_profiling_mode_for_profiling_tests():
            with num_profiled_runs(1):
                out = randint()
                graph_str = torch.jit.last_executed_optimized_graph()
                self.assertEqual(out.dtype, torch.int64)
                FileCheck().check("profiled_type=Long(1, 2, strides=[2, 1], requires_grad=0, device=cpu)").run(graph_str)
    def test_erase_number_types(self):
        """The erase_number_types pass removes int prim::Constant nodes from the graph."""
        def func(a):
            b = 7 + 1 + 3
            c = a + b
            c += b
            return c
        graph = torch.jit.script(func).graph
        # Before the pass: the folded int constant and the in-place add exist.
        FileCheck().check("int = prim::Constant").check("aten::add_").run(str(graph))
        self.run_pass("erase_number_types", graph)
        FileCheck().check_not("int = prim::Constant").run(str(graph))
    def test_refine_tuple_types(self):
        """_jit_pass_refine_tuple_types tightens a TupleConstruct's output type from its inputs."""
        # TupleConstruct output type is not correct here.
        graph_str = """
        graph(%a : Float(123), %b : Float(4, 5, 6)):
          %c : (Tensor, Tensor) = prim::TupleConstruct(%a, %b)
          return (%c)
        """
        graph = parse_ir(graph_str)
        torch._C._jit_pass_refine_tuple_types(graph)
        # After the pass, the output type should've been updated.
        self.assertTrue('(Float(123), Float(4, 5, 6))' in str(graph.findNode('prim::TupleConstruct').output()))
        # TODO(henrytu): Add test for RefineTypes for NamedTuple when it's supported by IR parser.
    def test_remove_dropout(self):
        """_jit_pass_remove_dropout strips dropout in eval mode but refuses in training mode."""
        weight_0_shape = (20, 5)
        weight_1_shape = (20, 20)
        input_shape = (10, 5)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.weight_0 = torch.nn.Parameter(torch.rand(weight_0_shape))
                self.weight_1 = torch.nn.Parameter(torch.rand(weight_1_shape))
            def forward(self, x):
                o = F.linear(x, self.weight_0)
                o = F.dropout(o, training=self.training)
                o = F.linear(o, self.weight_1)
                return o
        data = torch.rand(input_shape)
        m = M()
        m = torch.jit.script(m)
        # Removal must be rejected while the module is in training mode.
        with self.assertRaisesRegex(RuntimeError, r'Dropout removal module in training mode is not yet supported'):
            torch._C._jit_pass_remove_dropout(m._c)
        m.eval()
        ref_res = m(data)
        # Need to inline otherwise we see instances of Function.
        # We would have to use torch.linear/dropout to get around it otherwise.
        from torch.jit._recursive import wrap_cpp_module
        m = wrap_cpp_module(torch._C._freeze_module(m._c))
        torch._C._jit_pass_remove_dropout(m._c)
        res = m(data)
        FileCheck().check_not("aten::dropout").run(str(m.graph))
        torch.testing.assert_close(ref_res, res, rtol=1e-2, atol=1e-3)
    def test_unfold_zero_dim(self):
        """Shape analysis infers the correct output rank for unfold on a 0-dim tensor."""
        def fn(x):
            return x.unfold(0, 1, 1)
        graph = torch.jit.script(fn).graph
        torch._C._jit_pass_complete_shape_analysis(graph, (torch.tensor(0.39),), False)
        out_dims = fn(torch.tensor(0.3923)).ndim
        self.assertEqual(graph.findNode("aten::unfold").output().type().dim(), out_dims)
    def test_mm_batching(self):
        """LSTM loop triggers mm batching (MMBatchSide/MMTreeReduce) and stays numerically correct."""
        with enable_profiling_mode_for_profiling_tests():
            lstm_cell = torch.jit.script(LSTMCellS)
            def lstm(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
                for i in range(x.size(0)):
                    hx, cx = lstm_cell(x[i], hx, cx, w_ih, w_hh, b_ih, b_hh)
                return hx
            slstm = torch.jit.script(lstm)
            inputs = get_lstm_inputs('cpu', training=True, seq_length=10)
            slstm(*inputs, profile_and_replay=True).sum().backward(retain_graph=True)
            if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
                slstm(*inputs, profile_and_replay=True).sum().backward()
            fw_graph = slstm.graph_for(*inputs)
            if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
                bw_graph = backward_graph(slstm, diff_graph_idx=0)
                # Batching ops must appear in the forward and backward graphs.
                self.assertTrue('prim::MMBatchSide' in str(fw_graph))
                self.assertTrue('prim::MMTreeReduce' in str(bw_graph))
            sout = slstm(*inputs)
            out = lstm(*inputs)
            self.assertEqual(sout, out)
            self.assertEqual(torch.autograd.grad(sout.sum(), inputs),
                             torch.autograd.grad(out.sum(), inputs))
    def test_loop_unrolling(self):
        """loop_unrolling pass unrolls a dynamic-trip-count loop by factor 8 plus an epilogue loop."""
        def fn(x):
            y = 0
            for i in range(int(x)):
                y -= i
            return y
        graph = torch.jit.script(fn).graph
        self.run_pass('loop_unrolling', graph)
        unroll_factor = 8
        # Unrolled main loop with 8 subs, then the remainder loop with one sub.
        FileCheck().check("prim::Loop").check_count("aten::sub", unroll_factor) \
            .check("prim::Loop").check("aten::sub").run(str(graph))
        self.checkScript(fn, (torch.tensor(10),))
    def test_loop_unrolling_const(self):
        """Loops with constant trip counts are unrolled away entirely."""
        def fn():
            y = 0
            for _ in range(10):
                y -= 1
            return y
        def fn2():
            y = 0
            for i in range(10):
                y -= i
            return y
        def check(fn, name):
            graph = torch.jit.script(fn).graph
            self.run_pass('loop_unrolling', graph)
            # entirely unrolled
            FileCheck().check_not("prim::Loop'").run(str(graph))
            self.checkScript(fn, ())
        check(fn, 'add_const')
        check(fn2, 'add_iter')
    def test_loop_unrolling_nested(self):
        """Only the inner dynamic loop of a nested pair gets unrolled (factor 8 + epilogue)."""
        def fn(x):
            y = 0
            for _ in range(10):
                for j in range(int(x)):
                    y -= j
            return y
        graph = torch.jit.script(fn).graph
        self.run_pass('loop_unrolling', graph)
        # inner loop with 8 subs followed by loop epilogue
        unroll_factor = 8
        FileCheck().check("prim::Loop").check("prim::Loop").check_count('aten::sub', unroll_factor) \
            .check("prim::Loop").check("aten::sub").run(str(graph))
        self.checkScript(fn, (torch.tensor(10),))
    def test_loop_unroll_unused_counter(self):
        """Unrolling a loop whose induction variable is unused emits no counter-increment adds."""
        def fn(x):
            y = 0
            for _ in range(int(x)):
                y -= 1
            return y
        graph = torch.jit.script(fn).graph
        self.run_pass('loop_unrolling', graph)
        FileCheck().check("prim::Loop").check_not("aten::add").check("return") \
            .run(str(graph))
    def test_loop_unroll_negative(self):
        """Unrolled loops behave correctly for negative, zero, and small positive trip counts."""
        def fn(x):
            y = 0
            for _ in range(int(x)):
                y += 1
            return y
        self.checkScript(fn, (torch.tensor(-20),))
        self.checkScript(fn, (torch.tensor(-2),))
        self.checkScript(fn, (torch.tensor(-1),))
        self.checkScript(fn, (torch.tensor(0),))
        self.checkScript(fn, (torch.tensor(1),))
        self.checkScript(fn, (torch.tensor(2),))
    def test_where(self):
        """torch.where with a boolean-condition tensor scripts correctly."""
        def fn(x, y):
            return torch.where(x > 0.0, x, y)
        self.checkScript(fn, (torch.randn(3, 2, dtype=torch.float), torch.ones(3, 2, dtype=torch.float)))
    def test_where_method(self):
        """Tensor.where method form scripts correctly."""
        def fn(x, y):
            return x.where(x > 0.0, y)
        self.checkScript(fn, (torch.randn(3, 2, dtype=torch.float), torch.ones(3, 2, dtype=torch.float)))
    def test_union_to_number(self):
        """Union[int, complex, float] parameters collapse to the Scalar type in the graph."""
        @torch.jit.script
        def fn(x: Union[int, complex, float], y: Union[int, complex, float]):
            return x + y
        FileCheck().check(": Scalar):").run(fn.graph)
    def test_reassign_module_lhs(self):
        """Assigning to `self` inside a script method is a compile-time error."""
        with self.assertRaisesRegex(RuntimeError, 'Cannot re-assign \'self\''):
            class ReassignSelfLHS(torch.jit.ScriptModule):
                @torch.jit.script_method
                def forward(self, x):
                    for _ in range(20):
                        self = x
                    return self
            ReassignSelfLHS()
    def test_reassign_module_rhs(self):
        """Assigning a module value (self) to a tensor-typed name is a compile-time error."""
        with self.assertRaisesRegex(RuntimeError, 'Cannot re-assign \'x\' to a value of type module'):
            class ReassignSelfRHS(torch.jit.ScriptModule):
                @torch.jit.script_method
                def forward(self, x):
                    for _ in range(20):
                        x = self
                    return self
            ReassignSelfRHS()
    def test_unknown_builtin(self):
        """Calling a nonexistent Tensor method is a compile-time error."""
        with self.assertRaisesRegex(RuntimeError, 'object has no attribute or method'):
            @torch.jit.script
            def unknown_builtin(x):
                return x.splork(3)
    def test_return_tuple(self):
        """Returning a nested tuple (tuple inside a tuple) scripts correctly."""
        def return_tuple(x):
            a = (x, x)
            return a, x
        self.checkScript(return_tuple, (torch.rand(4),))
    def test_add_tuple_optional(self):
        """Tuple concatenation preserves Optional[Tensor] element types."""
        def foo(input: Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]) -> Optional[torch.Tensor]:
            changed_input = input[0] + 1
            value: Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]] = (changed_input,) + input[1:]
            return value[2]
        inp: Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]] = (torch.rand(4), None, None)
        self.checkScript(foo, (inp,))
    def test_add_tuple_non_optional(self):
        """Tuple concatenation with all-Tensor element types scripts correctly."""
        def foo(input: Tuple[torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
            changed_input = input[0] + 1
            value: Tuple[torch.Tensor, torch.Tensor, torch.Tensor] = (changed_input,) + input[1:]
            return torch.sum(value[2]) + 4
        inp: Tuple[torch.Tensor, torch.Tensor, torch.Tensor] = (torch.rand(4), torch.rand(4), torch.rand(4))
        self.checkScript(foo, (inp,))
    def test_add_tuple_different_types(self):
        """Concatenating tuples with different element types yields the combined tuple type."""
        def foo(a: Tuple[int, float], b: Tuple[int]) -> int:
            c: Tuple[int, float, int] = a + b
            d: Tuple[int, float, int, int] = c + b
            return d[3] + 1
        a = (1, 2.0)
        b = (3,)
        self.checkScript(foo, (a, b))
def test_add_tuple_same_types(self):
def foo(a: Tuple[int, int], b: Tuple[int, int, int]) -> int:
c: Tuple[int, int, int, int, int] = a + b
d: Tuple[int, int, int, int, int, int, int, int] = c + b
return d[6] - 2
a = (1, 2)
b = (3, 4, 5)
self.checkScript(foo, (a, b))
    def test_method_no_self(self):
        # A script_method without a `self` parameter must be rejected.
        with self.assertRaisesRegex(RuntimeError, 'methods must have a self argument'):
            class MethodNoSelf(torch.jit.ScriptModule):
                @torch.jit.script_method  # noqa: B902
                def forward():  # noqa: B902
                    return torch.zeros(3, 4)
            MethodNoSelf()
    def test_return_stmt_not_at_end(self):
        # Early return from an if-branch (return not at the end of the body).
        def return_stmt(x):
            if bool(x > 3):
                return x + 3
            else:
                return x
        self.checkScript(return_stmt, (torch.rand(1),))
    def test_for_in_range(self):
        # Basic for-in-range accumulation.
        def fn():
            c = 0
            for i in range(100):
                c += i
            return c
        self.checkScript(fn, ())
    def test_for_in_range_dynamic(self):
        # Nested range loop whose inner bound depends on the outer index.
        def fn():
            c = 0
            for i in range(100):
                acc = 0
                for j in range(i):
                    acc += j
                c += acc
            return c
        self.checkScript(fn, (), optimize=False)
    def test_for_in_range_ast(self):
        # Same nested-range pattern, compiled via the AST frontend path.
        def test_script_for_in_range_ast():
            c = 0
            for i in range(100):
                acc = 0
                for j in range(i):
                    acc += j
                c += acc
            return c
        self.checkScript(test_script_for_in_range_ast, ())
    def test_for_in_range_if_ast(self):
        # Conditional assignment inside a range loop (tensor grows by cat).
        @torch.jit.script
        def test_script_for_in_range_if_ast(x):
            output = x
            for i in range(20):
                if i == 0:
                    output = x.unsqueeze(0)
                else:
                    output = torch.cat((output, x.unsqueeze(0)), dim=0)
            return output
        inputs = self._make_scalar_vars([0], torch.int64)
        self.assertEqual(test_script_for_in_range_if_ast(*inputs).shape[0], 20)
    def test_for_in_range_start_end(self):
        # range() with explicit start and end.
        def fn():
            x = 0
            for i in range(7, 100):
                x += i
            return x
        self.checkScript(fn, ())
    def test_for_in_range_start_end_step(self):
        # range() with start/end/step, covering positive and negative steps.
        def fn(start, end, step):
            # type: (int, int, int) -> int
            x = 0
            for i in range(start, end, step):
                x += i
            return x
        self.checkScript(fn, (7, 100, 7))
        self.checkScript(fn, (7, 100, -7))
        self.checkScript(fn, (2, -11, -3))
        self.checkScript(fn, (2, -11, 3))
        self.checkScript(fn, (2, 10, 3))
        self.checkScript(fn, (-2, -10, -10))
    def test_for_in_range_zero_step(self):
        # A zero step must raise at runtime, matching Python's range().
        @torch.jit.script
        def fn():
            x = 0
            for i in range(2, -11, 0):
                x += i
            return x
        with self.assertRaisesRegex(RuntimeError, "must not be zero"):
            fn()
    def test_range_args(self):
        # range() with no arguments or a float argument must fail compilation.
        with self.assertRaisesRegex(RuntimeError, r'range expected at least 1 arguments, got 0'):
            @torch.jit.script
            def range_no_arg(x):
                for _ in range():
                    x += 1
                return x
        with self.assertRaisesRegex(RuntimeError, r'found float'):
            @torch.jit.script
            def range_non_float():
                for i in range(.5):
                    print(i)
    def test_parse_empty_tuple_annotation(self):
        # The empty-tuple annotation Tuple[()] must round-trip through parsing.
        cu = torch.jit.CompilationUnit('''
            def foo(x : Tuple[()]) -> Tuple[()]:
                return x
        ''')
        foo_code = cu.find_function('foo').code
        FileCheck().check("Tuple[()]").check("Tuple[()]").run(foo_code)
    def test_parse_empty_tuple_annotation_element_error(self):
        # A parenthesized tuple literal with elements is not a valid annotation.
        with self.assertRaisesRegex(
                RuntimeError, 'Tuple literal in Tuple type annotation must not have any elements'):
            cu = torch.jit.CompilationUnit('''
                def foo(x : Tuple[(int,)]) -> Tuple[(int,)]:
                    return x
            ''')
    def test_parse_none_type_annotation(self):
        # NoneType is accepted as an annotation and preserved in emitted code.
        cu = torch.jit.CompilationUnit('''
            def foo(x : NoneType) -> NoneType:
                return x
        ''')
        foo_code = cu.find_function('foo').code
        FileCheck().check(": NoneType").check("-> NoneType").run(foo_code)
    def test_empty_tuple_str(self):
        # annotation_str of an empty TupleType evaluates back to typing.Tuple[()].
        empty_tuple_type = torch._C.TupleType([])
        g = {'Tuple' : typing.Tuple}
        python_type = eval(empty_tuple_type.annotation_str, g)
        assert python_type is typing.Tuple[()]
    def test_tuple_str(self):
        # annotation_str rendering for one- and two-element tuple types.
        tuple1_type = torch._C.TupleType([torch._C.StringType.get()])
        self.assertEqual(tuple1_type.annotation_str, "Tuple[str]")
        tuple2_type = torch._C.TupleType([torch._C.StringType.get(), torch._C.StringType.get()])
        self.assertEqual(tuple2_type.annotation_str, "Tuple[str, str]")
    def test_dict_str(self):
        # annotation_str rendering for a DictType.
        dict_type = torch._C.DictType(torch._C.StringType.get(), torch._C.StringType.get())
        self.assertEqual(dict_type.annotation_str, "Dict[str, str]")
    def test_none_type_str(self):
        # annotation_str of NoneType evaluates back to type(None).
        none_type = torch._C.NoneType.get()
        g = {'NoneType' : type(None)}
        python_type = eval(none_type.annotation_str, g)
        assert python_type is type(None)
    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
    def test_zip_enumerate_modulelist(self):
        # zip() over ModuleLists (and over a list of constants plus ModuleLists)
        # must unroll to the length of the shortest sequence.
        class Sub(torch.nn.Module):
            def forward(self, thing):
                return thing - 2
        class Double(torch.nn.Module):
            def forward(self, thing):
                return thing * 2
        # zipping over two
        class ZipModLists(torch.nn.Module):
            def __init__(self, mods, mods2):
                super().__init__()
                self.mods = mods
                self.mods2 = mods2
            def forward(self, x):
                iter = 0
                for mod1, mod2 in zip(self.mods, self.mods2):
                    x = mod2(mod1(x))
                    iter += 1
                return x, iter
        class ZipWithValues(torch.nn.Module):
            __constants__ = ['tup_larger', 'tup_smaller']
            def __init__(self, mods, mods2):
                super().__init__()
                self.mods = mods
                self.mods2 = mods2
                self.tup_larger = list(range(len(mods2) + 1))
                self.tup_smaller = list(range(max(len(mods2) + 1, 1)))
            def forward(self, x):
                iter = 0
                x2 = x
                for val, mod1, mod2 in zip(self.tup_larger, self.mods, self.mods2):
                    x = mod2(mod1(x)) + val
                    iter += 1
                for val, mod1, mod2 in zip(self.tup_smaller, self.mods, self.mods2):
                    x2 = mod2(mod1(x2)) + val
                    iter += 1
                return x, iter
        mods = nn.ModuleList([Double()]), nn.ModuleList([Double(), Sub(), Sub()]), nn.ModuleList([Sub(), Double()])
        # Exercise every pairing of ModuleList lengths.
        for i in range(len(mods)):
            for j in range(len(mods)):
                mod = ZipModLists(mods[i], mods[j])
                self.checkModule(mod, (torch.tensor(.5),))
                mod2 = ZipWithValues(mods[i], mods[j])
                self.checkModule(mod2, (torch.tensor(.5),))
    def test_enumerate_modlist_range(self):
        # enumerate() over a ModuleList works; zip() of a ModuleList with a
        # dynamically-sized range must be rejected (length not static).
        class Double(torch.nn.Module):
            def forward(self, thing):
                return thing * 2
        class Mod(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.mods = nn.ModuleList([Double(), Double()])
            def forward(self, x):
                x2 = x
                iter = 0
                for val, mod in enumerate(self.mods):
                    x2 = mod(x2) * val
                    iter += 1
                return iter, x, x2
        self.checkModule(Mod(), (torch.tensor(.5),))
        # variable length, modulelist
        class Mod2(Mod):
            def forward(self, x):
                for val, mod in zip(range(int(x)), self.mods):
                    x = mod(x) * val
                return x
        with self.assertRaisesRegex(Exception, "that does not have a statically determinable length"):
            torch.jit.script(Mod2())
        # modulelist, variable length
        class Mod3(Mod):
            def forward(self, x):
                for val, mod in zip(self.mods, range(int(x))):
                    x = mod(x) * val
                return x
        with self.assertRaisesRegex(Exception, "that does not have a statically determinable length"):
            torch.jit.script(Mod3())
    def test_for_in_enumerate(self):
        # enumerate() support: default start, positional start, keyword start,
        # nesting, and arity errors.
        def fn(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, v) in enumerate(x):
                sum += i * v
            return sum
        self.checkScript(fn, ([1, 2, 3, 4, 5],))
        def fn_enumerate_start_arg(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, v) in enumerate(x, 1):
                sum += i * v
            return sum
        self.checkScript(fn_enumerate_start_arg, ([1, 2, 3, 4, 5],))
        def fn_enumerate_start_kwarg(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, v) in enumerate(x, start=1):
                sum += i * v
            return sum
        self.checkScript(fn_enumerate_start_kwarg, ([1, 2, 3, 4, 5],))
        def fn_nested_enumerate(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, (j, v)) in enumerate(enumerate(x)):
                sum += i * j * v
            return sum
        self.checkScript(fn_nested_enumerate, ([1, 2, 3, 4, 5],))
        with self.assertRaisesRegex(RuntimeError, r'enumerate expected at least 1 arguments, got 0'):
            @torch.jit.script
            def enumerate_no_arg(x):
                # type: (List[int]) -> int
                sum = 0
                for _ in enumerate():
                    sum += 1
                return sum
        with self.assertRaisesRegex(RuntimeError, r'enumerate expected at most 2 arguments, got 3'):
            @torch.jit.script
            def enumerate_too_many_args(x):
                # type: (List[int]) -> int
                sum = 0
                for _ in enumerate(x, x, x):
                    sum += 1
                return sum
    def test_list_comprehension_modulelist(self):
        # List comprehensions over a ModuleList, with and without an explicit
        # type annotation; bad annotations must be rejected.
        class Inner(torch.nn.Module):
            def forward(self, x):
                return x + 10
        class M(torch.nn.Module):
            def __init__(self, mod_list):
                super().__init__()
                self.module_list = mod_list
            def forward(self, x):
                out = torch.jit.annotate(List[Tensor], [mod(x) for mod in self.module_list])
                return out
        mod = M(nn.ModuleList([Inner(), Inner()]))
        self.checkModule(mod, (torch.tensor(3),))
        mod = M(nn.ModuleList([]))
        torch.jit.script(mod)
        class M2(M):
            def __init__(self, mod_list):
                super().__init__(mod_list)
            def forward(self, x):
                out = [mod(x) for mod in self.module_list]
                return out
        mod = M2(nn.ModuleList([Inner(), Inner()]))
        self.checkModule(mod, (torch.tensor(3),))
        mod = M2(nn.ModuleList([]))
        # defaults to List of Tensor for empty modulelist
        self.assertEqual(torch.jit.script(mod)(torch.tensor(.5)), [])
        def bad_type_annotation():
            out = torch.jit.annotate(int, [x for x in [1, 2, 3]])  # noqa: C416
            return out
        with self.assertRaisesRegex(Exception, "Expected an annotation"
                                    " of type List"):
            torch.jit.script(bad_type_annotation)
    def test_list_comprehension_variable_write(self):
        # i in comprehension doesn't write to function scope
        def foo():
            i = 1
            x = [i if i != 5 else 3 for i in range(7)]  # noqa: C416
            return i, x
        self.assertEqual(foo(), torch.jit.script(foo)())
def test_for_in_zip(self):
def fn(x, y):
# type: (List[int], List[int]) -> int
sum = 0
for (i, j) in zip(x, y):
sum += i * j
return sum
self.checkScript(fn, ([1, 2, 3, 4, 5], [2, 3, 4, 5, 6]))
def fn_multi_inputs(x, y, z):
# type: (List[int], List[int], List[int]) -> int
sum = 0
for (i, j, k) in zip(x, y, z):
sum += i * j * k
return sum
self.checkScript(fn_multi_inputs, ([1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]))
def fn_nested_zip(x, y, z):
# type: (List[int], List[int], List[int]) -> int
sum = 0
for (i, (j, k)) in zip(x, zip(y, z)):
sum += i * j * k
return sum
self.checkScript(fn_multi_inputs, ([1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]))
with self.assertRaisesRegex(RuntimeError, r'zip expected at least 1 arguments, got 0'):
@torch.jit.script
def zip_no_arg(x):
# type: (List[int]) -> int
sum = 0
for _ in zip():
sum += 1
return sum
with self.assertRaisesRegex(RuntimeError, r'too many values to unpack: need 2 but found 3'):
@torch.jit.script
def fn_nested_zip_wrong_target_assign(x, y, z):
# type: (List[int], List[int], List[int]) -> int
sum = 0
for (i, (j, k)) in zip(x, y, z):
sum += i * j * k
return sum
    def test_for_in_zip_enumerate(self):
        # Mixing zip(), enumerate(), and range() in one loop target.
        def fn_zip_enumerate(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for (i, (j, v), k) in zip(x, enumerate(y), range(100)):
                sum += i * j * v * k
            return sum
        self.checkScript(fn_zip_enumerate, ([1, 2, 3, 4], [2, 3, 4, 5]))
        def fn_enumerate_zip(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for (i, (j, v)) in enumerate(zip(x, y)):
                sum += i * j * v
            return sum
        self.checkScript(fn_enumerate_zip, ([1, 2, 3, 4], [2, 3, 4, 5]))
    def test_for_in_tensors(self):
        # Iterating a tensor yields its slices along dim 0 (including 0-length).
        def test_sizes(x):
            sumz = 0
            for _ in x:
                sumz += 1
            return sumz
        self.checkScript(test_sizes, (torch.rand(5, 4, 3, 2, 1),))
        self.checkScript(test_sizes, (torch.rand(777),))
        self.checkScript(test_sizes, (torch.rand(0),))
    def test_for_in_tensors_rank0(self):
        # Iterating a 0-d tensor must raise at runtime.
        with self.assertRaisesRegex(RuntimeError, "of a 0-d tensor"):
            @torch.jit.script
            def test_sizes(x):
                sumz = 0
                for _ in x:
                    sumz += 1
                return sumz
            test_sizes(torch.tensor(1))
    def test_for_in_tensors_fail_scalar(self):
        # Iterating a scalar float must fail compilation.
        with self.assertRaisesRegex(RuntimeError, "'float' object is not iterable"):
            @torch.jit.script
            def test_sizes(x):
                # type: (float) -> int
                sumz = 0
                for _ in x:
                    sumz += 1
                return sumz
            test_sizes(0.0)
    def test_for_in_tensors_nested(self):
        # Nested iteration over tensor slices.
        def test_sizes(x):
            sumz = 0
            for n in x:
                for _ in n:
                    sumz += 1
            return sumz
        self.checkScript(test_sizes, (torch.rand(5, 4, 3, 2, 1),))
    # to avoid defining sum_list in multiple tests
    def get_sum_list_fn(self):
        # Shared helper: returns a fresh List[int]-summing function for the
        # test_sum_list_* cases below.
        def sum_list(a):
            # type: (List[int]) -> int
            sum = 0
            for i in a:
                sum += i
            return sum
        return sum_list
    def test_sum_list_diff_elms(self):
        self.checkScript(self.get_sum_list_fn(), ([1, 2, 3, 4, 5],))
    def test_sum_list_empty(self):
        self.checkScript(self.get_sum_list_fn(), ([],))
    def test_sum_list_one(self):
        self.checkScript(self.get_sum_list_fn(), ([1],))
    def test_sum_list_literal(self):
        # Iterating a list literal directly.
        def sum_list():
            # type: () -> int
            sum = 0
            for i in [1, 2, 3, 4, 5]:
                sum += i
            return sum
        self.checkScript(sum_list, ())
    def test_sum_list_wrong_type(self):
        # Iterating an int must fail compilation.
        with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):
            @torch.jit.script
            def sum_list(a):
                # type: (int) -> int
                sum = 0
                for i in a:  # noqa: T484
                    sum += i
                return sum
            sum_list(1)
    def test_list_iterables(self):
        # Iterating over multiple comma-separated lists is unsupported.
        with self.assertRaisesRegex(RuntimeError, 'List of iterables is not supported currently'):
            cu = torch.jit.CompilationUnit('''
            def list_iterables(x):
                for i, j in [2, 3, 4], [5, 6, 7]:
                    x += i
                    x += j
                return x
            ''')
    def test_for_in_string(self):
        # Iterating a string yields its characters; also covers empty string
        # and iterating a List[str].
        def test_strings(x):
            # type: (str) -> str
            reverse = ""
            for c in x:
                reverse = c + reverse
            return reverse
        self.checkScript(test_strings, ("hello",))
        self.checkScript(test_strings, ("",))
        def test_list_strings(x):
            # type: (List[str]) -> str
            result = ""
            for sub_str in x:
                result += sub_str
            return result
        self.checkScript(test_list_strings, (["hello", "world"],))
        self.checkScript(test_list_strings, (["hello", " ", "world", ""],))
def test_for_in_dict(self):
def test_dicts(x):
# type: (Dict[str, int]) -> int
sum = 0
for key in x:
sum += x[key]
return sum
self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))
def test_dict_keys_values(x):
# type: (Dict[str, int]) -> Tuple[str, int]
key_str = ""
sum = 0
for key in x:
key_str += key
for val in x.values():
sum += val
return key_str, sum
self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))
    def test_for_tuple_unpack(self):
        # Unpacking loop targets from list literals and from nested zip/enumerate.
        def for_tuple_unpack(x, y):
            for i, j in [[3, 4], [5, 6], [7, 8]]:
                x += i
                y += j
            return x, y
        self.checkScript(for_tuple_unpack, (torch.tensor(3), torch.tensor(5)))
        def nested_tuple_unpack(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for i, (j, k), v in zip(x, enumerate(x), y):
                sum += i + j + k + v
            return sum
        self.checkScript(nested_tuple_unpack, ([1, 3, 5], [2, 4, 6]))
    def test_for_tuple_assign(self):
        # Iterating heterogeneous and nested tuples.
        def test_simple_assign(x):
            # type: (Tuple[int, float]) -> float
            sum = 0.0
            for a in x:
                sum += float(a)
            return sum
        self.checkScript(test_simple_assign, ((1, 2.5),))
        def test_tuple_assign(x):
            # type: (Tuple[Tuple[int, int], Tuple[int, int]]) -> int
            sum = 0
            for a in x:
                sum += a[0]
                sum += a[1]
            return sum
        self.checkScript(test_tuple_assign, (((1, 2), (4, 7)), ))
    def test_single_starred_lhs(self):
        # A lone starred target on the LHS must be rejected.
        with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear on the lhs within the presence'
                                                  ' of another non-starred expression'):
            cu = torch.jit.CompilationUnit('''
            def single_starred_lhs(x):
                a = (x, x, x)
                *b, = a
                return b
            ''')
    def test_singleton_tuple_unpack(self):
        # Unpacking a one-element tuple with a trailing comma.
        def foo(a):
            b, = (a,)
            return b + 1
        self.checkScript(foo, (torch.rand(3),))
def test_tuple_assignments(self):
def var_tuple_assign(x, y):
# type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
(a, b), c = x, y
return a + b + c
tuple_inputs = (torch.randn(1, 4), torch.randn(3, 4))
self.checkScript(var_tuple_assign, (tuple_inputs, torch.randn(3, 4)))
def nested_tuple_assign(x, y, z):
# type: (int, Tuple[int, Tuple[int, int]], Tuple[int, int]) -> int
a, (b, (c, d)), (e, f) = x, y, z
return a + b + c + d + e + f
self.checkScript(nested_tuple_assign, ((1, (2, (3, 4)), (5, 6))))
def subscript_tuple_assign(a, x, i):
# type: (List[int], Tensor, int) -> Tuple[int, Tensor, int]
a[i], (x[i], b) = 1, (2, 3)
return a[i] + 1, x + 5, b
self.checkScript(subscript_tuple_assign, ([12, 7, 9, 11], torch.tensor((3, 13, 17)), 0))
def star_tuple_assign():
# type: () -> Tuple[int, int, Tuple[int, int], Tuple[int, int]]
a, (b, *c), *d = 1, (2, 3, 4), 5, 6
return a, b, c, d
self.checkScript(star_tuple_assign, ())
def subscript_tuple_augmented_assign(a):
# type: (Tuple[int, int]) -> Tuple[int, int]
a[0] += 1
return a
with self.assertRaisesRegex(RuntimeError, 'does not support augmented assign'):
scripted_aug_assign = torch.jit.script(subscript_tuple_augmented_assign)
class AttrTupleAssignmentTestClass:
def __init__(self, a: int, b: int):
self.a = a
self.b = b
def set_ab(self, a: int, b: int):
self.a, self.b = (a, b)
def get(self) -> Tuple[int, int]:
return (self.a, self.b)
make_global(AttrTupleAssignmentTestClass)
@torch.jit.script
def attr_tuple_assignment(o: AttrTupleAssignmentTestClass, a: int, b: int):
o.set_ab(a, b)
return o
o = AttrTupleAssignmentTestClass(1, 2)
self.assertEqual(attr_tuple_assignment(o, 3, 4).get(), (3, 4))
    def test_multiple_assign(self):
        # Chained assignment (a = b, c = ...), shared RHS with side effects,
        # and simultaneous swap semantics.
        def test():
            a = b, c = d, f = (1, 1)
            # side effect
            ten = torch.tensor(1)
            ten1 = ten2 = ten.add_(1)
            # ordering
            x = 1
            y = 3
            x, y = y, x + y
            return a, b, c, d, f, ten, ten1, ten2, x, y
        self.checkScript(test, ())
    def test_multi_reduction(self):
        # Augmented assignment with a tuple LHS must be rejected.
        with self.assertRaisesRegex(
                RuntimeError,
                'augmented assignment can only have one LHS expression'):
            cu = torch.jit.CompilationUnit('''
            def multi_reduction(x):
                a, b += x
                return a, b
            ''')
    def test_invalid_call_arguments(self):
        # Too many positional arguments to a builtin op.
        with self.assertRaisesRegex(RuntimeError, 'but instead found type '):
            @torch.jit.script
            def invalid_call_arguments(x):
                return torch.unsqueeze(3, 4, 5, 6, 7, 8)
    def test_invalid_lhs_assignment(self):
        # An arbitrary expression is not a valid assignment target.
        with self.assertRaisesRegex(RuntimeError, 'unexpected expression'):
            cu = torch.jit.CompilationUnit('''
            def invalid_lhs_assignment(x):
                x + 1 = x
                return x
            ''')
    def test_multi_starred_expr_lhs(self):
        # At most one starred target is allowed on the LHS.
        with self.assertRaisesRegex(RuntimeError, 'Only one starred expression is allowed on the lhs'):
            cu = torch.jit.CompilationUnit('''
            def multi_starred_expr_lhs():
                a, *b, *c = [1, 2, 3, 4, 5, 6]
                return a
            ''')
    def test_pack_tuple_into_non_var(self):
        # A literal cannot be an unpacking target.
        with self.assertRaisesRegex(RuntimeError, 'Cannot pack a tuple into a non-variable'):
            cu = torch.jit.CompilationUnit('''
            def pack_tuple_into_non_var(x):
                a, *1 = (3, 4, 5)
                return x
            ''')
    def test_print_kwargs(self):
        # print() keyword arguments are unsupported in script.
        with self.assertRaisesRegex(RuntimeError, 'print doesn\'t accept any keyword arguments'):
            cu = torch.jit.CompilationUnit('''
            def print_kwargs(x):
                print(x, flush=True)
                return x
            ''')
    def test_builtin_use_as_value(self):
        # A builtin method reference cannot be used as a first-class value.
        with self.assertRaisesRegex(RuntimeError, 'builtin cannot be used as a value'):
            @torch.jit.script
            def builtin_use_as_value(x):
                return x.unsqueeze
    def test_wrong_use_as_tuple(self):
        # A Python function reference cannot be unpacked as a tuple.
        with self.assertRaisesRegex(RuntimeError, 'cannot be used as a tuple'):
            def test_fn():
                return 3
            @torch.jit.script
            def wrong_use_as_tuple(self):
                a, b = test_fn
                return a
    def test_wrong_attr_lookup(self):
        # Attribute lookup on a builtin method reference must fail.
        with self.assertRaisesRegex(RuntimeError, 'attribute lookup is not defined on builtin'):
            @torch.jit.script
            def wrong_attr_lookup(self, x):
                a = x.unsqueeze.myattr
                return a
    def test_wrong_use_as_callable(self):
        # A tensor is not callable.
        with self.assertRaisesRegex(RuntimeError, 'cannot call a value'):
            @torch.jit.script
            def wrong_use_as_callable(x):
                return x(3, 4, 5)
    def test_python_val_doesnt_have_attr(self):
        # Missing attribute on a Python module value is a compile error.
        with self.assertRaisesRegex(RuntimeError, 'object has no attribute abcd'):
            @torch.jit.script
            def python_val_doesnt_have_attr():
                # this has to be a module otherwise attr lookup would not be
                # allowed in the first place
                return shutil.abcd
    def test_wrong_module_attr_lookup(self):
        # A Python class object cannot be used as a value in script.
        with self.assertRaisesRegex(RuntimeError, 'python value of type \'type\' cannot be used as a value'):
            import io
            @torch.jit.script
            def wrong_module_attr_lookup():
                return io.BytesIO
    def test_wrong_method_call_inputs(self):
        # Calling a script method with a missing argument must fail.
        with self.assertRaisesRegex(RuntimeError, 'Argument y not provided'):
            class SomeModule(torch.jit.ScriptModule):
                @torch.jit.script_method
                def foo(self, x, y):
                    return x
                @torch.jit.script_method
                def forward(self, x, y):
                    return self.foo(x)
            SomeModule()
    def test_single_starred_expr_for_loop(self):
        # A starred target alone in a for-loop is invalid.
        with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear'):
            cu = torch.jit.CompilationUnit('''
            def test():
                x = 0
                for *a in [1, 2, 3]:
                    x = x + 1
                return x
            ''')
    def test_call_ge(self):
        # Calling a traced function with the wrong arity must fail.
        with self.assertRaisesRegex(RuntimeError, 'Expected at most 1 arguments but found 3'):
            @_trace(torch.zeros(1, 2, 3))
            def foo(x):
                return x
            @torch.jit.script
            def test_fn():
                return foo(torch.full([1], 1), torch.full([1], 2), torch.full([1], 3))
    def test_wrong_return_type(self):
        # An ignored function whose annotation mismatches its actual return.
        with self.assertRaisesRegex(RuntimeError, 'but instead got value of type tuple'):
            @torch.jit.ignore
            def somefunc():
                # type: () -> Tuple[Tuple[Tensor, Tensor]]
                return torch.zeros(3, 4), torch.zeros(4, 5)  # noqa: T484
            @torch.jit.script
            def wrong_return_type():
                return somefunc()
            wrong_return_type()
    # Tests for calling between different front-end modes
    def test_call_python_fn_from_tracing_fn(self):
        # A plain Python function called from a traced fn is inlined.
        def python_fn(x):
            return torch.neg(x)
        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return python_fn(x) + 1
        # The neg op in the python function should be properly inlined to the
        # graph
        FileCheck().check("aten::neg").run(str(traced_fn.graph))
    def test_call_python_mod_from_tracing_fn(self):
        # A Python nn.Module called from a traced fn has its params inlined.
        class PythonMod(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False)
            def forward(self, x):
                return torch.mm(x, self.param)
        pm = PythonMod()
        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return pm(x) + 1.0
        # Note: the parameter self.param from the Python module is inlined
        # into the graph
        self.assertTrue(len(list(traced_fn.graph.inputs())) == 1)
        FileCheck().check("aten::mm").check("aten::add").run(str(traced_fn.graph))
    @_tmp_donotuse_dont_inline_everything
    def test_call_traced_fn_from_tracing_fn(self):
        # A traced fn called from another traced fn appears as a CallFunction.
        @_trace(torch.rand(3, 4))
        def traced_fn1(x):
            return torch.neg(x)
        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return traced_fn1(x) + 1
        FileCheck().check("traced_fn").check("prim::CallFunction").check("aten::add") \
            .run(str(traced_fn.graph))
    @unittest.skip("error in first class mode")
    def test_call_traced_mod_from_tracing_fn(self):
        # Calling a traced module from a traced fn requires submodule registration.
        class TracedModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False)
            def forward(self, x):
                return torch.mm(x, self.param)
        tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
        with self.assertRaisesRegex(RuntimeError, "must be registered as submodules"):
            @_trace(torch.rand(3, 4))
            def traced_fn(x):
                return tm(x) + 1.0
    @_tmp_donotuse_dont_inline_everything
    def test_call_script_fn_from_tracing_fn(self):
        # A scripted fn called from a traced fn appears as a CallFunction.
        @torch.jit.script
        def script_fn(x):
            return torch.neg(x)
        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return script_fn(x) + 1
        FileCheck().check("prim::CallFunction").check("aten::add").run(str(traced_fn.graph))
    @unittest.skip("error in first class mode")
    def test_call_script_mod_from_tracing_fn(self):
        # Calling a ScriptModule from a traced fn requires submodule registration.
        with self.assertRaisesRegex(RuntimeError, "must be registered as submodules"):
            class ScriptMod(torch.jit.ScriptModule):
                def __init__(self) -> None:
                    super().__init__()
                    self.param = torch.nn.Parameter(torch.rand(3, 4), requires_grad=False)
                @torch.jit.script_method
                def forward(self, x):
                    for _ in range(4):
                        x += self.param
                    return x
            sm = ScriptMod()
            @_trace(torch.rand(3, 4))
            def traced_fn(x):
                return sm(x) + 1.0
    def test_call_python_fn_from_traced_module(self):
        # A Python fn called from a traced module is inlined; the module's
        # parameter shows up as a graph input.
        def python_fn(x):
            return torch.neg(x)
        class TracedModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 3))
            def forward(self, x):
                return torch.mm(python_fn(x), self.param)
        tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
        # Note: parameter self.param from the traced module should appear as
        # an input to the graph and the neg op from the Python function should
        # be properly inlined
        self.assertTrue(len(list(tm.graph.inputs())) == 2)
        FileCheck().check("aten::neg").check("aten::mm").run(str(tm.graph))
    def test_call_python_mod_from_traced_module(self):
        # A Python submodule called from a traced module becomes a CallMethod.
        class PythonModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(5, 7))
            def forward(self, x):
                return torch.mm(x, self.param)
        class TracedModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 5))
                self.mod = PythonModule()
            def forward(self, x):
                return self.mod(torch.mm(x, self.param)) + 1.0
        tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
        FileCheck().check_not("value=<Tensor>").check("aten::mm")\
            .check('prim::CallMethod[name="forward"]').check("aten::add") \
            .run(str(tm.graph))
        FileCheck().check("aten::mm").run(str(tm.mod.graph))
    def test_op_dtype(self):
        # torch.arange dtype inference must match eager for int/float bounds.
        def check_equal_and_dtype(a, b):
            self.assertEqual(a, b)
            self.assertEqual(a.dtype, b.dtype)
        def fn():
            a = torch.arange(10)
            b = torch.arange(10, dtype=torch.float)
            c = torch.arange(1, 10, 2)
            d = torch.arange(1, 10, 2, dtype=torch.float)
            e = torch.arange(1, 10., 2)
            f = torch.arange(1, 10., 2, dtype=torch.float)
            return a, b, c, d, e, f
        scripted_fn = torch.jit.script(fn)
        eager_out = fn()
        script_out = scripted_fn()
        for a, b in zip(eager_out, script_out):
            check_equal_and_dtype(a, b)
    def test_floor_div(self):
        # Integer // in script must match Python floor division, including
        # negative operands.
        @torch.jit.script
        def foo(a, b):
            # type: (int, int) -> int
            return a // b
        for i in range(-8, 8):
            for j in range(-8, 8):
                if j != 0:
                    self.assertEqual(foo(i, j), i // j)
    def test_floordiv(self):
        # Tensor // tensor-or-scalar vs torch.floor_divide, compared between
        # eager (exec'd source) and script (CompilationUnit) for each combo.
        funcs_template = dedent('''
        def fn():
            ten = {a_construct}
            ten_or_scalar = {b_construct}
            return ten // ten_or_scalar, torch.floor_divide(ten, ten_or_scalar)
        ''')
        lhs = ["torch.tensor([5.5, 3.2])", "torch.tensor([2, 2])", "torch.tensor([3, 2])"]
        rhs = ["1.5", "2", "4", "1.1"] + lhs
        for tensor in lhs:
            for tensor_or_scalar in rhs:
                funcs_str = funcs_template.format(a_construct=tensor, b_construct=tensor_or_scalar)
                scope = {}
                execWrapper(funcs_str, globals(), scope)
                cu = torch.jit.CompilationUnit(funcs_str)
                f_script = cu.fn
                f = scope['fn']
                self.assertEqual(f_script(), f())
    def test_call_python_fn_from_script_fn(self):
        # An @ignore'd Python fn called from script stays a PythonOp.
        @torch.jit.ignore
        def python_fn(x):
            return torch.neg(x)
        @torch.jit.script
        def script_fn(x):
            return python_fn(x) + 1
        # Note: the call to python_fn appears as `^python_fn()` and is called
        # as a PythonOp in the interpreter
        a = torch.tensor(1)
        self.assertEqual(script_fn(a), torch.tensor(0))
        FileCheck().check("python_fn").run(str(script_fn.graph))
    def test_call_python_mod_from_script_fn(self):
        # A Python module called from a script fn stays a python_value call.
        class PythonModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(5, 7))
            def forward(self, x):
                return torch.mm(x, self.param)
        pm = PythonModule()
        @torch.jit.script
        def script_fn(x):
            return pm(x) + 1
        # Note: call to pm(x) appears as ^<python_value>() in the trace.
        # Parameters are NOT inlined.
        FileCheck().check("python_value").check("aten::add").run(str(script_fn.graph))
    @_tmp_donotuse_dont_inline_everything
    def test_call_script_fn_from_script_fn(self):
        # Script-to-script calls appear as prim::CallFunction.
        @torch.jit.script
        def script_fn1(x):
            return torch.neg(x)
        @torch.jit.script
        def script_fn(x):
            return script_fn1(x) + 1
        FileCheck().check("prim::CallFunction").run(str(script_fn.graph))
    def test_call_script_mod_from_script_fn(self):
        # A free-standing ScriptModule cannot be called from a script fn.
        with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
            class ScriptMod(torch.jit.ScriptModule):
                @torch.jit.script_method
                def forward(self, x):
                    return torch.mm(x, torch.zeros([4, 3]))
            sm = ScriptMod()
            @torch.jit.script
            def script_fn(x):
                return sm(x) + 1
    def test_call_python_fn_from_script_module(self):
        # An @ignore'd fn called from a script_method stays a python call.
        @torch.jit.ignore
        def python_fn(x):
            return torch.neg(x)
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 3))
            @torch.jit.script_method
            def forward(self, x):
                return python_fn(torch.mm(x, self.param))
        sm = ScriptMod()
        FileCheck().check("aten::mm").check("python_fn") \
            .run(str(sm.forward.graph))
    def test_call_python_mod_from_script_module(self):
        # A Python submodule with an @ignore'd forward stays an opaque call.
        class PythonMod(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 5))
            @torch.jit.ignore
            def forward(self, x):
                return torch.mm(x, self.param)
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 3))
                self.pm = PythonMod()
            @torch.jit.script_method
            def forward(self, x):
                return self.pm(torch.mm(x, self.param))
        sm = ScriptMod()
        # Note: the call into PythonMod appears as ^forward(). Parameters
        # are NOT inlined
        FileCheck().check("aten::mm").check("forward").run(str(sm.graph))
    @_tmp_donotuse_dont_inline_everything
    def test_call_script_fn_from_script_module(self):
        # Script fn called from a script_method appears as CallFunction.
        @torch.jit.script
        def script_fn(x):
            return torch.neg(x)
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 3))
            @torch.jit.script_method
            def forward(self, x):
                return script_fn(torch.mm(x, self.param))
        sm = ScriptMod()
        graph = (sm.forward.graph)
        FileCheck().check("aten::mm").check("prim::CallFunction").run(str(graph))
    @_tmp_donotuse_dont_inline_everything
    def test_call_script_mod_from_script_module(self):
        # ScriptModule submodule call appears as prim::CallMethod.
        class ScriptMod1(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 5))
            @torch.jit.script_method
            def forward(self, x):
                return torch.mm(x, self.param)
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(4, 3))
                self.tm = ScriptMod1()
            @torch.jit.script_method
            def forward(self, x):
                return self.tm(torch.mm(x, self.param))
        sm = ScriptMod()
        # Note: the parameters from both modules should appear in the flattened
        # input list to the graph. The mm op from ScriptMod1 should be properly
        # inlined
        # 3 % values in graph input lists, two mms in body
        FileCheck().check_count('%', 3).check(":").check_count("mm", 1).check("prim::CallMethod").run(str(sm.graph))
    def test_module_with_params_called_fails(self):
        # Calling a non-submodule ScriptModule with parameters from a script
        # fn must fail.
        with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
            class ScriptMod(torch.jit.ScriptModule):
                def __init__(self) -> None:
                    super().__init__()
                    self.param = torch.nn.Parameter(torch.rand(3, 3))
                @torch.jit.script_method
                def forward(self, x):
                    return torch.mm(x, self.param)
            sm = ScriptMod()
            @torch.jit.script
            def some_func(x):
                return sm(x)
    def test_tuple_index_to_list(self):
        # Indexing a homogeneous tuple with a non-constant index works; a
        # heterogeneous tuple has no resolvable output type and must fail.
        def test_non_constant_input(a):
            # type: (bool) -> int
            if a:
                b = 1
            else:
                b = 0
            c = (0, 1)
            return c[b]
        self.checkScript(test_non_constant_input, (True,))
        self.checkScript(test_non_constant_input, (False,))
        with self.assertRaisesRegex(RuntimeError, "because we cannot resolve the output type"):
            @torch.jit.script
            def test_non_constant_input(a):
                # type: (bool) -> None
                if a:
                    b = 1
                else:
                    b = 0
                c = (0, 1.1)
                print(c[b])
def test_tuple_indexing(self):
def tuple_index(a):
if bool(a):
b = (1, 2)
else:
b = (0, 2)
return b[-2], b[1]
self.checkScript(tuple_index, (torch.tensor([0]),))
self.checkScript(tuple_index, (torch.tensor([1]),))
self.checkScript(tuple_index, (torch.tensor([1]),), optimize=True)
tuple_comp = torch.jit.script(tuple_index)
FileCheck().check_count("TupleIndex", 2, exactly=True).run(str(tuple_comp.graph))
with self.assertRaisesRegex(RuntimeError, "index must be an integer"):
@torch.jit.script
def test_indexing_float():
c = (1, 2)
return c[0.1]
def test_indexing_out_of_bounds_pos():
c = (1, 2)
return c[2]
self.checkScriptRaisesRegex(test_indexing_out_of_bounds_pos, (), Exception,
"out of range")
def test_indexing_out_of_bounds_neg():
c = (1, 2)
return c[-3]
self.checkScriptRaisesRegex(test_indexing_out_of_bounds_pos, (), Exception,
"out of range")
def negative_index():
tup = (1, 2, 3, 4)
return tup[-1]
self.checkScript(negative_index, [])
def really_negative_index():
tup = (1, 2, 3, 4)
return tup[-100]
self.checkScriptRaisesRegex(really_negative_index, [], Exception, "index out of range")
def negative_slice():
tup = (1, 2, 3, 4)
return tup[-3:4]
self.checkScript(negative_slice, [])
def really_slice_out_of_bounds():
tup = (1, 2, 3, 4)
return tup[-300:4000]
self.checkScript(really_slice_out_of_bounds, [])
def test_namedtuple_attr(self):
def f(x):
return x.max(dim=1).indices + torch.max(x, dim=1).indices
self.checkScript(f, (torch.rand(20, 20, 20),), optimize=True)
with self.assertRaisesRegex(RuntimeError, "object has no attribute or method"):
@torch.jit.script
def g1(x):
return x.max(dim=1).unknown_symbol
with self.assertRaisesRegex(RuntimeError, "object has no attribute or method"):
@torch.jit.script
def g2(x):
print((x, x, x).__doc__)
return x
def test_tuple_len(self):
@torch.jit.script
def foo():
return len((1, "str", None))
self.assertEqual(foo(), 3)
@torch.jit.script
def test_indexing_end_out_of_bounds():
c = (1, 2)
return c[2:10]
self.assertEqual(test_indexing_end_out_of_bounds(), ())
    def test_lower_nested_tuples(self):
        """Nested tuple constants fold into a single prim::Constant and then
        survive the lower_all_tuples pass."""
        @torch.jit.script
        def test():
            return ((1, 2), 3)

        self.run_pass('constant_propagation', test.graph)
        FileCheck().check("prim::Constant").check_not("TupleConstruct").run(test.graph)
        # fails if a tuple can't be lowered
        self.run_pass('lower_all_tuples', test.graph)
    def test_unwrap_optional_builtin(self):
        """torch.jit._unwrap_optional: unwraps a present value, raises on None
        (AssertionError in eager, RuntimeError in script), and fails to compile
        when the optional's element type cannot be inferred."""
        def test(x):
            # type: (Optional[int]) -> int
            x = torch.jit._unwrap_optional(x)
            x = x + x  # noqa: T484
            return x

        self.checkScript(test, (3,))

        with self.assertRaisesRegex(AssertionError, "Unwrapping null optional"):
            test(None)

        test_script = torch.jit.script(test)
        with self.assertRaisesRegex(RuntimeError, "Unwrapping null optional"):
            test_script(None)

        # unwrapping a non-optional value is allowed
        @torch.jit.script
        def test_test():
            return torch.jit._unwrap_optional(1)

        with self.assertRaisesRegex(RuntimeError, r"could not be inferred from actual type None"):
            @torch.jit.script
            def test_no_type():
                # type: () -> int
                return torch.jit._unwrap_optional(None)
    def test_indexing_error(self):
        """Indexing a non-subscriptable type (int) is a compile-time error."""
        with self.assertRaisesRegex(RuntimeError, "'int' object is not subscriptable"):
            @torch.jit.script
            def test_wrong_type():
                a = 8
                return a[0]
    def test_unsupported_builtin_error(self):
        """Calling an unsupported Python builtin (math.hypot) reports a clear
        compile-time error rather than failing obscurely."""
        with self.assertRaisesRegex(RuntimeError,
                                    "Python builtin <built-in function hypot> is currently"):
            @torch.jit.script
            def test_unsupported(a):
                return math.hypot(a, 2.0)
    def test_annotated_script_fn(self):
        """MyPy-style type comments on a script function produce the expected
        schema (checked against the expect file)."""
        @torch.jit.script
        def foo(x, y, z):
            # type: (Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tuple[Tensor, Tensor]]) -> Tensor
            return x

        self.assertExpected(str(foo.schema))
    def test_annotated_script_method(self):
        """MyPy-style type comments on a script *method* produce the expected
        schema (mangling stripped before comparison)."""
        class SM(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, y):
                # type: (Tuple[Tensor, Tensor], Tensor) -> Tuple[Tensor, Tensor, Tensor]
                return y, y, y

        sm = SM()
        self.assertExpectedStripMangled(str(sm.forward.schema))
    def test_annotated_script_fn_return_mismatch(self):
        """A return value whose type contradicts the annotation is a
        compile-time error."""
        with self.assertRaisesRegex(RuntimeError, "but is actually of type"):
            @torch.jit.script
            def return_tup(x):
                # type: (Tensor) -> Tuple[Tuple[Tensor, Tensor], Tensor]
                return x, x  # noqa: T484
    def test_annotated_script_fn_arg_mismatch(self):
        """Using an annotated tuple argument where a Tensor is required is a
        compile-time error."""
        with self.assertRaisesRegex(RuntimeError, r"Arguments for call are not valid"):
            @torch.jit.script
            def tuple_arg(x):
                # type: (Tuple[Tensor, Tensor]) -> Tensor
                return x + 1  # noqa: T484
    def test_script_non_tensor_args_outputs(self):
        """Script functions can take and return non-Tensor (float) values."""
        @torch.jit.script
        def fn(x, y):
            # type: (Tensor, float) -> float
            return float((x + y).sum())

        x = torch.ones(2, 2)
        z = fn(x, 1)
        # result comes back as a genuine Python float: sum of 2x2 ones + 1
        self.assertIsInstance(z, float)
        self.assertEqual(z, 8.)
    @unittest.skip('https://github.com/pytorch/pytorch/issues/9595')
    def test_inline_and_run_annotated_script_fn(self):
        """Calling one annotated script fn from another (currently skipped,
        see linked issue)."""
        @torch.jit.script
        def to_inline(x, y):
            # type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
            return y

        @torch.jit.script
        def some_func(x):
            return to_inline((x, x), x)

        x = torch.rand(3, 4)
        self.assertEqual(some_func(x), x)
def _make_filereader_test_file(self):
filename = tempfile.mktemp()
writer = torch._C.PyTorchFileWriter(filename)
buffers = [os.urandom(size) for size in [random.randint(1, 100) for i in range(20)]]
offsets = []
for i, buf in enumerate(buffers):
writer.write_record(str(i), buf, len(buf))
offsets.append(i)
serialized_offsets = pickle.dumps(offsets)
writer.write_record("meta", serialized_offsets, len(serialized_offsets))
writer.write_end_of_file()
return filename, buffers, serialized_offsets
    def test_file_format_serialization(self):
        """Records written by PyTorchFileWriter read back byte-identical via
        PyTorchFileReader, using the pickled "meta" record as the index."""
        filename, buffers, serialized_offsets = self._make_filereader_test_file()
        reader = torch._C.PyTorchFileReader(filename)
        serialized_offsets_read = reader.get_record("meta")
        parsed_serialized_offsets = pickle.loads(serialized_offsets)
        for i, offset in enumerate(parsed_serialized_offsets):
            data = reader.get_record(str(offset))
            assert data == buffers[i]
    def test_file_reader_no_memory_leak(self):
        """Repeatedly constructing PyTorchFileReader (from a path and from a
        stream) must not leak: compare tracemalloc peaks between the two modes."""
        num_iters = 10000
        filename, _, _ = self._make_filereader_test_file()

        # Load from filename
        tracemalloc.start()
        for _ in range(num_iters):
            torch._C.PyTorchFileReader(filename)
        _, peak_from_string = tracemalloc.get_traced_memory()
        tracemalloc.stop()

        # Load from stream
        tracemalloc.start()
        with open(filename, 'rb') as f:
            for _ in range(num_iters):
                f.seek(0)
                torch._C.PyTorchFileReader(f)
        _, peak_from_file = tracemalloc.get_traced_memory()
        tracemalloc.stop()

        # Check if the peak sizes at most differ by an empirically obtained factor
        self.assertLess(peak_from_file, peak_from_string * 500)
# for each type, the input type annotation and corresponding return type annotation
def type_input_return_pairs(self):
return [
('Tensor', 'Tensor'),
('torch.Tensor', 'Tensor'),
('str', 'str'),
('int', 'int'),
('bool', 'bool'),
('BroadcastingList3[float]', 'List[float]'),
('BroadcastingList2[int]', 'List[int]'),
('List[int]', 'List[int]'),
('Optional[int]', 'Optional[int]'),
]
# replacing code input & return type pair
def format_code(self, code, pair):
return code.format(input=pair[0], output=pair[1])
    # ***** Type annotation tests ****
    # Test combinations of:
    # {String frontend, Python AST Frontend}
    # {Python 3-style type annotations, MyPy-style type comments}
    # {Script method, Script function}

    #  String frontend , Python 3-style type annotations , Script function
    def test_annot_string_py3_fn(self):
        """Schema produced by the string frontend with PEP-3107 annotations,
        for every (written, schema) type pair."""
        code = '''
            def foo(x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
                return x, x
        '''
        test_str = []
        for pair in self.type_input_return_pairs():
            cu = torch.jit.CompilationUnit(self.format_code(code, pair))
            test_str.append(str(cu.foo.schema))
        self.assertExpected("\n".join(test_str) + "\n")
    #  String frontend , Python 3-style type annotations , Script method
    def test_annot_string_py3_method(self):
        """Schema produced by ``ScriptModule.define`` with PEP-3107
        annotations, for every (written, schema) type pair."""
        class TestModule(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()

        code = '''
            def foo(self, x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
                return x, x
        '''
        test_str = []
        for pair in self.type_input_return_pairs():
            # clear the class registry as we will be defining foo multiple times
            jit_utils.clear_class_registry()
            tm = TestModule()
            tm.define(self.format_code(code, pair))
            test_str.append(str(tm.foo.schema))
        self.assertExpectedStripMangled("\n".join(test_str) + "\n")
    #  String frontend , MyPy-style type comments , Script function
    def test_annot_string_mypy_fn(self):
        """Schema produced by the string frontend with MyPy type comments."""
        code = '''
            def foo(x, y):
                # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
                return x, x
        '''
        test_str = []
        for pair in self.type_input_return_pairs():
            cu = torch.jit.CompilationUnit(self.format_code(code, pair))
            test_str.append(str(cu.foo.schema))
        self.assertExpectedStripMangled("\n".join(test_str) + "\n")
    #  String frontend , MyPy-style type comments , Script method
    def test_annot_string_mypy_method(self):
        """Schema produced by ``ScriptModule.define`` with MyPy type comments."""
        class TestModule(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()

        code = '''
        def foo(self, x, y):
            # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
            return x, x
        '''
        test_str = []
        for pair in self.type_input_return_pairs():
            # clear the class registry as we will be defining foo multiple times
            jit_utils.clear_class_registry()
            tm = TestModule()
            tm.define(self.format_code(code, pair))
            test_str.append(str(tm.foo.schema))
        self.assertExpectedStripMangled("\n".join(test_str) + "\n")
    #  Python AST Frontend , Python 3-style type annotations , Script function
    def test_annot_ast_py3_fn(self):
        """Schema produced by the Python AST frontend with PEP-3107
        annotations, compiling the snippet fresh for each type pair."""
        code = dedent('''
            from typing import Tuple, List, Optional
            from torch import Tensor
            from torch.jit.annotations import BroadcastingList2, BroadcastingList3
            import torch
            @torch.jit.script
            def foo(x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
                return x, x
        ''')
        test_str = []
        for pair in self.type_input_return_pairs():
            fn = jit_utils._get_py3_code(self.format_code(code, pair), 'foo')
            test_str.append(str(fn.schema))
        self.assertExpectedStripMangled("\n".join(test_str) + "\n")
    def test_multiline_annot_ast_py3_fn(self):
        """Per-argument ``# type:`` comments spread across a multi-line
        signature are parsed into the correct argument and return types."""
        code = dedent('''
            from typing import Tuple, List, Optional
            from torch import Tensor
            from torch.jit.annotations import BroadcastingList2, BroadcastingList3
            import torch
            @torch.jit.script
            def foo(x, # type: {input}
                    y # type: Tuple[Tensor, Tensor]
                    ):
                # type: (...) -> Tuple[{output}, {output}]
                return x, x
        ''')
        test_str = []
        for pair in self.type_input_return_pairs():
            fn = jit_utils._get_py3_code(self.format_code(code, pair), 'foo')
            args = fn.schema.arguments
            returns = fn.schema.returns
            self.assertEqual(str(args[0].type), pair[1])
            self.assertEqual(str(args[1].type), "Tuple[Tensor, Tensor]")
            self.assertEqual(str(returns[0].type), f"Tuple[{pair[1]}, {pair[1]}]")
    def test_bad_multiline_annotations(self):
        """Malformed multi-line type-comment annotations (duplicate return
        line, missing per-arg comment) raise clear compile-time errors."""
        with self.assertRaisesRegex(RuntimeError, "Return type line"):
            @torch.jit.script
            def bad_type_line(a,  # type: Tensor
                              b,  # type: Tensor
                              c   # type: Tensor
                              ):
                # type: (int, int, int) -> Tensor
                # type: bad type line  # noqa: F723
                return a + b + c

        with self.assertRaisesRegex(RuntimeError, "Return type line"):
            @torch.jit.script
            def bad_return_line(a,  # type: Tensor
                                b,
                                c   # type: Tensor
                                ):
                # type: (int, int, int) -> Tensor
                return a + b + c

        # TODO: this should be supported but is difficult to parse
        with self.assertRaisesRegex(RuntimeError, "Number of type annotations"):
            @torch.jit.script
            def missing_type(a,  # type: Tensor
                             b,
                             c   # type: Tensor
                             ):
                # type: (...) -> Tensor
                return a + b + c
    #  Python AST Frontend , Python 3-style type annotations , Script method
    def test_annot_ast_py3_method(self):
        """Schema produced by a script *method* compiled via the Python AST
        frontend with PEP-3107 annotations."""
        code = dedent('''
            from typing import Tuple, List, Optional
            from torch import Tensor
            from torch.jit.annotations import BroadcastingList2, \\
                BroadcastingList3
            import torch
            class FooModule(torch.jit.ScriptModule):
                @torch.jit.script_method
                def foo(self, x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
                    return x, x
            instance = FooModule()
        ''')

        test_str = []
        for pair in self.type_input_return_pairs():
            fn = jit_utils._get_py3_code(self.format_code(code, pair), 'instance')
            test_str.append(str(fn.foo.schema))
        self.assertExpectedStripMangled("\n".join(test_str) + "\n")
    #  Python AST Frontend , MyPy-style type comments , Script function
    def test_annot_ast_mypy_fn(self):
        """Schema produced by the Python AST frontend with MyPy type comments."""
        code = dedent('''
            import torch
            @torch.jit.script
            def foo(x, y):
                # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
                return x, x
        ''')

        test_str = []
        for pair in self.type_input_return_pairs():
            fn = jit_utils._get_py3_code(self.format_code(code, pair), 'foo')
            test_str.append(str(fn.schema))
        self.assertExpected("\n".join(test_str) + "\n")
    #  Python AST Frontend , MyPy-style type comments , Script method
    def test_annot_ast_mypy_method(self):
        """Schema produced by a script *method* compiled via the Python AST
        frontend with MyPy type comments."""
        code = dedent('''
            import torch
            class FooModule(torch.jit.ScriptModule):
                @torch.jit.script_method
                def foo(self, x, y):
                    # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
                    return x, x
            instance = FooModule()
        ''')

        test_str = []
        for pair in self.type_input_return_pairs():
            fn = jit_utils._get_py3_code(self.format_code(code, pair), 'instance')
            test_str.append(str(fn.foo.schema))
        self.assertExpectedStripMangled("\n".join(test_str) + "\n")
    # Tests that "# type: ignore[*]" is supported in type lines and is
    # properly ignored.
    def test_mypy_type_ignore(self):
        """Both bare and bracketed ``# type: ignore`` comments compile."""
        @torch.jit.script
        def foo(x):  # type: ignore
            return x

        @torch.jit.script
        def bar(x):  # type: ignore[no-redef]
            return x
    def test_method_casts_script(self):
        """Tensor dtype-cast methods (.byte(), .float(), ...) match eager
        results when invoked through the string frontend."""
        cast_types = [
            'byte', 'char', 'double', 'float', 'int', 'long', 'short'
        ]

        for cast_type in cast_types:
            cu = torch.jit.CompilationUnit(f'''
            def cast_to(x):
                return x.{cast_type}()
            ''')

            x = torch.rand(3, 4, 5) * 128
            cu_result = cu.cast_to(x)
            reference = getattr(x, cast_type)()
            self.assertEqual(cu_result, reference)
    def test_string_frontend_elif(self):
        """if/elif/elif/else chains parse and run correctly in the string
        frontend (FizzBuzz-style accumulator)."""
        code = '''
        def func(niter):
            # type: (int)
            rv = 0
            for i in range(niter):
                if i % 3 == 0 and i % 5 == 0:
                    rv += 35
                elif i % 3 == 0:
                    rv += 3
                elif i % 5 == 0:
                    rv += 5
                else:
                    rv += i
            return rv
        '''

        self.checkScript(dedent(code), (101,))
    def test_module_parameters_and_buffers(self):
        """Parameters and buffers of eager submodules used inside a
        ScriptModule are honored: the scripted result matches an equivalent
        eager computation, and the module round-trips through export/import."""
        weights = torch.randn(10, 10)
        bias = torch.randn(10)
        weights2 = torch.randn(10, 10)
        bias2 = torch.randn(10)

        class TestLinear(torch.nn.Module):
            def __init__(self, in_features, out_features):
                super().__init__()
                self.in_features = in_features
                self.out_features = out_features
                self.weight = torch.nn.Parameter(torch.empty(out_features, in_features))
                self.bias = torch.nn.Parameter(torch.empty(out_features))
                # non-parameter state: a buffer of ones added to every output
                self.counter = nn.Buffer(torch.ones(out_features))
                self.reset_parameters()

            def reset_parameters(self):
                torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
                if self.bias is not None:
                    fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
                    bound = 1 / math.sqrt(fan_in)
                    torch.nn.init.uniform_(self.bias, -bound, bound)

            def forward(self, input):
                return F.linear(input, self.weight, self.bias) + self.counter

        # Initialize a ScriptModule that uses the weak module above multiple times
        class Strong(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.fc1 = TestLinear(10, 10)
                self.fc1.weight = torch.nn.Parameter(weights)
                self.fc1.bias = torch.nn.Parameter(bias)
                self.fc2 = TestLinear(10, 10)
                self.fc2.weight = torch.nn.Parameter(weights2)
                self.fc2.bias = torch.nn.Parameter(bias2)

            @torch.jit.script_method
            def forward(self, x):
                # fc1 is applied twice on purpose — exercises module reuse
                return x + self.fc1(x) + self.fc1(x) + self.fc2(x)

        strong_mod = Strong()

        # Run same calculation as module
        inp = torch.ones(10)
        lin = torch.nn.Linear(10, 10)
        lin.weight = torch.nn.Parameter(weights)
        lin.bias = torch.nn.Parameter(bias)
        lin2 = torch.nn.Linear(10, 10)
        lin2.weight = torch.nn.Parameter(weights2)
        lin2.bias = torch.nn.Parameter(bias2)
        expected_result = inp + (lin(inp) + torch.ones(10)) * 2 + lin2(inp) + torch.ones(10)

        self.assertEqual(strong_mod(inp), expected_result)
        self.assertExportImportModule(strong_mod, (inp,))
    def test_module_copying(self):
        """Recursively scripting an eager module shares its parameters/buffers
        by reference (in-place edits propagate) while submodules are recompiled;
        rebinding an attribute on the eager module is *not* tracked."""
        class Submodule(torch.nn.Module):
            def forward(self, x):
                return x + 100

        class Weak(torch.nn.Module):
            def __init__(self, in_features, out_features):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.ones(out_features, in_features))
                self.bias = torch.nn.Parameter(torch.ones(out_features))
                self.buffer = nn.Buffer(torch.ones(out_features))
                self.submodule = Submodule()

            def forward(self, x):
                return F.linear(x, self.weight, self.bias) \
                    + self.buffer + self.submodule(x)

        class Strong(torch.jit.ScriptModule):
            def __init__(self, weak):
                super().__init__()
                self.weak = weak

            @torch.jit.script_method
            def forward(self, x):
                return self.weak(x)

        inp = torch.ones(5, 5) * 5
        weak_mod = Weak(5, 5)
        strong_mod = Strong(weak_mod)

        self.assertTrue(isinstance(strong_mod.weak, torch.jit.ScriptModule))
        self.assertFalse(isinstance(weak_mod, torch.jit.ScriptModule))

        self.assertIs(strong_mod.weak.weight, weak_mod.weight)
        self.assertIs(strong_mod.weak.buffer, weak_mod.buffer)
        # strong_mod.weak.submodule has been recursively scripted
        self.assertIsNot(strong_mod.weak.submodule, weak_mod.submodule)

        weak_mod.weight.data += torch.ones(5, 5) * 100
        self.assertTrue(strong_mod(inp).allclose(weak_mod(inp)))

        # Re-assignment is not tracked
        weak_mod.weight = torch.nn.Parameter(torch.ones(5, 5) * 100)
        self.assertFalse(strong_mod(inp).allclose(weak_mod(inp)))
    def test_backend_cudnn_enabled(self):
        """torch.backends.cudnn.enabled is accessible from script code."""
        # Only test that this compiles
        @torch.jit.script
        def fn(x):
            if torch.backends.cudnn.enabled:
                x = x + 2
            else:
                x = x + 3
            return x
    def test_inplace_add(self):
        """In-place ``add_`` on a freshly computed tensor matches eager."""
        def foo(a, b):
            c = a + b
            c.add_(b)
            return c
        self.checkScript(foo, (torch.rand(3), torch.rand(3)))
    def test_add_out(self):
        """``torch.add(..., out=...)`` writes into the out tensor in script."""
        def foo(a, b):
            c = a + b
            e = 2 * a
            torch.add(c, b, out=e)
            return e
        self.checkScript(foo, (torch.rand(3), torch.rand(3)))
    def test_tuple_error_msg(self):
        """Destructuring an ``Any`` refined only to ``tuple`` (no element
        types) reports a helpful error with the offending value highlighted."""
        def fn(t: Any):
            if isinstance(t, tuple):
                a, b = t
            return a + b
        with self.assertRaisesRegexWithHighlight(RuntimeError, "Provided tuple is not fully defined/refined", "t"):
            s = torch.jit.script(fn)
    def test_augmented_assign(self):
        """Chained augmented assignments (+=, -=, /=, *=) on tensors match
        eager results."""
        def foo(a, b):
            a += b
            a -= b
            a /= b
            a *= b
            return a, b
        self.checkScript(foo, (torch.rand(3), torch.rand(3)))
    def test_ignored_props(self):
        """Properties listed in ``__jit_ignored_attributes__`` are not
        compiled; an @ignore'd method referencing one still resolves to the
        raw ``property`` object at runtime."""
        class A(nn.Module):
            __jit_ignored_attributes__ = ["ignored", "ignored_return_val"]

            @property
            def ignored(self):
                # would blow up scripting if it were compiled
                raise ValueError("shouldn't be called")

            @property
            def ignored_return_val(self):
                return 1

            @torch.jit.ignore
            def call(self):
                return self.ignored_return_val

        f = torch.jit.script(A())
        # jank way to test if there is no error
        self.assertTrue(isinstance(f, torch.jit.ScriptModule))
        self.assertTrue(isinstance(f.call(), property))
    def test_pass(self):
        """``pass`` statements compile in loop and in both if/else branches."""
        def foo(x):
            # type: (bool) -> int
            for _ in range(3):
                pass
            if x:
                pass
            else:
                pass
            return 3

        self.checkScript(foo, (True,))
    def test_lhs_indexing(self):
        """Assigning to an indexed tensor element (``a[0] = b``) matches eager."""
        def foo(a, b):
            a = a.clone()
            a[0] = b
            return a
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_lhs_advanced_indexing_assignment(self):
        """Boolean-mask assignment (``a[mask] = y[mask]``) matches eager."""
        def foo(x, y):
            a = torch.exp(x)
            b = x == 1
            a[b] = y[b]
            return a
        self.checkScript(foo, (torch.ones(4, 3), torch.ones(4, 3)))
    def test_lhs_advanced_indexing_augmented_assignment(self):
        """Boolean-mask augmented assignment (``a[mask] += ...``) matches eager."""
        def foo(x, y):
            a = torch.exp(x)
            b = x == 1
            a[b] += y[b]
            return a
        self.checkScript(foo, (torch.ones(4, 3), torch.ones(4, 3)))
    def test_lhs_indexing_list(self):
        """Assigning into a list element (``ls[0] = b``) matches eager."""
        def foo(a, b):
            ls = [a]
            ls[0] = b
            return ls
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_inplace_copy_script(self):
        """``Tensor.copy_`` into a locally created tensor matches eager."""
        def foo(x):
            a = torch.rand(3, 4)
            a.copy_(x)
            return a
        self.checkScript(foo, (torch.rand(3, 4),))
    def test_lhs_indexing_increment(self):
        """Augmented assignment through tensor indexing (``a[0] += b``)."""
        def foo(a, b):
            a[0] += b
            return a
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_lhs_indexing_increment_list(self):
        """Augmented assignment through list indexing on tensor elements."""
        def foo(a, b):
            a = a.clone()
            ls = [a, b]
            ls[0] += b
            return ls
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_lhs_indexing_increment_list_prim(self):
        """Augmented assignment through list indexing on primitive (int) elements."""
        def foo():
            ls = [1, 2, 3]
            ls[0] += 5
            return ls
        self.checkScript(foo, ())
    def test_lhs_indexing_multi(self):
        """Tuple-unpacking assignment where one target is an indexed tensor."""
        def foo(a, b):
            a = a.clone()
            foo, a[0], bar = (1, b, 3)
            return foo, a, bar
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_bool_dispatch(self):
        """``F.max_pool1d`` dispatches on ``return_indices`` (bool) whether it
        is passed by keyword, fully by keyword, positionally, or defaulted."""
        with torch._jit_internal._disable_emit_hooks():  # TODO: Python print broadcasting list
            def kwarg_false(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1, return_indices=False)
            self.checkScript(kwarg_false, (torch.randn(3, 3, 3),))

            def kwarg_true(x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return F.max_pool1d(x, 1, 1, return_indices=True)
            self.checkScript(kwarg_true, (torch.randn(3, 3, 3),))

            def full_kwarg_false(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1, ceil_mode=False, return_indices=False)
            self.checkScript(full_kwarg_false, (torch.randn(3, 3, 3),))

            def full_kwarg_true(x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return F.max_pool1d(x, 1, 1, ceil_mode=False, return_indices=True)
            self.checkScript(full_kwarg_true, (torch.randn(3, 3, 3),))

            def use_default(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1)
            self.checkScript(use_default, (torch.randn(3, 3, 3),))

            def arg_false(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1, 0, 1, False, False)
            self.checkScript(arg_false, (torch.randn(3, 3, 3),))

            def arg_true(x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return F.max_pool1d(x, 1, 1, 0, 1, False, True)
            self.checkScript(arg_true, (torch.randn(3, 3, 3),))
    def test_infer_size(self):
        """The internal broadcast-shape helper ``torch._C._infer_size`` is
        callable from script."""
        from torch._C import _infer_size

        def fn(x, y):
            # type: (Tensor, Tensor) -> List[int]
            return _infer_size(x.size(), y.size())

        self.checkScript(fn, (torch.ones(2, 4, 2), torch.ones(2, 4, 2)))
    def test_hash(self):
        """``hash()`` of int/float/str in script agrees on equality: equal
        inputs hash equal, distinct inputs (in these samples) hash distinct."""
        def tester(fn, inputs):
            for x in inputs:
                for y in inputs:
                    if x == y:
                        self.assertEqual(fn(x), fn(y))
                    else:
                        self.assertNotEqual(fn(x), fn(y))

        @torch.jit.script
        def int_hash(x):
            # type: (int) -> int
            return hash(x)

        @torch.jit.script
        def float_hash(x):
            # type: (float) -> int
            return hash(x)

        @torch.jit.script
        def str_hash(x):
            # type: (str) -> int
            return hash(x)

        tester(int_hash, (20, 21, 22))
        tester(float_hash, (20.0, 21.00001, 22.443))
        tester(str_hash, ("", "hello", "a"))
    def test_id(self):
        """``id()`` is rejected for scalar constants but distinguishes
        distinct script-class instances (and None)."""
        with self.assertRaisesRegex(RuntimeError, "Expected a value"):
            @torch.jit.script
            def test_id_scalars():
                return id(2) == id(None)

        @torch.jit.script
        class FooTest:
            def __init__(self, x):
                self.foo = x

            def getFooTest(self):
                return self.foo

        @torch.jit.script
        def test_id_class_types():
            obj1 = FooTest(torch.tensor(3))
            obj2 = FooTest(torch.tensor(2))
            assert obj1 is not obj2
            assert id(obj1) != id(obj2)
            assert id(obj1) != id(None)
            return True

        self.assertTrue(test_id_class_types())
    def test_mutable_dce(self):
        """Dead-code elimination removes the unused mutated value ``b`` but
        keeps ``a`` (which is returned): 2 rand + 1 add remain in the graph."""
        @torch.jit.script
        def foo():
            a = torch.rand(2, 3)
            a += torch.rand(2, 3)
            b = torch.rand(2, 3)
            b += torch.rand(2, 3)
            # b should be cleaned up but not a
            return a

        FileCheck().check_count("aten::rand", 2, exactly=True) \
            .check_count("aten::add", 1, exactly=True).run(str(foo.graph))
    def test_mutable_dce_block(self):
        """DCE across an If block: the unused ``a`` mutations are removed,
        the returned ``b`` (mutated inside the block) is kept."""
        @torch.jit.script
        def foo():
            a = torch.rand(2, 3)
            a += torch.rand(2, 3)
            b = torch.rand(2, 3)
            if bool(a > torch.zeros(2, 3)):
                b += torch.rand(2, 3)
                a += torch.rand(2, 3)
            # a should be cleaned up but not b
            return b

        FileCheck().check("prim::If").check_count("aten::rand", 1, exactly=True) \
            .run(str(foo.graph))
    def test_mutable_dce_graph_input(self):
        """DCE must not remove a mutation of a graph input, even if the input
        is not returned."""
        @torch.jit.script
        def foo(a):
            a += torch.rand(2, 3)
            # shouldn't clean up `a` even though it's not used in the output

        FileCheck().check("aten::rand").check("aten::add").run(str(foo.graph))
    def test_mutable_dce_list(self):
        """DCE keeps a mutation of a value read back out of a list (wildcard
        aliasing): both rand calls remain."""
        @torch.jit.script
        def foo(a):
            l = []
            l.append(a)
            c = l[0]
            b = torch.rand(2, 3)
            c += torch.rand(2, 3)
            return b

        # c does not get cleaned up because there is a wildcard + mutation
        FileCheck().check_count("aten::rand", 2, exactly=True).run(str(foo.graph))
    def test_mutable_dce_loop(self):
        """DCE inside a loop: the truly dead ``dead`` tensor is removed, but
        the mutation of the list element survives."""
        @torch.jit.script
        def foo(a):
            l = []
            l.append(a)
            i = 0
            b = torch.rand(2, 3)
            while i < 1:
                dead = torch.rand(2, 3)
                c = l[0]
                c += torch.rand(2, 3)
                i += 1
            return b

        FileCheck().check("prim::Loop").check_not("aten::rand").check("aten::__getitem__") \
            .check_count("aten::rand", 1, exactly=True).run(str(foo.graph))
    def test_mutable_dce_indirect_wildcards(self):
        """A view reached through a list (wildcard) observes a later in-place
        write to its base tensor; script must match eager."""
        def fn():
            x = torch.ones(2, 3)
            x_1 = x.view(-1)
            l = []
            l.append(x_1)
            x_view = l[0]
            x.add_(torch.ones(2, 3))
            return x_view
        self.checkScript(fn, ())
    def test_mutable_dce_indirect_wildcard_write(self):
        """An indexed write to a tensor that is later appended to a list must
        not be eliminated; script must match eager."""
        def fn():
            indexes = torch.jit.annotate(List[Tensor], [])
            word_ids = torch.zeros(10, dtype=torch.int32)
            word_ids[1] = 1
            indexes.append(word_ids)
            return word_ids
        self.checkScript(fn, ())
    def test_mutable_dce_wildcards(self):
        """A value read out of a list observes an in-place write to the
        original tensor; checked under the simple profiling executor."""
        def fn():
            x = torch.ones(2, 3)
            l = []
            l.append(x)
            x_view = l[0]
            x.add_(torch.ones(2, 3))
            return x_view

        self.checkScript(fn, (), profiling=ProfilingMode.SIMPLE)
    def test_cpp_function_tensor_str(self):
        """Printing a tensor produced by a script function (with grad-requiring
        inputs) must not crash; stdout is captured and discarded."""
        x = torch.randn(2, 2)
        scale = torch.randn(2, 2, requires_grad=True)
        shift = torch.randn(2, 2, requires_grad=True)

        @torch.jit.script
        def fn(x, scale, shift):
            return scale * x + shift

        with self.capture_stdout() as captured:
            print(fn(x, scale, shift))
    def test_string_index(self):
        """Positive and negative indexing into a string matches eager."""
        def fn(x):
            # type: (str)
            return x[2], x[-1]

        self.checkScript(fn, ("abcde",))
def test_ord(self):
def fn(x):
# type: (str) -> int
return ord(x)
self.checkScript(fn, ("h"))
self.checkScript(fn, ("y"))
def index_str_to_tensor(s):
# type: (str) -> Tensor
return torch.tensor(ord(s)) # noqa: T484
s = '\u00a3'.encode()[:1]
self.checkScript(index_str_to_tensor, (s,))
    def test_chr(self):
        """``chr()`` on ints matches eager."""
        def fn(x):
            # type: (int) -> str
            return chr(x)

        self.checkScript(fn, (1,))
        self.checkScript(fn, (97,))
    def test_round(self):
        """``round()`` on float and int inputs matches eager."""
        def round_float(x):
            # type: (float) -> float
            return round(x)

        def round_int(x):
            # type: (int) -> float
            # NOTE(review): eager round(int) returns int despite the `-> float`
            # comment; checkScript compares values — confirm the annotation.
            return round(x)

        self.checkScript(round_float, (1.5,))
        self.checkScript(round_int, (2,))
    def test_convert_base(self):
        """``hex()``, ``oct()`` and ``bin()`` match eager for negative, zero
        and positive ints."""
        def test_hex(x):
            # type: (int) -> str
            return hex(x)

        def test_oct(x):
            # type: (int) -> str
            return oct(x)

        def test_bin(x):
            # type: (int) -> str
            return bin(x)

        numbers = [-1000, -10, 0, 1, 10, 2343]
        for n in numbers:
            self.checkScript(test_bin, (n,))
            self.checkScript(test_oct, (n,))
            self.checkScript(test_hex, (n,))
    @unittest.skipIf(IS_SANDCASTLE, "NYI: TemporaryFileName support for Sandcastle")
    def test_get_set_state(self):
        """Custom ``__getstate__``/``__setstate__`` on ScriptModules are used
        during save/load: loaded buffers carry the +10 applied in __setstate__,
        recursively through submodules, while the in-memory originals are
        untouched. Also covered for a plain scripted nn.Module."""
        class Root(torch.jit.ScriptModule):
            __constants__ = ['number']

            def __init__(self, number):
                super().__init__()
                self.buffer1 = nn.Buffer(torch.ones(2, 2))
                self.buffer2 = nn.Buffer(torch.ones(2, 2))
                self.number = number

            @torch.jit.script_method
            def __getstate__(self):
                # state[2] (74) is deliberately dropped on load
                return (self.buffer1, self.buffer2, 74, self.training)

            @torch.jit.script_method
            def __setstate__(self, state):
                self.buffer1 = state[0] + 10
                self.buffer2 = state[1] + 10
                self.training = state[3]

        class M(torch.jit.ScriptModule):
            __constants__ = ['number']

            def __init__(self, number, submodule):
                super().__init__()
                self.buffer1 = nn.Buffer(torch.ones(2, 2))
                self.buffer2 = nn.Buffer(torch.ones(2, 2))
                self.number = number
                self.submodule = submodule

            @torch.jit.script_method
            def __getstate__(self):
                return (self.buffer1, self.buffer2, 74, self.submodule, self.training)

            @torch.jit.script_method
            def __setstate__(self, state):
                self.buffer1 = state[0] + 10
                self.buffer2 = state[1] + 10
                self.submodule = state[3]
                self.training = state[4]

        with TemporaryFileName() as fname:
            m = M(23, submodule=Root(99))
            m.save(fname)
            loaded = torch.jit.load(fname)

        # Check original module
        self.assertEqual(m.buffer1, torch.ones(2, 2))
        self.assertEqual(m.buffer2, torch.ones(2, 2))

        # Check top level module
        self.assertEqual(loaded.buffer1, torch.ones(2, 2) + 10)
        self.assertEqual(loaded.buffer2, torch.ones(2, 2) + 10)

        # Check submodule
        self.assertEqual(loaded.submodule.buffer1, torch.ones(2, 2) + 10)
        self.assertEqual(loaded.submodule.buffer2, torch.ones(2, 2) + 10)

        # Check simpler module
        class NoArgState(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.buffer1 = nn.Buffer(torch.ones(2, 2))
                self.buffer2 = nn.Buffer(torch.ones(2, 2))

            def forward(self):
                pass

            @torch.jit.export
            def __getstate__(self):
                return 5, self.training

            @torch.jit.export
            def __setstate__(self, state):
                self.buffer1 = torch.ones(2, 2) + state[0]
                self.buffer2 = torch.ones(2, 2) + 10
                self.training = state[1]

        with TemporaryFileName() as fname:
            m = torch.jit.script(NoArgState())
            m.save(fname)
            loaded = torch.jit.load(fname)
            self.assertEqual(loaded.buffer1, torch.ones(2, 2) + 5)
            self.assertEqual(loaded.buffer2, torch.ones(2, 2) + 10)
    def test_string_slicing(self):
        """String slicing matches eager: normal, negative-start, empty
        (start > stop), and out-of-range stop slices."""
        def fn1(x):
            # type: (str) -> str
            return x[1:3]

        def fn2(x):
            # type: (str) -> str
            return x[-1:3]

        def fn3(x):
            # type: (str) -> str
            return x[3:1]

        def fn4(x):
            # type: (str) -> str
            return x[3:100]

        self.checkScript(fn1, ("abcdefghi",))
        self.checkScript(fn2, ("abcdefghi",))
        self.checkScript(fn3, ("abcdefghi",))
        self.checkScript(fn4, ("abcdefghi",))
    def test_early_return_closure(self):
        """Early returns inside closures: graph shape for no-return, if-return,
        loop-return bodies, and a type-mismatch error across return paths."""
        code = dedent('''
            def tanh(self):
                output = torch.tanh(self)
                def backward(grad_output):
                    pass
                return output, backward
        ''')
        cu = torch.jit.CompilationUnit(code)
        g = cu.tanh.graph
        FileCheck().check_count("prim::Closure_0", 2).check("NoneType = prim::Constant") \
                   .check_next("return").run(g)

        code = dedent('''
            def tanh(self):
                output = torch.tanh(self)
                def backward(grad_output):
                    a = 1
                    if output:
                        return 1
                    else:
                        a = 2
                    return a
                return output, backward
        ''')
        cu = torch.jit.CompilationUnit(code)
        g = cu.tanh.graph
        FileCheck().check_count("prim::Closure_0", 2).check("int = prim::If") \
                   .run(g)

        code = dedent('''
            def loop_in_closure(self):
                output = torch.tanh(self)
                def backward(grad_output):
                    for i in range(3):
                        return 1
                    return 4
                return output, backward
        ''')
        cu = torch.jit.CompilationUnit(code)
        fc = FileCheck()
        fc.check("prim::Closure").check("(Tensor, NoneType) = prim::TupleConstruct")
        # Loop then two if's added in exit transform
        fc.check("prim::Closure").check("prim::Loop").check_count("prim::If", 2)
        fc.run(cu.loop_in_closure.graph)

        code = dedent('''
            def tanh(self):
                output = torch.tanh(self)
                def backward(grad_output):
                    if 1 == 1:
                        return 1
                    else:
                        return 1.
                return output, backward
        ''')
        # int and float return types cannot be unified
        with self.assertRaisesRegex(RuntimeError, "returned a value of type int but"):
            cu = torch.jit.CompilationUnit(code)
    @_inline_everything
    def test_early_return_fork_join(self):
        """A function with early returns can be forked; the forked subgraph
        retains the If before its return."""
        @torch.jit.script
        def foo(x):
            if x.dim() == 2:
                return torch.neg(x), x
            else:
                return torch.neg(x), x + 1

        x = torch.rand(3, 4)

        @torch.jit.script
        def wait_script(x):
            fut = torch.jit._fork(foo, x)
            y_hat = foo(x)
            y = torch.jit._wait(fut)
            return y, y_hat

        FileCheck().check("with prim::fork").check("prim::If").check("return")\
                   .run(wait_script.graph)
    def test_early_return_type_refinement(self):
        """Early return refines ``Optional[int]`` to ``int`` in the fall-through."""
        @torch.jit.script
        def test(x):
            # type: (Optional[int]) -> int
            if x is None:
                return 1
            else:
                return x
        self.assertEqual(test(None), 1)
        self.assertEqual(test(2), 2)
    def test_exceptions_with_control_flow(self):
        """``raise`` interacting with if/else and loops: verifies both runtime
        behavior and that the exit transform inserts exactly the expected
        number of guard prim::If nodes."""
        def test_num_ifs(func, num_ifs):
            # helper: assert exact prim::If count in the scripted graph
            g = torch.jit.script(func).graph
            FileCheck().check_count("prim::If", num_ifs, exactly=True).run(g)

        def no_guard_ifs_added(x):
            # type: (int) -> int
            if x == 1:
                return 1
            else:
                if x == 2:
                    raise RuntimeError("hi")
                else:
                    raise RuntimeError("hi")

        self.checkScript(no_guard_ifs_added, (1,))
        self.checkScriptRaisesRegex(no_guard_ifs_added, (2,), Exception, "")
        test_num_ifs(no_guard_ifs_added, 2)

        # FUNCTION LOOKS LIKE:
        # graph(%x.1 : int):
        #   %7 : str = prim::Constant[value="Exception"]()
        #   %2 : int = prim::Constant[value=1]()
        #   %5 : int = prim::Constant[value=2]()
        #   %19 : int = prim::Uninitialized()
        #   %3 : bool = aten::eq(%x.1, %2)
        #   %20 : int = prim::If(%3)
        #     block0():
        #       -> (%2)
        #     block1():
        #       %6 : bool = aten::eq(%x.1, %5)
        #        = prim::If(%6)
        #         block0():
        #            = prim::RaiseException(%7)
        #           -> ()
        #         block1():
        #            = prim::RaiseException(%7)
        #           -> ()
        #       -> (%19)
        #   return (%20)
        def no_ifs_added(x):
            # type: (int) -> int
            if x < 0:
                raise RuntimeError("hi")
            return x

        self.checkScript(no_ifs_added, (1,))
        self.checkScriptRaisesRegex(no_ifs_added, (-2,), Exception, "")
        test_num_ifs(no_ifs_added, 1)

        def test_if_might(x):
            # type: (int)
            if x > 0:
                if x == 1:
                    return 1
                else:
                    a = 2
            else:
                raise RuntimeError("hi")
            return a + 2

        self.checkScript(test_if_might, (1,))
        self.checkScript(test_if_might, (3,))
        # NOTE(review): this re-checks no_ifs_added; possibly intended to be
        # checkScriptRaisesRegex(test_if_might, (-2,), ...) — confirm.
        self.checkScriptRaisesRegex(no_ifs_added, (-2,), Exception, "")
        test_num_ifs(test_if_might, 3)  # one if added to guard a + 2

        def test_loop_no_escape(x):
            # type: (int)
            if x >= 0:
                for _ in range(x):
                    raise RuntimeError("hi")
            else:
                return 5
            return x + 3

        self.checkScript(test_loop_no_escape, (0,))
        self.checkScript(test_loop_no_escape, (-1,))
        self.checkScriptRaisesRegex(test_loop_no_escape, (1,), Exception, "")
        # if guard gets optimized away
        test_num_ifs(test_loop_no_escape, 1)

        def test_loop_exception_with_continue(x):
            # type: (int)
            i = 0
            for i in range(5):
                if i == x:
                    raise RuntimeError("hi")
                else:
                    continue
                print(i)
            return i + 5

        self.checkScript(test_loop_exception_with_continue, (-1,))
        self.checkScriptRaisesRegex(test_loop_exception_with_continue, (1,), Exception, "")
        test_num_ifs(test_loop_exception_with_continue, 1)  # no ifs added to guard print
    def test_exception_exits_closure(self):
        """A closure that always raises: the enclosing function must still
        return on all paths, and exit-pair state resets across branches."""
        code = dedent('''
            def no_return_func(self):
                # type: (Tensor) -> Tensor
                output = torch.tanh(self)
                def backward(grad_output):
                    raise RuntimeError("Hi")
        ''')
        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            cu = torch.jit.CompilationUnit(code)

        code = dedent('''
            def test_exit_pair_reset(x):
                # type: (int) -> int
                if x > 0:
                    a = 0
                    def backward(grad_output):
                        raise RuntimeError("Hi")
                    a = a + 1
                else:
                    return x
                return a + 1
        ''')
        func = torch.jit.CompilationUnit(code).test_exit_pair_reset
        self.assertEqual(func(1,), 2)
        self.assertEqual(func(-1,), -1)
        # final a + 1 gets inlined into the first branch and optimized away
        FileCheck().check_count("prim::If", 1, exactly=True).run(func.graph)
    def test_non_final_return(self):
        """Returns from non-final positions (inside branches, before trailing
        code, nested) produce results matching eager across all paths."""
        def simple(x):
            if bool(x > 3):
                return x + 1
            else:
                return x + 2
            raise RuntimeError("nope")

        def nest(x):
            x = x + 1
            if bool(x > 3):
                if bool(x > 4):
                    x += 1
                return x + 1
            else:
                return x + 2

        def early_ret(x):
            x = x + 1
            if bool(x > 3):
                return x + 1
            x = x + 1
            return x + 2

        def nest_early_ret(x):
            x = x + 1
            if bool(x > 3):
                if bool(x > 4):
                    return x + 2
                return x + 1
            x = x + 1
            return x + 2

        def not_early_ret(x):
            s = ""
            if bool(x > 3):
                if bool(x > 4):
                    return 1, s
                s += "foo"
            else:
                s += "5"
            s += "hi"
            return 7, s

        def not_total_ret(x):
            s = ""
            if bool(x > 3):
                if bool(x > 4):
                    return 1, s
                else:
                    return 2, s
            else:
                s += "5"
            return 7, s

        # exercise each function on inputs hitting different branches
        for i in range(3):
            for func in [simple, nest, early_ret, nest_early_ret, not_early_ret,
                         not_total_ret]:
                self.checkScript(func, (torch.tensor(2.5 + i),))

        def vars_used_after_ret(x):
            # type: (int) -> int
            if x == 0:
                return x
            else:
                y = 2
                z = 3
            return x + y * z

        self.checkScript(vars_used_after_ret, (1,))
        self.checkScript(vars_used_after_ret, (0,))

        def complicated(x):
            # type: (int) -> int
            if x:
                if x == 2:
                    return 1
                    assert 1 == 2
                else:
                    if x == 3:
                        return 2
                        assert 1 == 2
                    else:
                        a = 2
                        b = 3
            else:
                a = 4
                b = 1
            return a + b
            assert 1 == 2

        for i in range(4):
            self.checkScript(complicated, (i,))
    def test_partial_returns(self):
        """Annotated return types must be produced on every path; un-annotated fns may
        implicitly return None."""
        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            @torch.jit.script
            def no_ret():
                # type: () -> int
                pass
        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            @torch.jit.script
            def partial(x):
                # type: (Tensor) -> int
                if x:
                    return 1
        # even Optional[int] must be returned explicitly on every path
        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            @torch.jit.script
            def typed_none():
                # type: () -> Optional[int]
                pass
        @torch.jit.script
        def none_ret():
            pass
        self.assertIs(none_ret(), None)
        FileCheck().check(": None").run(none_ret.graph)
    def test_early_returns_loops(self):
        """Early returns from inside while/for bodies (including nested loops) behave
        identically under scripting and eager execution."""
        def nest_while_ret(x):
            # type: (int) -> int
            y = 4
            while x < 4:
                if x < 3:
                    return y
                else:
                    y = y + 1
                    break
                y = y + 2  # unreachable: both branches exit the loop body
            y = y + 1
            return y
        self.checkScript(nest_while_ret, (2,))
        self.checkScript(nest_while_ret, (3,))
        self.checkScript(nest_while_ret, (4,))
        def loop_ret(x, y):
            # type: (int, int) -> (int)
            i = 0
            for i in range(x):
                if x == y:
                    return x + y
                i = i + y
            i = i - 1
            return i
        self.checkScript(loop_ret, (3, 3))
        self.checkScript(loop_ret, (2, 3))
        self.checkScript(loop_ret, (3, 1))
        def test_will_ret(y):
            # type: (int) -> int
            for _ in range(y):
                return 2
            return 1
        self.checkScript(test_will_ret, (0,))
        self.checkScript(test_will_ret, (1,))
        def test_loop_nest_ret(y):
            # type: (int) -> int
            for _ in range(y):
                for _ in range(y - 2):
                    return 10
                return 5
            return 0
        self.checkScript(test_loop_nest_ret, (0,))
        self.checkScript(test_loop_nest_ret, (1,))
        self.checkScript(test_loop_nest_ret, (2,))
    def test_nn_init(self):
        """torch.nn.init functions compile natively (no PythonOp fallback) and match eager
        results under a saved/restored RNG state."""
        tests = (
            ('constant_', (lambda: (torch.ones(2, 2), 2.5)), "Tensor, float"),
            ('ones_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('zeros_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('uniform_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('normal_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('xavier_normal_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('xavier_uniform_', (lambda: (torch.ones(2, 2),)), "Tensor"),
        )
        for name, args_fn, type_str in tests:
            # Build test code: positional args named a, b, c, ... to match arity
            arg_str = ', '.join([chr(i + ord('a')) for i in range(len(args_fn()))])
            code = dedent('''
                def test({arg_str}):
                    # type: ({type_str})
                    return torch.nn.init.{name}({arg_str})
            ''').format(arg_str=arg_str, type_str=type_str, name=name)
            cu = torch.jit.CompilationUnit(code)
            # Compare functions
            init_fn = getattr(torch.nn.init, name)
            script_out = self.runAndSaveRNG(cu.test, args_fn())
            eager_out = self.runAndSaveRNG(init_fn, args_fn())
            self.assertEqual(script_out, eager_out)
            # the init fn must have compiled to native ops, not a Python fallback
            FileCheck().check_not("prim::PythonOp").run(cu.test.graph)
    def test_nn_init_generator(self):
        """nn.init functions honor an explicit generator= argument under scripting."""
        init_fns = (
            'uniform_', 'normal_', 'xavier_normal_', 'xavier_uniform_',
        )
        for name in init_fns:
            # Build test code
            code = dedent('''
                def test(tensor, generator):
                    # type: (Tensor, Generator)
                    return torch.nn.init.{name}(tensor, generator=generator)
            ''').format(name=name)
            cu = torch.jit.CompilationUnit(code)
            # Compare functions
            init_fn = getattr(torch.nn.init, name)
            torch.manual_seed(1)
            g = torch.Generator()
            g.manual_seed(2023)
            script_out = cu.test(torch.ones(2, 2), g)
            # Change the seed of the default generator to make
            # sure that we're using the provided generator
            torch.manual_seed(2)
            g = torch.Generator()
            g.manual_seed(2023)
            eager_out = init_fn(torch.ones(2, 2), generator=g)
            self.assertEqual(script_out, eager_out)
            FileCheck().check_not("prim::PythonOp").run(cu.test.graph)
    def test_parse_generator(self):
        """Generator constants embedded in IR text (device + seed) parse back correctly,
        with seed range validation."""
        def _test_parse_generator(seed):
            jit_graph = parse_ir(
                f"""
                graph():
                    %0 : float = prim::Constant[value=-0.31622776601683789]()
                    %1 : float = prim::Constant[value=0.31622776601683789]()
                    %2 : Generator = prim::Constant[value=torch.Generator(device="cpu", seed={seed})]()
                    %3 : NoneType = prim::Constant()
                    %4 : int[] = prim::Constant[value=[]]()
                    %5 : int = prim::Constant[value=6]()
                    %6 : Device = prim::Constant[value="cpu"]()
                    %7 : Tensor = aten::empty(%4, %5, %3, %6, %3, %3)
                    %8 : Float() = aten::uniform(%7, %0, %1, %2)
                    return (%8)
                """,
            )
            # locate the Generator constant node in the parsed graph
            node = next(
                n
                for n in jit_graph.nodes()
                if isinstance(n.output().type(), torch._C._GeneratorType)
            )
            assert isinstance(node.output().type(), torch._C._GeneratorType)
            g = node.ival("value")
            assert isinstance(g, torch.Generator)
            self.assertEqual(g.initial_seed(), seed)
        _test_parse_generator(2024)
        _test_parse_generator(2**63 - 1)  # max valid seed (int64 max)
        with self.assertRaisesRegex(RuntimeError, "Seed must be a non-negative integer"):
            _test_parse_generator(-2024)
        with self.assertRaisesRegex(RuntimeError, "Number is too big"):
            _test_parse_generator(2**63)
    def test_early_return_rewrite(self):
        """The early-return rewrite produces the minimal number of prim::If guards."""
        def test_foo(x: bool):
            if x:
                return 1
            return 2
        self.checkScript(test_foo, (True,))
        self.checkScript(test_foo, (False,))
        # a single early return needs exactly one prim::If
        FileCheck().check_count("prim::If", 1, exactly=True).run(torch.jit.script(test_foo).graph)
        def test_multiple(x: int):
            if x == 5:
                return x * x
            else:
                y = 2 * x
            z = y * 2
            if z == 8:
                return 1
            if z != 16:
                z = z - 2
                abc = 4
            else:
                return 3
            z = z * abc
            return z * z * z
        self.checkScript(test_multiple, (5,))
        self.checkScript(test_multiple, (2,))
        self.checkScript(test_multiple, (4,))
        self.checkScript(test_multiple, (3,))
        self.checkScript(test_multiple, (10,))
        graph = torch.jit.script(test_multiple).graph
        FileCheck().check_count("prim::If", 3, exactly=True).run(graph)
    def test_is_scripting_metacompile(self):
        """is_scripting() is a compile-time constant: the False branch is never compiled."""
        @torch.jit.script
        def foo():
            if torch.jit.is_scripting():
                return 1
            else:
                print("hello") + 2  # will not be compiled
        self.assertEqual(foo(), 1)
    def test_boolean_literal_constant_metacompile(self):
        """Constant-valued conditions (module __constants__ or literal True) select one
        branch at compile time, allowing branches with mismatched return types."""
        class Mod(torch.nn.Module):
            __constants__ = ['val']
            def __init__(self, val):
                super().__init__()
                self.val = val
            def forward(self):
                # val is a constant, so only one branch is compiled
                if self.val:
                    return 1
                else:
                    return "2"
        self.checkModule(Mod(True), ())
        self.checkModule(Mod(False), ())
        @torch.jit.script
        def foo():
            if True:
                return 1
            else:
                return "2"
        self.assertEqual(foo(), 1)
    def test_assert_is_scripting_metacompile(self):
        """assert not is_scripting() compiles to an unconditional raise; code after it is dropped."""
        def foo():
            assert not torch.jit.is_scripting(), "TestErrorMsg"
            print("hello") + 2  # will not be compiled
        f = torch.jit.script(foo)
        with self.assertRaisesRegex(torch.jit.Error, "TestErrorMsg"):
            f()
    def test_isinstance_metacompile(self):
        """isinstance checks are resolved at compile time; statically-false branches
        (even type-invalid ones) are dropped."""
        @torch.jit.script
        def test_primitive_type(x):
            # type: (int) -> int
            if isinstance(x, int):
                return x + 1
            else:
                return x - 1
        self.assertEqual(test_primitive_type(1), 2)
        with self.assertRaisesRegex(Exception, "Expected a value of type"):
            test_primitive_type(1.5)
        _MyNamedTuple = namedtuple('_MyNamedTuple', ['value'])
        @torch.jit.script
        def test_non_primitive_types(x):
            # type: (_MyNamedTuple) -> Tensor
            # statically False, so `return 10` (wrong type) is never compiled
            if isinstance(1, _MyNamedTuple):
                return 10
            if isinstance(x, _MyNamedTuple):
                return x.value + 1
            else:
                return 1
        out = test_non_primitive_types(_MyNamedTuple(value=torch.tensor(5.0)))
        self.assertEqual(out, torch.tensor(6.0))
    def test_namedtuple_type_inference(self):
        """Annotated NamedTuples script cleanly; un-annotated ones infer Tensor fields
        and reject non-Tensor values."""
        _AnnotatedNamedTuple = NamedTuple('_NamedTupleAnnotated', [('value', int)])  # noqa: UP014
        _UnannotatedNamedTuple = namedtuple('_NamedTupleUnAnnotated', ['value'])
        def test_check_named_tuple_value():
            named_tuple = _AnnotatedNamedTuple(1)
            return named_tuple.value
        self.checkScript(test_check_named_tuple_value, ())
        def test_error():
            return _UnannotatedNamedTuple(1)
        # without annotations, fields are inferred as Tensor, so int is rejected
        with self.assertRaisesRegex(RuntimeError, r"Expected a value of type \'Tensor \(inferred\)\' "
                                                  r"for argument \'value\' but instead found type \'int\'."):
            torch.jit.script(test_error)
    def test_namedtuple_default_values_simple_type(self):
        """NamedTuple default values of simple types are preserved in the scripted schema."""
        class Point(NamedTuple):
            x: Optional[int] = None
            y: int = 2
        make_global(Point)
        class M(torch.nn.Module):
            def forward(self, point: Point):
                return point
        p = Point(x=3, y=2)
        self.checkModule(M(), (p,))
        self.checkModule(M(), (Point(),))  # defaults only
        m = torch.jit.script(M())
        # the defaults must show up in the graph's input type annotation
        FileCheck().check(r"NamedTuple(x : int? = None, y : int = 2))") \
                   .run(m.graph)
    def test_namedtuple_default_values_missing(self):
        """NamedTuples with a mix of defaulted and non-defaulted fields script correctly."""
        class Point(NamedTuple):
            x: Optional[int]
            y: int
            z: int = 3
        make_global(Point)
        class M(torch.nn.Module):
            def forward(self, point: Point):
                return point
        p1 = Point(x=3, y=2)        # z falls back to its default
        p2 = Point(x=3, y=2, z=1)
        self.checkModule(M(), (p1,))
        self.checkModule(M(), (p2,))
        m = torch.jit.script(M())
        FileCheck().check(r"NamedTuple(x : int?, y : int, z : int = 3))") \
                   .run(m.graph)
    def test_namedtuple_default_values_container_type(self):
        """NamedTuple default values of container types (list/dict/Optional) survive scripting."""
        class Point(NamedTuple):
            x: Optional[List[int]] = None
            y: List[int] = [1, 2, 3]
            z: Optional[Dict[str, int]] = {"a": 1}
        make_global(Point)
        class M(torch.nn.Module):
            def forward(self, point: Point):
                return point
        p = Point(x=[4, 5, 6], y=[3, 2, 1], z={"b": 2})
        self.checkModule(M(), (p,))
        self.checkModule(M(), (Point(),))  # container defaults only
        m = torch.jit.script(M())
        first_line = r"NamedTuple(x : int[]? = None, y : int[] = " \
                     r"[1, 2, 3], z : Dict(str, int)? = {a: 1}))"
        FileCheck().check(first_line) \
                   .run(m.graph)
    def test_namedtuple_default_values_Tensor_type(self):
        """Tensor-valued NamedTuple defaults are explicitly unsupported and raise."""
        class Point(NamedTuple):
            x: torch.Tensor = torch.rand(2, 3)
        make_global(Point)
        class M(torch.nn.Module):
            def forward(self, point: Point):
                return point
        p = Point(x=torch.rand(2, 3))
        with self.assertRaisesRegex(RuntimeError, "Tensors are not "
                                                  "supported as default NamedTuple "
                                                  "fields"):
            m = torch.jit.script(M())
            m(p)
    def test_namedtuple_default_values_using_factory_constructor(self):
        """Defaults passed via the namedtuple(..., defaults=...) factory appear in the schema."""
        Pair = namedtuple("Pair", ["x", "y"], defaults=(1, 2))
        make_global(Pair)
        @torch.jit.script
        def fn(x: Pair) -> Pair:
            return x
        # TODO: We can't use `checkScript` with the NamedTuple factory
        # constructor. Using the factory constructor with TorchScript
        # TorchScript creates an anonymous `NamedTuple` class instead of
        # preserving the actual name. For example, the actual generated
        # signature in this case is:
        #   graph(%x.1 : NamedTuple(x : Tensor, y : Tensor))
        # It looks like similar test cases have had this issue as well
        # (see: `test_namedtuple_python`).
        FileCheck().check(r"NamedTuple(x : Tensor = 1, y : Tensor = 2))") \
                   .check_next(r"return (%x.1)") \
                   .run(fn.graph)
    def test_isinstance_dynamic(self):
        """isinstance on an Optional refines dynamically, including tuple-of-types and
        parameterized List[int] forms."""
        @torch.jit.script
        def foo(a):
            # type: (Optional[List[int]]) -> int
            b = 0
            if isinstance(a, (int, (float,), list, str)):
                b += 1
            if isinstance(a, (int, str)):
                b += 1
            if isinstance(a, List[int]):
                b += 1
            return b
        # a list matches `list` and `List[int]` but not (int, str); None matches nothing
        self.assertEqual(foo([3, 4]), 2)
        self.assertEqual(foo(None), 0)
    def test_function_overloads(self):
        """torch.jit._overload declarations: dispatch, caching, default-value rules,
        and error cases for mismatched/missing parameters."""
        # TODO: pyflakes currently does not compose @overload annotation with other
        # decorators. This is fixed on master but not on version 2.1.1.
        # Next version update remove noqa and add @typing.overload annotation
        @torch.jit._overload  # noqa: F811
        def test_simple(x1):  # noqa: F811
            # type: (int) -> int
            pass
        @torch.jit._overload  # noqa: F811
        def test_simple(x1):  # noqa: F811
            # type: (float) -> float
            pass
        def test_simple(x1):  # noqa: F811
            return x1
        def invoke_function():
            return test_simple(1.0), test_simple(.5)
        self.checkScript(invoke_function, ())
        # testing that the functions are cached
        compiled_fns_1 = torch.jit._script._get_overloads(test_simple)
        compiled_fns_2 = torch.jit._script._get_overloads(test_simple)
        for a, b in zip(compiled_fns_1, compiled_fns_2):
            self.assertIs(a.graph, b.graph)
        old_func = test_simple
        # testing that new functions added work with caching
        @torch.jit._overload  # noqa: F811
        def test_simple(x1):  # noqa: F811
            # type: (str) -> str
            pass
        @torch.jit.script
        def my_func():
            return old_func("hi")
        # testing new function same qualified name
        @torch.jit._overload  # noqa: F811
        def test_simple(a, b):  # noqa: F811
            # type: (int, int) -> int
            pass
        def test_simple(a, b):
            return a + b
        @torch.jit.script
        def fn():
            return test_simple(3, 4)
        self.assertEqual(fn(), 7)
        # currently we take the default values have to be specified in the
        # overload as well - TODO take them from implementation and apply
        # where the type is valid.
        @torch.jit._overload  # noqa: F811
        def identity(x1):  # noqa: F811
            # type: (str) -> str
            pass
        @torch.jit._overload  # noqa: F811
        def identity(x1):  # noqa: F811
            # type: (float) -> float
            pass
        def identity(x1=1.0):  # noqa: F811
            return x1
        def invoke():
            return identity(), identity(.5), identity("hi")
        self.checkScript(invoke, ())
        def schema_match_failure():
            return identity((1, 2))
        # a tuple matches neither overload; the error should mention both candidates
        thrown = False
        try:
            torch.jit.script(schema_match_failure)
        except Exception as e:
            thrown = True
            self.assertTrue(r"of type 'str'" in str(e) and r"of type 'float" in str(e))
        self.assertTrue(thrown)
        with self.assertRaisesRegex(Exception, "cannot be directly compiled"):
            torch.jit.script(identity)
        @torch.jit._overload  # noqa: F811
        def impl_compile_failure(x, y):  # noqa: F811
            # type: (str, str) -> (str)
            pass
        @torch.jit._overload  # noqa: F811
        def impl_compile_failure(x, y):  # noqa: F811
            # type: (int, int) -> (int)
            pass
        def impl_compile_failure(x, y):  # noqa: F811
            return x - y
        def test():
            impl_compile_failure("one", "two")
        # str - str is invalid, so compiling the str overload's body must fail
        with self.assertRaisesRegex(Exception, "Arguments for call are not valid"):
            torch.jit.script(test)
        @torch.jit._overload  # noqa: F811
        def good_overload(x=1):  # noqa: F811
            # type: (int) -> (int)
            pass
        def good_overload(x=1):  # noqa: F811
            return x
        @torch.jit.script
        def foo():
            return good_overload()
        self.assertEqual(foo(), 1)
        # overload defaults must match the implementation's defaults
        with self.assertRaisesRegex(Exception, "must equal to the default parameter"):
            @torch.jit._overload  # noqa: F811
            def bad_default_on_overload(x, y=2):  # noqa: F811
                # type: (int, int) -> (int)
                pass
            def bad_default_on_overload(x, y=1):  # noqa: F811
                # type: (int, int) -> (int)
                pass
            @torch.jit.script
            def test():
                return bad_default_on_overload(1, 2)
        @torch.jit._overload  # noqa: F811
        def diff_default(x):  # noqa: F811
            # type: (int) -> int
            pass
        @torch.jit._overload  # noqa: F811
        def diff_default(x):  # noqa: F811
            # type: (str) -> str
            pass
        def diff_default(x="hi"):  # noqa: F811
            return x
        def test():
            return diff_default(), diff_default(2), diff_default("abc")
        self.assertEqual(test(), torch.jit.script(test)())
        @torch.jit._overload  # noqa: F811
        def diff_num_params(x):  # noqa: F811
            # type: (float) -> float
            pass
        @torch.jit._overload  # noqa: F811
        def diff_num_params(x, y):  # noqa: F811
            # type: (int, int) -> int
            pass
        def diff_num_params(x, y=2, z=3):  # noqa: F811
            # type: (Union[float, int], int, int)
            return x + y + z
        def test():
            return diff_num_params(1.0), diff_num_params(1, 2), diff_num_params(1), diff_num_params(1, 2, 3)
        self.assertEqual(test(), torch.jit.script(test)())
        @torch.jit._overload  # noqa: F811
        def diff_num_params_no_annot():
            # type: () -> int
            pass
        def diff_num_params_no_annot(x=1):  # noqa: F811
            return x
        def test():
            return diff_num_params_no_annot(1.0)
        # parameters present only in the implementation are rejected
        with self.assertRaisesRegex(Exception, "Parameters not specified"):
            torch.jit.script(test)
    def test_function_overload_misuse(self):
        """Misused overload declarations raise clear errors: non-empty bodies and
        missing implementations."""
        with self.assertRaisesRegex(RuntimeError, "Only `pass` statement or `...` can be the body"):
            @torch.jit._overload
            def wrong_decl_body(x: str) -> str:
                return x + "0"
        with self.assertRaisesRegex(RuntimeError, "Only `pass` statement or `...` can be the body"):
            class MyClass:
                @torch.jit._overload_method
                def method(self):
                    return 0
        @torch.jit._overload
        def null_overload(x: int) -> int: ...  # noqa: E704
        @torch.jit._overload  # noqa: F811
        def null_overload(x: str) -> str:  # noqa: F811
            pass
        def null_overload_driver():
            return null_overload(0)
        # declarations with no implementation must fail at scripting time
        with self.assertRaisesRegex(RuntimeError, 'Implementation for the function ".+" is missing.'):
            torch.jit.script(null_overload_driver)
        class OverloadMisuse(torch.nn.Module):
            @torch.jit._overload_method
            def forward(self, x: int):
                pass
            @torch.jit._overload_method  # noqa: F811
            def forward(self, x: Tensor):  # noqa: F811
                pass
        with self.assertRaisesRegex(RuntimeError, 'Implementation for the method ".+" is missing.'):
            m = torch.jit.script(OverloadMisuse())
    def test_script_method_torch_function_overload(self):
        """Scripted modules accept Tensor subclasses both via __call__ and direct forward()."""
        class MyCustomTensor(torch.Tensor):
            pass
        class MyCustomModule(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        scripted_mod = torch.jit.script(MyCustomModule())
        t = torch.tensor([3.0])
        ref_out = scripted_mod(t)
        t_custom = MyCustomTensor([3.0])
        out1 = scripted_mod(t_custom)
        self.assertEqual(out1, ref_out)
        out2 = scripted_mod.forward(t_custom)
        self.assertEqual(out2, ref_out)
    def test_function_overloading_isinstance(self):
        """An overloaded implementation may branch on isinstance; each overload compiles
        only its statically-matching branch."""
        @torch.jit._overload  # noqa: F811
        def my_conv(x, y):  # noqa: F811
            # type: (float, str) -> (float)
            pass
        @torch.jit._overload  # noqa: F811
        def my_conv(x, y):  # noqa: F811
            # type: (float, float) -> (float)
            pass
        def my_conv(x, y=2.0):  # noqa: F811
            if isinstance(y, str):
                if y == "hi":
                    return 4.0 - x
                else:
                    return 5.0 - x
            else:
                return 2.0 + x
        def test_uses():
            return my_conv(1.5), my_conv(1.5, "hi"), my_conv(1.5, 5.0)
        self.checkScript(test_uses, ())
    def test_method_overloading(self):
        """@torch.jit._overload_method dispatch, annotation requirements, and the rule
        that overloads cannot coexist with eager use of the same module."""
        class Over(torch.nn.Module):
            @torch.jit._overload_method  # noqa: F811
            def forward(self, x):  # noqa: F811
                # type: (Tuple[Tensor, Tensor]) -> Tensor
                pass
            @torch.jit._overload_method  # noqa: F811
            def forward(self, x):  # noqa: F811
                # type: (Tensor) -> Tensor
                pass
            def forward(self, x):  # noqa: F811
                if isinstance(x, Tensor):
                    return x + 20
                else:
                    return x[0] + 5
        class S(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.weak = Over()
            @torch.jit.script_method
            def forward(self, x):
                # exercises both overloads of Over.forward from script
                return self.weak(x) + self.weak((x, x))
        s_mod = S()
        x = torch.ones(1)
        self.assertEqual(s_mod(x), x + 20 + 5 + x)
        over = Over()
        self.assertEqual(over((x, x)), x + 5)
        self.assertEqual(over(x), x + 20)
        # overloaded methods require explicit type annotations
        class Unannotated(torch.nn.Module):
            @torch.jit._overload_method  # noqa: F811
            def hello(self, x):  # noqa: F811
                pass
            @torch.jit._overload_method  # noqa: F811
            def hello(self, x):  # noqa: F811
                # type: (int) -> (int)
                pass
            def hello(self, x):  # noqa: F811
                return x + 3
            def forward(self):
                return self.hello(1), self.hello(.5)
        w = Unannotated()
        with self.assertRaisesRegex(Exception, "explicitly add type annotations to overloaded functions"):
            torch.jit.script(w)
        class CompileOverloadError(torch.nn.Module):
            @torch.jit._overload_method  # noqa: F811
            def hello(self, x):  # noqa: F811
                # type: (str) -> (int)
                pass
            @torch.jit._overload_method  # noqa: F811
            def hello(self, x):  # noqa: F811
                # type: (int) -> (int)
                pass
            def hello(self, x):  # noqa: F811
                return x + 1
            def forward(self):
                return self.hello("hi"), self.hello(.5)
        w = CompileOverloadError()
        with self.assertRaisesRegex(Exception, "but instead found type 'str'"):
            torch.jit.script(w)
        # testing overload declared first, then non-overload
        if sys.version_info < (3, 13):  # test broken in 3.13
            with self.assertRaisesRegex(Exception, "Overloads are not usable when a module"):
                class W3(torch.nn.Module):
                    @torch.jit._overload_method  # noqa: F811
                    def forward(self, x):  # noqa: F811
                        # type: (int) -> int
                        pass
                    @torch.jit._overload_method  # noqa: F811
                    def forward(self, x):  # noqa: F811
                        # type: (Tensor) -> Tensor
                        pass
                    def forward(self, x):  # noqa: F811
                        return x + 5
                a = W3()
                b = torch.jit.script(a)
        class W3(torch.nn.Module):
            def forward(self, x):  # noqa: F811
                return x + 5 + 10
        a = W3()
        b = torch.jit.script(a)
        # testing non-overload declared first, then overload
        class W2(torch.nn.Module):
            def hello(self, x1, x2):
                return x1 + x2
            def forward(self, x):
                return self.hello(x, x)
        a = torch.jit.script(W2())
        self.assertEqual(a(torch.tensor(1)), torch.tensor(2))
        class W2(torch.nn.Module):
            @torch.jit._overload_method  # noqa: F811
            def hello(self, x):  # noqa: F811
                pass
            @torch.jit._overload_method  # noqa: F811
            def hello(self, x):  # noqa: F811
                # type: (int) -> (int)
                pass
            def hello(self, x):  # noqa: F811
                return x + 5 + 10
            def forward(self, x):
                return self.hello(1), self.hello(x)
        if sys.version_info < (3, 13):  # test broken in 3.13
            with self.assertRaisesRegex(Exception, "Overloads are not usable when a module"):
                a = torch.jit.script(W2())
    def test_narrow_copy(self):
        """Tensor.narrow_copy scripts and matches eager output."""
        def foo(a):
            return a.narrow_copy(0, 0, 5)
        self.checkScript(foo, [torch.rand(10)])
    def test_select_after_chunk(self):
        """In-place mutation through a chunk view is reflected in the source tensor
        under scripting, same as eager."""
        def foo(x):
            chunked = torch.chunk(x, 1)
            foo = chunked[0]
            foo.add_(5)
            return x
        self.checkScript(foo, [torch.rand(2, 3)])
    def test_nn_LSTM_with_layers(self):
        """A multi-layer nn.LSTM inside a ScriptModule matches the eager module under
        identical RNG state."""
        class M(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.rnn = nn.LSTM(2, 3, 2, dropout=0)
            @torch.jit.script_method
            def forward(self, x, lengths, h0, c0):
                return self.rnn(x, (h0, c0))[0]
        class Eager(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.rnn = nn.LSTM(2, 3, 2, dropout=0)
            def forward(self, x, lengths, h0, c0):
                return self.rnn(x, (h0, c0))[0]
        inputs = (torch.randn(1, 1, 2), torch.LongTensor([7]), torch.randn(2, 1, 3), torch.randn(2, 1, 3))
        # runAndSaveRNG gives both modules the same parameter initialization
        eager_out = self.runAndSaveRNG(lambda: Eager()(*inputs), ())[0]
        script_out = self.runAndSaveRNG(lambda: M()(*inputs), ())[0]
        self.assertEqual(eager_out, script_out)
    def test_nn_LSTM(self):
        """nn.LSTM with a PackedSequence input/output annotation scripts and matches eager."""
        input = torch.nn.utils.rnn.pack_sequence([torch.randn(5, 5)])
        class S(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.x = torch.nn.LSTM(5, 5)
            @torch.jit.script_method
            def forward(self, input: PackedSequence) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]:
                return self.x(input)
        eager_out = self.runAndSaveRNG(lambda x: torch.nn.LSTM(5, 5)(x), (input,))[0]
        script_out = self.runAndSaveRNG(lambda x: S()(x), (input,))[0]
        self.assertEqual(eager_out, script_out)
    def test_nn_GRU(self):
        """nn.GRU scripts correctly for both PackedSequence and plain Tensor inputs."""
        seq_input = torch.nn.utils.rnn.pack_sequence([torch.randn(5, 5)])
        tensor_input = torch.randn(5, 5, 5)
        class SeqLengthGRU(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.x = torch.nn.GRU(5, 5)
            @torch.jit.script_method
            def forward(self, input: PackedSequence) -> Tuple[PackedSequence, torch.Tensor]:
                return self.x(input)
        class TensorGRU(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.x = torch.nn.GRU(5, 5)
            @torch.jit.script_method
            def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
                return self.x(input)
        seq_eager_out = self.runAndSaveRNG(lambda x: torch.nn.GRU(5, 5)(x), (seq_input,))[0]
        seq_script_out = self.runAndSaveRNG(lambda x: SeqLengthGRU()(x), (seq_input,))[0]
        tensor_eager_out = self.runAndSaveRNG(lambda x: torch.nn.GRU(5, 5)(x), (tensor_input,))[0]
        tensor_script_out = self.runAndSaveRNG(lambda x: TensorGRU()(x), (tensor_input,))[0]
        self.assertEqual(seq_eager_out, seq_script_out)
        self.assertEqual(tensor_eager_out, tensor_script_out)
    def test_torchscript_memoryformat(self):
        """The memory_format keyword of Tensor.contiguous is honored under scripting."""
        @torch.jit.script
        def fn(x):
            return x.contiguous(memory_format=torch.channels_last)
        x = torch.randn(4, 3, 6, 6)
        y = fn(x)
        self.assertTrue(y.is_contiguous(memory_format=torch.channels_last))
    def test_torchscript_multi_head_attn(self):
        """Scripted multi_head_attention_forward matches the eager functional within
        float tolerance."""
        @torch.jit.script
        def jit_multihead_attn_forward(query,                    # type: Tensor
                                       key,                      # type: Tensor
                                       value,                    # type: Tensor
                                       embed_dim_to_check,       # type: int
                                       num_heads,                # type: int
                                       in_proj_weight,           # type: Tensor
                                       in_proj_bias,             # type: Tensor
                                       bias_k,                   # type: Optional[Tensor]
                                       bias_v,                   # type: Optional[Tensor]
                                       add_zero_attn,            # type: bool
                                       dropout,                  # type: float
                                       out_proj_weight,          # type: Tensor
                                       out_proj_bias,            # type: Tensor
                                       training=True,            # type: bool
                                       key_padding_mask=None,    # type: Optional[Tensor]
                                       need_weights=True,        # type: bool
                                       attn_mask=None            # type: Optional[Tensor]
                                       ):
            # type: (...) -> Tuple[Tensor, Optional[Tensor]]
            return torch.nn.functional.multi_head_attention_forward(query, key, value,
                                                                    embed_dim_to_check, num_heads,
                                                                    in_proj_weight, in_proj_bias,
                                                                    bias_k, bias_v,
                                                                    add_zero_attn, dropout,
                                                                    out_proj_weight, out_proj_bias,
                                                                    training, key_padding_mask,
                                                                    need_weights, attn_mask)
        src_l = 3
        bsz = 5
        embed_size = 8
        nhead = 2
        multi_head_attn = torch.nn.MultiheadAttention(embed_size, nhead)
        query = torch.rand((src_l, bsz, embed_size))
        key = torch.rand((src_l, bsz, embed_size))
        value = torch.rand((src_l, bsz, embed_size))
        # causal mask: -inf above the diagonal, 0 on/below it
        mask = (torch.triu(torch.ones(src_l, src_l)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0).to(torch.get_default_dtype())
        jit_out = jit_multihead_attn_forward(query, key, value,
                                             embed_size, nhead,
                                             multi_head_attn.in_proj_weight,
                                             multi_head_attn.in_proj_bias,
                                             multi_head_attn.bias_k, multi_head_attn.bias_v,
                                             multi_head_attn.add_zero_attn, multi_head_attn.dropout,
                                             multi_head_attn.out_proj.weight,
                                             multi_head_attn.out_proj.bias, attn_mask=mask)[0]
        py_out = torch.nn.functional.multi_head_attention_forward(query, key, value,
                                                                  embed_size, nhead,
                                                                  multi_head_attn.in_proj_weight,
                                                                  multi_head_attn.in_proj_bias,
                                                                  multi_head_attn.bias_k,
                                                                  multi_head_attn.bias_v,
                                                                  multi_head_attn.add_zero_attn,
                                                                  multi_head_attn.dropout,
                                                                  multi_head_attn.out_proj.weight,
                                                                  multi_head_attn.out_proj.bias,
                                                                  attn_mask=mask)[0]
        # print("rel. error: ")
        # print(jit_out / py_out - 1)
        self.assertEqual(jit_out, py_out, atol=5e-4, rtol=1e-4)
    def test_torchscript_multi_head_attn_fast_path(self):
        """Scripted MultiheadAttention in eval/no_grad (fast-path conditions) matches eager."""
        src_l = 3
        bsz = 5
        embed_size = 8
        nhead = 2
        multi_head_attn = torch.nn.MultiheadAttention(embed_size, nhead, batch_first=True)
        multi_head_attn = multi_head_attn.eval()
        query = key = value = torch.rand((bsz, src_l, embed_size))
        with torch.no_grad():
            py_out = multi_head_attn(query, key, value)
            mha = torch.jit.script(multi_head_attn)
            jit_out = mha(query, key, value)
        torch.testing.assert_close(jit_out, py_out)
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    def test_scriptmodule_multi_head_attn_cuda(self):
        """A traced MultiheadAttention wrapped in a ScriptModule matches the functional
        reference on CUDA."""
        class MyModule(torch.jit.ScriptModule):
            def __init__(self, embed_dim, num_heads):
                super().__init__()
                sample_q = torch.randn(3, 2, embed_dim)
                sample_kv = torch.randn(3, 2, embed_dim)
                attention = nn.MultiheadAttention(embed_dim, num_heads)
                attention.eval()
                self.mod = torch.jit.trace(attention,
                                           (sample_q, sample_kv, sample_kv))
            @torch.jit.script_method
            def forward(self, q, k, v):
                return self.mod(q, k, v)
        embed_dim = 8
        num_heads = 2
        sl = 3
        bs = 2
        model = MyModule(embed_dim, num_heads).cuda()
        q = torch.randn(sl, bs, embed_dim, device="cuda")
        kv = torch.randn(sl, bs, embed_dim, device="cuda")
        jit_out = model(q, kv, kv)[0]
        py_out = torch.nn.functional.multi_head_attention_forward(q, kv, kv,
                                                                  embed_dim, num_heads,
                                                                  model.mod.in_proj_weight,
                                                                  model.mod.in_proj_bias,
                                                                  None, None, None, 0.0,
                                                                  model.mod.out_proj.weight,
                                                                  model.mod.out_proj.bias)[0]
        self.assertEqual(jit_out, py_out, atol=5e-4, rtol=1e-4)
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    def test_scriptmodule_transformer_cuda(self):
        """A traced nn.Transformer wrapped in a ScriptModule matches eager output."""
        class MyModule(torch.jit.ScriptModule):
            def __init__(self, transformer, sample_q, sample_kv):
                super().__init__()
                transformer.eval()
                self.mod = torch.jit.trace(transformer,
                                           (sample_q, sample_kv))
            @torch.jit.script_method
            def forward(self, q, k):
                return self.mod(q, k)
        d_model = 8
        nhead = 2
        num_encoder_layers = 2
        num_decoder_layers = 2
        dim_feedforward = 16
        bsz = 2
        seq_length = 5
        tgt_length = 3
        with torch.no_grad():
            src = torch.randn(seq_length, bsz, d_model)
            tgt = torch.randn(tgt_length, bsz, d_model)
            transformer = nn.Transformer(d_model, nhead, num_encoder_layers,
                                         num_decoder_layers, dim_feedforward, dropout=0.0)
            model = MyModule(transformer, tgt, src)
            src = torch.randn(seq_length, bsz, d_model)
            tgt = torch.randn(tgt_length, bsz, d_model)
            jit_out = model(tgt, src)
            py_out = transformer(tgt, src)
            # print(jit_out/py_out-1)
            # print(torch.allclose(jit_out, py_out, atol=5e-4, rtol=1e-4))
            self.assertEqual(jit_out, py_out, atol=5e-4, rtol=1e-4)
    def test_list_python_op(self):
        """A Python (non-scripted) function taking List[Tensor] is callable from script."""
        def python_list_op(lst):
            # type: (List[Tensor]) -> Tensor
            return lst[0]
        def fn(lst):
            # type: (List[Tensor]) -> Tensor
            return python_list_op(lst)
        self.checkScript(fn, ([torch.ones(2) + 2, torch.ones(2)],))
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    def test_weak_cuda(self):
        """A submodule moved to CUDA (in __init__ and via module.cuda()) runs on CUDA
        when called through a script method."""
        class M(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.lstm = torch.nn.LSTM(5, 5)
                self.lstm.cuda()
            @torch.jit.script_method
            def forward(self, x):
                return self.lstm(x)
        m = M()
        m.cuda()
        out = m(torch.ones(5, 5, 5).cuda())
        self.assertTrue(out[0].is_cuda)
def test_ignore_decorator(self):
with warnings.catch_warnings(record=True) as warns:
class M(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
tensor = torch.zeros(1, requires_grad=False)
self.some_state = nn.Buffer(torch.nn.Parameter(tensor))
@torch.jit.script_method
def forward(self, x):
self.ignored_code(x)
return x
@torch.jit.ignore(drop_on_export=True)
def ignored_code(self, x):
self.some_state = torch.tensor((100,))
FileCheck().check("TorchScript will now drop the function").run(str(warns[0]))
# Assert ignored code is run
m = M()
m2 = self.getExportImportCopy(m)
pp = str(m2.forward.code)
self.assertNotIn('ignored_code', pp)
with self.assertRaisesRegex(torch.jit.Error, "annotated to be ignored and cannot be run"):
m2.forward(torch.ones(1))
    def test_ignored_as_value(self):
        """@torch.jit.unused methods script and save, but raise when the loaded copy
        actually takes the ignored path."""
        class Model(nn.Module):
            @torch.jit.unused
            def tuple_ignored(self, x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return x, x
            @torch.jit.unused
            def single_val_ignored(self, x, y):
                # type: (Tensor, Tensor) -> Tensor
                return x
            def forward(self, x, use_ignore_path):
                # type: (Tensor, bool) -> Tuple[Tensor, Tensor]
                # statically-false condition: tuple_ignored is referenced but never run
                if 1 == 2:
                    return self.tuple_ignored(x)
                if use_ignore_path:
                    return self.single_val_ignored(x, x), self.single_val_ignored(x, x)
                return x, x
        original = Model()
        scripted = torch.jit.script(original)
        self.assertEqual(scripted(torch.tensor(.5), False), (torch.tensor(.5), torch.tensor(.5)))
        buffer = io.BytesIO()
        torch.jit.save(scripted, buffer)
        buffer.seek(0)
        loaded = torch.jit.load(buffer)
        with self.assertRaisesRegex(torch.jit.Error, "annotated to be ignored and cannot be run"):
            loaded(torch.tensor(.5), True)
    def test_module_error(self):
        """Passing an nn.Module *class* (not instance) to torch.jit.script raises."""
        class MyModule(torch.nn.Module):
            def forward(self, foo):
                return foo
        with self.assertRaisesRegex(RuntimeError, "cannot be compiled since it inherits from nn.Module"):
            torch.jit.script(MyModule)
    def test_view_write(self):
        """Aliasing through a list element: writing the alias must be ordered correctly
        relative to reads of the original tensor."""
        def fn(x, y):
            l = []
            l.append(x)
            x_view = l[0]
            a = x + x
            x_view.add_(y)  # mutates x through the list alias
            b = x + x
            return a == b
        self.checkScript(fn, (torch.rand(2, 3), torch.rand(2, 3)))
    def test_module_attrs(self):
        """Dict-typed torch.jit.Attribute plus a Parameter are usable from a script method."""
        class M(torch.jit.ScriptModule):
            def __init__(self, table):
                super().__init__()
                self.table = torch.jit.Attribute(table, Dict[str, torch.Tensor])
                self.x = torch.nn.Parameter(torch.tensor([100.0]))
            @torch.jit.script_method
            def forward(self, key):
                # type: (str) -> Tensor
                return self.table[key] + self.x
        with torch._jit_internal._disable_emit_hooks():
            # TODO: re-enable module hook when Python printing of attributes is
            # supported
            m = M({char : torch.ones(1) + ord(char) - ord("a") for char in "abcdefg"})
            self.assertEqual(m("c"), torch.tensor([103.]))
    def test_module_none_attrs(self):
        """A None-valued module attribute is fetched via prim::GetAttr, which the
        peephole pass then constant-folds away."""
        class MyMod(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.optional_value = None
            @torch.jit.script_method
            def forward(self):
                return self.optional_value
        graph = MyMod().forward.graph
        FileCheck().check("prim::GetAttr").run(graph)
        self.run_pass('peephole', graph)
        FileCheck().check_not("prim::GetAttr").run(graph)
    def test_tensor_import_export(self):
        """Tensor constants produced by constant propagation survive an export/import
        round trip."""
        @torch.jit.script
        def foo(x):
            a = torch.tensor(1)
            b = torch.tensor([1, 2])
            c = [a, b]
            return c
        self.run_pass('constant_propagation', foo.graph)
        m = self.createFunctionFromGraph(foo.graph)
        self.getExportImportCopy(m)
    def get_pickle_values(self):
        """Shared fixture for attribute (de)serialization tests: (name, value, type) triples
        covering the attribute types the pickler supports."""
        return (('dict', {"I": "am", "a test": "test"}, Dict[str, str]),
                ('float', 2.3, float),
                ('int', 99, int),
                ('bool', False, bool),
                ('tuple', (1, 2, 3, 4), Tuple[int, int, int, int]),
                ('list', [(1, 2), (3, 4)], List[Tuple[int, int]]),
                ('tensor', torch.randn(2, 2), torch.Tensor),
                ('int_list', [1, 2, 3, 4], List[int]),
                ('tensor_list', [torch.ones(2, 2) + i for i in range(4)], List[torch.Tensor]),
                ('bool_list', [True, True, False, True], List[bool]),
                ('float_list', [1., 2., 3., 4.], List[float]),
                ('str_list', ['hello', 'bye'], List[str]),
                ('none', None, Optional[int]),
                ('a_device', torch.device('cpu'), torch.device),
                ('another_device', torch.device('cuda:1'), torch.device))
    def test_attribute_serialization(self):
        """All supported torch.jit.Attribute types round-trip through export/import."""
        tester = self
        class M(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                for name, value, the_type in tester.get_pickle_values():
                    setattr(self, name, torch.jit.Attribute(value, the_type))
            @torch.jit.script_method
            def forward(self):
                return (self.dict, self.float, self.int, self.bool, self.tuple,
                        self.list, self.int_list, self.tensor_list, self.bool_list,
                        self.float_list, self.str_list, self.none)
        m = M()
        imported_m = self.getExportImportCopy(m)
        self.assertEqual(m(), imported_m())
def test_string_len(self):
def fn(x):
# type: (str) -> int
return len(x)
self.checkScript(fn, ("",))
self.checkScript(fn, ("h",))
self.checkScript(fn, ("hello",))
def test_multiline_optional_future_refinement(self):
@torch.jit.script
def fun() -> int:
future: Optional[
torch.jit.Future[Tuple[torch.Tensor]]
] = None
return 1
self.assertEqual(fun(), 1)
@unittest.skipIf(IS_SANDCASTLE, "NYI: TemporaryFileName support for Sandcastle")
def test_attribute_unpickling(self):
tensor = torch.randn(2, 2)
tester = self
class M(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
for name, value, the_type in tester.get_pickle_values():
setattr(self, "_" + name, torch.jit.Attribute(value, the_type))
@torch.jit.script_method
def forward(self):
return (self._dict, self._float, self._int, self._bool, self._tuple,
self._list, self._int_list, self._tensor_list, self._bool_list,
self._float_list, self._str_list, self._none)
with TemporaryFileName() as fname:
M().save(fname)
loaded = torch.jit.load(fname)
def is_tensor_value(item):
if isinstance(item, torch.Tensor):
return True
if isinstance(item, list):
return is_tensor_value(item[0])
return False
for name, value, _the_type in self.get_pickle_values():
if is_tensor_value(value):
continue
self.assertEqual(value, getattr(loaded, "_" + name))
def test_submodule_attribute_serialization(self):
class S(torch.jit.ScriptModule):
def __init__(self, list_data):
super().__init__()
self.table = torch.jit.Attribute({"I": "am", "a test": "test"}, Dict[str, str])
self.list = torch.jit.Attribute(list_data, List[Tuple[int, int]])
@torch.jit.script_method
def forward(self):
return (self.table, self.list)
class M(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.table = torch.jit.Attribute({"this": "is", "a different": "dict"}, Dict[str, str])
self.tensor = torch.jit.Attribute(torch.randn(2, 2), torch.Tensor)
self.s1 = S([(1, 2)])
self.s2 = S([(4, 5)])
@torch.jit.script_method
def forward(self):
return (self.table, self.tensor, self.s1.table, self.s2.list, self.s1.list)
m = M()
imported_m = self.getExportImportCopy(m)
self.assertEqual(m(), imported_m())
def test_serialization_big_ints(self):
class M(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.int32_max = torch.jit.Attribute(2**31 - 1, int)
self.int32_min = torch.jit.Attribute(-2**31, int)
self.uint32_max = torch.jit.Attribute(2**32, int)
self.int64_max = torch.jit.Attribute(2**63 - 1, int)
self.int64_min = torch.jit.Attribute(-2**63, int)
self.tensor = torch.nn.Parameter(torch.ones(2, 2))
@torch.jit.script_method
def forward(self, x):
# type: (int) -> (int)
return x + (self.int32_max + self.int32_min) + (self.int64_max + self.int64_min)
m = M()
imported = self.getExportImportCopy(m)
self.assertEqual(m(10), imported(10))
self.assertEqual(m.int32_max, imported.int32_max)
self.assertEqual(m.int32_min, imported.int32_min)
self.assertEqual(m.uint32_max, imported.uint32_max)
self.assertEqual(m.int64_max, imported.int64_max)
self.assertEqual(m.int64_min, imported.int64_min)
def test_script_scope(self):
scripted = torch.jit.script(torch.nn.functional.triplet_margin_loss)
def test_serialization_sharing(self):
class M(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.list = torch.jit.Attribute([], List[str])
@torch.jit.script_method
def forward(self, key):
# type: (str) -> List[str]
self.list.append(key)
self.list.append(key)
self.list.append(key)
return self.list
# the text of the string should only appear once in the pickling
m = M()
s1 = "a long string"
s2 = "a different, even longer string"
self.assertEqual(m(s1), [s1] * 3)
self.assertEqual(m(s2), [s1] * 3 + [s2] * 3)
with TemporaryFileName() as fname:
m.save(fname)
archive_name = os.path.basename(os.path.normpath(fname))
archive = zipfile.ZipFile(fname, 'r')
pickled_data = archive.read(f"{archive_name}/data.pkl")
out = io.StringIO()
pickletools.dis(pickled_data, out=out)
disassembled = out.getvalue()
archive.close()
FileCheck().check_count(s1, 1, exactly=True) \
.check_count("BINGET", 2, exactly=True) \
.check_count(s2, 1, exactly=True) \
.check_count("BINGET", 2, exactly=True).run(disassembled)
def test_sys_stdout_override(self):
@torch.jit.script
def foo():
print('foo')
class Redirect:
def __init__(self) -> None:
self.s = ''
def write(self, s):
self.s += s
old_stdout = sys.stdout
redirect = Redirect()
try:
sys.stdout = redirect
foo()
finally:
sys.stdout = old_stdout
FileCheck().check('foo').run(redirect.s)
def test_dtype_attr(self):
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.dtype = torch.zeros([]).dtype
def forward(self):
return torch.zeros(3, 4, dtype=self.dtype)
f = Foo()
torch.jit.script(f)
def test_named_buffers_are_iterable(self):
class MyMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mod = (torch.nn.ReLU())
self.mod2 = (torch.nn.ReLU())
self.mod3 = torch.nn.Sequential(torch.nn.Sequential(torch.nn.ReLU()))
self.x = nn.Buffer(torch.zeros(3))
self.y = nn.Buffer(torch.zeros(3))
self.z = torch.zeros(3)
def bleh(self):
return self.z + 4
@torch.jit.export
def method(self):
names = [""]
vals = []
for name, buffer in self.named_buffers():
names.append(name)
vals.append(buffer + 2)
return names, vals
def forward(self, x):
return x
model = MyMod()
x = torch.jit.script(model)
z = self.getExportImportCopy(x)
self.assertEqual(z.method(), x.method())
self.assertEqual(z.method(), model.method())
self.assertEqual(x.method(), model.method())
names = x.method()
for name in names:
self.assertNotEqual('z', name)
def test_static_if_prop(self):
class MaybeHasAttr(torch.nn.Module):
def __init__(self, add_attr):
super().__init__()
if add_attr:
self.maybe_attr = 1
def forward(self):
if hasattr(self, "maybe_attr") and True:
return self.maybe_attr
else:
return 0
class MaybeHasAttr2(torch.nn.Module):
def __init__(self, add_attr):
super().__init__()
if add_attr:
self.maybe_attr = 1
def forward(self):
if not hasattr(self, "maybe_attr") or False:
return 0
else:
return self.maybe_attr
torch.jit.script(MaybeHasAttr(True))
torch.jit.script(MaybeHasAttr(False))
torch.jit.script(MaybeHasAttr2(True))
torch.jit.script(MaybeHasAttr2(False))
class MyMod(torch.nn.Module):
def forward(self):
if hasattr(self, "foo"):
return 1
else:
return 0
@torch.jit.export
def fee(self):
return 1
self.checkModule(MyMod(), ())
class HasAttrMod(torch.nn.Module):
__constants__ = ["fee"]
def __init__(self) -> None:
super().__init__()
self.fee = 3
def forward(self):
a = hasattr(self, "fee")
b = hasattr(self, "foo")
c = hasattr(self, "hi")
d = hasattr(self, "nonexistent")
return (a, b, c, d)
def foo(self):
return 1
@torch.jit._overload_method
def hi(self, x: Tensor): ... # noqa: E704
def hi(self, x): # noqa: F811
return 2
self.checkModule(HasAttrMod(), ())
@torch.jit.script
class FooTest:
def __init__(self) -> None:
self.x = 1
def foo(self, y):
return self.x + y
def foo():
a = FooTest()
val1 = hasattr(a, "foo"), hasattr(a, "x"), hasattr(a, "bla")
val2 = hasattr(FooTest, "foo"), hasattr(FooTest, "a")
return val1, val2
self.assertEqual(foo(), torch.jit.script(foo)())
def _test_pickle_checkpoint(self, device):
with TemporaryFileName() as fname:
class M(torch.jit.ScriptModule):
__constants__ = ['fname']
def __init__(self, tensor):
super().__init__()
self.fname = fname
self.tensor = torch.nn.Parameter(tensor)
@torch.jit.script_method
def forward(self, x):
y = self.tensor + x
torch.save(y, self.fname)
return y
param = torch.randn(2, 2).to(device)
input = torch.randn(2, 2).to(device)
m = M(param)
m(input)
with open(fname, "rb") as handle:
loaded_tensor = torch.load(fname)
self.assertEqual(loaded_tensor, input + param)
def _test_pickle_checkpoint_views(self, device):
with TemporaryFileName() as fname:
class M(torch.jit.ScriptModule):
__constants__ = ['fname']
def __init__(self, tensor):
super().__init__()
self.fname = fname
self.tensor = torch.nn.Parameter(tensor)
@torch.jit.script_method
def forward(self, x):
y = self.tensor + x
y_view = y.view(4)
torch.save((y, y_view, y), self.fname)
return y
param = torch.randn(2, 2).to(device)
input = torch.randn(2, 2).to(device)
m = M(param)
m(input)
with open(fname, "rb") as handle:
loaded_y, loaded_y_view, loaded_y_2 = torch.load(fname)
self.assertEqual(loaded_y, input + param)
with torch.no_grad():
loaded_y_view[1] += 20
# assert that loaded_y changed as well
self.assertEqual(loaded_y.view(4), loaded_y_view)
self.assertEqual(loaded_y_2.view(4), loaded_y_view)
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_pickle_checkpoint_cuda(self):
self._test_pickle_checkpoint('cuda')
self._test_pickle_checkpoint_views('cuda')
def test_pickle_checkpoint(self):
self._test_pickle_checkpoint('cpu')
self._test_pickle_checkpoint_views('cpu')
def test_pickle_checkpoint_tup(self):
@torch.jit.script
def foo(fname):
# type: (str) -> None
torch.save((3, 4), fname)
with TemporaryFileName() as name:
foo(name)
self.assertEqual(torch.load(name), (3, 4))
def test_string_list(self):
def fn(string):
# type: (str) -> List[str]
return list(string)
self.checkScript(fn, ("abcdefgh",))
def test_unicode_comments(self):
@torch.jit.script
def test(self, a):
# 🤷🤷🤷🤷
return torch.nn.functional.relu(a)
def test_get_set_state_with_tensors(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.tensor = torch.randn(2, 2)
@torch.jit.export
def __getstate__(self):
return (self.tensor, self.training)
@torch.jit.export
def __setstate__(self, state):
self.tensor = state[0]
self.training = state[1]
def forward(self, x):
return x + self.tensor
with TemporaryFileName() as fname:
m = torch.jit.script(M())
m.save(fname)
loaded = torch.jit.load(fname)
self.assertEqual(loaded.tensor, m.tensor)
def test_in_for_and_comp_expr(self):
def fn(d):
# type: (Dict[str, int]) -> List[int]
out = [1]
for i in range(d.get("hi", 6)):
out.append(i) # noqa: PERF402
return out
self.checkScript(fn, ({'hi': 2, 'bye': 3},))
self.checkScript(fn, ({'bye': 3},))
def test_for_else(self):
def fn():
c = 0
for _ in range(4):
c += 10
else:
print("In else block of for...else")
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "else branches of for loops aren't supported"):
torch.jit.script(fn)
def test_split(self):
def split_two(tensor):
a, b, c = torch.split(tensor, 2, dim=1)
return a, b, c
x = torch.randn(3, 6)
y = torch.randn(3, 6)
self.checkScript(split_two, [(x + y)])
def test_conv_error(self):
@torch.jit.script
def fn(x, y):
return F.conv2d(x, y)
try:
fn(torch.ones(2, 2), torch.ones(4, 4))
except RuntimeError as e:
self.assertFalse('frame' in str(e))
def test_python_op_name(self):
import random
with self.assertRaisesRegex(RuntimeError, "randint"):
@torch.jit.script
def fn():
return random.randint()
def test_dir(self):
class M(torch.jit.ScriptModule):
def forward(self, t):
return t
self.assertTrue('forward' in dir(M()))
def test_kwarg_expansion_error(self):
@torch.jit.ignore
def something_else(h, i):
pass
def fn(x):
something_else(**x)
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "keyword-arg expansion is not supported"):
torch.jit.script(fn)
def test_kwargs_error_msg(self):
def other(**kwargs):
print(kwargs)
def fn():
return other()
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, 'variable number'):
torch.jit.script(fn)
def another_other(*args):
print(args)
def another_fn():
return another_other()
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, 'variable number'):
torch.jit.script(another_fn)
def test_inferred_error_msg(self):
"""
Test that when we get a type mismatch on a function where we inferred
the type to be tensor, a good error message is given.
"""
@torch.jit.script
def foo(a):
return a
with self.assertRaisesRegex(RuntimeError, (r"Expected a value of type \'Tensor \(inferred\)\'"
r"[\S\s]*Inferred \'a\' to be of type \'Tensor\'")):
foo("1")
def test_type_comments_in_body(self):
@torch.jit.script
def foo(a, # type: int
b, # type: int
):
# type: (...) -> int
# type: int
return a + b
class M(torch.nn.Module):
def __init__(self,
a, # type: int
b # type: int
):
# type: (...) -> None
super().__init__()
self.a = a # type: int
self.b = b # type: int
torch.jit.script(M(2, 3))
def test_input_keyword_in_schema(self):
def f(x):
return torch.ceil(input=x)
inp = torch.randn(10)
self.checkScript(f, (inp, ))
def test_module_method_reassignment(self):
class Foo(torch.nn.Module):
def _forward(self, x):
return x
forward = _forward
sm = torch.jit.script(Foo())
input = torch.ones(2, 2)
self.assertEqual(input, sm(input))
# Tests the case where a torch.Tensor subclass (like Parameter) is used as
# input.
def test_script_module_tensor_subclass_argument(self):
@torch.jit.script
def parameter_script(x: torch.nn.Parameter):
return x
input = torch.ones(2, 2)
self.assertEqual(input, parameter_script(input))
def test_save_load_attr_error(self):
class Inner(nn.Module):
def forward(self, x):
return x
class Wrapper(nn.Module):
def __init__(self, inner):
super().__init__()
self.inner = inner
def forward(self, x):
# this attribute doesn't exist on `Inner`
return self.inner.b(x)
inner_module = torch.jit.script(Inner())
inner_module = self.getExportImportCopy(inner_module)
wrapped = Wrapper(inner_module)
# This should properly complain that `self.inner` doesn't have the attribute `b`
with self.assertRaisesRegex(RuntimeError, 'has no attribute'):
torch.jit.script(wrapped)
def test_rescripting_loaded_modules(self):
class InnerSubmod(nn.Module):
__constants__ = ['my_constant']
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Buffer(torch.ones(1))
self.register_parameter("bar", torch.nn.Parameter(torch.ones(1)))
self.baz = torch.ones(1)
self.my_constant = 1
def forward(self, x):
return x + x
class Inner(nn.Module):
def __init__(self) -> None:
super().__init__()
self.submod = InnerSubmod()
def forward(self, x):
return self.submod(x)
class Wrapper(nn.Module):
def __init__(self, inner):
super().__init__()
self.inner = inner
def forward(self, x):
# access inner elements
ret = self.inner.submod(x) + self.inner.submod.foo + self.inner.submod.bar + self.inner.submod.baz
ret = ret + self.inner.submod.my_constant
return ret
inner_module = torch.jit.script(Inner())
wrapped = Wrapper(inner_module)
self.checkModule(wrapped, torch.ones(1))
inner_module_loaded = self.getExportImportCopy(inner_module)
wrapped_loaded = Wrapper(inner_module_loaded)
self.assertEqual(wrapped(torch.ones(1)), wrapped_loaded(torch.ones(1)))
def test_interpret_graph(self):
def fn(x):
return x.unfold(0, 1, 1)
graph_str = """
graph(%a : Tensor, %b : Tensor):
%c : Tensor = aten::mul(%a, %b)
return (%c)
"""
graph = parse_ir(graph_str)
a = torch.rand(10)
b = torch.rand(10)
test = torch._C._jit_interpret_graph(graph, (a, b))
ref = a * b
self.assertEqual(test, ref)
def test_signed_float_zero(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.div(x, -0.)
inp = torch.ones(1)
self.checkModule(MyModule(), inp)
def test_index_with_tuple(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x[(1,)]
self.checkModule(MyModule(), (torch.ones(2, 3),))
def test_context_manager(self):
class MyModule(torch.nn.Module):
def forward(self, x, y):
p = x + y
q = p + 2.0
return q
x = torch.randn(3, 2, dtype=torch.float)
y = torch.randn(3, 2, dtype=torch.float)
for fuser_name in ['fuser0', 'fuser1', 'none']:
with torch.jit.fuser(fuser_name):
self.checkModule(MyModule(), (x, y))
def test_zero_dimension_tensor_trace(self):
def f(x):
return x[x > 0]
jf = torch.jit.trace(f, torch.tensor(2., device="cpu"))
# known to be failing in tracer
EXCLUDE_TRACED = {
# The following fail due to #12024.
# A prim::ListConstruct is involved and the indices get traced as TensorType,
# which always require_grad. This causes a crash in autodiff.
'test___getitem___adv_index',
'test___getitem___adv_index_beg',
'test___getitem___adv_index_comb',
'test___getitem___adv_index_dup',
'test___getitem___adv_index_sub',
'test___getitem___adv_index_sub_2',
'test___getitem___adv_index_sub_3',
'test___getitem___adv_index_var',
# jit doesn't support sparse tensors.
'test_to_sparse',
'test_to_sparse_dim',
}
EXCLUDE_TYPE_CHECK = {
# slogdet tests use itemgetter to select its only differentiable output,
# but this happens outside of the graph we handle, so there are fewer
# reference outputs than graph outputs.
'test_slogdet_1x1_neg_det',
'test_slogdet_1x1_pos_det',
'test_slogdet_distinct_singular_values',
'test_slogdet_neg_det',
'test_slogdet_pos_det',
'test_slogdet_symmetric',
'test_slogdet_symmetric_pd',
'test_slogdet_batched_1x1_neg_det',
'test_slogdet_batched_pos_det',
'test_slogdet_batched_symmetric',
'test_slogdet_batched_symmetric_pd',
'test_slogdet_batched_distinct_singular_values'
}
# chunk returns a list in scripting and we don't unpack the list,
# Thus it won't be replaced by ConstantChunk and run AD.
# It's explicitly checked in test_chunk_constant_script_ad
# Similarly for split, it's replaced by split_with_sizes in tracing,
# but we don't have AD formula for aten::split(Tensor, int[], int),
# an op registered in JIT so AD is not triggered in scripting.
EXCLUDE_SCRIPT_AD_CHECK = {
'test_chunk',
'test_chunk_dim',
'test_chunk_dim_neg0',
'test_split_size_list',
'test_split_size_list_dim',
'test_split_size_list_dim_neg0',
'test_tensor_indices_sections',
'test_tensor_indices_sections_dim',
'test_tensor_indices_sections_dim_neg0',
'test_tensor_split_sections',
'test_tensor_split_sections_dim',
'test_tensor_split_sections_dim_neg0'
}
EXCLUDE_PYTHON_PRINT = {
# no support for BroadcastingList in python printer
'test_nn_max_unpool1d',
'test_nn_max_unpool2d',
'test_nn_max_unpool3d',
'test_nn_max_pool1d',
'test_nn_max_pool2d',
'test_nn_max_pool3d',
'test_nn_max_pool1d_with_indices',
}
EXCLUDE_ALIAS = {
# aliases, which may appear in method_tests but are tested elsewhere
'true_divide',
# Disable tests for lu from common_methods_invocations.py
# TODO(@nikitaved) Enable jit tests once autograd.Function does support scripting
'lu'
}
| TestScript |
python | python__mypy | mypyc/test-data/fixtures/ir.py | {
"start": 493,
"end": 567
} | class ____(Protocol[T_co]):
def __abs__(self) -> T_co: pass
| __SupportsAbs |
python | pytorch__pytorch | test/jit/fixtures_srcs/fixtures_src.py | {
"start": 1464,
"end": 1607
} | class ____(torch.nn.Module):
def forward(self, x):
out = torch.zeros_like(x)
return out.random_(0, 10)
| TestVersionedRandomV10 |
python | ray-project__ray | python/ray/tune/examples/logging_example.py | {
"start": 118,
"end": 1877
} | class ____(LoggerCallback):
def on_trial_result(self, iteration, trials, trial, result, **info):
print(f"TestLogger for trial {trial}: {result}")
def trial_str_creator(trial):
return "{}_{}_123".format(trial.trainable_name, trial.trial_id)
def evaluation_fn(step, width, height):
time.sleep(0.1)
return (0.1 + width * step / 100) ** (-1) + height * 0.1
def easy_objective(config):
# Hyperparameters
width, height = config["width"], config["height"]
for step in range(config["steps"]):
# Iterative training function - can be any arbitrary training procedure
intermediate_score = evaluation_fn(step, width, height)
# Feed the score back back to Tune.
tune.report({"iterations": step, "mean_loss": intermediate_score})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
args, _ = parser.parse_known_args()
tuner = tune.Tuner(
easy_objective,
run_config=tune.RunConfig(
name="hyperband_test",
callbacks=[TestLoggerCallback()],
stop={"training_iteration": 1 if args.smoke_test else 100},
),
tune_config=tune.TuneConfig(
metric="mean_loss",
mode="min",
num_samples=5,
trial_name_creator=trial_str_creator,
trial_dirname_creator=trial_str_creator,
),
param_space={
"steps": 100,
"width": tune.randint(10, 100),
"height": tune.loguniform(10, 100),
},
)
results = tuner.fit()
print("Best hyperparameters: ", results.get_best_result().config)
| TestLoggerCallback |
python | getsentry__sentry | tests/sentry/pipeline/test_pipeline.py | {
"start": 1743,
"end": 4804
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
with assume_test_silo_mode(SiloMode.REGION):
self.org = serialize_rpc_organization(self.create_organization())
self.request = HttpRequest()
self.request.session = SessionBase()
self.request.user = self.user
@patch("sentry.pipeline.base.bind_organization_context")
def test_simple_pipeline(self, mock_bind_org_context: MagicMock) -> None:
pipeline = DummyPipeline(self.request, "dummy", self.org, config={"some_config": True})
pipeline.initialize()
assert pipeline.is_valid()
assert "some_config" in pipeline.provider.config
mock_bind_org_context.assert_called_with(self.org)
# Pipeline has two steps, ensure both steps compete. Usually the
# dispatch itself would be the one calling the current_step and
# next_step methods after it determines if it can move forward a step.
pipeline.current_step()
assert pipeline.dispatch_count == 1
assert pipeline.fetch_state("some_state") == "value"
pipeline.next_step()
assert pipeline.dispatch_count == 2
pipeline.next_step()
assert pipeline.dispatch_count == 2
assert pipeline.finished
pipeline.clear_session()
assert not pipeline.state.is_valid()
def test_invalidated_pipeline(self) -> None:
pipeline = DummyPipeline(self.request, "dummy", self.org)
pipeline.initialize()
assert pipeline.is_valid()
# Mutate the provider, Remove an item from the pipeline, thus
# invalidating the pipeline.
with patch.object(DummyProvider, "pipeline_views", [PipelineStep()]):
new_pipeline = DummyPipeline.get_for_request(self.request)
assert new_pipeline is not None
assert not new_pipeline.is_valid()
@patch("sentry.pipeline.base.bind_organization_context")
def test_pipeline_intercept_fails(self, mock_bind_org_context: MagicMock) -> None:
pipeline = DummyPipeline(self.request, "dummy", self.org, config={"some_config": True})
pipeline.initialize()
assert pipeline.is_valid()
assert "some_config" in pipeline.provider.config
mock_bind_org_context.assert_called_with(self.org)
pipeline.current_step()
assert pipeline.dispatch_count == 1
# Pipeline advancer uses pipeline_cls.get_for_request() to fetch pipeline from new incoming request
request = HttpRequest()
request.session = self.request.session # duplicate session
request.user = self.create_user()
intercepted_pipeline = DummyPipeline.get_for_request(request)
assert intercepted_pipeline is not None
# The pipeline errors because the user is different from the one that initialized it
resp = intercepted_pipeline.next_step()
assert isinstance(resp, HttpResponse) # TODO(cathy): fix typing on
assert ERR_MISMATCHED_USER.encode() in resp.content
| PipelineTestCase |
python | getsentry__sentry | src/sentry/types/request.py | {
"start": 299,
"end": 420
} | class ____(Request):
"""typing-only: for use in TypeIs to narrow to non-AnonymousUser"""
user: User
| _RequestWithUser |
python | getsentry__sentry | src/sentry/deletions/defaults/service_hook.py | {
"start": 116,
"end": 425
} | class ____(ModelDeletionTask[ServiceHook]):
# This subclass just represents an intentional decision to not cascade service hook deletions, and to
# mark status using ObjectStatus on deletion. The behavior is identical to the base class
# so that intentions are clear.
pass
| ServiceHookDeletionTask |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 8846,
"end": 10763
} | class ____(TabHeader):
"""
Header for RDB tables.
"""
col_type_map = {"n": core.NumType, "s": core.StrType}
def get_type_map_key(self, col):
return col.raw_type[-1]
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
This is a specialized get_cols for the RDB type:
Line 0: RDB col names
Line 1: RDB col definitions
Line 2+: RDB data rows
Parameters
----------
lines : list
List of table lines
Returns
-------
None
"""
header_lines = self.process_lines(lines) # this is a generator
header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))]
if len(header_vals_list) != 2:
raise ValueError("RDB header requires 2 lines")
self.names, raw_types = header_vals_list
if len(self.names) != len(raw_types):
raise core.InconsistentTableError(
"RDB header mismatch between number of column names and column types."
)
if any(not re.match(r"\d*(N|S)$", x, re.IGNORECASE) for x in raw_types):
raise core.InconsistentTableError(
f"RDB types definitions do not all match [num](N|S): {raw_types}"
)
self._set_cols_from_names()
for col, raw_type in zip(self.cols, raw_types):
col.raw_type = raw_type
col.type = self.get_col_type(col)
def write(self, lines):
lines.append(self.splitter.join(self.colnames))
rdb_types = []
for col in self.cols:
# Check if dtype.kind is string or unicode. See help(np.core.numerictypes)
rdb_type = "S" if col.info.dtype.kind in ("S", "U") else "N"
rdb_types.append(rdb_type)
lines.append(self.splitter.join(rdb_types))
| RdbHeader |
python | davidhalter__parso | parso/parser.py | {
"start": 2657,
"end": 3225
} | class ____:
def __init__(self, dfa):
self.dfa = dfa
self.nodes = []
@property
def nonterminal(self):
return self.dfa.from_rule
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.dfa, self.nodes)
def _token_to_transition(grammar, type_, value):
# Map from token to label
if type_.value.contains_syntax:
# Check for reserved words (keywords)
try:
return grammar.reserved_syntax_strings[value]
except KeyError:
pass
return type_
| StackNode |
python | google__jax | jax/_src/array.py | {
"start": 2496,
"end": 6126
} | class ____:
"""A single data shard of an Array.
Attributes:
device : Which device this shard resides on.
index : The index into the global array of this shard.
replica_id : Integer id indicating which replica of the global array this
shard is part of. Always 0 for fully sharded data
(i.e. when there’s only 1 replica).
data : The data of this shard. None if ``device`` is non-local.
"""
def __init__(self, device: Device, sharding: Sharding, global_shape: Shape,
data: None | ArrayImpl | PRNGKeyArray = None):
self._device = device
self._sharding = sharding
self._global_shape = global_shape
self._data = data
def __repr__(self):
try:
return (f'Shard(device={self.device!r}, index={self.index}, '
f'replica_id={self.replica_id}, data={self.data})')
except ValueError:
return f'Shard(device={self.device!r}, data={self.data})'
@functools.cached_property
def index(self) -> Index:
try:
device_indices_map_fn = self._sharding.devices_indices_map
except AttributeError:
raise ValueError('Cannot calculate indices from sharding: '
f'{self._sharding}. Please create a device to index '
'mapping for your sharding.') from None
index = device_indices_map_fn(self._global_shape)[self.device]
assert index is not None
return index
@functools.cached_property
def replica_id(self) -> int:
return device_replica_id_map(self._sharding, self._global_shape)[self.device]
@property
def device(self):
return self._device
@property
def data(self):
return self._data
def _reconstruct_array(fun, args, arr_state, aval_state):
"""Method to reconstruct a device array from a serialized state."""
np_value = fun(*args)
np_value.__setstate__(arr_state)
jnp_value = api.device_put(np_value)
jnp_value.aval = jnp_value.aval.update(**aval_state)
return jnp_value
@cache(max_size=4096, trace_context_in_key=False)
def _cached_index_calc(s, shape):
map_ = s.addressable_devices_indices_map(shape)
seen_h_indices = set()
l = []
for array_index, index in enumerate(map_.values()):
h_index = hashed_index(index)
if h_index not in seen_h_indices:
seen_h_indices.add(h_index)
l.append((array_index, index))
return l
@cache(max_size=4096, trace_context_in_key=False)
def _process_has_full_value_in_mcjax(s, shape):
# Return False for single host as a fast path.
if xla_bridge.process_count() == 1:
return False
num_unique_indices = len(
{hashed_index(v) for v in s.devices_indices_map(shape).values()})
num_addressable_unique_indices = len(
{hashed_index(v) for v in s.addressable_devices_indices_map(shape).values()})
return num_unique_indices == num_addressable_unique_indices
def _validate_shape_and_dtype_for_per_device_arrays(
arrays: Sequence[ArrayImpl | np.ndarray | literals.TypedNdArray],
sharding: Sharding,
aval: core.ShapedArray,
expected_shape: Shape,
):
"""Validates that per-device arrays are valid and consistent."""
expected_dtype = aval.dtype
for db in arrays:
if db.dtype != expected_dtype:
raise ValueError(
"Input buffers to `Array` must have matching dtypes. "
f"Got {db.dtype}, expected {expected_dtype} for buffer: {db}"
)
if db.shape != expected_shape:
raise ValueError(
f"Expected shard shape {expected_shape} doesn't match the single "
f"device array shape {db.shape}. Shape of Array is "
f"{aval.str_short()} with sharding {sharding}"
)
| Shard |
python | ray-project__ray | python/ray/tune/tests/test_api.py | {
"start": 51779,
"end": 57673
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
if ray.is_initialized():
ray.shutdown()
ray.init(num_cpus=4, num_gpus=0, include_dashboard=False)
@classmethod
def tearDownClass(cls):
ray.shutdown()
# _register_all()
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testNestedResults(self):
def create_result(i):
return {"test": {"1": {"2": {"3": i, "4": False}}}}
flattened_keys = list(flatten_dict(create_result(0)))
class _MockScheduler(FIFOScheduler):
results = []
def on_trial_result(self, tune_controller, trial, result):
self.results += [result]
return TrialScheduler.CONTINUE
def on_trial_complete(self, tune_controller, trial, result):
self.complete_result = result
def train_fn(config):
for i in range(100):
tune.report(create_result(i))
algo = _MockSuggestionAlgorithm()
scheduler = _MockScheduler()
[trial] = tune.run(
train_fn, scheduler=scheduler, search_alg=algo, stop={"test/1/2/3": 20}
).trials
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result["test"]["1"]["2"]["3"], 20)
self.assertEqual(trial.last_result["test"]["1"]["2"]["4"], False)
self.assertEqual(trial.last_result[TRAINING_ITERATION], 21)
self.assertEqual(len(scheduler.results), 20)
self.assertTrue(
all(set(result) >= set(flattened_keys) for result in scheduler.results)
)
self.assertTrue(set(scheduler.complete_result) >= set(flattened_keys))
self.assertEqual(len(algo.results), 20)
self.assertTrue(
all(set(result) >= set(flattened_keys) for result in algo.results)
)
# Test, whether non-existent stop criteria do NOT cause an error anymore (just
# a warning).
[trial] = tune.run(train_fn, stop={"1/2/3": 20}).trials
self.assertFalse("1" in trial.last_result)
[trial] = tune.run(train_fn, stop={"test": 1}).trials
self.assertTrue(
"test" in trial.last_result
and "1" in trial.last_result["test"]
and "2" in trial.last_result["test"]["1"]
and "3" in trial.last_result["test"]["1"]["2"]
)
def testIterationCounter(self):
def train_fn(config):
for i in range(100):
tune.report(dict(itr=i, timesteps_this_iter=1))
register_trainable("exp", train_fn)
config = {
"my_exp": {
"run": "exp",
"config": {
"iterations": 100,
},
"stop": {"timesteps_total": 100},
}
}
[trial] = run_experiments(config)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TRAINING_ITERATION], 100)
self.assertEqual(trial.last_result["itr"], 99)
def testErrorReturn(self):
def train_fn(config):
raise Exception("uh oh")
register_trainable("f1", train_fn)
def f():
run_experiments(
{
"foo": {
"run": "f1",
}
}
)
self.assertRaises(TuneError, f)
def testSuccess(self):
def train_fn(config):
for i in range(100):
tune.report(dict(timesteps_total=i))
register_trainable("f1", train_fn)
[trial] = run_experiments(
{
"foo": {
"run": "f1",
}
}
)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testNoRaiseFlag(self):
def train_fn(config):
raise Exception()
register_trainable("f1", train_fn)
[trial] = run_experiments(
{
"foo": {
"run": "f1",
}
},
raise_on_failed_trial=False,
)
self.assertEqual(trial.status, Trial.ERROR)
def testReportInfinity(self):
def train_fn(config):
for _ in range(100):
tune.report(dict(mean_accuracy=float("inf")))
register_trainable("f1", train_fn)
[trial] = run_experiments(
{
"foo": {
"run": "f1",
}
}
)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result["mean_accuracy"], float("inf"))
def testSearcherSchedulerStr(self):
capture = {}
class MockTuneController(TuneController):
def __init__(self, search_alg=None, scheduler=None, **kwargs):
# should be converted from strings at this case and not None
capture["search_alg"] = search_alg
capture["scheduler"] = scheduler
super().__init__(
search_alg=search_alg,
scheduler=scheduler,
**kwargs,
)
with patch("ray.tune.tune.TuneController", MockTuneController):
tune.run(
lambda config: tune.report(dict(metric=1)),
search_alg="random",
scheduler="async_hyperband",
metric="metric",
mode="max",
stop={TRAINING_ITERATION: 1},
)
self.assertIsInstance(capture["search_alg"], BasicVariantGenerator)
self.assertIsInstance(capture["scheduler"], AsyncHyperBandScheduler)
| ApiTestFast |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/partition_status_cache.py | {
"start": 2412,
"end": 22513
class AssetStatusCacheValue(
    NamedTuple(
        "_AssetPartitionsStatusCacheValue",
        [
            ("latest_storage_id", int),
            ("partitions_def_id", Optional[str]),
            ("serialized_materialized_partition_subset", Optional[str]),
            ("serialized_failed_partition_subset", Optional[str]),
            ("serialized_in_progress_partition_subset", Optional[str]),
            ("earliest_in_progress_materialization_event_id", Optional[int]),
        ],
    ),
    LoadableBy[tuple[AssetKey, PartitionsDefinition]],
):
    """Set of asset fields that reflect partition materialization status. This is used to display
    global partition status in the asset view.

    Properties:
        latest_storage_id (int): The latest evaluated storage id for the asset.
        partitions_def_id (Optional(str)): The serializable unique identifier for the partitions
            definition. When this value differs from the new partitions definition, this cache
            value needs to be recalculated. None if the asset is unpartitioned.
        serialized_materialized_partition_subset (Optional(str)): The serialized representation of the
            materialized partition subsets, up to the latest storage id. None if the asset is
            unpartitioned.
        serialized_failed_partition_subset (Optional(str)): The serialized representation of the failed
            partition subsets, up to the latest storage id. None if the asset is unpartitioned.
        serialized_in_progress_partition_subset (Optional(str)): The serialized representation of the
            in progress partition subsets, up to the latest storage id. None if the asset is unpartitioned.
        earliest_in_progress_materialization_event_id (Optional(int)): The event id of the earliest
            materialization planned event for a run that is still in progress. This is used to check
            on the status of runs that are still in progress.
    """

    def __new__(
        cls,
        latest_storage_id: int,
        partitions_def_id: Optional[str] = None,
        serialized_materialized_partition_subset: Optional[str] = None,
        serialized_failed_partition_subset: Optional[str] = None,
        serialized_in_progress_partition_subset: Optional[str] = None,
        earliest_in_progress_materialization_event_id: Optional[int] = None,
    ):
        check.int_param(latest_storage_id, "latest_storage_id")
        check.opt_str_param(partitions_def_id, "partitions_def_id")
        check.opt_str_param(
            serialized_materialized_partition_subset, "serialized_materialized_partition_subset"
        )
        check.opt_str_param(
            serialized_failed_partition_subset, "serialized_failed_partition_subset"
        )
        check.opt_str_param(
            serialized_in_progress_partition_subset, "serialized_in_progress_partition_subset"
        )
        # Validate the last field too, consistent with the checks above.
        check.opt_int_param(
            earliest_in_progress_materialization_event_id,
            "earliest_in_progress_materialization_event_id",
        )
        return super().__new__(
            cls,
            latest_storage_id,
            partitions_def_id,
            serialized_materialized_partition_subset,
            serialized_failed_partition_subset,
            serialized_in_progress_partition_subset,
            earliest_in_progress_materialization_event_id,
        )

    @staticmethod
    def from_db_string(db_string: str) -> Optional["AssetStatusCacheValue"]:
        """Deserialize a stored cache value; returns None for empty or unreadable strings."""
        if not db_string:
            return None

        try:
            cached_data = deserialize_value(db_string, AssetStatusCacheValue)
        except DeserializationError:
            # Treat corrupt/outdated payloads as a cache miss rather than erroring.
            return None

        return cached_data

    @classmethod
    def _blocking_batch_load(
        cls, keys: Iterable[tuple[AssetKey, PartitionsDefinition]], context: LoadingContext
    ) -> Iterable[Optional["AssetStatusCacheValue"]]:
        # Batched loader hook for the LoadableBy protocol; delegates to event log storage.
        return context.instance.event_log_storage.get_asset_status_cache_values(keys, context)

    def deserialize_materialized_partition_subsets(
        self, partitions_def: PartitionsDefinition
    ) -> PartitionsSubset:
        """Return the materialized subset, or an empty subset if nothing is cached."""
        if not self.serialized_materialized_partition_subset:
            return partitions_def.empty_subset()

        return partitions_def.deserialize_subset(self.serialized_materialized_partition_subset)

    def deserialize_failed_partition_subsets(
        self, partitions_def: PartitionsDefinition
    ) -> PartitionsSubset:
        """Return the failed subset, or an empty subset if nothing is cached."""
        if not self.serialized_failed_partition_subset:
            return partitions_def.empty_subset()

        return partitions_def.deserialize_subset(self.serialized_failed_partition_subset)

    def deserialize_in_progress_partition_subsets(
        self, partitions_def: PartitionsDefinition
    ) -> PartitionsSubset:
        """Return the in-progress subset, or an empty subset if nothing is cached."""
        if not self.serialized_in_progress_partition_subset:
            return partitions_def.empty_subset()

        return partitions_def.deserialize_subset(self.serialized_in_progress_partition_subset)

    def get_materialized_subset(
        self,
        asset_graph_view: AssetGraphView,
        asset_key: AssetKey,
        partitions_def: PartitionsDefinition,
    ) -> EntitySubset[AssetKey]:
        """Wrap the materialized subset in an EntitySubset for the given asset graph view."""
        value = self.deserialize_materialized_partition_subsets(partitions_def)
        return EntitySubset(
            asset_graph_view, key=asset_key, value=_ValidatedEntitySubsetValue(value)
        )

    def get_failed_subset(
        self,
        asset_graph_view: AssetGraphView,
        asset_key: AssetKey,
        partitions_def: PartitionsDefinition,
    ) -> EntitySubset[AssetKey]:
        """Wrap the failed subset in an EntitySubset for the given asset graph view."""
        value = self.deserialize_failed_partition_subsets(partitions_def)
        return EntitySubset(
            asset_graph_view, key=asset_key, value=_ValidatedEntitySubsetValue(value)
        )

    def get_in_progress_subset(
        self,
        asset_graph_view: AssetGraphView,
        asset_key: AssetKey,
        partitions_def: PartitionsDefinition,
    ) -> EntitySubset[AssetKey]:
        """Wrap the in-progress subset in an EntitySubset for the given asset graph view."""
        value = self.deserialize_in_progress_partition_subsets(partitions_def)
        return EntitySubset(
            asset_graph_view, key=asset_key, value=_ValidatedEntitySubsetValue(value)
        )
def get_materialized_multipartitions(
    instance: DagsterInstance, asset_key: AssetKey, partitions_def: MultiPartitionsDefinition
) -> Sequence[str]:
    """Return the MultiPartitionKeys for which a materialization event exists.

    Scans the asset's event tags and keeps only events that carry a partition tag
    for *every* dimension of the partitions definition.
    """
    dimension_names = partitions_def.partition_dimension_names
    materialized_keys: list[MultiPartitionKey] = []
    for event_tags in instance.get_event_tags_for_asset(asset_key):
        event_partition_keys_by_dimension = {
            get_dimension_from_partition_tag(key): value
            for key, value in event_tags.items()
            if key.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX)
        }

        # Only count events that are tagged with a key for every dimension.
        # Build the key from all dimensions rather than hard-coding the first
        # two, so this also holds if the dimension count ever changes.
        if all(
            dimension_name in event_partition_keys_by_dimension
            for dimension_name in dimension_names
        ):
            materialized_keys.append(
                MultiPartitionKey(
                    {
                        dimension_name: event_partition_keys_by_dimension[dimension_name]
                        for dimension_name in dimension_names
                    }
                )
            )
    return materialized_keys
def get_validated_partition_keys(
    partitions_def: PartitionsDefinition,
    partition_keys: set[str],
) -> set[str]:
    """Filter ``partition_keys`` down to those valid under ``partitions_def``.

    Raises (via ``check.failed``) for partitions definition types this cache
    does not know how to validate.
    """
    if isinstance(partitions_def, (DynamicPartitionsDefinition, StaticPartitionsDefinition)):
        validated_partitions = set(partitions_def.get_partition_keys()) & partition_keys
    elif isinstance(partitions_def, MultiPartitionsDefinition):
        validated_partitions = partitions_def.filter_valid_partition_keys(partition_keys)
    else:
        if not isinstance(partitions_def, TimeWindowPartitionsDefinition):
            # Was missing the f-prefix, so the message never interpolated the type.
            check.failed(f"Unexpected partitions definition type {partitions_def}")
        validated_partitions = {pk for pk in partition_keys if partitions_def.has_partition_key(pk)}
    return validated_partitions
def get_last_planned_storage_id(
    instance: DagsterInstance, asset_key: AssetKey, asset_record: Optional["AssetRecord"]
) -> int:
    """Return the storage id of the most recent planned materialization, or 0 if none.

    Prefers the denormalized column on the asset record when the storage backend
    supports it; otherwise falls back to querying the latest planned
    materialization info directly.
    """
    storage = instance.event_log_storage
    if storage.asset_records_have_last_planned_and_failed_materializations:
        if asset_record is None:
            return 0
        return asset_record.asset_entry.last_planned_materialization_storage_id or 0

    planned_info = instance.get_latest_planned_materialization_info(asset_key)
    return planned_info.storage_id if planned_info else 0
def _build_status_cache(
    instance: DagsterInstance,
    asset_key: AssetKey,
    partitions_def: Optional[PartitionsDefinition],
    stored_cache_value: Optional[AssetStatusCacheValue],
    asset_record: Optional["AssetRecord"],
) -> Optional[AssetStatusCacheValue]:
    """This method refreshes the asset status cache for a given asset key. It recalculates
    the materialized partition subset for the asset key and updates the cache value.

    Returns None when the asset has no relevant events at all. When a
    ``stored_cache_value`` is supplied, only events after its cursor are fetched
    (incremental update); otherwise the subsets are rebuilt from scratch.
    """
    last_materialization_storage_id = (
        asset_record.asset_entry.last_materialization_storage_id if asset_record else None
    )
    last_planned_materialization_storage_id = get_last_planned_storage_id(
        instance, asset_key, asset_record
    )

    # The cache cursor is the max of materialized and planned storage ids.
    latest_storage_id = max(
        last_materialization_storage_id or 0,
        last_planned_materialization_storage_id or 0,
    )
    if not latest_storage_id:
        # No materializations and no planned materializations: nothing to cache.
        return None

    if not partitions_def or not is_cacheable_partition_type(partitions_def):
        # Unpartitioned (or uncacheable partitioning): store only the cursor.
        return AssetStatusCacheValue(latest_storage_id=latest_storage_id)

    failed_subset = (
        partitions_def.deserialize_subset(stored_cache_value.serialized_failed_partition_subset)
        if stored_cache_value and stored_cache_value.serialized_failed_partition_subset
        else None
    )

    # Resume checking in-progress runs from just before the earliest still-pending
    # planned event; if there were none, resume from the stored cursor.
    cached_in_progress_cursor = (
        (
            stored_cache_value.earliest_in_progress_materialization_event_id - 1
            if stored_cache_value.earliest_in_progress_materialization_event_id
            else stored_cache_value.latest_storage_id
        )
        if stored_cache_value
        else None
    )

    if stored_cache_value:
        # fetch the incremental new materialized partitions, and update the cached materialized
        # subset
        new_partitions = set()
        if (
            last_materialization_storage_id
            and last_materialization_storage_id > stored_cache_value.latest_storage_id
        ):
            new_partitions = get_validated_partition_keys(
                partitions_def,
                instance.get_materialized_partitions(
                    asset_key, after_cursor=stored_cache_value.latest_storage_id
                ),
            )

        materialized_subset: PartitionsSubset = (
            partitions_def.deserialize_subset(
                stored_cache_value.serialized_materialized_partition_subset
            )
            if stored_cache_value.serialized_materialized_partition_subset
            else partitions_def.empty_subset()
        )
        if new_partitions:
            materialized_subset = materialized_subset.with_partition_keys(new_partitions)

        # A successful materialization clears a partition's failed status.
        if failed_subset and new_partitions:
            failed_subset = failed_subset - partitions_def.empty_subset().with_partition_keys(
                new_partitions
            )
    else:
        # No prior cache: rebuild the materialized subset from all events.
        materialized_subset = partitions_def.empty_subset().with_partition_keys(
            get_validated_partition_keys(
                partitions_def,
                instance.get_materialized_partitions(asset_key),
            )
        )

    (
        failed_subset,
        in_progress_subset,
        earliest_in_progress_materialization_event_id,
    ) = build_failed_and_in_progress_partition_subset(
        instance,
        asset_key,
        partitions_def,
        last_planned_materialization_storage_id=last_planned_materialization_storage_id,
        failed_subset=failed_subset,
        after_storage_id=cached_in_progress_cursor,
    )

    return AssetStatusCacheValue(
        latest_storage_id=latest_storage_id,
        partitions_def_id=partitions_def.get_serializable_unique_identifier(),
        serialized_materialized_partition_subset=materialized_subset.serialize(),
        serialized_failed_partition_subset=failed_subset.serialize(),
        serialized_in_progress_partition_subset=in_progress_subset.serialize(),
        earliest_in_progress_materialization_event_id=earliest_in_progress_materialization_event_id,
    )
def build_failed_and_in_progress_partition_subset(
    instance: DagsterInstance,
    asset_key: AssetKey,
    partitions_def: PartitionsDefinition,
    last_planned_materialization_storage_id: int,
    failed_subset: Optional[PartitionsSubset[str]] = None,
    after_storage_id: Optional[int] = None,
) -> tuple[PartitionsSubset, PartitionsSubset, Optional[int]]:
    """Compute the failed and in-progress partition subsets for an asset.

    Returns ``(failed_subset, in_progress_subset, cursor)`` where ``cursor`` is the
    event id of the earliest planned materialization whose run is still running
    (None if there are none) — used as the resume point for the next refresh.
    Partitions already in ``failed_subset`` are retained and new failures added.
    """
    in_progress_partitions: set[str] = set()
    incomplete_materializations = {}

    failed_subset = failed_subset or partitions_def.empty_subset()

    # Fetch incomplete materializations if there have been any planned materializations since the
    # cursor
    if last_planned_materialization_storage_id and (
        not after_storage_id or last_planned_materialization_storage_id > after_storage_id
    ):
        incomplete_materializations = instance.event_log_storage.get_latest_asset_partition_materialization_attempts_without_materializations(
            asset_key, after_storage_id=after_storage_id
        )

    failed_partitions: set[str] = set()

    cursor = None
    if incomplete_materializations:
        # Resolve run statuses in batches to bound query size.
        to_fetch = list(set([run_id for run_id, _event_id in incomplete_materializations.values()]))
        finished_runs = {}
        unfinished_runs = {}

        while to_fetch:
            chunk = to_fetch[:RUN_FETCH_BATCH_SIZE]
            to_fetch = to_fetch[RUN_FETCH_BATCH_SIZE:]

            for r in instance.get_runs(filters=RunsFilter(run_ids=chunk)):
                if r.status in FINISHED_STATUSES:
                    finished_runs[r.run_id] = r.status
                else:
                    unfinished_runs[r.run_id] = r.status

        for partition, (run_id, event_id) in incomplete_materializations.items():
            if run_id in finished_runs:
                status = finished_runs.get(run_id)
                if status == DagsterRunStatus.FAILURE:
                    failed_partitions.add(partition)
            elif run_id in unfinished_runs:
                in_progress_partitions.add(partition)
                # If the run is not finished, keep track of the event id so we can check on it next time
                if cursor is None or event_id < cursor:
                    cursor = event_id
            else:
                # Runs that are neither finished nor unfinished must have been deleted, so are
                # considered neither in-progress nor failed
                pass

    if failed_partitions:
        failed_subset = failed_subset.with_partition_keys(
            get_validated_partition_keys(partitions_def, failed_partitions)
        )

    return (
        failed_subset,
        (
            partitions_def.empty_subset().with_partition_keys(
                get_validated_partition_keys(partitions_def, in_progress_partitions)
            )
            if in_progress_partitions
            else partitions_def.empty_subset()
        ),
        cursor,
    )
def get_and_update_asset_status_cache_value(
    instance: DagsterInstance,
    asset_key: AssetKey,
    partitions_def: Optional[PartitionsDefinition],
    dynamic_partitions_loader: Optional[DynamicPartitionsStore] = None,
    loading_context: Optional[LoadingContext] = None,
) -> Optional[AssetStatusCacheValue]:
    """Fetch the stored status cache value for ``asset_key``, rebuild it if stale,
    and persist the refreshed value when the storage backend supports writes.

    The stored value is only reused when its partitions-definition id matches the
    current ``partitions_def``; otherwise the cache is rebuilt from scratch.
    Returns the up-to-date cache value (None if the asset has no events).
    """
    from dagster._core.storage.event_log.base import AssetRecord

    with partition_loading_context(None, dynamic_partitions_loader or instance):
        # Prefer the batched LoadingContext path when available.
        if loading_context:
            asset_record = AssetRecord.blocking_get(loading_context, asset_key)
        else:
            asset_record = next(iter(instance.get_asset_records(asset_keys=[asset_key])), None)

        if asset_record is None:
            stored_cache_value = None
        else:
            stored_cache_value = asset_record.asset_entry.cached_status

        # Discard the stored value if the partitions definition has changed.
        use_cached_value = (
            stored_cache_value
            and partitions_def
            and stored_cache_value.partitions_def_id
            == partitions_def.get_serializable_unique_identifier()
        )
        updated_cache_value = _build_status_cache(
            instance=instance,
            asset_key=asset_key,
            partitions_def=partitions_def,
            stored_cache_value=stored_cache_value if use_cached_value else None,
            asset_record=asset_record,
        )
        # Only write back when something actually changed, to avoid redundant writes.
        if (
            updated_cache_value is not None
            and instance.event_log_storage.can_write_asset_status_cache()
            and updated_cache_value != stored_cache_value
        ):
            instance.update_asset_cached_status_data(asset_key, updated_cache_value)

        return updated_cache_value
async def get_partition_subsets(
    instance: DagsterInstance,
    loading_context: LoadingContext,
    asset_key: AssetKey,
    dynamic_partitions_loader: DynamicPartitionsStore,
    partitions_def: Optional[PartitionsDefinition] = None,
) -> tuple[Optional[PartitionsSubset], Optional[PartitionsSubset], Optional[PartitionsSubset]]:
    """Returns a tuple of PartitionSubset objects: the first is the materialized partitions,
    the second is the failed partitions, and the third are in progress.
    """
    from dagster._core.storage.event_log.base import AssetRecord

    if not partitions_def:
        # Unpartitioned assets have no subsets to report.
        return None, None, None

    with partition_loading_context(None, dynamic_partitions_loader or instance):
        if instance.can_read_asset_status_cache() and is_cacheable_partition_type(partitions_def):
            # When the "cached_status_data" column exists in storage, update the column to contain
            # the latest partition status values
            updated_cache_value = await AssetStatusCacheValue.gen(
                loading_context, (asset_key, partitions_def)
            )
            materialized_subset = (
                updated_cache_value.deserialize_materialized_partition_subsets(partitions_def)
                if updated_cache_value
                else partitions_def.empty_subset()
            )
            failed_subset = (
                updated_cache_value.deserialize_failed_partition_subsets(partitions_def)
                if updated_cache_value
                else partitions_def.empty_subset()
            )
            in_progress_subset = (
                updated_cache_value.deserialize_in_progress_partition_subsets(partitions_def)
                if updated_cache_value
                else partitions_def.empty_subset()
            )

            return materialized_subset, failed_subset, in_progress_subset

        else:
            # If the partition status can't be cached, fetch partition status from storage
            if isinstance(partitions_def, MultiPartitionsDefinition):
                materialized_keys = get_materialized_multipartitions(
                    instance, asset_key, partitions_def
                )
            else:
                materialized_keys = instance.get_materialized_partitions(asset_key)

            # Drop any keys that are no longer valid for the current definition.
            validated_keys = get_validated_partition_keys(partitions_def, set(materialized_keys))

            materialized_subset = (
                partitions_def.empty_subset().with_partition_keys(validated_keys)
                if validated_keys
                else partitions_def.empty_subset()
            )

            asset_record = AssetRecord.blocking_get(loading_context, asset_key)

            failed_subset, in_progress_subset, _ = build_failed_and_in_progress_partition_subset(
                instance,
                asset_key,
                partitions_def,
                last_planned_materialization_storage_id=get_last_planned_storage_id(
                    instance, asset_key, asset_record
                ),
            )

            return materialized_subset, failed_subset, in_progress_subset
| AssetStatusCacheValue |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.