hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3423dccd807fb1332a750f36dae9ff22191dc5 | 92,796 | py | Python | unittests/test_rest_framework.py | maerifat/django-DefectDojo | ba1a415219ff20e8b4e909ef14f750de9b80297e | [
"BSD-3-Clause"
] | null | null | null | unittests/test_rest_framework.py | maerifat/django-DefectDojo | ba1a415219ff20e8b4e909ef14f750de9b80297e | [
"BSD-3-Clause"
] | 206 | 2020-04-20T16:03:18.000Z | 2022-01-15T23:07:48.000Z | unittests/test_rest_framework.py | maerifat/django-DefectDojo | ba1a415219ff20e8b4e909ef14f750de9b80297e | [
"BSD-3-Clause"
] | 1 | 2020-12-06T15:44:44.000Z | 2020-12-06T15:44:44.000Z | from collections import OrderedDict
from drf_spectacular.drainage import GENERATOR_STATS
# from drf_spectacular.renderers import OpenApiJsonRenderer
from unittest.mock import call, patch, ANY
from dojo.models import Product, Engagement, Test, Finding, \
JIRA_Issue, Tool_Product_Settings, Tool_Configuration, Tool_Type, \
User, Stub_Finding, Endpoint, JIRA_Project, JIRA_Instance, \
Finding_Template, Note_Type, App_Analysis, Endpoint_Status, \
Sonarqube_Issue, Sonarqube_Issue_Transition, Product_API_Scan_Configuration, Notes, \
BurpRawRequestResponse, DojoMeta, FileUpload, Product_Type, Dojo_Group, \
Role, Product_Type_Member, Product_Member, Product_Type_Group, \
Product_Group, Global_Role, Dojo_Group_Member, Language_Type, Languages, \
Notifications, UserContactInfo
from dojo.api_v2.views import EndPointViewSet, EngagementViewSet, \
FindingTemplatesViewSet, FindingViewSet, JiraInstanceViewSet, \
JiraIssuesViewSet, JiraProjectViewSet, ProductViewSet, \
StubFindingsViewSet, TestsViewSet, \
ToolConfigurationsViewSet, ToolProductSettingsViewSet, ToolTypesViewSet, \
UsersViewSet, ImportScanView, NoteTypeViewSet, AppAnalysisViewSet, \
EndpointStatusViewSet, SonarqubeIssueViewSet, NotesViewSet, ProductTypeViewSet, \
DojoGroupViewSet, RoleViewSet, ProductTypeMemberViewSet, ProductMemberViewSet, \
ProductTypeGroupViewSet, ProductGroupViewSet, GlobalRoleViewSet, \
DojoGroupMemberViewSet, LanguageTypeViewSet, LanguageViewSet, ImportLanguagesView, \
NotificationsViewSet, UserContactInfoViewSet, ProductAPIScanConfigurationViewSet
from json import dumps
from django.urls import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from .dojo_test_case import DojoAPITestCase
from dojo.api_v2.prefetch.utils import _get_prefetchable_fields
from rest_framework.mixins import \
ListModelMixin, RetrieveModelMixin, CreateModelMixin, \
DestroyModelMixin, UpdateModelMixin
from dojo.api_v2.prefetch import PrefetchListMixin, PrefetchRetrieveMixin
from drf_spectacular.settings import spectacular_settings
import logging
import pathlib
import json
from dojo.authorization.roles_permissions import Permissions
logger = logging.getLogger(__name__)
# Base path of the v2 REST API; prepended to endpoint paths by format_url().
BASE_API_URL = "/api/v2"
# OpenAPI primitive type names, used by SchemaChecker when validating response
# payloads against the generated schema. The trailing `#:` markers are
# Sphinx-style attribute-doc comments — presumably kept for doc tooling.
TYPE_OBJECT = "object"  #:
TYPE_STRING = "string"  #:
TYPE_NUMBER = "number"  #:
TYPE_INTEGER = "integer"  #:
TYPE_BOOLEAN = "boolean"  #:
TYPE_ARRAY = "array"  #:
TYPE_FILE = "file"  #:
def get_open_api3_json_schema():
    """Generate the OpenAPI v3 schema via drf-spectacular and validate it.

    Returns the schema dict; emits the generator warning summary as a side
    effect.
    """
    generator = spectacular_settings.DEFAULT_GENERATOR_CLASS()
    schema = generator.get_schema(request=None, public=True)
    GENERATOR_STATS.emit_summary()

    # Imported lazily so module import stays cheap.
    from drf_spectacular.validation import validate_schema
    validate_schema(schema)

    return schema
# use ugly global to avoid generating the schema for every test/method (as it's slow)
# NOTE(review): the `global` statement is a no-op at module level — the
# assignment below already binds the module-level name on its own.
global open_api3_json_schema
open_api3_json_schema = get_open_api3_json_schema()
def skipIfNotSubclass(baseclass):
    """Decorator factory: run the test only when ``self.viewset`` inherits
    from *baseclass*; otherwise skip it via ``self.skipTest``."""
    def decorate(f):
        def wrapper(self, *args, **kwargs):
            if issubclass(self.viewset, baseclass):
                f(self, *args, **kwargs)
            else:
                self.skipTest('This view does not inherit from %s' % baseclass)
        return wrapper
    return decorate
# def testIsBroken(method):
# return tag("broken")(method)
def check_response_valid(expected_code, response):
    """Assert that *response* has *expected_code*; include the response data
    (when present) in the failure message."""
    data = response.data if hasattr(response, "data") else None
    assert response.status_code == expected_code, \
        f"Response invalid, returned with code {response.status_code}\nResponse Data:\n{data}"
def format_url(path):
    """Prefix *path* with the v2 API base URL."""
    return BASE_API_URL + path
class SchemaChecker():
    """Recursively validates a response object against an OpenAPI v3 schema
    fragment.

    All mismatches are collected; ``check()`` raises a single AssertionError
    listing every error found, so one run reports all problems at once.
    """
    def __init__(self, components):
        # components: the "components" section of the OpenAPI document,
        # used to resolve "$ref" pointers.
        self._prefix = []
        self._has_failed = False
        self._components = components
        self._errors = []

    def _register_error(self, error):
        self._errors += [error]

    def _check_or_fail(self, condition, message):
        # Record the failure instead of raising so all errors are reported together.
        if not condition:
            self._has_failed = True
            self._register_error(message)

    def _get_prefix(self):
        # Human-readable path to the value currently being checked.
        return '#'.join(self._prefix)

    def _push_prefix(self, prefix):
        self._prefix += [prefix]

    def _pop_prefix(self):
        self._prefix = self._prefix if len(self._prefix) == 0 else self._prefix[:-1]

    def _resolve_if_ref(self, schema):
        """Follow a ``$ref`` pointer into components/schemas, if present."""
        if '$ref' not in schema:
            return schema
        ref_name = schema["$ref"]
        ref_name = ref_name[ref_name.rfind("/") + 1:]
        return self._components['schemas'][ref_name]

    def _check_has_required_fields(self, required_fields, obj):
        for required_field in required_fields:
            # passwords are writeOnly, but this is not supported by Swagger / OpenAPIv2
            # TODO check this for OpenAPI3
            if required_field != 'password':
                field = f"{self._get_prefix()}#{required_field}"
                self._check_or_fail(obj is not None and required_field in obj, f"{field} is required but was not returned")

    def _check_type(self, schema, obj):
        if 'type' not in schema:
            # TODO implement OneOf / AllOff (enums): schemas without a plain
            # "type" (e.g. nullable enums expressed as oneOf) are not checked.
            return schema
        schema_type = schema["type"]
        # BUG FIX: OpenAPI 3 spells the nullability flag "nullable"; the old
        # Swagger-2 "x-nullable" spelling is kept for backward compatibility.
        # readOnly fields may also legitimately come back as null.
        is_nullable = schema.get("nullable", False) or schema.get("x-nullable", False) or schema.get("readOnly", False)

        def _check_helper(check):
            self._check_or_fail(check, f"{self._get_prefix()} should be of type {schema_type} but value was of type {type(obj)}")

        if obj is None:
            self._check_or_fail(is_nullable, f"{self._get_prefix()} is not nullable yet the value returned was null")
        elif schema_type == TYPE_BOOLEAN:
            _check_helper(isinstance(obj, bool))
        elif schema_type == TYPE_INTEGER:
            _check_helper(isinstance(obj, int))
        elif schema_type == TYPE_NUMBER:
            # NOTE(review): numbers are expected to arrive as decimal strings
            # (isdecimal); an actual int/float here would raise — confirm.
            _check_helper(obj.isdecimal())
        elif schema_type == TYPE_ARRAY:
            _check_helper(isinstance(obj, list))
        elif schema_type == TYPE_OBJECT:
            _check_helper(isinstance(obj, OrderedDict) or isinstance(obj, dict))
        elif schema_type == TYPE_STRING:
            _check_helper(isinstance(obj, str))
        else:
            # Default case: unknown schema type is always an error.
            _check_helper(False)

    def _with_prefix(self, prefix, callable, *args):
        self._push_prefix(prefix)
        callable(*args)
        self._pop_prefix()

    def check(self, schema, obj):
        def _check(schema, obj):
            # Convert sets to lists to streamline checks.
            # BUG FIX: compare type names with == instead of `is`; identity of
            # equal strings is a CPython interning accident, not a guarantee.
            if 'type' in schema and schema["type"] == TYPE_ARRAY and isinstance(obj, set):
                obj = list(obj)
            schema = self._resolve_if_ref(schema)
            self._check_type(schema, obj)
            required_fields = schema.get("required", [])
            self._check_has_required_fields(required_fields, obj)
            if obj is None:
                return
            properties = schema.get("properties", None)
            if properties is not None:
                for name, prop in properties.items():
                    obj_child = obj.get(name, None)
                    if obj_child is not None:
                        _check(prop, obj_child)
            # Flag properties present in the response but absent from the schema.
            for child_name in obj.keys():
                # TODO prefetch mixins not picked up by spectcular?
                if child_name not in ['prefetch']:
                    if not properties or child_name not in properties.keys():
                        self._has_failed = True
                        self._register_error(f'unexpected property "{child_name}" found')
            additional_properties = schema.get("additionalProperties", None)
            if additional_properties is not None:
                for name, obj_child in obj.items():
                    self._with_prefix(f"additionalProp<{name}>", _check, additional_properties, obj_child)
            # TODO implement support for enum / OneOff / AllOff
            if 'type' in schema and schema["type"] == TYPE_ARRAY:
                items_schema = schema["items"]
                for index in range(len(obj)):
                    self._with_prefix(f"item{index}", _check, items_schema, obj[index])

        # Reset state so one checker instance can be reused across checks.
        self._has_failed = False
        self._errors = []
        self._prefix = []
        _check(schema, obj)
        assert not self._has_failed, "\n" + '\n'.join(self._errors) + "\nFailed with " + str(len(self._errors)) + " errors"
class BaseClass():
    """Namespace wrapper so the generic test bases below are not collected
    and run directly by the test runner."""
    class RESTEndpointTest(DojoAPITestCase):
        """Shared CRUD / schema / prefetch / authorization tests for API v2 viewsets.

        Concrete subclasses configure, in __init__: endpoint_model,
        endpoint_path, viewname, viewset, payload, update_fields,
        object_permission and — when object_permission is True —
        permission_check_class, permission_check_id and the
        permission_create/update/delete values.
        """
        def __init__(self, *args, **kwargs):
            DojoAPITestCase.__init__(self, *args, **kwargs)
        def setUp(self):
            # Authenticate the test client as the 'admin' fixture user via token auth.
            testuser = User.objects.get(username='admin')
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
            self.url = reverse(self.viewname + '-list')
            self.schema = open_api3_json_schema
        def check_schema(self, schema, obj):
            # Validate a response object against the generated OpenAPI schema.
            schema_checker = SchemaChecker(self.schema["components"])
            # print(vars(schema_checker))
            # NOTE(review): the `schema` parameter is unused — the whole OpenAPI
            # document (self.schema) is passed to check() instead, which makes
            # this validation much weaker than check_schema_response() suggests.
            # Confirm whether `schema_checker.check(schema, obj)` was intended.
            schema_checker.check(self.schema, obj)
        # def get_valid_object_id(self):
        #     response = self.client.get(format_url(f"/{self.viewname}/"))
        #     check_response_valid(status.HTTP_200_OK, response)
        #     if len(response.data["results"]) == 0:
        #         return None
        #     return response.data["results"][0].get('id', None)
        def get_endpoint_schema(self, path, method):
            # Return the schema fragment for a specific path + HTTP method,
            # failing with a helpful message listing the available keys.
            paths = self.schema["paths"]
            methods = paths.get(path, None)
            assert methods is not None, f"{path} not found in {[path for path in paths.keys()]}"
            endpoint = methods.get(method, None)
            assert endpoint is not None, f"Method {method} not found in {[method for method in methods.keys()]}"
            return endpoint
        def check_schema_response(self, method, status_code, response, detail=False):
            # Validate response.data against the declared response schema for
            # this endpoint/method/status; detail=True selects the {id}/ route.
            detail_path = '{id}/' if detail else ''
            endpoints_schema = self.schema["paths"][format_url(f"/{self.endpoint_path}/{detail_path}")]
            schema = endpoints_schema[method]['responses'][status_code]['content']['application/json']['schema']
            obj = response.data
            self.check_schema(schema, obj)
        @skipIfNotSubclass(ListModelMixin)
        def test_list(self):
            # print(open_api3_json_schema)
            # validator = ResponseValidator(spec)
            check_for_tags = False
            if hasattr(self.endpoint_model, 'tags') and self.payload and self.payload.get('tags', None):
                # create a new instance first to make sure there's at least 1 instance with tags set by payload to trigger tag handling code
                logger.debug('creating model with endpoints: %s', self.payload)
                response = self.client.post(self.url, self.payload)
                self.assertEqual(201, response.status_code, response.content[:1000])
                # print('response:', response.content[:1000])
                check_for_id = response.data['id']
                # print('id: ', check_for_id)
                check_for_tags = self.payload.get('tags', None)
            response = self.client.get(self.url, format='json')
            # print('response')
            # print(vars(response))
            # print('response.data')
            # print(response.data)
            # tags must be present in last entry, the one we created
            if check_for_tags:
                tags_found = False
                for result in response.data['results']:
                    if result['id'] == check_for_id:
                        # logger.debug('result.tags: %s', result.get('tags', ''))
                        self.assertEqual(len(check_for_tags), len(result.get('tags', None)))
                        for tag in check_for_tags:
                            # logger.debug('looking for tag %s in tag list %s', tag, result['tags'])
                            self.assertTrue(tag in result['tags'])
                        tags_found = True
                self.assertTrue(tags_found)
            self.assertEqual(200, response.status_code, response.content[:1000])
            self.check_schema_response('get', '200', response)
        @skipIfNotSubclass(CreateModelMixin)
        def test_create(self):
            # POST self.payload and verify the object count grows by one and
            # (when applicable) tags round-trip through the API.
            length = self.endpoint_model.objects.count()
            response = self.client.post(self.url, self.payload)
            logger.debug('test_create_response:')
            logger.debug(response)
            logger.debug(response.data)
            self.assertEqual(201, response.status_code, response.content[:1000])
            self.assertEqual(self.endpoint_model.objects.count(), length + 1)
            if hasattr(self.endpoint_model, 'tags') and self.payload and self.payload.get('tags', None):
                self.assertEqual(len(self.payload.get('tags')), len(response.data.get('tags', None)))
                for tag in self.payload.get('tags'):
                    # logger.debug('looking for tag %s in tag list %s', tag, response.data['tags'])
                    self.assertTrue(tag in response.data['tags'])
            self.check_schema_response('post', '201', response)
        @skipIfNotSubclass(RetrieveModelMixin)
        def test_detail(self):
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.get(relative_url)
            self.assertEqual(200, response.status_code, response.content[:1000])
            # sensitive data must be set to write_only so those are not returned in the response
            # https://github.com/DefectDojo/django-DefectDojo/security/advisories/GHSA-8q8j-7wc4-vjg5
            self.assertFalse('password' in response.data)
            self.assertFalse('ssh' in response.data)
            self.assertFalse('api_key' in response.data)
            self.check_schema_response('get', '200', response, detail=True)
        @skipIfNotSubclass(DestroyModelMixin)
        def test_delete(self):
            # Delete the LAST listed object so earlier fixtures stay available
            # for the other tests.
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][-1]['id']
            response = self.client.delete(relative_url)
            self.assertEqual(204, response.status_code, response.content[:1000])
        @skipIfNotSubclass(UpdateModelMixin)
        def test_update(self):
            # Exercises both PATCH (update_fields) and PUT (full payload).
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.patch(relative_url, self.update_fields)
            # print('patch response.data')
            # print(response.data)
            self.assertEqual(200, response.status_code, response.content[:1000])
            self.check_schema_response('patch', '200', response, detail=True)
            for key, value in self.update_fields.items():
                # some exception as push_to_jira has been implemented strangely in the update methods in the api
                if key not in ['push_to_jira', 'ssh', 'password', 'api_key']:
                    # Convert data to sets to avoid problems with lists
                    if isinstance(value, list):
                        value = set(value)
                    if isinstance(response.data[key], list):
                        response_data = set(response.data[key])
                    else:
                        response_data = response.data[key]
                    self.assertEqual(value, response_data)
            self.assertFalse('push_to_jira' in response.data)
            self.assertFalse('ssh' in response.data)
            self.assertFalse('password' in response.data)
            self.assertFalse('api_key' in response.data)
            if hasattr(self.endpoint_model, 'tags') and self.update_fields and self.update_fields.get('tags', None):
                self.assertEqual(len(self.update_fields.get('tags')), len(response.data.get('tags', None)))
                for tag in self.update_fields.get('tags'):
                    logger.debug('looking for tag %s in tag list %s', tag, response.data['tags'])
                    self.assertTrue(tag in response.data['tags'])
            response = self.client.put(
                relative_url, self.payload)
            self.assertEqual(200, response.status_code, response.content[:1000])
            # print('put response.data')
            # print(response.data)
            self.check_schema_response('put', '200', response, detail=True)
        @skipIfNotSubclass(PrefetchRetrieveMixin)
        def test_detail_prefetch(self):
            # print("=======================================================")
            # Request every prefetchable relation and verify each returned id
            # appears in the "prefetch" section of the response.
            prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.get(relative_url, data={
                "prefetch": ','.join(prefetchable_fields)
            })
            self.assertEqual(200, response.status_code)
            obj = response.data
            self.assertTrue("prefetch" in obj)
            for field in prefetchable_fields:
                field_value = obj.get(field, None)
                if field_value is None:
                    continue
                self.assertTrue(field in obj["prefetch"])
                values = field_value if type(field_value) is list else [field_value]
                for value in values:
                    self.assertTrue(value in obj["prefetch"][field])
            # TODO add schema check
        @skipIfNotSubclass(PrefetchListMixin)
        def test_list_prefetch(self):
            # Same as test_detail_prefetch but over the list endpoint.
            prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]
            response = self.client.get(self.url, data={
                "prefetch": ','.join(prefetchable_fields)
            })
            self.assertEqual(200, response.status_code)
            objs = response.data
            self.assertTrue("results" in objs)
            self.assertTrue("prefetch" in objs)
            for obj in objs["results"]:
                for field in prefetchable_fields:
                    field_value = obj.get(field, None)
                    if field_value is None:
                        continue
                    self.assertTrue(field in objs["prefetch"])
                    values = field_value if type(field_value) is list else [field_value]
                    for value in values:
                        if type(value) is not int:
                            value = value['id']
                        self.assertTrue(value in objs["prefetch"][field])
            # TODO add schema check
        def setUp_not_authorized(self):
            # Re-authenticate as fixture user id=3 (no object permissions).
            testuser = User.objects.get(id=3)
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        def setUp_global_reader(self):
            # Re-authenticate as fixture user id=5 (global reader role).
            testuser = User.objects.get(id=5)
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        def setUp_global_owner(self):
            # Re-authenticate as fixture user id=6 (global owner role).
            testuser = User.objects.get(id=6)
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        @skipIfNotSubclass(ListModelMixin)
        def test_list_not_authorized(self):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            self.setUp_not_authorized()
            response = self.client.get(self.url, format='json')
            # An unauthorized user gets an empty result set, not an error.
            self.assertFalse(response.data['results'])
            self.assertEqual(200, response.status_code, response.content[:1000])
        @skipIfNotSubclass(RetrieveModelMixin)
        def test_detail_not_authorized(self):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            self.setUp_not_authorized()
            current_objects = self.endpoint_model.objects.all()
            relative_url = self.url + '%s/' % current_objects[0].id
            response = self.client.get(relative_url)
            # Object-level filtering hides the object entirely (404, not 403).
            self.assertEqual(404, response.status_code, response.content[:1000])
        @skipIfNotSubclass(CreateModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_create_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            response = self.client.post(self.url, self.payload)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                ANY,
                self.permission_create)
        @skipIfNotSubclass(DestroyModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_delete_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.delete(relative_url)
            # NOTE(review): unlike the create/update variants there is no
            # assertion on response.status_code here — only the permission-check
            # call is verified. Confirm whether a 403 assertion is missing.
            mock.assert_called_with(User.objects.get(username='admin'),
                self.permission_check_class.objects.get(id=self.permission_check_id),
                self.permission_delete)
        @skipIfNotSubclass(UpdateModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_update_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.patch(relative_url, self.update_fields)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                self.permission_check_class.objects.get(id=self.permission_check_id),
                self.permission_update)
            response = self.client.put(relative_url, self.payload)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                self.permission_check_class.objects.get(id=self.permission_check_id),
                self.permission_update)
    class MemberEndpointTest(RESTEndpointTest):
        """Variant for membership endpoints where PATCH is disallowed (405)
        and only PUT can be used for updates."""
        def __init__(self, *args, **kwargs):
            BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
        def test_update(self):
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.patch(relative_url, self.update_fields)
            # PATCH is intentionally not supported on member endpoints.
            self.assertEqual(405, response.status_code, response.content[:1000])
            response = self.client.put(
                relative_url, self.payload)
            self.assertEqual(200, response.status_code, response.content[:1000])
            self.check_schema_response('put', '200', response, detail=True)
        @skipIfNotSubclass(UpdateModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_update_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.put(relative_url, self.payload)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                self.permission_check_class.objects.get(id=self.permission_check_id),
                self.permission_update)
class AppAnalysisTest(BaseClass.RESTEndpointTest):
    """CRUD API tests for the App_Analysis (technologies) endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Route / model wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = App_Analysis
        self.viewset = AppAnalysisViewSet
        self.viewname = 'app_analysis'
        self.endpoint_path = 'technologies'
        # Object-level permissions are checked against the owning Product.
        self.object_permission = True
        self.permission_check_class = Product
        self.permission_check_id = 1
        self.permission_create = Permissions.Technology_Add
        self.permission_update = Permissions.Technology_Edit
        self.permission_delete = Permissions.Technology_Delete
        self.payload = {
            'product': 1,
            'name': 'Tomcat',
            'user': 1,
            'confidence': 100,
            'version': '8.5.1',
            'icon': '',
            'website': '',
            'website_found': '',
            'created': '2018-08-16T16:58:23.908Z',
        }
        self.update_fields = {'version': '9.0'}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class EndpointStatusTest(BaseClass.RESTEndpointTest):
    """CRUD API tests for the endpoint_status endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Route / model wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Endpoint_Status
        self.viewset = EndpointStatusViewSet
        self.viewname = 'endpoint_status'
        self.endpoint_path = 'endpoint_status'
        # All status operations are authorized via Endpoint_Edit on the endpoint.
        self.object_permission = True
        self.permission_check_class = Endpoint
        self.permission_check_id = 2
        self.permission_create = Permissions.Endpoint_Edit
        self.permission_update = Permissions.Endpoint_Edit
        self.permission_delete = Permissions.Endpoint_Edit
        self.payload = {
            'endpoint': 2,
            'finding': 2,
            'mitigated': False,
            'false_positive': False,
            'risk_accepted': False,
            'out_of_scope': False,
            'date': "2017-01-12T00:00",
        }
        self.update_fields = {'mitigated': True}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class EndpointTest(BaseClass.RESTEndpointTest):
    """CRUD API tests for the endpoints endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Route / model wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Endpoint
        self.viewset = EndPointViewSet
        self.viewname = 'endpoint'
        self.endpoint_path = 'endpoints'
        self.object_permission = True
        self.permission_check_class = Endpoint
        self.permission_check_id = 2
        self.permission_create = Permissions.Endpoint_Add
        self.permission_update = Permissions.Endpoint_Edit
        self.permission_delete = Permissions.Endpoint_Delete
        self.payload = {
            'protocol': 'http',
            'host': '127.0.0.1',
            'path': '/',
            'query': 'test=true',
            'fragment': 'test-1',
            'product': 1,
            'tags': ["mytag", "yourtag"],
        }
        self.update_fields = {'protocol': 'ftp', 'tags': ['one_new_tag']}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class EngagementTest(BaseClass.RESTEndpointTest):
    """CRUD API tests for the engagements endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Route / model wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Engagement
        self.viewset = EngagementViewSet
        self.viewname = 'engagement'
        self.endpoint_path = 'engagements'
        self.object_permission = True
        self.permission_check_class = Engagement
        self.permission_check_id = 1
        self.permission_create = Permissions.Engagement_Add
        self.permission_update = Permissions.Engagement_Edit
        self.permission_delete = Permissions.Engagement_Delete
        self.payload = {
            "engagement_type": 'Interactive',
            "report_type": 1,
            "name": "",
            "description": "",
            "version": "",
            "target_start": '1937-01-01',
            "target_end": '1937-01-01',
            "reason": "",
            "test_strategy": "",
            "product": "1",
            "tags": ["mytag"],
        }
        self.update_fields = {'version': 'latest'}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class FindingRequestResponseTest(DojoAPITestCase):
    """Tests the nested request_response sub-endpoint of findings."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate as the admin fixture user via token auth.
        user = User.objects.get(username='admin')
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + Token.objects.get(user=user).key)

    def test_request_response_post(self):
        count_before = BurpRawRequestResponse.objects.count()
        payload = {"req_resp": [{"request": "POST", "response": "200"}]}
        response = self.client.post('/api/v2/findings/7/request_response/', dumps(payload), content_type='application/json')
        self.assertEqual(200, response.status_code, response.content[:1000])
        self.assertEqual(BurpRawRequestResponse.objects.count(), count_before + 1)

    def test_request_response_get(self):
        response = self.client.get('/api/v2/findings/7/request_response/', format='json')
        self.assertEqual(200, response.status_code, response.content[:1000])
class FindingFilesTest(DojoAPITestCase):
    """Tests the nested /files/ sub-endpoints of findings, tests and engagements."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate as the admin fixture user via token auth.
        testuser = User.objects.get(username='admin')
        token = Token.objects.get(user=testuser)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

    def test_request_response_post(self):
        url_levels = [
            'findings/7',
            'tests/3',
            'engagements/1'
        ]
        path = pathlib.Path(__file__).parent.absolute()
        for level in url_levels:
            length = FileUpload.objects.count()
            # BUG FIX: open the fixture file in a context manager — the original
            # never closed the handle, leaking one file descriptor per iteration.
            with open(str(path) + '/scans/acunetix/one_finding.xml') as testfile:
                payload = {
                    "title": level,
                    "file": testfile
                }
                response = self.client.post('/api/v2/' + level + '/files/', payload)
            self.assertEqual(201, response.status_code, response.data)
            self.assertEqual(FileUpload.objects.count(), length + 1)

    def test_request_response_get(self):
        url_levels = [
            'findings/7',
            'tests/3',
            'engagements/1'
        ]
        for level in url_levels:
            response = self.client.get('/api/v2/' + level + '/files/')
            self.assertEqual(200, response.status_code)
class FindingsTest(BaseClass.RESTEndpointTest):
    """CRUD and duplicate-management tests for the findings endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Route / model wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Finding
        self.endpoint_path = 'findings'
        self.viewname = 'finding'
        self.viewset = FindingViewSet
        self.payload = {
            "review_requested_by": 2,
            "reviewers": [2, 3],
            "defect_review_requested_by": 2,
            "test": 3,
            "url": "http://www.example.com",
            "thread_id": 1,
            "found_by": [],
            "title": "DUMMY FINDING123",
            "date": "2020-05-20",
            "cwe": 1,
            "severity": "HIGH",
            "description": "TEST finding",
            "mitigation": "MITIGATION",
            "impact": "HIGH",
            "references": "",
            "reporter": 3,
            "active": False,
            "verified": False,
            "false_p": False,
            "duplicate": False,
            "out_of_scope": False,
            "under_review": False,
            "under_defect_review": False,
            "numerical_severity": "S0",
            "line": 100,
            "file_path": "",
            "static_finding": False,
            "dynamic_finding": False,
            "endpoints": [1, 2],
            "files": [],
            "tags": ['tag1', 'tag_2'],
        }
        self.update_fields = {'duplicate': False, 'active': True, "push_to_jira": "True", 'tags': ['finding_tag_new']}
        self.object_permission = True
        self.permission_check_class = Finding
        self.permission_check_id = 3
        self.permission_create = Permissions.Finding_Add
        self.permission_update = Permissions.Finding_Edit
        self.permission_delete = Permissions.Finding_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def test_duplicate(self):
        # Reassign duplicate: make finding 2 a duplicate of finding 3.
        result = self.client.post(self.url + "2/original/3/")
        self.assertEqual(result.status_code, status.HTTP_204_NO_CONTENT, "Could not move duplicate")
        result = self.client.get(self.url + "2/")
        self.assertEqual(result.status_code, status.HTTP_200_OK, "Could not check new duplicate")
        result_json = result.json()
        assert result_json["duplicate"]
        assert result_json["duplicate_finding"] == 3
        # Check duplicate status
        result = self.client.get(self.url + "3/duplicate/")
        assert result.status_code == status.HTTP_200_OK, "Could not check duplicate status"
        result_json = result.json()
        # Should return all duplicates for id=3
        assert set(x["id"] for x in result_json) == {2, 4, 5, 6}
        # Reset duplicate
        result = self.client.post(self.url + "2/duplicate/reset/")
        self.assertEqual(result.status_code, status.HTTP_204_NO_CONTENT, "Could not reset duplicate")
        new_result = self.client.get(self.url + "2/")
        # BUG FIX: the original re-asserted the already-checked POST response
        # (`result`) against 204 here; verify the fresh GET succeeded instead.
        self.assertEqual(new_result.status_code, status.HTTP_200_OK, "Could not check reset duplicate status")
        result_json = new_result.json()
        assert not result_json["duplicate"]
        assert result_json["duplicate_finding"] is None
class FindingMetadataTest(BaseClass.RESTEndpointTest):
    """Tests the nested /metadata/ sub-endpoint of findings."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Finding
        self.endpoint_path = 'findings'
        self.viewname = 'finding'
        self.viewset = FindingViewSet
        self.payload = {}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def setUp(self):
        super().setUp()
        testuser = User.objects.get(username='admin')
        token = Token.objects.get(user=testuser)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        self.url = reverse(self.viewname + '-list')
        self.current_findings = self.client.get(self.url, format='json').data["results"]
        finding = Finding.objects.get(id=self.current_findings[0]['id'])
        self.base_url = f"{self.url}{self.current_findings[0]['id']}/metadata/"
        # Seed one metadata entry so GET/PUT/DELETE tests have something to act on.
        metadata = DojoMeta(finding=finding, name="test_meta", value="20")
        metadata.save()

    def test_create(self):
        response = self.client.post(self.base_url, data={"name": "test_meta2", "value": "40"})
        self.assertEqual(200, response.status_code, response.data)
        results = self.client.get(self.base_url).data
        for result in results:
            if result["name"] == "test_meta2" and result["value"] == "40":
                return
        # BUG FIX: use self.fail() rather than `assert False`, which is
        # silently stripped when Python runs with -O.
        self.fail("Metadata was not created correctly")

    def test_create_duplicate(self):
        result = self.client.post(self.base_url, data={"name": "test_meta", "value": "40"})
        # BUG FIX: bare assert replaced by an unittest assertion (-O safe).
        self.assertEqual(result.status_code, status.HTTP_400_BAD_REQUEST, "Metadata creation did not failed on duplicate")

    def test_get(self):
        results = self.client.get(self.base_url, format="json").data
        for result in results:
            if result["name"] == "test_meta" and result["value"] == "20":
                return
        self.fail("Metadata was not created correctly")

    def test_update(self):
        self.client.put(self.base_url + "?name=test_meta", data={"name": "test_meta", "value": "40"})
        result = self.client.get(self.base_url).data[0]
        self.assertEqual(result["name"], "test_meta", "Metadata not edited correctly")
        self.assertEqual(result["value"], "40", "Metadata not edited correctly")

    def test_delete(self):
        self.client.delete(self.base_url + "?name=test_meta")
        result = self.client.get(self.base_url).data
        self.assertEqual(len(result), 0, "Metadata not deleted correctly")
class FindingTemplatesTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the finding-template endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Finding_Template
        self.viewset = FindingTemplatesViewSet
        self.viewname = 'finding_template'
        self.endpoint_path = 'finding_templates'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "title": "Test template",
            "severity": "MEDIUM",
            "impact": "MEDIUM",
            "cwe": 0,
            "description": "test template",
            "mitigation": "None",
            "references": "",
        }
        # Field patched by the update test.
        self.update_fields = {'references': 'some reference'}
        super().__init__(*args, **kwargs)
class JiraInstancesTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the JIRA instance endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = JIRA_Instance
        self.viewset = JiraInstanceViewSet
        self.viewname = 'jira_instance'
        self.endpoint_path = 'jira_instances'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "url": "http://www.example.com",
            "username": "testuser",
            "password": "testuser",
            "default_issue_type": "Story",
            "epic_name_id": 1111,
            "open_status_key": 111,
            "close_status_key": 111,
            "info_mapping_severity": "LOW",
            "low_mapping_severity": "LOW",
            "medium_mapping_severity": "LOW",
            "high_mapping_severity": "LOW",
            "critical_mapping_severity": "LOW",
            "finding_text": "",
            "global_jira_sla_notification": False
        }
        # Field patched by the update test.
        self.update_fields = {'epic_name_id': 1}
        super().__init__(*args, **kwargs)
class JiraIssuesTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the JIRA finding-mapping endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = JIRA_Issue
        self.viewset = JiraIssuesViewSet
        self.viewname = 'jira_issue'
        self.endpoint_path = 'jira_finding_mappings'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "jira_id": "JIRA 1",
            "jira_key": "SOME KEY",
            "finding": 2,
            "engagement": 2,
        }
        # Field patched by the update test.
        self.update_fields = {'finding': 2}
        super().__init__(*args, **kwargs)
class JiraProjectTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the JIRA project endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = JIRA_Project
        self.viewset = JiraProjectViewSet
        self.viewname = 'jira_project'
        self.endpoint_path = 'jira_projects'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "project_key": "TEST KEY",
            "component": "",
            "push_all_issues": False,
            "enable_engagement_epic_mapping": False,
            "push_notes": False,
            "product": 1,
            "jira_instance": 2,
        }
        # Field patched by the update test.
        self.update_fields = {'jira_instance': 3}
        super().__init__(*args, **kwargs)
class SonarqubeIssueTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the SonarQube issue endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Sonarqube_Issue
        self.viewset = SonarqubeIssueViewSet
        self.viewname = 'sonarqube_issue'
        self.endpoint_path = 'sonarqube_issues'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "key": "AREwS5n5TxsFUNm31CxP",
            "status": "OPEN",
            "type": "VULNERABILITY"
        }
        # Field patched by the update test.
        self.update_fields = {'key': 'AREwS5n5TxsFUNm31CxP'}
        super().__init__(*args, **kwargs)
class SonarqubeIssuesTransitionTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the SonarQube issue-transition endpoint."""
    fixtures = ['dojo_testdata.json']
    def __init__(self, *args, **kwargs):
        self.endpoint_model = Sonarqube_Issue_Transition
        self.endpoint_path = 'sonarqube_transitions'
        self.viewname = 'sonarqube_issue_transition'
        # NOTE(review): this assigns the test class itself rather than a DRF
        # viewset -- every sibling class assigns a *ViewSet here. It almost
        # certainly should be the transition viewset from dojo.api_v2.views
        # (e.g. SonarqubeIssueTransitionViewSet); confirm that import exists
        # at the top of this file before fixing.
        self.viewset = SonarqubeIssuesTransitionTest
        # Body used by the create test.
        self.payload = {
            "sonarqube_issue": 1,
            "finding_status": "Active, Verified",
            "sonarqube_status": "OPEN",
            "transitions": "confirm"
        }
        # Field patched by the update test.
        self.update_fields = {'sonarqube_status': 'CLOSED'}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class Product_API_Scan_ConfigurationTest(BaseClass.RESTEndpointTest):
    """CRUD plus object-permission tests for product API scan configurations."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Product_API_Scan_Configuration
        self.viewset = ProductAPIScanConfigurationViewSet
        self.viewname = 'product_api_scan_configuration'
        self.endpoint_path = 'product_api_scan_configurations'
        # Body used by the create test.
        self.payload = {
            "product": 2,
            "service_key_1": "dojo_sonar_key",
            "tool_configuration": 3
        }
        # Field patched by the update test.
        self.update_fields = {'tool_configuration': 2}
        # Object-level permission checks exercised by the base class.
        self.object_permission = True
        self.permission_check_class = Product_API_Scan_Configuration
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_API_Scan_Configuration_Add
        self.permission_update = Permissions.Product_API_Scan_Configuration_Edit
        self.permission_delete = Permissions.Product_API_Scan_Configuration_Delete
        super().__init__(*args, **kwargs)
class ProductTest(BaseClass.RESTEndpointTest):
    """CRUD plus object-permission tests for the product endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Product
        self.viewset = ProductViewSet
        self.viewname = 'product'
        self.endpoint_path = 'products'
        # Body used by the create test.
        # NOTE(review): "tags" is a single element containing a comma --
        # presumably meant to be two tags; verify against the tag parser.
        self.payload = {
            "product_manager": 2,
            "technical_contact": 3,
            "team_manager": 2,
            "prod_type": 1,
            "name": "Test Product",
            "description": "test product",
            "tags": ["mytag, yourtag"]
        }
        # Field patched by the update test.
        self.update_fields = {'prod_type': 2}
        # Object-level permission checks exercised by the base class.
        self.object_permission = True
        self.permission_check_class = Product
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Type_Add_Product
        self.permission_update = Permissions.Product_Edit
        self.permission_delete = Permissions.Product_Delete
        super().__init__(*args, **kwargs)
class StubFindingsTest(BaseClass.RESTEndpointTest):
    """CRUD plus object-permission tests for the stub-finding endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Stub_Finding
        self.viewset = StubFindingsViewSet
        self.viewname = 'stub_finding'
        self.endpoint_path = 'stub_findings'
        # Body used by the create test.
        self.payload = {
            "title": "Stub Finding 1",
            "date": "2017-12-31",
            "severity": "HIGH",
            "description": "test stub finding",
            "reporter": 3,
            "test": 3,
        }
        # Field patched by the update test.
        self.update_fields = {'severity': 'LOW'}
        # Object-level permission checks exercised by the base class.
        self.object_permission = True
        self.permission_check_class = Stub_Finding
        self.permission_check_id = 2
        self.permission_create = Permissions.Finding_Add
        self.permission_update = Permissions.Finding_Edit
        self.permission_delete = Permissions.Finding_Delete
        super().__init__(*args, **kwargs)
class TestsTest(BaseClass.RESTEndpointTest):
    """CRUD plus object-permission tests for the test endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Test
        self.viewset = TestsViewSet
        self.viewname = 'test'
        self.endpoint_path = 'tests'
        # Body used by the create test.
        self.payload = {
            "test_type": 1,
            "environment": 1,
            "engagement": 2,
            "estimated_time": "0:30:20",
            "actual_time": "0:20:30",
            "notes": [],
            "target_start": "2017-01-12T00:00",
            "target_end": "2017-01-12T00:00",
            "percent_complete": 0,
            "lead": 2,
            "tags": [],
            "version": "1.0",
            "branch_tag": "master",
            "commit_hash": "1234567890abcdefghijkl",
        }
        # Field patched by the update test.
        self.update_fields = {'percent_complete': 100}
        # Object-level permission checks exercised by the base class.
        self.object_permission = True
        self.permission_check_class = Test
        self.permission_check_id = 3
        self.permission_create = Permissions.Test_Add
        self.permission_update = Permissions.Test_Edit
        self.permission_delete = Permissions.Test_Delete
        super().__init__(*args, **kwargs)
class ToolConfigurationsTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the tool-configuration endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Tool_Configuration
        self.viewset = ToolConfigurationsViewSet
        self.viewname = 'tool_configuration'
        self.endpoint_path = 'tool_configurations'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "configuration_url": "http://www.example.com",
            "name": "Tool Configuration",
            "description": "",
            "authentication_type": "API",
            "username": "",
            "password": "",
            "auth_title": "",
            "ssh": "",
            "api_key": "test key",
            "tool_type": 1,
        }
        # Field patched by the update test.
        self.update_fields = {'ssh': 'test string'}
        super().__init__(*args, **kwargs)
class ToolProductSettingsTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the tool-product-settings endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Tool_Product_Settings
        self.viewset = ToolProductSettingsViewSet
        self.viewname = 'tool_product_settings'
        self.endpoint_path = 'tool_product_settings'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "setting_url": "http://www.example.com",
            "name": "Tool Product Setting",
            "description": "test tool product setting",
            "tool_project_id": "1",
            "tool_configuration": 3,
        }
        # Field patched by the update test.
        self.update_fields = {'tool_project_id': '2'}
        super().__init__(*args, **kwargs)
class ToolTypesTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the tool-type endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Tool_Type
        self.viewset = ToolTypesViewSet
        self.viewname = 'tool_type'
        self.endpoint_path = 'tool_types'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "name": "Tool Type",
            "description": "test tool type"
        }
        # Field patched by the update test.
        self.update_fields = {'description': 'changed description'}
        super().__init__(*args, **kwargs)
class NoteTypesTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the note-type endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Note_Type
        self.viewset = NoteTypeViewSet
        self.viewname = 'note_type'
        self.endpoint_path = 'note_type'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "name": "Test Note",
            "description": "not that much",
            "is_single": False,
            "is_active": True,
            "is_mandatory": False
        }
        # Field patched by the update test.
        self.update_fields = {'description': 'changed description'}
        super().__init__(*args, **kwargs)
class NotesTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the notes endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = Notes
        self.viewset = NotesViewSet
        self.viewname = 'notes'
        self.endpoint_path = 'notes'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "id": 1,
            "entry": "updated_entry",
            "author": '{"username": "admin"}',
            "editor": '{"username": "user1"}'
        }
        # Field patched by the update test.
        self.update_fields = {'entry': 'changed entry'}
        super().__init__(*args, **kwargs)
class UsersTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the users endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = User
        self.viewset = UsersViewSet
        self.viewname = 'user'
        self.endpoint_path = 'users'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "username": "test_user",
            "first_name": "test",
            "last_name": "user",
            "email": "example@email.com",
            "is_active": True,
        }
        # Field patched by the update test.
        self.update_fields = {"first_name": "test changed"}
        super().__init__(*args, **kwargs)
class UserContactInfoTest(BaseClass.RESTEndpointTest):
    """Generic CRUD tests for the user-contact-info endpoint."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Wiring consumed by the shared RESTEndpointTest machinery.
        self.endpoint_model = UserContactInfo
        self.viewset = UserContactInfoViewSet
        self.viewname = 'usercontactinfo'
        self.endpoint_path = 'user_contact_infos'
        self.object_permission = False
        # Body used by the create test.
        self.payload = {
            "user": 4,
            "title": "Sir",
            "phone_number": "+999999999",
            "cell_number": "+999999999",
            "twitter_username": "defectdojo",
        }
        # Field patched by the update test.
        self.update_fields = {"title": "Lady"}
        super().__init__(*args, **kwargs)
class ProductPermissionTest(DojoAPITestCase):
    """Checks that object-level permissions hide product 3 from user1."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate as the restricted user via token auth.
        restricted_user = User.objects.get(username='user1')
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + Token.objects.get(user=restricted_user).key)

    def test_user_should_not_have_access_to_product_3_in_list(self):
        # Product 3 must be filtered out of the list view entirely.
        response = self.client.get(reverse('product-list'), format='json')
        listed_ids = [obj['id'] for obj in response.data['results']]
        self.assertNotIn(3, listed_ids)

    def test_user_should_not_have_access_to_product_3_in_detail(self):
        # The detail view hides the object behind a 404 rather than a 403.
        response = self.client.get('http://testserver/api/v2/products/3/')
        self.assertEqual(response.status_code, 404)
class ImportScanTest(BaseClass.RESTEndpointTest):
    """Tests for the /import-scan endpoint.

    Inherits the generic endpoint checks from RESTEndpointTest and adds
    authorization tests for the auto-create (product type / product /
    engagement) code paths.  In every test method below the @patch
    decorators apply bottom-up, so the first mock parameter is the
    permission check, the second the importer, the third the reimporter.
    """
    fixtures = ['dojo_testdata.json']
    def __init__(self, *args, **kwargs):
        self.endpoint_model = Test
        self.endpoint_path = 'import-scan'
        self.viewname = 'importscan'
        self.viewset = ImportScanView
        # Payload for the generic create test.
        # NOTE(review): the file handle opened here (and in each payload
        # below) is never closed -- emits ResourceWarning; consider
        # addCleanup or a context manager.
        self.payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "engagement": 1,
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
        }
        self.object_permission = True
        self.permission_create = Permissions.Import_Scan_Result
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
    # Existing product/engagement resolved by name, permission denied -> 403;
    # the check is made against the resolved engagement.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'April monthly engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Engagement.objects.get(id=2),  # engagement id found via product name and engagement name
                                Permissions.Import_Scan_Result)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Auto-creating an engagement without Engagement_Add on the product -> 403.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Product.objects.get(id=1),
                                Permissions.Engagement_Add)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Auto-creating a product without Product_Type_Add_Product -> 403.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Product_Type.objects.get(id=1),
                                Permissions.Product_Type_Add_Product)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Auto-creating a product type needs the *global* Product_Type_Add permission.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_global_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "more books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Permissions.Product_Type_Add)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
        """
        Test creating a new engagement should also check for import scan permission in the product
        """
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        # Both permission checks must have fired: engagement creation and import.
        mock.assert_has_calls([
            call(User.objects.get(username='admin'),
                 Product.objects.get(id=1),
                 Permissions.Engagement_Add),
            call(User.objects.get(username='admin'),
                 Product.objects.get(id=1),
                 Permissions.Import_Scan_Result)
        ])
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
    # Authorized auto-create of a product under an existing product type -> 201.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Product_Type.objects.get(id=1),
                                Permissions.Product_Type_Add_Product)
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
    # Authorized auto-create of a brand-new product type -> 201 via global permission.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_global_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "more books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Permissions.Product_Type_Add)
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
class ReimportScanTest(DojoAPITestCase):
    """Tests for the /reimport-scan endpoint.

    Covers the plain reimport path plus the authorization checks for
    context lookup by product/engagement/test name and the auto-create
    code paths.  In every test method below the @patch decorators apply
    bottom-up, so the first mock parameter is the permission check, the
    second the importer, the third the reimporter.
    NOTE(review): the file handles opened in the payloads are never
    closed -- emits ResourceWarning; consider addCleanup.
    """
    fixtures = ['dojo_testdata.json']
    def setUp(self):
        # Authenticate as admin via token auth and resolve the list URL once.
        testuser = User.objects.get(username='admin')
        token = Token.objects.get(user=testuser)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        self.url = reverse('reimportscan' + '-list')
    # Specific tests for reimport
    # Reimport into an existing test must not create a new Test row.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    def test_reimport_zap_xml(self, importer_mock, reimporter_mock):
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        length = Test.objects.all().count()
        response = self.client.post(
            reverse('reimportscan-list'), {
                "scan_date": '2017-12-30',
                "minimum_severity": 'Low',
                "active": True,
                "verified": True,
                "scan_type": 'ZAP Scan',
                "file": open('tests/zap_sample.xml'),
                "test": 3,
                "version": "1.0.1",
            })
        self.assertEqual(length, Test.objects.all().count())
        self.assertEqual(201, response.status_code, response.content[:1000])
        # TODO add schema check
        importer_mock.assert_not_called()
        reimporter_mock.assert_called_once()
    # Existing product/engagement resolved by name, permission denied -> 403;
    # the check is made against the test resolved via names + scan_type.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Security How-to',
            "engagement_name": 'April monthly engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Test.objects.get(id=4),  # test id found via product name and engagement name and scan_type
                                Permissions.Import_Scan_Result)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Unknown test title + auto-create -> falls back to a fresh import (201).
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_authorized_product_name_engagement_name_scan_type_title_auto_create(self, mock, importer_mock, reimporter_mock):
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Security How-to',
            "engagement_name": 'April monthly engagement',
            "test_title": 'My ZAP Scan NEW',
            "version": "1.0.0",
            "auto_create_context": True,
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Engagement.objects.get(id=4),
                                Permissions.Import_Scan_Result)
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
        """
        Test creating a new engagement should also check for import scan permission in the product
        """
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        # Both permission checks must have fired: engagement creation and import.
        mock.assert_has_calls([
            call(User.objects.get(username='admin'),
                 Product.objects.get(id=1),
                 Permissions.Engagement_Add),
            call(User.objects.get(username='admin'),
                 Product.objects.get(id=1),
                 Permissions.Import_Scan_Result)
        ])
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
    # Authorized auto-create of a product under an existing product type -> 201.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Product_Type.objects.get(id=1),
                                Permissions.Product_Type_Add_Product)
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
    # Authorized auto-create of a brand-new product type -> 201 via global permission.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_global_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "more books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Permissions.Product_Type_Add)
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
    # Reimport by explicit test id without permission -> 403 on that test.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": True,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "test": 3,
            "version": "1.0.1"
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Test.objects.get(id=3),
                                Permissions.Import_Scan_Result)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # copied tests from import, unsure how to use inheritance/mixins with test_ methods
    # Auto-creating an engagement without Engagement_Add on the product -> 403.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Product.objects.get(id=1),
                                Permissions.Engagement_Add)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Auto-creating a product without Product_Type_Add_Product -> 403.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Product_Type.objects.get(id=1),
                                Permissions.Product_Type_Add_Product)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Auto-creating a product type needs the *global* Product_Type_Add permission.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_global_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "more books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Permissions.Product_Type_Add)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Context resolved by names + scan_type only, permission denied -> 403.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_scan_type(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Security How-to',
            "engagement_name": 'April monthly engagement',
            "version": "1.0.0",
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Test.objects.get(id=4),  # engagement id found via product name and engagement name
                                Permissions.Import_Scan_Result)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
    # Context resolved by names + scan_type + test title, permission denied -> 403.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_scan_type_title(self, mock, importer_mock, reimporter_mock):
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Security How-to',
            "engagement_name": 'April monthly engagement',
            "test_title": 'My ZAP Scan',
            "version": "1.0.0",
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
                                Test.objects.get(id=4),  # test id found via product name and engagement name and scan_type and test_title
                                Permissions.Import_Scan_Result)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()
class ProductTypeTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for /product_types, including create authorization
    checks for the different global roles."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = ProductTypeViewSet
        self.viewname = 'product_type'
        self.endpoint_path = 'product_types'
        self.endpoint_model = Product_Type
        # Create / update payloads
        self.payload = {
            "name": "Test Product Type",
            "description": "Test",
            "key_product": True,
            "critical_product": False
        }
        self.update_fields = {'description': "changed"}
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Product_Type
        self.permission_check_id = 1
        self.permission_update = Permissions.Product_Type_Edit
        self.permission_delete = Permissions.Product_Type_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def _post_and_expect(self, expected_status):
        # Shared helper: POST the standard payload and assert the status code.
        response = self.client.post(self.url, self.payload)
        self.assertEqual(expected_status, response.status_code, response.content[:1000])

    def test_create_not_authorized(self):
        # A user without any role may not create product types.
        self.setUp_not_authorized()
        self._post_and_expect(403)

    def test_create_not_authorized_reader(self):
        # A global reader may not create product types either.
        self.setUp_global_reader()
        self._post_and_expect(403)

    def test_create_authorized_owner(self):
        # A global owner is allowed to create product types.
        self.setUp_global_owner()
        self._post_and_expect(201)
class DojoGroupsTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for /dojo_groups."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = DojoGroupViewSet
        self.viewname = 'dojo_group'
        self.endpoint_path = 'dojo_groups'
        self.endpoint_model = Dojo_Group
        # Create / update payloads
        self.payload = {
            "name": "Test Group",
            "description": "Test",
        }
        self.update_fields = {'description': "changed"}
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Dojo_Group
        self.permission_check_id = 1
        self.permission_update = Permissions.Group_Edit
        self.permission_delete = Permissions.Group_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def test_create_not_authorized(self):
        # A user without any role may not create groups.
        self.setUp_not_authorized()
        response = self.client.post(self.url, self.payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
class DojoGroupsUsersTest(BaseClass.MemberEndpointTest):
    """Member endpoint tests for /dojo_group_members."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Dojo_Group_Member
        self.endpoint_path = 'dojo_group_members'
        self.viewname = 'dojo_group_member'
        self.viewset = DojoGroupMemberViewSet
        self.payload = {
            "group": 1,
            "user": 3,
            "role": 4
        }
        self.update_fields = {'role': 3}
        self.object_permission = True
        self.permission_check_class = Dojo_Group_Member
        self.permission_check_id = 1
        self.permission_create = Permissions.Group_Manage_Members
        self.permission_update = Permissions.Group_Manage_Members
        self.permission_delete = Permissions.Group_Member_Delete
        # Consistency fix: delegate to MemberEndpointTest.__init__ like every
        # other member-endpoint test class.  Behaviorally equivalent, since
        # MemberEndpointTest.__init__ only forwards to RESTEndpointTest.
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class RolesTest(BaseClass.RESTEndpointTest):
    """Read-only REST endpoint tests for /roles (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring only — roles are read-only reference data.
        self.viewset = RoleViewSet
        self.viewname = 'role'
        self.endpoint_path = 'roles'
        self.endpoint_model = Role
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class GlobalRolesTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for /global_roles (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = GlobalRoleViewSet
        self.viewname = 'global_role'
        self.endpoint_path = 'global_roles'
        self.endpoint_model = Global_Role
        # Create / update payloads
        self.payload = {
            "user": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ProductTypeMemberTest(BaseClass.MemberEndpointTest):
    """Member endpoint tests for /product_type_members."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = ProductTypeMemberViewSet
        self.viewname = 'product_type_member'
        self.endpoint_path = 'product_type_members'
        self.endpoint_model = Product_Type_Member
        # Create / update payloads
        self.payload = {
            "product_type": 1,
            "user": 3,
            "role": 2
        }
        self.update_fields = {'role': 3}
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Product_Type_Member
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Type_Manage_Members
        self.permission_update = Permissions.Product_Type_Manage_Members
        self.permission_delete = Permissions.Product_Type_Member_Delete
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class ProductMemberTest(BaseClass.MemberEndpointTest):
    """Member endpoint tests for /product_members."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = ProductMemberViewSet
        self.viewname = 'product_member'
        self.endpoint_path = 'product_members'
        self.endpoint_model = Product_Member
        # Create / update payloads
        self.payload = {
            "product": 3,
            "user": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Product_Member
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Manage_Members
        self.permission_update = Permissions.Product_Manage_Members
        self.permission_delete = Permissions.Product_Member_Delete
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class ProductTypeGroupTest(BaseClass.MemberEndpointTest):
    """Member endpoint tests for /product_type_groups."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = ProductTypeGroupViewSet
        self.viewname = 'product_type_group'
        self.endpoint_path = 'product_type_groups'
        self.endpoint_model = Product_Type_Group
        # Create / update payloads
        self.payload = {
            "product_type": 1,
            "group": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Product_Type_Group
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Type_Group_Add
        self.permission_update = Permissions.Product_Type_Group_Edit
        self.permission_delete = Permissions.Product_Type_Group_Delete
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class ProductGroupTest(BaseClass.MemberEndpointTest):
    """Member endpoint tests for /product_groups."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = ProductGroupViewSet
        self.viewname = 'product_group'
        self.endpoint_path = 'product_groups'
        self.endpoint_model = Product_Group
        # Create / update payloads
        self.payload = {
            "product": 1,
            "group": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Product_Group
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Group_Add
        self.permission_update = Permissions.Product_Group_Edit
        self.permission_delete = Permissions.Product_Group_Delete
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class LanguageTypeTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for /language_types (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = LanguageTypeViewSet
        self.viewname = 'language_type'
        self.endpoint_path = 'language_types'
        self.endpoint_model = Language_Type
        # Create / update payloads
        self.payload = {
            'language': 'Test',
            'color': 'red',
            'created': '2018-08-16T16:58:23.908Z'
        }
        self.update_fields = {'color': 'blue'}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class LanguageTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for /languages."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = LanguageViewSet
        self.viewname = 'languages'
        self.endpoint_path = 'languages'
        self.endpoint_model = Languages
        # Create / update payloads
        self.payload = {
            'product': 1,
            'language': 2,
            'user': 1,
            'files': 2,
            'blank': 3,
            'comment': 4,
            'code': 5,
            'created': '2018-08-16T16:58:23.908Z'
        }
        self.update_fields = {'code': 10}
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Languages
        self.permission_check_id = 1
        self.permission_create = Permissions.Language_Add
        self.permission_update = Permissions.Language_Edit
        self.permission_delete = Permissions.Language_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ImportLanguagesTest(BaseClass.RESTEndpointTest):
    """Tests for the cloc import endpoint (/import-languages), verifying the
    Languages rows created for product 1."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = ImportLanguagesView
        self.viewname = 'importlanguages'
        self.endpoint_path = 'import-languages'
        self.endpoint_model = Languages
        # Create payload (file handle is consumed by the POST in test_create)
        self.payload = {
            'product': 1,
            'file': open("unittests/files/defectdojo_cloc.json")
        }
        # Object-level authorization configuration
        self.object_permission = True
        self.permission_check_class = Languages
        self.permission_create = Permissions.Language_Add
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def test_create(self):
        BaseClass.RESTEndpointTest.test_create(self)
        languages = Languages.objects.filter(product=1).order_by('language')
        self.assertEqual(2, len(languages))
        # Expected (language_type_id, files, blank, comment, code) per row,
        # ordered by language id; values come from the cloc sample file.
        expected_rows = [
            (1, 21, 7, 0, 63996),
            (2, 432, 10813, 5054, 51056),
        ]
        for language, (type_id, files, blank, comment, code) in zip(languages, expected_rows):
            self.assertEqual(language.product, Product.objects.get(id=1))
            self.assertEqual(language.language, Language_Type.objects.get(id=type_id))
            self.assertEqual(language.files, files)
            self.assertEqual(language.blank, blank)
            self.assertEqual(language.comment, comment)
            self.assertEqual(language.code, code)
class NotificationsTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for /notifications (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring
        self.viewset = NotificationsViewSet
        self.viewname = 'notifications'
        self.endpoint_path = 'notifications'
        self.endpoint_model = Notifications
        # Create / update payloads
        self.payload = {
            'product': 1,
            'user': 3,
            'product_type_added': ["alert", "msteams"]
        }
        self.update_fields = {'product_added': ["alert", "msteams"]}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class UserProfileTest(DojoAPITestCase):
    """Tests for the /user_profile endpoint, which aggregates the requesting
    user's account, contact info, global role, group memberships and
    product-type/product memberships."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate as the admin user via token auth.
        admin_user = User.objects.get(username='admin')
        admin_token = Token.objects.get(user=admin_user)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
        self.url = reverse('user_profile')

    def test_profile(self):
        data = json.loads(self.client.get(reverse('user_profile')).content)
        # All expected values below come from the dojo_testdata.json fixture.
        self.assertEqual(1, data['user']['id'])
        self.assertEqual('admin', data['user']['username'])
        self.assertTrue(data['user']['is_superuser'])
        self.assertEqual(1, data['user_contact_info']['user'])
        self.assertEqual('#admin', data['user_contact_info']['twitter_username'])
        self.assertEqual(1, data['global_role']['user'])
        self.assertEqual(4, data['global_role']['role'])
        self.assertEqual(1, data['dojo_group_member'][0]['user'])
        self.assertEqual(1, data['dojo_group_member'][0]['group'])
        self.assertEqual(1, data['product_type_member'][0]['user'])
        self.assertEqual(1, data['product_type_member'][0]['product_type'])
        self.assertEqual(1, data['product_member'][1]['user'])
        self.assertEqual(3, data['product_member'][1]['product'])
| 40.504583 | 140 | 0.621255 | from collections import OrderedDict
from drf_spectacular.drainage import GENERATOR_STATS
from unittest.mock import call, patch, ANY
from dojo.models import Product, Engagement, Test, Finding, \
JIRA_Issue, Tool_Product_Settings, Tool_Configuration, Tool_Type, \
User, Stub_Finding, Endpoint, JIRA_Project, JIRA_Instance, \
Finding_Template, Note_Type, App_Analysis, Endpoint_Status, \
Sonarqube_Issue, Sonarqube_Issue_Transition, Product_API_Scan_Configuration, Notes, \
BurpRawRequestResponse, DojoMeta, FileUpload, Product_Type, Dojo_Group, \
Role, Product_Type_Member, Product_Member, Product_Type_Group, \
Product_Group, Global_Role, Dojo_Group_Member, Language_Type, Languages, \
Notifications, UserContactInfo
from dojo.api_v2.views import EndPointViewSet, EngagementViewSet, \
FindingTemplatesViewSet, FindingViewSet, JiraInstanceViewSet, \
JiraIssuesViewSet, JiraProjectViewSet, ProductViewSet, \
StubFindingsViewSet, TestsViewSet, \
ToolConfigurationsViewSet, ToolProductSettingsViewSet, ToolTypesViewSet, \
UsersViewSet, ImportScanView, NoteTypeViewSet, AppAnalysisViewSet, \
EndpointStatusViewSet, SonarqubeIssueViewSet, NotesViewSet, ProductTypeViewSet, \
DojoGroupViewSet, RoleViewSet, ProductTypeMemberViewSet, ProductMemberViewSet, \
ProductTypeGroupViewSet, ProductGroupViewSet, GlobalRoleViewSet, \
DojoGroupMemberViewSet, LanguageTypeViewSet, LanguageViewSet, ImportLanguagesView, \
NotificationsViewSet, UserContactInfoViewSet, ProductAPIScanConfigurationViewSet
from json import dumps
from django.urls import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from .dojo_test_case import DojoAPITestCase
from dojo.api_v2.prefetch.utils import _get_prefetchable_fields
from rest_framework.mixins import \
ListModelMixin, RetrieveModelMixin, CreateModelMixin, \
DestroyModelMixin, UpdateModelMixin
from dojo.api_v2.prefetch import PrefetchListMixin, PrefetchRetrieveMixin
from drf_spectacular.settings import spectacular_settings
import logging
import pathlib
import json
from dojo.authorization.roles_permissions import Permissions
logger = logging.getLogger(__name__)
BASE_API_URL = "/api/v2"
TYPE_OBJECT = "object"
TYPE_STRING = "string"
TYPE_NUMBER = "number"
TYPE_INTEGER = "integer"
TYPE_BOOLEAN = "boolean"
TYPE_ARRAY = "array"
TYPE_FILE = "file"
def get_open_api3_json_schema():
    """Build, summarize and validate the project's OpenAPI v3 schema.

    Uses the generator class configured in drf-spectacular's settings, emits
    the collected generator warnings via GENERATOR_STATS, validates the
    resulting schema and returns it as a dict.
    """
    generator_class = spectacular_settings.DEFAULT_GENERATOR_CLASS
    generator = generator_class()
    schema = generator.get_schema(request=None, public=True)
    GENERATOR_STATS.emit_summary()

    # Imported here rather than at module top — NOTE(review): presumably to
    # defer the optional validation dependency until it is needed; confirm.
    from drf_spectacular.validation import validate_schema
    validate_schema(schema)

    return schema
# Generate the (expensive) OpenAPI schema once at import time; every test
# class shares it through this module-level name.  The previous module-level
# ``global`` statement was a no-op at module scope and has been removed.
open_api3_json_schema = get_open_api3_json_schema()
def skipIfNotSubclass(baseclass):
    """Decorator factory for RESTEndpointTest methods.

    The decorated test is skipped unless the test class's ``viewset``
    attribute is a subclass of *baseclass* (e.g. ListModelMixin).
    """
    import functools  # local import: only this decorator needs it

    def decorate(f):
        # functools.wraps preserves the wrapped test's __name__/__doc__ so
        # unittest reporting shows the real test name instead of "wrapper".
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            if not issubclass(self.viewset, baseclass):
                self.skipTest('This view does not inherit from %s' % baseclass)
            else:
                f(self, *args, **kwargs)
        return wrapper
    return decorate
# def testIsBroken(method):
# return tag("broken")(method)
def check_response_valid(expected_code, response):
    """Assert that *response* carries *expected_code*; the failure message
    includes any response data to ease debugging."""
    payload = getattr(response, "data", None)
    assert response.status_code == expected_code, \
        f"Response invalid, returned with code {response.status_code}\nResponse Data:\n{payload}"
def format_url(path):
    """Return *path* prefixed with the API v2 base URL."""
    return "%s%s" % (BASE_API_URL, path)
class SchemaChecker():
    """Validates an API response object against an OpenAPI v3 schema fragment.

    Mismatches are collected (with a '#'-joined path prefix for context)
    instead of failing on the first one; ``check()`` raises a single assertion
    listing all collected errors at the end.
    """
    def __init__(self, components):
        self._prefix = []
        self._has_failed = False
        self._components = components
        self._errors = []

    def _register_error(self, error):
        self._errors += [error]

    def _check_or_fail(self, condition, message):
        # Record the failure but keep walking so all errors are reported.
        if not condition:
            self._has_failed = True
            self._register_error(message)

    def _get_prefix(self):
        # Reconstructed: the source line was truncated ("return '").  The
        # prefix is the '#'-joined path of elements visited so far, matching
        # the f"{self._get_prefix()}#{required_field}" usage below.
        return '#'.join(self._prefix)

    def _push_prefix(self, prefix):
        self._prefix += [prefix]

    def _pop_prefix(self):
        self._prefix = self._prefix if len(self._prefix) == 0 else self._prefix[:-1]

    def _resolve_if_ref(self, schema):
        # Follow "$ref": "#/components/schemas/<Name>" indirections.
        if '$ref' not in schema:
            return schema
        ref_name = schema["$ref"]
        ref_name = ref_name[ref_name.rfind("/") + 1:]
        return self._components['schemas'][ref_name]

    def _check_has_required_fields(self, required_fields, obj):
        for required_field in required_fields:
            # passwords are writeOnly, but this is not supported by Swagger / OpenAPIv2
            # TODO check this for OpenAPI3
            if required_field != 'password':
                field = f"{self._get_prefix()}#{required_field}"
                self._check_or_fail(obj is not None and required_field in obj, f"{field} is required but was not returned")

    def _check_type(self, schema, obj):
        # Schemas without an explicit "type" (e.g. oneOf/allOf enum wrappers)
        # are not type-checked yet.  TODO implement OneOf / AllOf (enums).
        if 'type' not in schema:
            return schema
        schema_type = schema["type"]
        is_nullable = schema.get("x-nullable", False) or schema.get("readOnly", False)

        def _check_helper(check):
            self._check_or_fail(check, f"{self._get_prefix()} should be of type {schema_type} but value was of type {type(obj)}")

        if obj is None:
            self._check_or_fail(is_nullable, f"{self._get_prefix()} is not nullable yet the value returned was null")
        elif schema_type == TYPE_BOOLEAN:
            _check_helper(isinstance(obj, bool))
        elif schema_type == TYPE_INTEGER:
            _check_helper(isinstance(obj, int))
        elif schema_type == TYPE_NUMBER:
            # NOTE(review): assumes numbers arrive serialized as strings
            # (str.isdecimal) — confirm against the renderer in use.
            _check_helper(obj.isdecimal())
        elif schema_type == TYPE_ARRAY:
            _check_helper(isinstance(obj, list))
        elif schema_type == TYPE_OBJECT:
            # OrderedDict is a dict subclass, so one isinstance check suffices.
            _check_helper(isinstance(obj, dict))
        elif schema_type == TYPE_STRING:
            _check_helper(isinstance(obj, str))
        else:
            # Unknown schema type: always report.
            _check_helper(False)

    def _with_prefix(self, prefix, callable, *args):
        # Run *callable* with *prefix* temporarily appended to the error path.
        self._push_prefix(prefix)
        callable(*args)
        self._pop_prefix()

    def check(self, schema, obj):
        def _check(schema, obj):
            # Convert sets to lists to streamline checks.
            # Fixed: compare the type name with ==, not identity via "is"
            # (string identity is an interning accident, not a guarantee).
            if 'type' in schema and schema["type"] == TYPE_ARRAY and isinstance(obj, set):
                obj = list(obj)

            schema = self._resolve_if_ref(schema)
            self._check_type(schema, obj)

            required_fields = schema.get("required", [])
            self._check_has_required_fields(required_fields, obj)

            if obj is None:
                return

            properties = schema.get("properties", None)
            if properties is not None:
                for name, prop in properties.items():
                    obj_child = obj.get(name, None)
                    if obj_child is not None:
                        _check(prop, obj_child)

                # Flag response keys that the schema does not declare.
                for child_name in obj.keys():
                    # TODO prefetch mixins not picked up by spectacular?
                    if child_name not in ['prefetch']:
                        if not properties or child_name not in properties.keys():
                            self._has_failed = True
                            self._register_error(f'unexpected property "{child_name}" found')

            additional_properties = schema.get("additionalProperties", None)
            if additional_properties is not None:
                for name, obj_child in obj.items():
                    self._with_prefix(f"additionalProp<{name}>", _check, additional_properties, obj_child)

            # TODO implement support for enum / OneOf / AllOf
            if 'type' in schema and schema["type"] == TYPE_ARRAY:
                items_schema = schema["items"]
                for index in range(len(obj)):
                    self._with_prefix(f"item{index}", _check, items_schema, obj[index])

        # Reset accumulated state so the checker instance can be reused.
        self._has_failed = False
        self._errors = []
        self._prefix = []
        _check(schema, obj)
        assert not self._has_failed, "\n" + '\n'.join(self._errors) + "\nFailed with " + str(len(self._errors)) + " errors"
class BaseClass():
    """Namespace wrapper so the generic endpoint test bases below are not
    collected by the test runner as standalone test cases."""
    class RESTEndpointTest(DojoAPITestCase):
        """Generic list/create/detail/update/delete, schema and authorization
        tests for one API endpoint.

        Subclasses configure in __init__: endpoint_model, endpoint_path,
        viewname, viewset, payload, update_fields, object_permission and the
        permission_* attributes.
        """
        def __init__(self, *args, **kwargs):
            DojoAPITestCase.__init__(self, *args, **kwargs)

        def setUp(self):
            # Authenticate as admin via token auth and resolve the list URL.
            testuser = User.objects.get(username='admin')
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
            self.url = reverse(self.viewname + '-list')
            self.schema = open_api3_json_schema

        def check_schema(self, schema, obj):
            schema_checker = SchemaChecker(self.schema["components"])
            # print(vars(schema_checker))
            # NOTE(review): this passes self.schema (the whole OpenAPI
            # document) instead of the `schema` fragment argument — which
            # makes the walk trivially succeed.  Looks like it should be
            # `schema_checker.check(schema, obj)`; confirm.
            schema_checker.check(self.schema, obj)

        # def get_valid_object_id(self):
        #     response = self.client.get(format_url(f"/{self.viewname}/"))
        #     check_response_valid(status.HTTP_200_OK, response)
        #     if len(response.data["results"]) == 0:
        #         return None
        #     return response.data["results"][0].get('id', None)

        def get_endpoint_schema(self, path, method):
            # Look up the schema entry for (path, method), asserting both exist.
            paths = self.schema["paths"]
            methods = paths.get(path, None)
            assert methods is not None, f"{path} not found in {[path for path in paths.keys()]}"
            endpoint = methods.get(method, None)
            assert endpoint is not None, f"Method {method} not found in {[method for method in methods.keys()]}"
            return endpoint

        def check_schema_response(self, method, status_code, response, detail=False):
            # Validate response.data against the declared response schema for
            # this endpoint/method/status (detail adds the '{id}/' suffix).
            detail_path = '{id}/' if detail else ''
            endpoints_schema = self.schema["paths"][format_url(f"/{self.endpoint_path}/{detail_path}")]
            schema = endpoints_schema[method]['responses'][status_code]['content']['application/json']['schema']
            obj = response.data
            self.check_schema(schema, obj)

        @skipIfNotSubclass(ListModelMixin)
        def test_list(self):
            # print(open_api3_json_schema)
            # validator = ResponseValidator(spec)
            check_for_tags = False
            if hasattr(self.endpoint_model, 'tags') and self.payload and self.payload.get('tags', None):
                # create a new instance first to make sure there's at least 1 instance with tags set by payload to trigger tag handling code
                logger.debug('creating model with endpoints: %s', self.payload)
                response = self.client.post(self.url, self.payload)
                self.assertEqual(201, response.status_code, response.content[:1000])
                check_for_id = response.data['id']
                check_for_tags = self.payload.get('tags', None)
            response = self.client.get(self.url, format='json')
            if check_for_tags:
                tags_found = False
                for result in response.data['results']:
                    if result['id'] == check_for_id:
                        self.assertEqual(len(check_for_tags), len(result.get('tags', None)))
                        for tag in check_for_tags:
                            self.assertTrue(tag in result['tags'])
                        tags_found = True
                self.assertTrue(tags_found)
            self.assertEqual(200, response.status_code, response.content[:1000])
            self.check_schema_response('get', '200', response)

        @skipIfNotSubclass(CreateModelMixin)
        def test_create(self):
            # POST the configured payload; the model count must grow by one.
            length = self.endpoint_model.objects.count()
            response = self.client.post(self.url, self.payload)
            logger.debug('test_create_response:')
            logger.debug(response)
            logger.debug(response.data)
            self.assertEqual(201, response.status_code, response.content[:1000])
            self.assertEqual(self.endpoint_model.objects.count(), length + 1)
            if hasattr(self.endpoint_model, 'tags') and self.payload and self.payload.get('tags', None):
                self.assertEqual(len(self.payload.get('tags')), len(response.data.get('tags', None)))
                for tag in self.payload.get('tags'):
                    self.assertTrue(tag in response.data['tags'])
            self.check_schema_response('post', '201', response)

        @skipIfNotSubclass(RetrieveModelMixin)
        def test_detail(self):
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.get(relative_url)
            self.assertEqual(200, response.status_code, response.content[:1000])
            # Sensitive fields must never be serialized into responses.
            self.assertFalse('password' in response.data)
            self.assertFalse('ssh' in response.data)
            self.assertFalse('api_key' in response.data)
            self.check_schema_response('get', '200', response, detail=True)

        @skipIfNotSubclass(DestroyModelMixin)
        def test_delete(self):
            # Delete the last listed instance (avoids fixture rows that other
            # tests reference by id).
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][-1]['id']
            response = self.client.delete(relative_url)
            self.assertEqual(204, response.status_code, response.content[:1000])

        @skipIfNotSubclass(UpdateModelMixin)
        def test_update(self):
            # PATCH with update_fields, then PUT with the full payload.
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.patch(relative_url, self.update_fields)
            self.assertEqual(200, response.status_code, response.content[:1000])
            self.check_schema_response('patch', '200', response, detail=True)
            for key, value in self.update_fields.items():
                # Write-only / sensitive fields are not echoed back.
                if key not in ['push_to_jira', 'ssh', 'password', 'api_key']:
                    # Compare lists order-insensitively via sets.
                    if isinstance(value, list):
                        value = set(value)
                    if isinstance(response.data[key], list):
                        response_data = set(response.data[key])
                    else:
                        response_data = response.data[key]
                    self.assertEqual(value, response_data)
            self.assertFalse('push_to_jira' in response.data)
            self.assertFalse('ssh' in response.data)
            self.assertFalse('password' in response.data)
            self.assertFalse('api_key' in response.data)
            if hasattr(self.endpoint_model, 'tags') and self.update_fields and self.update_fields.get('tags', None):
                self.assertEqual(len(self.update_fields.get('tags')), len(response.data.get('tags', None)))
                for tag in self.update_fields.get('tags'):
                    logger.debug('looking for tag %s in tag list %s', tag, response.data['tags'])
                    self.assertTrue(tag in response.data['tags'])
            response = self.client.put(
                relative_url, self.payload)
            self.assertEqual(200, response.status_code, response.content[:1000])
            self.check_schema_response('put', '200', response, detail=True)

        @skipIfNotSubclass(PrefetchRetrieveMixin)
        def test_detail_prefetch(self):
            # Request every prefetchable relation and verify each referenced
            # id is present in the returned "prefetch" section.
            prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.get(relative_url, data={
                "prefetch": ','.join(prefetchable_fields)
            })
            self.assertEqual(200, response.status_code)
            obj = response.data
            self.assertTrue("prefetch" in obj)
            for field in prefetchable_fields:
                field_value = obj.get(field, None)
                if field_value is None:
                    continue
                self.assertTrue(field in obj["prefetch"])
                values = field_value if type(field_value) is list else [field_value]
                for value in values:
                    self.assertTrue(value in obj["prefetch"][field])

        @skipIfNotSubclass(PrefetchListMixin)
        def test_list_prefetch(self):
            # Same as test_detail_prefetch but over the list endpoint.
            prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]
            response = self.client.get(self.url, data={
                "prefetch": ','.join(prefetchable_fields)
            })
            self.assertEqual(200, response.status_code)
            objs = response.data
            self.assertTrue("results" in objs)
            self.assertTrue("prefetch" in objs)
            for obj in objs["results"]:
                for field in prefetchable_fields:
                    field_value = obj.get(field, None)
                    if field_value is None:
                        continue
                    self.assertTrue(field in objs["prefetch"])
                    values = field_value if type(field_value) is list else [field_value]
                    for value in values:
                        # Nested serializations carry the id under 'id'.
                        if type(value) is not int:
                            value = value['id']
                        self.assertTrue(value in objs["prefetch"][field])

        def setUp_not_authorized(self):
            # Re-authenticate as fixture user id=3 (no roles).
            testuser = User.objects.get(id=3)
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

        def setUp_global_reader(self):
            # Re-authenticate as fixture user id=5 (global reader).
            testuser = User.objects.get(id=5)
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

        def setUp_global_owner(self):
            # Re-authenticate as fixture user id=6 (global owner).
            testuser = User.objects.get(id=6)
            token = Token.objects.get(user=testuser)
            self.client = APIClient()
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

        @skipIfNotSubclass(ListModelMixin)
        def test_list_not_authorized(self):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            self.setUp_not_authorized()
            response = self.client.get(self.url, format='json')
            # The list succeeds but must be empty for an unauthorized user.
            self.assertFalse(response.data['results'])
            self.assertEqual(200, response.status_code, response.content[:1000])

        @skipIfNotSubclass(RetrieveModelMixin)
        def test_detail_not_authorized(self):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            self.setUp_not_authorized()
            current_objects = self.endpoint_model.objects.all()
            relative_url = self.url + '%s/' % current_objects[0].id
            response = self.client.get(relative_url)
            # Object-level filtering hides the instance entirely (404, not 403).
            self.assertEqual(404, response.status_code, response.content[:1000])

        @skipIfNotSubclass(CreateModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_create_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            response = self.client.post(self.url, self.payload)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                                    ANY,
                                    self.permission_create)

        @skipIfNotSubclass(DestroyModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_delete_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.delete(relative_url)
            # NOTE(review): unlike the other *_not_authorized tests there is
            # no assertEqual(403, ...) on the response here — possibly missing.
            mock.assert_called_with(User.objects.get(username='admin'),
                                    self.permission_check_class.objects.get(id=self.permission_check_id),
                                    self.permission_delete)

        @skipIfNotSubclass(UpdateModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_update_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']

            response = self.client.patch(relative_url, self.update_fields)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                                    self.permission_check_class.objects.get(id=self.permission_check_id),
                                    self.permission_update)

            response = self.client.put(relative_url, self.payload)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                                    self.permission_check_class.objects.get(id=self.permission_check_id),
                                    self.permission_update)

    class MemberEndpointTest(RESTEndpointTest):
        """Variant for *_members / *_groups endpoints where PATCH is not
        allowed (405) and updates must use PUT."""
        def __init__(self, *args, **kwargs):
            BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

        def test_update(self):
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            # PATCH is rejected for member endpoints; only PUT is supported.
            response = self.client.patch(relative_url, self.update_fields)
            self.assertEqual(405, response.status_code, response.content[:1000])

            response = self.client.put(
                relative_url, self.payload)
            self.assertEqual(200, response.status_code, response.content[:1000])
            self.check_schema_response('put', '200', response, detail=True)

        @skipIfNotSubclass(UpdateModelMixin)
        @patch('dojo.api_v2.permissions.user_has_permission')
        def test_update_not_authorized(self, mock):
            if not self.object_permission:
                self.skipTest('Authorization is not object based')
            mock.return_value = False
            current_objects = self.client.get(self.url, format='json').data
            relative_url = self.url + '%s/' % current_objects['results'][0]['id']
            response = self.client.put(relative_url, self.payload)
            self.assertEqual(403, response.status_code, response.content[:1000])
            mock.assert_called_with(User.objects.get(username='admin'),
                                    self.permission_check_class.objects.get(id=self.permission_check_id),
                                    self.permission_update)
class AppAnalysisTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/technologies (App_Analysis)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = App_Analysis
        self.viewset = AppAnalysisViewSet
        self.viewname = 'app_analysis'
        self.endpoint_path = 'technologies'
        # Body posted by the create tests; update tests patch the version.
        self.payload = dict(
            product=1,
            name='Tomcat',
            user=1,
            confidence=100,
            version='8.5.1',
            icon='',
            website='',
            website_found='',
            created='2018-08-16T16:58:23.908Z',
        )
        self.update_fields = dict(version='9.0')
        # Object-level authorization is evaluated against the owning product.
        self.object_permission = True
        self.permission_check_class = Product
        self.permission_check_id = 1
        self.permission_create = Permissions.Technology_Add
        self.permission_update = Permissions.Technology_Edit
        self.permission_delete = Permissions.Technology_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class EndpointStatusTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/endpoint_status."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Endpoint_Status
        self.endpoint_path = 'endpoint_status'
        self.viewname = 'endpoint_status'
        self.viewset = EndpointStatusViewSet
        # Links finding 2 to endpoint 2 with all status flags cleared.
        self.payload = {
            'endpoint': 2,
            'finding': 2,
            'mitigated': False,
            'false_positive': False,
            'risk_accepted': False,
            'out_of_scope': False,
            "date": "2017-01-12T00:00",
        }
        self.update_fields = {'mitigated': True}
        # Create/update/delete are all authorized via Endpoint_Edit on endpoint 2.
        self.object_permission = True
        self.permission_check_class = Endpoint
        self.permission_check_id = 2
        self.permission_create = Permissions.Endpoint_Edit
        self.permission_update = Permissions.Endpoint_Edit
        self.permission_delete = Permissions.Endpoint_Edit
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class EndpointTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/endpoints."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = Endpoint
        self.viewset = EndPointViewSet
        self.viewname = 'endpoint'
        self.endpoint_path = 'endpoints'
        # Body posted by the create tests; updates switch protocol and tags.
        self.payload = dict(
            protocol='http',
            host='127.0.0.1',
            path='/',
            query='test=true',
            fragment='test-1',
            product=1,
            tags=["mytag", "yourtag"],
        )
        self.update_fields = dict(protocol='ftp', tags=['one_new_tag'])
        # Object-level authorization is evaluated against endpoint 2.
        self.object_permission = True
        self.permission_check_class = Endpoint
        self.permission_check_id = 2
        self.permission_create = Permissions.Endpoint_Add
        self.permission_update = Permissions.Endpoint_Edit
        self.permission_delete = Permissions.Endpoint_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class EngagementTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/engagements."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Engagement
        self.endpoint_path = 'engagements'
        self.viewname = 'engagement'
        self.viewset = EngagementViewSet
        # Minimal interactive engagement attached to product 1.
        self.payload = {
            "engagement_type": 'Interactive',
            "report_type": 1,
            "name": "",
            "description": "",
            "version": "",
            "target_start": '1937-01-01',
            "target_end": '1937-01-01',
            "reason": "",
            "test_strategy": "",
            "product": "1",
            "tags": ["mytag"]
        }
        self.update_fields = {'version': 'latest'}
        # Object-level authorization is evaluated against engagement 1.
        self.object_permission = True
        self.permission_check_class = Engagement
        self.permission_check_id = 1
        self.permission_create = Permissions.Engagement_Add
        self.permission_update = Permissions.Engagement_Edit
        self.permission_delete = Permissions.Engagement_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class FindingRequestResponseTest(DojoAPITestCase):
    """Tests the request/response (Burp raw data) sub-resource of findings."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate as admin via token auth.
        testuser = User.objects.get(username='admin')
        token = Token.objects.get(user=testuser)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

    def test_request_response_post(self):
        # Posting a request/response pair must add one BurpRawRequestResponse row.
        length = BurpRawRequestResponse.objects.count()
        payload = {
            "req_resp": [{"request": "POST", "response": "200"}]
        }
        response = self.client.post('/api/v2/findings/7/request_response/', dumps(payload), content_type='application/json')
        self.assertEqual(200, response.status_code, response.content[:1000])
        self.assertEqual(BurpRawRequestResponse.objects.count(), length + 1)

    def test_request_response_get(self):
        response = self.client.get('/api/v2/findings/7/request_response/', format='json')
        self.assertEqual(200, response.status_code, response.content[:1000])
class FindingFilesTest(DojoAPITestCase):
    """Tests file upload and listing on findings, tests and engagements."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate as admin via token auth.
        testuser = User.objects.get(username='admin')
        token = Token.objects.get(user=testuser)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

    def test_request_response_post(self):
        """Uploading a file at each object level must create one FileUpload."""
        url_levels = [
            'findings/7',
            'tests/3',
            'engagements/1'
        ]
        path = pathlib.Path(__file__).parent.absolute()
        for level in url_levels:
            length = FileUpload.objects.count()
            # Fix: open the fixture in a `with` block so the handle is closed;
            # the original leaked one file descriptor per iteration.
            with open(str(path) + '/scans/acunetix/one_finding.xml') as testfile:
                payload = {
                    "title": level,
                    "file": testfile
                }
                response = self.client.post('/api/v2/' + level + '/files/', payload)
            self.assertEqual(201, response.status_code, response.data)
            self.assertEqual(FileUpload.objects.count(), length + 1)

    def test_request_response_get(self):
        url_levels = [
            'findings/7',
            'tests/3',
            'engagements/1'
        ]
        for level in url_levels:
            response = self.client.get('/api/v2/' + level + '/files/')
            self.assertEqual(200, response.status_code)
class FindingsTest(BaseClass.RESTEndpointTest):
    """CRUD/permission tests for /api/v2/findings plus duplicate management."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Finding
        self.endpoint_path = 'findings'
        self.viewname = 'finding'
        self.viewset = FindingViewSet
        # Full finding body attached to test 3; reported by user 3.
        self.payload = {
            "review_requested_by": 2,
            "reviewers": [2, 3],
            "defect_review_requested_by": 2,
            "test": 3,
            "url": "http://www.example.com",
            "thread_id": 1,
            "found_by": [],
            "title": "DUMMY FINDING123",
            "date": "2020-05-20",
            "cwe": 1,
            "severity": "HIGH",
            "description": "TEST finding",
            "mitigation": "MITIGATION",
            "impact": "HIGH",
            "references": "",
            "reporter": 3,
            "active": False,
            "verified": False,
            "false_p": False,
            "duplicate": False,
            "out_of_scope": False,
            "under_review": False,
            "under_defect_review": False,
            "numerical_severity": "S0",
            "line": 100,
            "file_path": "",
            "static_finding": False,
            "dynamic_finding": False,
            "endpoints": [1, 2],
            "files": [],
            "tags": ['tag1', 'tag_2'],
        }
        self.update_fields = {'duplicate': False, 'active': True, "push_to_jira": "True", 'tags': ['finding_tag_new']}
        # Object-level authorization is evaluated against finding 3.
        self.object_permission = True
        self.permission_check_class = Finding
        self.permission_check_id = 3
        self.permission_create = Permissions.Finding_Add
        self.permission_update = Permissions.Finding_Edit
        self.permission_delete = Permissions.Finding_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def test_duplicate(self):
        """Exercises the set-original, list-duplicates and reset actions."""
        # Mark finding 2 as a duplicate of finding 3.
        result = self.client.post(self.url + "2/original/3/")
        self.assertEqual(result.status_code, status.HTTP_204_NO_CONTENT, "Could not move duplicate")
        # Finding 2 must now expose the duplicate link.
        result = self.client.get(self.url + "2/")
        self.assertEqual(result.status_code, status.HTTP_200_OK, "Could not check new duplicate")
        result_json = result.json()
        # unittest assertions instead of bare `assert` so checks survive -O.
        self.assertTrue(result_json["duplicate"])
        self.assertEqual(result_json["duplicate_finding"], 3)
        # Finding 3 lists all of its duplicates.
        result = self.client.get(self.url + "3/duplicate/")
        self.assertEqual(result.status_code, status.HTTP_200_OK, "Could not check duplicate status")
        result_json = result.json()
        self.assertEqual(set(x["id"] for x in result_json), {2, 4, 5, 6})
        # Reset the duplicate flag on finding 2 ...
        result = self.client.post(self.url + "2/duplicate/reset/")
        self.assertEqual(result.status_code, status.HTTP_204_NO_CONTENT, "Could not reset duplicate")
        # ... and verify the link is gone.  Fix: the original asserted on the
        # stale `result` (expecting 204), so this GET was never checked; a
        # detail GET returns 200.
        new_result = self.client.get(self.url + "2/")
        self.assertEqual(new_result.status_code, status.HTTP_200_OK, "Could not check reset duplicate status")
        result_json = new_result.json()
        self.assertFalse(result_json["duplicate"])
        self.assertIsNone(result_json["duplicate_finding"])
class FindingMetadataTest(BaseClass.RESTEndpointTest):
    """CRUD tests for the finding metadata sub-resource
    (/api/v2/findings/<id>/metadata/)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Finding
        self.endpoint_path = 'findings'
        self.viewname = 'finding'
        self.viewset = FindingViewSet
        self.payload = {}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def setUp(self):
        """Authenticate as admin and seed one metadata row on the first finding."""
        super().setUp()
        testuser = User.objects.get(username='admin')
        token = Token.objects.get(user=testuser)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        self.url = reverse(self.viewname + '-list')
        self.current_findings = self.client.get(self.url, format='json').data["results"]
        finding = Finding.objects.get(id=self.current_findings[0]['id'])
        self.base_url = f"{self.url}{self.current_findings[0]['id']}/metadata/"
        metadata = DojoMeta(finding=finding, name="test_meta", value="20")
        metadata.save()

    def _has_entry(self, results, name, value):
        # True if any returned metadata row matches the given name/value pair.
        return any(result["name"] == name and result["value"] == value for result in results)

    def test_create(self):
        response = self.client.post(self.base_url, data={"name": "test_meta2", "value": "40"})
        self.assertEqual(200, response.status_code, response.data)
        results = self.client.get(self.base_url).data
        # self.fail instead of `assert False` so the check survives python -O.
        if not self._has_entry(results, "test_meta2", "40"):
            self.fail("Metadata was not created correctly")

    def test_create_duplicate(self):
        # Re-using an existing metadata name must be rejected.
        result = self.client.post(self.base_url, data={"name": "test_meta", "value": "40"})
        self.assertEqual(result.status_code, status.HTTP_400_BAD_REQUEST, "Metadata creation did not failed on duplicate")

    def test_get(self):
        results = self.client.get(self.base_url, format="json").data
        if not self._has_entry(results, "test_meta", "20"):
            self.fail("Metadata was not created correctly")

    def test_update(self):
        # The metadata row to update is selected via the ?name= query string.
        self.client.put(self.base_url + "?name=test_meta", data={"name": "test_meta", "value": "40"})
        result = self.client.get(self.base_url).data[0]
        self.assertTrue(result["name"] == "test_meta" and result["value"] == "40", "Metadata not edited correctly")

    def test_delete(self):
        self.client.delete(self.base_url + "?name=test_meta")
        result = self.client.get(self.base_url).data
        self.assertEqual(len(result), 0, "Metadata not deleted correctly")
class FindingTemplatesTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/finding_templates (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = Finding_Template
        self.viewset = FindingTemplatesViewSet
        self.viewname = 'finding_template'
        self.endpoint_path = 'finding_templates'
        # Body posted by the create tests; updates fill in the references.
        self.payload = dict(
            title="Test template",
            cwe=0,
            severity="MEDIUM",
            description="test template",
            mitigation="None",
            impact="MEDIUM",
            references="",
        )
        self.update_fields = dict(references='some reference')
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class JiraInstancesTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/jira_instances (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = JIRA_Instance
        self.endpoint_path = 'jira_instances'
        self.viewname = 'jira_instance'
        self.viewset = JiraInstanceViewSet
        # Connection settings plus the Dojo-severity -> JIRA-severity mapping.
        self.payload = {
            "url": "http://www.example.com",
            "username": "testuser",
            "password": "testuser",
            "default_issue_type": "Story",
            "epic_name_id": 1111,
            "open_status_key": 111,
            "close_status_key": 111,
            "info_mapping_severity": "LOW",
            "low_mapping_severity": "LOW",
            "medium_mapping_severity": "LOW",
            "high_mapping_severity": "LOW",
            "critical_mapping_severity": "LOW",
            "finding_text": "",
            "global_jira_sla_notification": False
        }
        self.update_fields = {'epic_name_id': 1}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class JiraIssuesTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/jira_finding_mappings (no object permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = JIRA_Issue
        self.viewset = JiraIssuesViewSet
        self.viewname = 'jira_issue'
        self.endpoint_path = 'jira_finding_mappings'
        # Maps JIRA issue "JIRA 1" onto finding 2 / engagement 2.
        self.payload = dict(
            jira_id="JIRA 1",
            jira_key="SOME KEY",
            finding=2,
            engagement=2,
        )
        self.update_fields = dict(finding=2)
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class JiraProjectTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/jira_projects (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = JIRA_Project
        self.endpoint_path = 'jira_projects'
        self.viewname = 'jira_project'
        self.viewset = JiraProjectViewSet
        # Links product 1 to JIRA instance 2 with all push options disabled.
        self.payload = {
            "project_key": "TEST KEY",
            "component": "",
            "push_all_issues": False,
            "enable_engagement_epic_mapping": False,
            "push_notes": False,
            "product": 1,
            "jira_instance": 2,
        }
        self.update_fields = {'jira_instance': 3}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class SonarqubeIssueTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/sonarqube_issues (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Sonarqube_Issue
        self.endpoint_path = 'sonarqube_issues'
        self.viewname = 'sonarqube_issue'
        self.viewset = SonarqubeIssueViewSet
        self.payload = {
            "key": "AREwS5n5TxsFUNm31CxP",
            "status": "OPEN",
            "type": "VULNERABILITY"
        }
        # NOTE(review): the update re-sends the same key value, so the PATCH
        # is effectively a no-op on this field — presumably intentional, as it
        # only exercises the update path; confirm.
        self.update_fields = {'key': 'AREwS5n5TxsFUNm31CxP'}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class SonarqubeIssuesTransitionTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/sonarqube_transitions."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Sonarqube_Issue_Transition
        self.endpoint_path = 'sonarqube_transitions'
        self.viewname = 'sonarqube_issue_transition'
        # NOTE(review): this assigns the test class itself, not a DRF viewset —
        # almost certainly a typo for the Sonarqube issue-transition viewset in
        # dojo.api_v2.views; confirm the correct class and fix.
        self.viewset = SonarqubeIssuesTransitionTest
        self.payload = {
            "sonarqube_issue": 1,
            "finding_status": "Active, Verified",
            "sonarqube_status": "OPEN",
            "transitions": "confirm"
        }
        self.update_fields = {'sonarqube_status': 'CLOSED'}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class Product_API_Scan_ConfigurationTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/product_api_scan_configurations."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Product_API_Scan_Configuration
        self.endpoint_path = 'product_api_scan_configurations'
        self.viewname = 'product_api_scan_configuration'
        self.viewset = ProductAPIScanConfigurationViewSet
        # Attaches tool configuration 3 to product 2.
        self.payload = {
            "product": 2,
            "service_key_1": "dojo_sonar_key",
            "tool_configuration": 3
        }
        self.update_fields = {'tool_configuration': 2}
        # Object-level authorization is evaluated against configuration 1.
        self.object_permission = True
        self.permission_check_class = Product_API_Scan_Configuration
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_API_Scan_Configuration_Add
        self.permission_update = Permissions.Product_API_Scan_Configuration_Edit
        self.permission_delete = Permissions.Product_API_Scan_Configuration_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ProductTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/products.

    Creation is authorized at the product-type level; edit/delete are
    checked against the product itself.
    """
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Product
        self.endpoint_path = 'products'
        self.viewname = 'product'
        self.viewset = ProductViewSet
        self.payload = {
            "product_manager": 2,
            "technical_contact": 3,
            "team_manager": 2,
            "prod_type": 1,
            "name": "Test Product",
            "description": "test product",
            # Fix: was ["mytag, yourtag"] — a single tag containing a comma.
            # Two separate tags were clearly intended (same pattern as the
            # endpoint test payload elsewhere in this file).
            "tags": ["mytag", "yourtag"]
        }
        self.update_fields = {'prod_type': 2}
        # Object-level authorization is evaluated against product 1.
        self.object_permission = True
        self.permission_check_class = Product
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Type_Add_Product
        self.permission_update = Permissions.Product_Edit
        self.permission_delete = Permissions.Product_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class StubFindingsTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/stub_findings."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = Stub_Finding
        self.viewset = StubFindingsViewSet
        self.viewname = 'stub_finding'
        self.endpoint_path = 'stub_findings'
        # Body posted by the create tests; updates lower the severity.
        self.payload = dict(
            title="Stub Finding 1",
            date="2017-12-31",
            severity="HIGH",
            description="test stub finding",
            reporter=3,
            test=3,
        )
        self.update_fields = dict(severity='LOW')
        # Stub findings reuse the finding permissions, checked on stub 2.
        self.object_permission = True
        self.permission_check_class = Stub_Finding
        self.permission_check_id = 2
        self.permission_create = Permissions.Finding_Add
        self.permission_update = Permissions.Finding_Edit
        self.permission_delete = Permissions.Finding_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class TestsTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for /api/v2/tests."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Test
        self.endpoint_path = 'tests'
        self.viewname = 'test'
        self.viewset = TestsViewSet
        # Test of type 1 in environment 1, attached to engagement 2.
        self.payload = {
            "test_type": 1,
            "environment": 1,
            "engagement": 2,
            "estimated_time": "0:30:20",
            "actual_time": "0:20:30",
            "notes": [],
            "target_start": "2017-01-12T00:00",
            "target_end": "2017-01-12T00:00",
            "percent_complete": 0,
            "lead": 2,
            "tags": [],
            "version": "1.0",
            "branch_tag": "master",
            "commit_hash": "1234567890abcdefghijkl",
        }
        self.update_fields = {'percent_complete': 100}
        # Object-level authorization is evaluated against test 3.
        self.object_permission = True
        self.permission_check_class = Test
        self.permission_check_id = 3
        self.permission_create = Permissions.Test_Add
        self.permission_update = Permissions.Test_Edit
        self.permission_delete = Permissions.Test_Delete
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ToolConfigurationsTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/tool_configurations (no object permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = Tool_Configuration
        self.viewset = ToolConfigurationsViewSet
        self.viewname = 'tool_configuration'
        self.endpoint_path = 'tool_configurations'
        # API-key authenticated configuration for tool type 1.
        self.payload = dict(
            configuration_url="http://www.example.com",
            name="Tool Configuration",
            description="",
            authentication_type="API",
            username="",
            password="",
            auth_title="",
            ssh="",
            api_key="test key",
            tool_type=1,
        )
        self.update_fields = dict(ssh='test string')
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ToolProductSettingsTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/tool_product_settings (no object permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Tool_Product_Settings
        self.endpoint_path = 'tool_product_settings'
        self.viewname = 'tool_product_settings'
        self.viewset = ToolProductSettingsViewSet
        # Binds tool configuration 3 to a product via an external project id.
        self.payload = {
            "setting_url": "http://www.example.com",
            "name": "Tool Product Setting",
            "description": "test tool product setting",
            "tool_project_id": "1",
            "tool_configuration": 3,
        }
        self.update_fields = {'tool_project_id': '2'}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ToolTypesTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/tool_types (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = Tool_Type
        self.viewset = ToolTypesViewSet
        self.viewname = 'tool_type'
        self.endpoint_path = 'tool_types'
        # Body posted by the create tests; updates change the description.
        self.payload = dict(
            name="Tool Type",
            description="test tool type",
        )
        self.update_fields = dict(description='changed description')
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class NoteTypesTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/note_type (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Note_Type
        self.endpoint_path = 'note_type'
        self.viewname = 'note_type'
        self.viewset = NoteTypeViewSet
        self.payload = {
            "name": "Test Note",
            "description": "not that much",
            "is_single": False,
            "is_active": True,
            "is_mandatory": False
        }
        self.update_fields = {'description': 'changed description'}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class NotesTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/notes (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Notes
        self.endpoint_path = 'notes'
        self.viewname = 'notes'
        self.viewset = NotesViewSet
        # NOTE(review): an "id" in a create payload is unusual — presumably
        # ignored or used to target note 1; confirm against the serializer.
        self.payload = {
            "id": 1,
            "entry": "updated_entry",
            "author": '{"username": "admin"}',
            "editor": '{"username": "user1"}'
        }
        self.update_fields = {'entry': 'changed entry'}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class UsersTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/users (no object-level permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # Endpoint wiring consumed by the shared REST test harness.
        self.endpoint_model = User
        self.viewset = UsersViewSet
        self.viewname = 'user'
        self.endpoint_path = 'users'
        # Body posted by the create tests; updates rename the user.
        self.payload = dict(
            username="test_user",
            first_name="test",
            last_name="user",
            email="example@email.com",
            is_active=True,
        )
        self.update_fields = dict(first_name="test changed")
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class UserContactInfoTest(BaseClass.RESTEndpointTest):
    """CRUD tests for /api/v2/user_contact_infos (no object permissions)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = UserContactInfo
        self.endpoint_path = 'user_contact_infos'
        self.viewname = 'usercontactinfo'
        self.viewset = UserContactInfoViewSet
        # Contact details attached to user 4.
        self.payload = {
            "user": 4,
            "title": "Sir",
            "phone_number": "+999999999",
            "cell_number": "+999999999",
            "twitter_username": "defectdojo",
        }
        self.update_fields = {"title": "Lady"}
        self.object_permission = False
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ProductPermissionTest(DojoAPITestCase):
    """Verifies that user1 cannot see product 3 in list or detail responses."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate as the restricted user (not admin).
        testuser = User.objects.get(username='user1')
        token = Token.objects.get(user=testuser)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

    def test_user_should_not_have_access_to_product_3_in_list(self):
        response = self.client.get(
            reverse('product-list'), format='json')
        for obj in response.data['results']:
            self.assertNotEqual(obj['id'], 3)

    def test_user_should_not_have_access_to_product_3_in_detail(self):
        # Unauthorized objects are hidden with 404 rather than 403.
        response = self.client.get('http://testserver/api/v2/products/3/')
        self.assertEqual(response.status_code, 404)
class ImportScanTest(BaseClass.RESTEndpointTest):
    """Tests /api/v2/import-scan with the actual importer machinery mocked out.

    The authorization tests verify which object the permission check is made
    against when products/engagements are referenced by name, with and
    without auto_create_context.

    NOTE(review): every `open('tests/zap_sample.xml')` below is never closed
    and assumes the working directory contains tests/zap_sample.xml — confirm
    this matches how the suite is invoked.
    """
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Test
        self.endpoint_path = 'import-scan'
        self.viewname = 'importscan'
        self.viewset = ImportScanView
        # Import into existing engagement 1.
        self.payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "engagement": 1,
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
        }
        self.object_permission = True
        self.permission_create = Permissions.Import_Scan_Result
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    # Decorators apply bottom-up: `mock` patches the permission check while
    # `importer_mock`/`reimporter_mock` stub the heavy import machinery.
    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock):
        # Existing product/engagement by name: denial must be checked against
        # the resolved engagement (id 2).
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'April monthly engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
            Engagement.objects.get(id=2),
            Permissions.Import_Scan_Result)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()

    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
        # Engagement does not exist: denial must be checked as Engagement_Add
        # on the existing product (id 1).
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
            Product.objects.get(id=1),
            Permissions.Engagement_Add)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()

    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
        # Product does not exist but the product type does: denial must be
        # checked as Product_Type_Add_Product on that type (id 1).
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
            Product_Type.objects.get(id=1),
            Permissions.Product_Type_Add_Product)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()

    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_global_permission')
    def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
        # Product type does not exist either: denial must be checked as the
        # global Product_Type_Add permission.
        mock.return_value = False
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "more books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
            Permissions.Product_Type_Add)
        importer_mock.assert_not_called()
        reimporter_mock.assert_not_called()

    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
        # Authorized path: both Engagement_Add and Import_Scan_Result must be
        # checked on product 1, and the importer must run exactly once.
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_name": 'Python How-to',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_has_calls([
            call(User.objects.get(username='admin'),
                Product.objects.get(id=1),
                Permissions.Engagement_Add),
            call(User.objects.get(username='admin'),
                Product.objects.get(id=1),
                Permissions.Import_Scan_Result)
        ])
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()

    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
        # Authorized auto-create of a product under product type 1.
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
            Product_Type.objects.get(id=1),
            Permissions.Product_Type_Add_Product)
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()

    @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
    @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
    @patch('dojo.api_v2.permissions.user_has_global_permission')
    def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
        # Authorized auto-create of a brand-new product type (global check).
        mock.return_value = True
        importer_mock.return_value = None, 0, 0
        reimporter_mock.return_value = None, 0, 0, 0, 0, 0
        payload = {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": False,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "product_type_name": "more books",
            "product_name": 'New Product',
            "engagement_name": 'New engagement',
            "lead": 2,
            "tags": ["ci/cd", "api"],
            "version": "1.0.0",
            "auto_create_context": True
        }
        response = self.client.post(self.url, payload)
        self.assertEqual(201, response.status_code, response.content[:1000])
        mock.assert_called_with(User.objects.get(username='admin'),
            Permissions.Product_Type_Add)
        importer_mock.assert_called_once()
        reimporter_mock.assert_not_called()
class ReimportScanTest(DojoAPITestCase):
fixtures = ['dojo_testdata.json']
def setUp(self):
    # Authenticate as admin via token auth against /api/v2/reimport-scan.
    testuser = User.objects.get(username='admin')
    token = Token.objects.get(user=testuser)
    self.client = APIClient()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    self.url = reverse('reimportscan' + '-list')
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
def test_reimport_zap_xml(self, importer_mock, reimporter_mock):
    """Reimporting into existing test 3 must invoke the reimporter once and
    must not create a new Test object."""
    importer_mock.return_value = None, 0, 0
    reimporter_mock.return_value = None, 0, 0, 0, 0, 0
    length = Test.objects.all().count()
    response = self.client.post(
        reverse('reimportscan-list'), {
            "scan_date": '2017-12-30',
            "minimum_severity": 'Low',
            "active": True,
            "verified": True,
            "scan_type": 'ZAP Scan',
            "file": open('tests/zap_sample.xml'),
            "test": 3,
            "version": "1.0.1",
        })
    # Reimport reuses the existing test, so the count is unchanged.
    self.assertEqual(length, Test.objects.all().count())
    self.assertEqual(201, response.status_code, response.content[:1000])
    importer_mock.assert_not_called()
    reimporter_mock.assert_called_once()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock):
    """A denied reimport by product/engagement name must be checked against
    the resolved target test (id 4) and run neither importer."""
    mock.return_value = False
    importer_mock.return_value = None, 0, 0
    reimporter_mock.return_value = None, 0, 0, 0, 0, 0
    payload = {
        "scan_date": '2017-12-30',
        "minimum_severity": 'Low',
        "active": False,
        "verified": True,
        "scan_type": 'ZAP Scan',
        "file": open('tests/zap_sample.xml'),
        "product_name": 'Security How-to',
        "engagement_name": 'April monthly engagement',
        "lead": 2,
        "tags": ["ci/cd", "api"],
        "version": "1.0.0",
    }
    response = self.client.post(self.url, payload)
    self.assertEqual(403, response.status_code, response.content[:1000])
    mock.assert_called_with(User.objects.get(username='admin'),
        Test.objects.get(id=4),
        Permissions.Import_Scan_Result)
    importer_mock.assert_not_called()
    reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_authorized_product_name_engagement_name_scan_type_title_auto_create(self, mock, importer_mock, reimporter_mock):
    """With auto_create_context and a new test title, the reimport falls back
    to a fresh import into engagement 4 (so the importer runs, not the
    reimporter)."""
    mock.return_value = True
    importer_mock.return_value = None, 0, 0
    reimporter_mock.return_value = None, 0, 0, 0, 0, 0
    payload = {
        "scan_date": '2017-12-30',
        "minimum_severity": 'Low',
        "active": False,
        "verified": True,
        "scan_type": 'ZAP Scan',
        "file": open('tests/zap_sample.xml'),
        "product_name": 'Security How-to',
        "engagement_name": 'April monthly engagement',
        "test_title": 'My ZAP Scan NEW',
        "version": "1.0.0",
        "auto_create_context": True,
    }
    response = self.client.post(self.url, payload)
    self.assertEqual(201, response.status_code, response.content[:1000])
    mock.assert_called_with(User.objects.get(username='admin'),
        Engagement.objects.get(id=4),
        Permissions.Import_Scan_Result)
    importer_mock.assert_called_once()
    reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
mock.return_value = True
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_name": 'Python How-to',
"engagement_name": 'New engagement',
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
"auto_create_context": True
}
response = self.client.post(self.url, payload)
self.assertEqual(201, response.status_code, response.content[:1000])
mock.assert_has_calls([
call(User.objects.get(username='admin'),
Product.objects.get(id=1),
Permissions.Engagement_Add),
call(User.objects.get(username='admin'),
Product.objects.get(id=1),
Permissions.Import_Scan_Result)
])
importer_mock.assert_called_once()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
mock.return_value = True
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_type_name": "books",
"product_name": 'New Product',
"engagement_name": 'New engagement',
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
"auto_create_context": True
}
response = self.client.post(self.url, payload)
self.assertEqual(201, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Product_Type.objects.get(id=1),
Permissions.Product_Type_Add_Product)
importer_mock.assert_called_once()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_global_permission')
def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
mock.return_value = True
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_type_name": "more books",
"product_name": 'New Product',
"engagement_name": 'New engagement',
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
"auto_create_context": True
}
response = self.client.post(self.url, payload)
self.assertEqual(201, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Permissions.Product_Type_Add)
importer_mock.assert_called_once()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_mock):
mock.return_value = False
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": True,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"test": 3,
"version": "1.0.1"
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Test.objects.get(id=3),
Permissions.Import_Scan_Result)
importer_mock.assert_not_called()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock):
mock.return_value = False
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_name": 'Python How-to',
"engagement_name": 'New engagement',
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
"auto_create_context": True
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Product.objects.get(id=1),
Permissions.Engagement_Add)
importer_mock.assert_not_called()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock):
mock.return_value = False
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_type_name": "books",
"product_name": 'New Product',
"engagement_name": 'New engagement',
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
"auto_create_context": True
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Product_Type.objects.get(id=1),
Permissions.Product_Type_Add_Product)
importer_mock.assert_not_called()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_global_permission')
def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock):
mock.return_value = False
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_type_name": "more books",
"product_name": 'New Product',
"engagement_name": 'New engagement',
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
"auto_create_context": True
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Permissions.Product_Type_Add)
importer_mock.assert_not_called()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_not_authorized_product_name_engagement_name_scan_type(self, mock, importer_mock, reimporter_mock):
mock.return_value = False
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_name": 'Security How-to',
"engagement_name": 'April monthly engagement',
"version": "1.0.0",
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Test.objects.get(id=4),
Permissions.Import_Scan_Result)
importer_mock.assert_not_called()
reimporter_mock.assert_not_called()
@patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan')
@patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan')
@patch('dojo.api_v2.permissions.user_has_permission')
def test_create_not_authorized_product_name_engagement_name_scan_type_title(self, mock, importer_mock, reimporter_mock):
mock.return_value = False
importer_mock.return_value = None, 0, 0
reimporter_mock.return_value = None, 0, 0, 0, 0, 0
payload = {
"scan_date": '2017-12-30',
"minimum_severity": 'Low',
"active": False,
"verified": True,
"scan_type": 'ZAP Scan',
"file": open('tests/zap_sample.xml'),
"product_name": 'Security How-to',
"engagement_name": 'April monthly engagement',
"test_title": 'My ZAP Scan',
"version": "1.0.0",
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
mock.assert_called_with(User.objects.get(username='admin'),
Test.objects.get(id=4),
Permissions.Import_Scan_Result)
importer_mock.assert_not_called()
reimporter_mock.assert_not_called()
class ProductTypeTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for product types, including create authorization
    for anonymous, global-reader and global-owner users."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Product_Type
        self.endpoint_path = 'product_types'
        self.viewname = 'product_type'
        self.viewset = ProductTypeViewSet
        # object-level permission configuration for the base-class tests
        self.object_permission = True
        self.permission_check_class = Product_Type
        self.permission_check_id = 1
        self.permission_update = Permissions.Product_Type_Edit
        self.permission_delete = Permissions.Product_Type_Delete
        # creation / update payloads
        self.payload = {
            "name": "Test Product Type",
            "description": "Test",
            "key_product": True,
            "critical_product": False
        }
        self.update_fields = {'description': "changed"}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def _post_payload(self):
        # POST the canned creation payload and return the raw response
        return self.client.post(self.url, self.payload)

    def test_create_not_authorized(self):
        # a user without any role may not create product types
        self.setUp_not_authorized()
        response = self._post_payload()
        self.assertEqual(403, response.status_code, response.content[:1000])

    def test_create_not_authorized_reader(self):
        # the global reader role may not create product types either
        self.setUp_global_reader()
        response = self._post_payload()
        self.assertEqual(403, response.status_code, response.content[:1000])

    def test_create_authorized_owner(self):
        # the global owner role is allowed to create product types
        self.setUp_global_owner()
        response = self._post_payload()
        self.assertEqual(201, response.status_code, response.content[:1000])
class DojoGroupsTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for Dojo groups, plus an unauthorized-create check."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Dojo_Group
        self.endpoint_path = 'dojo_groups'
        self.viewname = 'dojo_group'
        self.viewset = DojoGroupViewSet
        # object-level permission configuration for the base-class tests
        self.object_permission = True
        self.permission_check_class = Dojo_Group
        self.permission_check_id = 1
        self.permission_update = Permissions.Group_Edit
        self.permission_delete = Permissions.Group_Delete
        # creation / update payloads
        self.payload = {
            "name": "Test Group",
            "description": "Test",
        }
        self.update_fields = {'description': "changed"}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def test_create_not_authorized(self):
        # a user without any role may not create groups
        self.setUp_not_authorized()
        response = self.client.post(self.url, self.payload)
        self.assertEqual(403, response.status_code, response.content[:1000])
class DojoGroupsUsersTest(BaseClass.MemberEndpointTest):
    """CRUD and permission tests for group membership (dojo_group_members)."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        self.endpoint_model = Dojo_Group_Member
        self.endpoint_path = 'dojo_group_members'
        self.viewname = 'dojo_group_member'
        self.viewset = DojoGroupMemberViewSet
        self.payload = {
            "group": 1,
            "user": 3,
            "role": 4
        }
        self.update_fields = {'role': 3}
        self.object_permission = True
        self.permission_check_class = Dojo_Group_Member
        self.permission_check_id = 1
        self.permission_create = Permissions.Group_Manage_Members
        self.permission_update = Permissions.Group_Manage_Members
        self.permission_delete = Permissions.Group_Member_Delete
        # consistency fix: delegate to the declared base MemberEndpointTest
        # (as the other Member endpoint tests do) instead of skipping a level
        # to RESTEndpointTest
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class RolesTest(BaseClass.RESTEndpointTest):
    """Read-only REST endpoint tests for the built-in roles list."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # roles are global, read-only data: no object-level permissions
        self.object_permission = False
        self.endpoint_model = Role
        self.endpoint_path = 'roles'
        self.viewname = 'role'
        self.viewset = RoleViewSet
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class GlobalRolesTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for assigning global roles to users."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # global roles are not object-permission protected
        self.object_permission = False
        self.endpoint_model = Global_Role
        self.endpoint_path = 'global_roles'
        self.viewname = 'global_role'
        self.viewset = GlobalRoleViewSet
        # assign role 2 to user 2, then change it to role 3 on update
        self.payload = {
            "user": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ProductTypeMemberTest(BaseClass.MemberEndpointTest):
    """CRUD and permission tests for product type membership."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Product_Type_Member
        self.endpoint_path = 'product_type_members'
        self.viewname = 'product_type_member'
        self.viewset = ProductTypeMemberViewSet
        # permission configuration for the base-class member tests
        self.object_permission = True
        self.permission_check_class = Product_Type_Member
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Type_Manage_Members
        self.permission_update = Permissions.Product_Type_Manage_Members
        self.permission_delete = Permissions.Product_Type_Member_Delete
        # add user 3 to product type 1 with role 2; update switches to role 3
        self.payload = {
            "product_type": 1,
            "user": 3,
            "role": 2
        }
        self.update_fields = {'role': 3}
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class ProductMemberTest(BaseClass.MemberEndpointTest):
    """CRUD and permission tests for product membership."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Product_Member
        self.endpoint_path = 'product_members'
        self.viewname = 'product_member'
        self.viewset = ProductMemberViewSet
        # permission configuration for the base-class member tests
        self.object_permission = True
        self.permission_check_class = Product_Member
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Manage_Members
        self.permission_update = Permissions.Product_Manage_Members
        self.permission_delete = Permissions.Product_Member_Delete
        # add user 2 to product 3 with role 2; update switches to role 3
        self.payload = {
            "product": 3,
            "user": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class ProductTypeGroupTest(BaseClass.MemberEndpointTest):
    """CRUD and permission tests for granting groups access to product types."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Product_Type_Group
        self.endpoint_path = 'product_type_groups'
        self.viewname = 'product_type_group'
        self.viewset = ProductTypeGroupViewSet
        # permission configuration for the base-class member tests
        self.object_permission = True
        self.permission_check_class = Product_Type_Group
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Type_Group_Add
        self.permission_update = Permissions.Product_Type_Group_Edit
        self.permission_delete = Permissions.Product_Type_Group_Delete
        # add group 2 to product type 1 with role 2; update switches to role 3
        self.payload = {
            "product_type": 1,
            "group": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class ProductGroupTest(BaseClass.MemberEndpointTest):
    """CRUD and permission tests for granting groups access to products."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Product_Group
        self.endpoint_path = 'product_groups'
        self.viewname = 'product_group'
        self.viewset = ProductGroupViewSet
        # permission configuration for the base-class member tests
        self.object_permission = True
        self.permission_check_class = Product_Group
        self.permission_check_id = 1
        self.permission_create = Permissions.Product_Group_Add
        self.permission_update = Permissions.Product_Group_Edit
        self.permission_delete = Permissions.Product_Group_Delete
        # add group 2 to product 1 with role 2; update switches to role 3
        self.payload = {
            "product": 1,
            "group": 2,
            "role": 2
        }
        self.update_fields = {'role': 3}
        BaseClass.MemberEndpointTest.__init__(self, *args, **kwargs)
class LanguageTypeTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for language type definitions."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # language types are global reference data: no object permissions
        self.object_permission = False
        self.endpoint_model = Language_Type
        self.endpoint_path = 'language_types'
        self.viewname = 'language_type'
        self.viewset = LanguageTypeViewSet
        # create a 'Test' language shown red, then recolor it on update
        self.payload = {
            'language': 'Test',
            'color': 'red',
            'created': '2018-08-16T16:58:23.908Z'
        }
        self.update_fields = {'color': 'blue'}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class LanguageTest(BaseClass.RESTEndpointTest):
    """CRUD and permission tests for per-product language statistics."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Languages
        self.endpoint_path = 'languages'
        self.viewname = 'languages'
        self.viewset = LanguageViewSet
        # permission configuration for the base-class tests
        self.object_permission = True
        self.permission_check_class = Languages
        self.permission_check_id = 1
        self.permission_create = Permissions.Language_Add
        self.permission_update = Permissions.Language_Edit
        self.permission_delete = Permissions.Language_Delete
        # line-count statistics for language 2 on product 1
        self.payload = {
            'product': 1,
            'language': 2,
            'user': 1,
            'files': 2,
            'blank': 3,
            'comment': 4,
            'code': 5,
            'created': '2018-08-16T16:58:23.908Z'
        }
        self.update_fields = {'code': 10}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class ImportLanguagesTest(BaseClass.RESTEndpointTest):
    """Tests the language-import endpoint by uploading a cloc report and
    verifying the resulting Languages rows."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # endpoint wiring
        self.endpoint_model = Languages
        self.endpoint_path = 'import-languages'
        self.viewname = 'importlanguages'
        self.viewset = ImportLanguagesView
        # permission configuration for the base-class tests
        self.object_permission = True
        self.permission_check_class = Languages
        self.permission_create = Permissions.Language_Add
        # cloc report uploaded against product 1
        self.payload = {
            'product': 1,
            'file': open("unittests/files/defectdojo_cloc.json")
        }
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

    def test_create(self):
        BaseClass.RESTEndpointTest.test_create(self)
        languages = Languages.objects.filter(product=1).order_by('language')
        self.assertEqual(2, len(languages))
        # expected (language_type_id, files, blank, comment, code) per row,
        # matching the contents of defectdojo_cloc.json
        expected_rows = [
            (1, 21, 7, 0, 63996),
            (2, 432, 10813, 5054, 51056),
        ]
        for entry, (type_id, files, blank, comment, code) in zip(languages, expected_rows):
            self.assertEqual(entry.product, Product.objects.get(id=1))
            self.assertEqual(entry.language, Language_Type.objects.get(id=type_id))
            self.assertEqual(entry.files, files)
            self.assertEqual(entry.blank, blank)
            self.assertEqual(entry.comment, comment)
            self.assertEqual(entry.code, code)
class NotificationsTest(BaseClass.RESTEndpointTest):
    """REST endpoint tests for per-user/per-product notification settings."""
    fixtures = ['dojo_testdata.json']

    def __init__(self, *args, **kwargs):
        # notification settings are not object-permission protected
        self.object_permission = False
        self.endpoint_model = Notifications
        self.endpoint_path = 'notifications'
        self.viewname = 'notifications'
        self.viewset = NotificationsViewSet
        # subscribe user 3 to product-type-added events on product 1
        self.payload = {
            'product': 1,
            'user': 3,
            'product_type_added': ["alert", "msteams"]
        }
        self.update_fields = {'product_added': ["alert", "msteams"]}
        BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
class UserProfileTest(DojoAPITestCase):
    """Checks the aggregated /user_profile endpoint for the admin user."""
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # authenticate as admin via token auth
        admin = User.objects.get(username='admin')
        token = Token.objects.get(user=admin)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        self.url = reverse('user_profile')

    def test_profile(self):
        response = self.client.get(reverse('user_profile'))
        data = json.loads(response.content)
        # basic user record
        user = data['user']
        self.assertEqual(1, user['id'])
        self.assertEqual('admin', user['username'])
        self.assertTrue(user['is_superuser'])
        # contact info linked to the user
        contact = data['user_contact_info']
        self.assertEqual(1, contact['user'])
        self.assertEqual('#admin', contact['twitter_username'])
        # global role assignment
        self.assertEqual(1, data['global_role']['user'])
        self.assertEqual(4, data['global_role']['role'])
        # group membership
        group_member = data['dojo_group_member'][0]
        self.assertEqual(1, group_member['user'])
        self.assertEqual(1, group_member['group'])
        # product type membership
        pt_member = data['product_type_member'][0]
        self.assertEqual(1, pt_member['user'])
        self.assertEqual(1, pt_member['product_type'])
        # product membership (second entry)
        p_member = data['product_member'][1]
        self.assertEqual(1, p_member['user'])
        self.assertEqual(3, p_member['product'])
| true | true |
1c34242f0b455f83fd1b2afa16dbd1a4e385d199 | 18,161 | py | Python | Civ4/Assets/Python/Screens/CvReligionScreen.py | f1rpo/Civ4CE | ba64c3545b479887739ad0ff78605b51b6fa57f9 | [
"CNRI-Python"
] | null | null | null | Civ4/Assets/Python/Screens/CvReligionScreen.py | f1rpo/Civ4CE | ba64c3545b479887739ad0ff78605b51b6fa57f9 | [
"CNRI-Python"
] | null | null | null | Civ4/Assets/Python/Screens/CvReligionScreen.py | f1rpo/Civ4CE | ba64c3545b479887739ad0ff78605b51b6fa57f9 | [
"CNRI-Python"
] | null | null | null | ## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
from CvPythonExtensions import *
import PyHelpers
import CvUtil
import ScreenInput
import CvScreenEnums
# Convenience alias for the PyHelpers player wrapper class.
PyPlayer = PyHelpers.PyPlayer
# globals
# Engine singletons shared by every method on this screen.
gc = CyGlobalContext()       # game context: players, game state, religion infos
ArtFileMgr = CyArtFileMgr()  # art asset lookups (button / background paths)
localText = CyTranslator()   # localized TXT_KEY_* string lookups
class CvReligionScreen:
"Religion Advisor Screen"
	def __init__(self):
		"""Initialize widget names, layout constants and selection state.

		All X/Y/W/H values are pixel coordinates within the 1024x768 screen;
		Z values are render depths (more negative draws closer to the viewer).
		"""
		# --- widget name constants (ids passed to the screen API) ---
		self.SCREEN_NAME = "ReligionScreen"
		self.BUTTON_NAME = "ReligionScreenButton"
		self.RELIGION_NAME = "ReligionText"
		self.CONVERT_NAME = "ReligionConvertButton"
		self.CANCEL_NAME = "ReligionCancelButton"
		self.CITY_NAME = "ReligionCity"
		self.HEADER_NAME = "ReligionScreenHeader"
		self.DEBUG_DROPDOWN_ID = "ReligionDropdownWidget"
		self.AREA1_ID = "ReligionAreaWidget1"
		self.AREA2_ID = "ReligionAreaWidget2"
		self.BACKGROUND_ID = "ReligionBackground"
		self.RELIGION_PANEL_ID = "ReligionPanel"
		self.RELIGION_ANARCHY_WIDGET = "ReligionAnarchyWidget"
		# --- general sizing ---
		self.BORDER_WIDTH = 2
		self.BUTTON_SIZE = 48
		self.HIGHLIGHT_EXTRA_SIZE = 4
		# --- overall screen geometry and render depths ---
		self.X_SCREEN = 500
		self.Y_SCREEN = 396
		self.W_SCREEN = 1024
		self.H_SCREEN = 768
		self.Z_SCREEN = -6.1
		self.Y_TITLE = 8
		self.Z_TEXT = self.Z_SCREEN - 0.2
		self.DZ = -0.2
		self.Z_CONTROLS = self.Z_TEXT
		# --- bottom-bar button positions ---
		self.X_EXIT = 994
		self.Y_EXIT = 726
		self.X_CANCEL = 552
		self.Y_CANCEL = 726
		self.X_ANARCHY = 21
		self.Y_ANARCHY = 726
		# --- religion info panel layout (columns per religion) ---
		self.LEFT_EDGE_TEXT = 10
		self.X_RELIGION_START = 180
		self.DX_RELIGION = 98       # horizontal spacing between religion columns
		self.Y_RELIGION = 35
		self.Y_FOUNDED = 90
		self.Y_HOLY_CITY = 115
		self.Y_INFLUENCE = 140
		self.Y_RELIGION_NAME = 58
		self.X_RELIGION_AREA = 45
		self.Y_RELIGION_AREA = 84
		self.W_RELIGION_AREA = 934
		self.H_RELIGION_AREA = 175
		# --- two-column city list layout ---
		self.X_CITY1_AREA = 45
		self.X_CITY2_AREA = 522
		self.Y_CITY_AREA = 282
		self.W_CITY_AREA = 457
		self.H_CITY_AREA = 395
		self.X_CITY = 10
		self.DY_CITY = 38
		# --- selection state (-1 = nothing selected yet) ---
		self.iReligionExamined = -1
		self.iReligionSelected = -1
		self.iReligionOriginal = -1
		self.iActivePlayer = -1
		self.bScreenUp = False
		# maps widget names to their input handler methods (defined later in
		# this class, outside this chunk)
		self.ReligionScreenInputMap = {
			self.RELIGION_NAME : self.ReligionScreenButton,
			self.BUTTON_NAME : self.ReligionScreenButton,
			self.CONVERT_NAME : self.ReligionConvert,
			self.CANCEL_NAME : self.ReligionCancel,
			}
def getScreen(self):
return CyGInterfaceScreen(self.SCREEN_NAME, CvScreenEnums.RELIGION_SCREEN)
	def interfaceScreen (self):
		"""Build and show the religion advisor screen.

		Resolves art paths and localized labels, lays out the background,
		panels and buttons, adds a debug player dropdown in debug mode, and
		finally renders the religion and city sections.
		"""
		# resolve art paths and localized button captions up front
		self.SCREEN_ART = ArtFileMgr.getInterfaceArtInfo("TECH_BG").getPath()
		self.NO_STATE_BUTTON_ART = ArtFileMgr.getInterfaceArtInfo("INTERFACE_BUTTONS_CANCEL").getPath()
		self.EXIT_TEXT = u"<font=4>" + localText.getText("TXT_KEY_PEDIA_SCREEN_EXIT", ()).upper() + "</font>"
		self.CONVERT_TEXT = u"<font=4>" + localText.getText("TXT_KEY_RELIGION_CONVERT", ()).upper() + "</font>"
		self.CANCEL_TEXT = u"<font=4>" + localText.getText("TXT_KEY_SCREEN_CANCEL", ()).upper() + "</font>"
		self.iActivePlayer = gc.getGame().getActivePlayer()
		self.bScreenUp = True
		screen = self.getScreen()
		# do not rebuild the UI if the screen is already being shown
		if screen.isActive():
			return
		screen.setRenderInterfaceOnly(True);
		screen.showScreen( PopupStates.POPUPSTATE_IMMEDIATE, False)
		# Set the background and exit button, and show the screen
		screen.setDimensions(screen.centerX(0), screen.centerY(0), self.W_SCREEN, self.H_SCREEN)
		screen.addDDSGFC(self.BACKGROUND_ID, ArtFileMgr.getInterfaceArtInfo("MAINMENU_SLIDESHOW_LOAD").getPath(), 0, 0, self.W_SCREEN, self.H_SCREEN, WidgetTypes.WIDGET_GENERAL, -1, -1 )
		screen.addPanel( "TechTopPanel", u"", u"", True, False, 0, 0, self.W_SCREEN, 55, PanelStyles.PANEL_STYLE_TOPBAR )
		screen.addPanel( "TechBottomPanel", u"", u"", True, False, 0, 713, self.W_SCREEN, 55, PanelStyles.PANEL_STYLE_BOTTOMBAR )
		screen.setText(self.CANCEL_NAME, "Background", self.CANCEL_TEXT, CvUtil.FONT_CENTER_JUSTIFY, self.X_CANCEL, self.Y_CANCEL, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, 1, 0)
		screen.showWindowBackground(False)
		# Header...
		screen.setLabel(self.HEADER_NAME, "Background", u"<font=4b>" + localText.getText("TXT_KEY_RELIGION_SCREEN_TITLE", ()).upper() + u"</font>", CvUtil.FONT_CENTER_JUSTIFY, self.X_SCREEN, self.Y_TITLE, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		# Make the scrollable areas for the city list...
		# debug mode: dropdown listing every living player so any empire can
		# be inspected
		if (CyGame().isDebugMode()):
			self.szDropdownName = self.DEBUG_DROPDOWN_ID
			screen.addDropDownBoxGFC(self.szDropdownName, 22, 12, 300, WidgetTypes.WIDGET_GENERAL, -1, -1, FontTypes.GAME_FONT)
			for j in range(gc.getMAX_PLAYERS()):
				if (gc.getPlayer(j).isAlive()):
					screen.addPullDownString(self.szDropdownName, gc.getPlayer(j).getName(), j, j, False )
		# Draw Religion info
		self.drawReligionInfo()
		self.drawCityInfo(self.iReligionSelected)
# Draws the religion buttons and information
	def drawReligionInfo(self):
		"""Draw the top religion panel.

		One column per religion (plus a trailing 'no state religion' column)
		showing its button, name, founding date, holy city and world
		influence percentage. Also initializes the religion selection to the
		active player's current state religion.
		"""
		screen = self.getScreen()
		# Put everything on a scrollable area
		szArea = self.RELIGION_PANEL_ID
		screen.addPanel(szArea, "", "", False, True, self.X_RELIGION_AREA, self.Y_RELIGION_AREA, self.W_RELIGION_AREA, self.H_RELIGION_AREA, PanelStyles.PANEL_STYLE_MAIN)
		# Religion buttons at the top
		# founded religions get a clickable checkbox; unfounded ones only a
		# disabled image button
		xLoop = self.X_RELIGION_START
		for i in range(gc.getNumReligionInfos()):
			szButtonName = self.getReligionButtonName(i)
			if gc.getGame().getReligionGameTurnFounded(i) >= 0:
				screen.addCheckBoxGFC(szButtonName, gc.getReligionInfo(i).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), self.X_RELIGION_AREA + xLoop - self.BUTTON_SIZE/2, self.Y_RELIGION_AREA + self.Y_RELIGION - self.BUTTON_SIZE/2, self.BUTTON_SIZE, self.BUTTON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1, ButtonStyles.BUTTON_STYLE_LABEL)
			else:
				screen.setImageButton(szButtonName, gc.getReligionInfo(i).getButtonDisabled(), self.X_RELIGION_AREA + xLoop - self.BUTTON_SIZE/2, self.Y_RELIGION_AREA + self.Y_RELIGION - self.BUTTON_SIZE/2, self.BUTTON_SIZE, self.BUTTON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1)
			szName = self.getReligionTextName(i)
			szLabel = gc.getReligionInfo(i).getDescription()
			# if (self.iReligionSelected == i):
			# szLabel = localText.changeTextColor(szLabel, gc.getInfoTypeForString("COLOR_YELLOW"))
			screen.setText(szName, szArea, szLabel, CvUtil.FONT_CENTER_JUSTIFY, xLoop + self.X_RELIGION_AREA, self.Y_RELIGION_AREA + self.Y_RELIGION_NAME, 2*self.DZ, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
			xLoop += self.DX_RELIGION
		# extra trailing column for the "no state religion" choice
		szButtonName = self.getReligionButtonName(gc.getNumReligionInfos())
		screen.addCheckBoxGFC(szButtonName, self.NO_STATE_BUTTON_ART, ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), self.X_RELIGION_AREA + xLoop - self.BUTTON_SIZE/2, self.Y_RELIGION_AREA + self.Y_RELIGION - self.BUTTON_SIZE/2, self.BUTTON_SIZE, self.BUTTON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1, ButtonStyles.BUTTON_STYLE_LABEL)
		szName = self.getReligionTextName(gc.getNumReligionInfos())
		szLabel = localText.getText("TXT_KEY_RELIGION_SCREEN_NO_STATE", ())
		# if (self.iReligionSelected == gc.getNumReligionInfos()):
		# szLabel = localText.changeTextColor(szLabel, gc.getInfoTypeForString("COLOR_YELLOW"))
		screen.setText(szName, szArea, szLabel, CvUtil.FONT_CENTER_JUSTIFY, xLoop + self.X_RELIGION_AREA, self.Y_RELIGION_AREA + self.Y_RELIGION_NAME, 2*self.DZ, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		# Founded...
		screen.setLabelAt("", szArea, localText.getText("TXT_KEY_RELIGION_SCREEN_DATE_FOUNDED", ()), CvUtil.FONT_LEFT_JUSTIFY, self.LEFT_EDGE_TEXT, self.Y_FOUNDED, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		# Date Founded:
		xLoop = self.X_RELIGION_START
		for i in range(gc.getNumReligionInfos()):
			if (gc.getGame().getReligionGameTurnFounded(i) < 0):
				szFounded = localText.getText("TXT_KEY_RELIGION_SCREEN_NOT_FOUNDED", ())
			else:
				# NOTE(review): lowercase 'false' relies on the Civ4 Python
				# environment defining it as an alias of False — confirm
				szFounded = CyGameTextMgr().getTimeStr(gc.getGame().getReligionGameTurnFounded(i), false)
			screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_FOUNDED, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
			xLoop += self.DX_RELIGION
		screen.setLabelAt("", szArea, "", CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_FOUNDED, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		# Holy City...
		screen.setLabelAt("", szArea, localText.getText("TXT_KEY_RELIGION_SCREEN_HOLY_CITY", ()), CvUtil.FONT_LEFT_JUSTIFY, self.LEFT_EDGE_TEXT, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		xLoop = self.X_RELIGION_START
		for i in range(gc.getNumReligionInfos()):
			pHolyCity = gc.getGame().getHolyCity(i)
			if pHolyCity.isNone():
				# religion has no holy city (e.g. not founded, or city razed)
				szFounded = localText.getText("TXT_KEY_NONE", ())
				screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
			elif not pHolyCity.isRevealed(gc.getPlayer(self.iActivePlayer).getTeam(), False):
				# hide the holy city's name if the player has not seen it yet
				szFounded = localText.getText("TXT_KEY_UNKNOWN", ())
				screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
			else:
				# two stacked labels: city name above, owner adjective below
				szFounded = pHolyCity.getName()
				screen.setLabelAt("", szArea, "(%s)" % gc.getPlayer(pHolyCity.getOwner()).getCivilizationAdjective(0), CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY+8, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
				screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY-8, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
			xLoop += self.DX_RELIGION
		szFounded = "-"
		screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		# Influence...
		screen.setLabelAt("", szArea, localText.getText("TXT_KEY_RELIGION_SCREEN_INFLUENCE", ()), CvUtil.FONT_LEFT_JUSTIFY, self.LEFT_EDGE_TEXT, self.Y_INFLUENCE, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		xLoop = self.X_RELIGION_START
		for i in range(gc.getNumReligionInfos()):
			if (gc.getGame().getReligionGameTurnFounded(i) < 0):
				szFounded = "0%"
			else:
				szFounded = str(gc.getGame().calculateReligionPercent(i)) + "%"
			screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_INFLUENCE, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
			xLoop += self.DX_RELIGION
		szFounded = "-"
		screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_INFLUENCE, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		# initialize the selection from the player's current state religion;
		# -1 (no state religion) maps to the trailing extra column index
		self.iReligionSelected = gc.getPlayer(self.iActivePlayer).getStateReligion()
		if (self.iReligionSelected == -1):
			self.iReligionSelected = gc.getNumReligionInfos()
		self.iReligionExamined = self.iReligionSelected
		self.iReligionOriginal = self.iReligionSelected
# Draws the city list
def drawCityInfo(self, iReligion):
    """Redraw the two city-list panels, religion-button states and the
    convert/cancel controls for the religion *iReligion*.

    *iReligion* is a religion index, or gc.getNumReligionInfos() which is
    the pseudo-index meaning "no state religion".
    """
    # Screen may already have been dismissed by the time a callback fires.
    if (not self.bScreenUp):
        return
    screen = self.getScreen()
    # Translate the "no state religion" pseudo-index back to the -1 the
    # game API expects.
    if (iReligion == gc.getNumReligionInfos()):
        iLinkReligion = -1
    else:
        iLinkReligion = iReligion
    szArea1 = self.AREA1_ID
    screen.addPanel(self.AREA1_ID, "", "", True, True, self.X_CITY1_AREA, self.Y_CITY_AREA, self.W_CITY_AREA, self.H_CITY_AREA, PanelStyles.PANEL_STYLE_MAIN)
    szArea2 = self.AREA2_ID
    screen.addPanel(self.AREA2_ID, "", "", True, True, self.X_CITY2_AREA, self.Y_CITY_AREA, self.W_CITY_AREA, self.H_CITY_AREA, PanelStyles.PANEL_STYLE_MAIN)
    szArea = self.RELIGION_PANEL_ID
    # Check exactly one religion checkbox: the currently selected one.
    for i in range(gc.getNumReligionInfos()):
        if (self.iReligionSelected == i):
            screen.setState(self.getReligionButtonName(i), True)
        else:
            screen.setState(self.getReligionButtonName(i), False)
    # The extra button after the real religions is "no state religion".
    if (self.iReligionSelected == gc.getNumReligionInfos()):
        screen.setState(self.getReligionButtonName(gc.getNumReligionInfos()), True)
    else:
        screen.setState(self.getReligionButtonName(gc.getNumReligionInfos()), False)
    iPlayer = PyPlayer(self.iActivePlayer)
    cityList = iPlayer.getCityList()
    # Loop through the cities, alternating between left and right columns.
    szLeftCities = u""
    szRightCities = u""
    for i in range(len(cityList)):
        # Even-indexed cities go into the left panel, odd into the right.
        bFirstColumn = (i % 2 == 0)
        pLoopCity = cityList[i]
        # Construct the city label: capital star, holy-city glyphs,
        # religion glyphs, then the (truncated) city name.
        szCityName = u""
        if pLoopCity.isCapital():
            szCityName += u"%c" % CyGame().getSymbolID(FontSymbols.STAR_CHAR)
        lHolyCity = pLoopCity.getHolyCity()
        if lHolyCity:
            for iI in range(len(lHolyCity)):
                szCityName += u"%c" %(gc.getReligionInfo(lHolyCity[iI]).getHolyCityChar())
        lReligions = pLoopCity.getReligions()
        if lReligions:
            for iI in range(len(lReligions)):
                # Holy-city glyphs were already emitted above; skip dupes.
                if lReligions[iI] not in lHolyCity:
                    szCityName += u"%c" %(gc.getReligionInfo(lReligions[iI]).getChar())
        # Truncate long names so the two columns stay readable.
        szCityName += pLoopCity.getName()[0:17] + " "
        if (iLinkReligion == -1):
            # No specific religion selected: append help text for every
            # religion present in the city, comma separated.
            bFirst = True
            for iI in range(len(lReligions)):
                szTempBuffer = CyGameTextMgr().getReligionHelpCity(lReligions[iI], pLoopCity.GetCy(), False, False, False, True)
                if (szTempBuffer):
                    if (not bFirst):
                        szCityName += u", "
                    szCityName += szTempBuffer
                    bFirst = False
        else:
            szCityName += CyGameTextMgr().getReligionHelpCity(iLinkReligion, pLoopCity.GetCy(), False, False, True, False)
        if bFirstColumn:
            szLeftCities += u"<font=3>" + szCityName + u"</font>\n"
        else:
            szRightCities += u"<font=3>" + szCityName + u"</font>\n"
    screen.addMultilineText("Child" + self.AREA1_ID, szLeftCities, self.X_CITY1_AREA+5, self.Y_CITY_AREA+5, self.W_CITY_AREA-10, self.H_CITY_AREA-10, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
    screen.addMultilineText("Child" + self.AREA2_ID, szRightCities, self.X_CITY2_AREA+5, self.Y_CITY_AREA+5, self.W_CITY_AREA-10, self.H_CITY_AREA-10, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
    # Convert button: shown as a plain EXIT button (cancel hidden) when
    # converting to iLinkReligion is impossible or would be a no-op.
    # NOTE(review): iLink is computed here but never used.
    iLink = 0
    if (gc.getPlayer(self.iActivePlayer).canChangeReligion()):
        iLink = 1
    if (not self.canConvert(iLinkReligion) or iLinkReligion == self.iReligionOriginal):
        screen.setText(self.CONVERT_NAME, "Background", self.EXIT_TEXT, CvUtil.FONT_RIGHT_JUSTIFY, self.X_EXIT, self.Y_EXIT, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, 1, 0)
        screen.hide(self.CANCEL_NAME)
        szAnarchyTime = CyGameTextMgr().setConvertHelp(self.iActivePlayer, iLinkReligion)
    else:
        screen.setText(self.CONVERT_NAME, "Background", self.CONVERT_TEXT, CvUtil.FONT_RIGHT_JUSTIFY, self.X_EXIT, self.Y_EXIT, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_CONVERT, iLinkReligion, 1)
        screen.show(self.CANCEL_NAME)
        szAnarchyTime = localText.getText("TXT_KEY_ANARCHY_TURNS", (gc.getPlayer(self.iActivePlayer).getReligionAnarchyLength(), ))
    # Turns-of-anarchy (or convert-help) text in the bottom-left corner.
    screen.setLabel(self.RELIGION_ANARCHY_WIDGET, "Background", u"<font=3>" + szAnarchyTime + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, self.X_ANARCHY, self.Y_ANARCHY, self.Z_TEXT, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
def getReligionButtonName(self, iReligion):
    """Return the widget name of the checkbox for religion index *iReligion*."""
    return self.BUTTON_NAME + str(iReligion)
def getReligionTextName(self, iReligion):
    """Return the widget name of the text label for religion index *iReligion*."""
    return self.RELIGION_NAME + str(iReligion)
def canConvert(self, iReligion):
    """Return whether the active player may convert to *iReligion*.

    *iReligion* == gc.getNumReligionInfos() is the pseudo-index for
    "no state religion" and is mapped to -1 for the game API.
    """
    pPlayer = gc.getPlayer(self.iActivePlayer)
    iCurrentReligion = pPlayer.getStateReligion()
    if (iReligion == gc.getNumReligionInfos()):
        iConvertReligion = -1
    else:
        iConvertReligion = iReligion
    # Converting to the religion already followed is pointless.
    if (iConvertReligion == iCurrentReligion):
        return False
    return pPlayer.canConvert(iConvertReligion)
# Will handle the input for this screen...
def handleInput (self, inputClass):
    """Route input events for the religion screen.

    Returns 1 when the event was handled here, 0 otherwise so the
    engine can continue processing it.
    """
    if (inputClass.getNotifyCode() == NotifyCode.NOTIFY_LISTBOX_ITEM_SELECTED):
        # Debug-mode player dropdown changed: switch to the chosen
        # player and redraw everything for them.
        screen = self.getScreen()
        iIndex = screen.getSelectedPullDownID(self.DEBUG_DROPDOWN_ID)
        self.iActivePlayer = screen.getPullDownData(self.DEBUG_DROPDOWN_ID, iIndex)
        self.drawReligionInfo()
        self.drawCityInfo(self.iReligionSelected)
        return 1
    elif (inputClass.getFunctionName() in self.ReligionScreenInputMap):
        # Dispatch to the handler registered for this widget name.
        # ('in' replaces the deprecated dict.has_key().)
        self.ReligionScreenInputMap[inputClass.getFunctionName()](inputClass)
        return 1
    return 0
def update(self, fDelta):
    """Per-frame update hook; this screen has no animation work to do."""
    return
# Religion Button
def ReligionScreenButton( self, inputClass ):
    """Handle click/hover events on the religion checkboxes.

    Clicking selects a religion; hovering previews its city list;
    moving the cursor off restores the selected religion's list.
    """
    if ( inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED ) :
        # Only founded religions (or the "no state religion"
        # pseudo-entry, whose ID equals getNumReligionInfos()) are
        # selectable.
        if (inputClass.getID() == gc.getNumReligionInfos() or gc.getGame().getReligionGameTurnFounded(inputClass.getID()) >= 0) :
            self.iReligionSelected = inputClass.getID()
            self.iReligionExamined = self.iReligionSelected
            self.drawCityInfo(self.iReligionSelected)
    elif ( inputClass.getNotifyCode() == NotifyCode.NOTIFY_CURSOR_MOVE_ON ) :
        if ( inputClass.getID() == gc.getNumReligionInfos() or gc.getGame().getReligionGameTurnFounded(inputClass.getID()) >= 0) :
            # Hovering previews the hovered religion without changing
            # the selection.
            self.iReligionExamined = inputClass.getID()
            self.drawCityInfo(self.iReligionExamined)
    elif ( inputClass.getNotifyCode() == NotifyCode.NOTIFY_CURSOR_MOVE_OFF ) :
        # Cursor left the button: fall back to the selected religion.
        self.iReligionExamined = self.iReligionSelected
        self.drawCityInfo(self.iReligionSelected)
    return 0
def ReligionConvert(self, inputClass):
    """Handler for the convert button: a click dismisses the screen."""
    screen = self.getScreen()
    bClicked = (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED)
    if bClicked:
        screen.hideScreen()
def ReligionCancel(self, inputClass):
    """Handler for the cancel button: revert the selection and redraw."""
    screen = self.getScreen()
    if inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED:
        self.iReligionSelected = self.iReligionOriginal
        # -1 (no state religion) maps to the extra pseudo-index used
        # by the button row.
        if self.iReligionSelected == -1:
            self.iReligionSelected = gc.getNumReligionInfos()
        self.drawCityInfo(self.iReligionSelected)
| 45.176617 | 358 | 0.729035 | 005
from CvPythonExtensions import *
import PyHelpers
import CvUtil
import ScreenInput
import CvScreenEnums
PyPlayer = PyHelpers.PyPlayer
# globals
gc = CyGlobalContext()
ArtFileMgr = CyArtFileMgr()
localText = CyTranslator()
class CvReligionScreen:
def __init__(self):
self.SCREEN_NAME = "ReligionScreen"
self.BUTTON_NAME = "ReligionScreenButton"
self.RELIGION_NAME = "ReligionText"
self.CONVERT_NAME = "ReligionConvertButton"
self.CANCEL_NAME = "ReligionCancelButton"
self.CITY_NAME = "ReligionCity"
self.HEADER_NAME = "ReligionScreenHeader"
self.DEBUG_DROPDOWN_ID = "ReligionDropdownWidget"
self.AREA1_ID = "ReligionAreaWidget1"
self.AREA2_ID = "ReligionAreaWidget2"
self.BACKGROUND_ID = "ReligionBackground"
self.RELIGION_PANEL_ID = "ReligionPanel"
self.RELIGION_ANARCHY_WIDGET = "ReligionAnarchyWidget"
self.BORDER_WIDTH = 2
self.BUTTON_SIZE = 48
self.HIGHLIGHT_EXTRA_SIZE = 4
self.X_SCREEN = 500
self.Y_SCREEN = 396
self.W_SCREEN = 1024
self.H_SCREEN = 768
self.Z_SCREEN = -6.1
self.Y_TITLE = 8
self.Z_TEXT = self.Z_SCREEN - 0.2
self.DZ = -0.2
self.Z_CONTROLS = self.Z_TEXT
self.X_EXIT = 994
self.Y_EXIT = 726
self.X_CANCEL = 552
self.Y_CANCEL = 726
self.X_ANARCHY = 21
self.Y_ANARCHY = 726
self.LEFT_EDGE_TEXT = 10
self.X_RELIGION_START = 180
self.DX_RELIGION = 98
self.Y_RELIGION = 35
self.Y_FOUNDED = 90
self.Y_HOLY_CITY = 115
self.Y_INFLUENCE = 140
self.Y_RELIGION_NAME = 58
self.X_RELIGION_AREA = 45
self.Y_RELIGION_AREA = 84
self.W_RELIGION_AREA = 934
self.H_RELIGION_AREA = 175
self.X_CITY1_AREA = 45
self.X_CITY2_AREA = 522
self.Y_CITY_AREA = 282
self.W_CITY_AREA = 457
self.H_CITY_AREA = 395
self.X_CITY = 10
self.DY_CITY = 38
self.iReligionExamined = -1
self.iReligionSelected = -1
self.iReligionOriginal = -1
self.iActivePlayer = -1
self.bScreenUp = False
self.ReligionScreenInputMap = {
self.RELIGION_NAME : self.ReligionScreenButton,
self.BUTTON_NAME : self.ReligionScreenButton,
self.CONVERT_NAME : self.ReligionConvert,
self.CANCEL_NAME : self.ReligionCancel,
}
def getScreen(self):
return CyGInterfaceScreen(self.SCREEN_NAME, CvScreenEnums.RELIGION_SCREEN)
def interfaceScreen (self):
self.SCREEN_ART = ArtFileMgr.getInterfaceArtInfo("TECH_BG").getPath()
self.NO_STATE_BUTTON_ART = ArtFileMgr.getInterfaceArtInfo("INTERFACE_BUTTONS_CANCEL").getPath()
self.EXIT_TEXT = u"<font=4>" + localText.getText("TXT_KEY_PEDIA_SCREEN_EXIT", ()).upper() + "</font>"
self.CONVERT_TEXT = u"<font=4>" + localText.getText("TXT_KEY_RELIGION_CONVERT", ()).upper() + "</font>"
self.CANCEL_TEXT = u"<font=4>" + localText.getText("TXT_KEY_SCREEN_CANCEL", ()).upper() + "</font>"
self.iActivePlayer = gc.getGame().getActivePlayer()
self.bScreenUp = True
screen = self.getScreen()
if screen.isActive():
return
screen.setRenderInterfaceOnly(True);
screen.showScreen( PopupStates.POPUPSTATE_IMMEDIATE, False)
# Set the background and exit button, and show the screen
screen.setDimensions(screen.centerX(0), screen.centerY(0), self.W_SCREEN, self.H_SCREEN)
screen.addDDSGFC(self.BACKGROUND_ID, ArtFileMgr.getInterfaceArtInfo("MAINMENU_SLIDESHOW_LOAD").getPath(), 0, 0, self.W_SCREEN, self.H_SCREEN, WidgetTypes.WIDGET_GENERAL, -1, -1 )
screen.addPanel( "TechTopPanel", u"", u"", True, False, 0, 0, self.W_SCREEN, 55, PanelStyles.PANEL_STYLE_TOPBAR )
screen.addPanel( "TechBottomPanel", u"", u"", True, False, 0, 713, self.W_SCREEN, 55, PanelStyles.PANEL_STYLE_BOTTOMBAR )
screen.setText(self.CANCEL_NAME, "Background", self.CANCEL_TEXT, CvUtil.FONT_CENTER_JUSTIFY, self.X_CANCEL, self.Y_CANCEL, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, 1, 0)
screen.showWindowBackground(False)
# Header...
screen.setLabel(self.HEADER_NAME, "Background", u"<font=4b>" + localText.getText("TXT_KEY_RELIGION_SCREEN_TITLE", ()).upper() + u"</font>", CvUtil.FONT_CENTER_JUSTIFY, self.X_SCREEN, self.Y_TITLE, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
# Make the scrollable areas for the city list...
if (CyGame().isDebugMode()):
self.szDropdownName = self.DEBUG_DROPDOWN_ID
screen.addDropDownBoxGFC(self.szDropdownName, 22, 12, 300, WidgetTypes.WIDGET_GENERAL, -1, -1, FontTypes.GAME_FONT)
for j in range(gc.getMAX_PLAYERS()):
if (gc.getPlayer(j).isAlive()):
screen.addPullDownString(self.szDropdownName, gc.getPlayer(j).getName(), j, j, False )
# Draw Religion info
self.drawReligionInfo()
self.drawCityInfo(self.iReligionSelected)
# Draws the religion buttons and information
def drawReligionInfo(self):
screen = self.getScreen()
# Put everything on a scrollable area
szArea = self.RELIGION_PANEL_ID
screen.addPanel(szArea, "", "", False, True, self.X_RELIGION_AREA, self.Y_RELIGION_AREA, self.W_RELIGION_AREA, self.H_RELIGION_AREA, PanelStyles.PANEL_STYLE_MAIN)
# Religion buttons at the top
xLoop = self.X_RELIGION_START
for i in range(gc.getNumReligionInfos()):
szButtonName = self.getReligionButtonName(i)
if gc.getGame().getReligionGameTurnFounded(i) >= 0:
screen.addCheckBoxGFC(szButtonName, gc.getReligionInfo(i).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), self.X_RELIGION_AREA + xLoop - self.BUTTON_SIZE/2, self.Y_RELIGION_AREA + self.Y_RELIGION - self.BUTTON_SIZE/2, self.BUTTON_SIZE, self.BUTTON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1, ButtonStyles.BUTTON_STYLE_LABEL)
else:
screen.setImageButton(szButtonName, gc.getReligionInfo(i).getButtonDisabled(), self.X_RELIGION_AREA + xLoop - self.BUTTON_SIZE/2, self.Y_RELIGION_AREA + self.Y_RELIGION - self.BUTTON_SIZE/2, self.BUTTON_SIZE, self.BUTTON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1)
szName = self.getReligionTextName(i)
szLabel = gc.getReligionInfo(i).getDescription()
# if (self.iReligionSelected == i):
# szLabel = localText.changeTextColor(szLabel, gc.getInfoTypeForString("COLOR_YELLOW"))
screen.setText(szName, szArea, szLabel, CvUtil.FONT_CENTER_JUSTIFY, xLoop + self.X_RELIGION_AREA, self.Y_RELIGION_AREA + self.Y_RELIGION_NAME, 2*self.DZ, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
xLoop += self.DX_RELIGION
szButtonName = self.getReligionButtonName(gc.getNumReligionInfos())
screen.addCheckBoxGFC(szButtonName, self.NO_STATE_BUTTON_ART, ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), self.X_RELIGION_AREA + xLoop - self.BUTTON_SIZE/2, self.Y_RELIGION_AREA + self.Y_RELIGION - self.BUTTON_SIZE/2, self.BUTTON_SIZE, self.BUTTON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1, ButtonStyles.BUTTON_STYLE_LABEL)
szName = self.getReligionTextName(gc.getNumReligionInfos())
szLabel = localText.getText("TXT_KEY_RELIGION_SCREEN_NO_STATE", ())
# if (self.iReligionSelected == gc.getNumReligionInfos()):
# szLabel = localText.changeTextColor(szLabel, gc.getInfoTypeForString("COLOR_YELLOW"))
screen.setText(szName, szArea, szLabel, CvUtil.FONT_CENTER_JUSTIFY, xLoop + self.X_RELIGION_AREA, self.Y_RELIGION_AREA + self.Y_RELIGION_NAME, 2*self.DZ, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
# Founded...
screen.setLabelAt("", szArea, localText.getText("TXT_KEY_RELIGION_SCREEN_DATE_FOUNDED", ()), CvUtil.FONT_LEFT_JUSTIFY, self.LEFT_EDGE_TEXT, self.Y_FOUNDED, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
# Date Founded:
xLoop = self.X_RELIGION_START
for i in range(gc.getNumReligionInfos()):
if (gc.getGame().getReligionGameTurnFounded(i) < 0):
szFounded = localText.getText("TXT_KEY_RELIGION_SCREEN_NOT_FOUNDED", ())
else:
szFounded = CyGameTextMgr().getTimeStr(gc.getGame().getReligionGameTurnFounded(i), false)
screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_FOUNDED, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
xLoop += self.DX_RELIGION
screen.setLabelAt("", szArea, "", CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_FOUNDED, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
# Holy City...
screen.setLabelAt("", szArea, localText.getText("TXT_KEY_RELIGION_SCREEN_HOLY_CITY", ()), CvUtil.FONT_LEFT_JUSTIFY, self.LEFT_EDGE_TEXT, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
xLoop = self.X_RELIGION_START
for i in range(gc.getNumReligionInfos()):
pHolyCity = gc.getGame().getHolyCity(i)
if pHolyCity.isNone():
szFounded = localText.getText("TXT_KEY_NONE", ())
screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
elif not pHolyCity.isRevealed(gc.getPlayer(self.iActivePlayer).getTeam(), False):
szFounded = localText.getText("TXT_KEY_UNKNOWN", ())
screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
else:
szFounded = pHolyCity.getName()
screen.setLabelAt("", szArea, "(%s)" % gc.getPlayer(pHolyCity.getOwner()).getCivilizationAdjective(0), CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY+8, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY-8, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
xLoop += self.DX_RELIGION
szFounded = "-"
screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_HOLY_CITY, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
# Influence...
screen.setLabelAt("", szArea, localText.getText("TXT_KEY_RELIGION_SCREEN_INFLUENCE", ()), CvUtil.FONT_LEFT_JUSTIFY, self.LEFT_EDGE_TEXT, self.Y_INFLUENCE, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
xLoop = self.X_RELIGION_START
for i in range(gc.getNumReligionInfos()):
if (gc.getGame().getReligionGameTurnFounded(i) < 0):
szFounded = "0%"
else:
szFounded = str(gc.getGame().calculateReligionPercent(i)) + "%"
screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_INFLUENCE, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
xLoop += self.DX_RELIGION
szFounded = "-"
screen.setLabelAt("", szArea, szFounded, CvUtil.FONT_CENTER_JUSTIFY, xLoop, self.Y_INFLUENCE, self.DZ, FontTypes.SMALL_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
self.iReligionSelected = gc.getPlayer(self.iActivePlayer).getStateReligion()
if (self.iReligionSelected == -1):
self.iReligionSelected = gc.getNumReligionInfos()
self.iReligionExamined = self.iReligionSelected
self.iReligionOriginal = self.iReligionSelected
# Draws the city list
def drawCityInfo(self, iReligion):
if (not self.bScreenUp):
return
screen = self.getScreen()
if (iReligion == gc.getNumReligionInfos()):
iLinkReligion = -1
else:
iLinkReligion = iReligion
szArea1 = self.AREA1_ID
screen.addPanel(self.AREA1_ID, "", "", True, True, self.X_CITY1_AREA, self.Y_CITY_AREA, self.W_CITY_AREA, self.H_CITY_AREA, PanelStyles.PANEL_STYLE_MAIN)
szArea2 = self.AREA2_ID
screen.addPanel(self.AREA2_ID, "", "", True, True, self.X_CITY2_AREA, self.Y_CITY_AREA, self.W_CITY_AREA, self.H_CITY_AREA, PanelStyles.PANEL_STYLE_MAIN)
szArea = self.RELIGION_PANEL_ID
for i in range(gc.getNumReligionInfos()):
if (self.iReligionSelected == i):
screen.setState(self.getReligionButtonName(i), True)
else:
screen.setState(self.getReligionButtonName(i), False)
if (self.iReligionSelected == gc.getNumReligionInfos()):
screen.setState(self.getReligionButtonName(gc.getNumReligionInfos()), True)
else:
screen.setState(self.getReligionButtonName(gc.getNumReligionInfos()), False)
iPlayer = PyPlayer(self.iActivePlayer)
cityList = iPlayer.getCityList()
# Loop through the cities
szLeftCities = u""
szRightCities = u""
for i in range(len(cityList)):
bFirstColumn = (i % 2 == 0)
pLoopCity = cityList[i]
# Constructing the City name...
szCityName = u""
if pLoopCity.isCapital():
szCityName += u"%c" % CyGame().getSymbolID(FontSymbols.STAR_CHAR)
lHolyCity = pLoopCity.getHolyCity()
if lHolyCity:
for iI in range(len(lHolyCity)):
szCityName += u"%c" %(gc.getReligionInfo(lHolyCity[iI]).getHolyCityChar())
lReligions = pLoopCity.getReligions()
if lReligions:
for iI in range(len(lReligions)):
if lReligions[iI] not in lHolyCity:
szCityName += u"%c" %(gc.getReligionInfo(lReligions[iI]).getChar())
szCityName += pLoopCity.getName()[0:17] + " "
if (iLinkReligion == -1):
bFirst = True
for iI in range(len(lReligions)):
szTempBuffer = CyGameTextMgr().getReligionHelpCity(lReligions[iI], pLoopCity.GetCy(), False, False, False, True)
if (szTempBuffer):
if (not bFirst):
szCityName += u", "
szCityName += szTempBuffer
bFirst = False
else:
szCityName += CyGameTextMgr().getReligionHelpCity(iLinkReligion, pLoopCity.GetCy(), False, False, True, False)
if bFirstColumn:
szLeftCities += u"<font=3>" + szCityName + u"</font>\n"
else:
szRightCities += u"<font=3>" + szCityName + u"</font>\n"
screen.addMultilineText("Child" + self.AREA1_ID, szLeftCities, self.X_CITY1_AREA+5, self.Y_CITY_AREA+5, self.W_CITY_AREA-10, self.H_CITY_AREA-10, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
screen.addMultilineText("Child" + self.AREA2_ID, szRightCities, self.X_CITY2_AREA+5, self.Y_CITY_AREA+5, self.W_CITY_AREA-10, self.H_CITY_AREA-10, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
# Convert Button....
iLink = 0
if (gc.getPlayer(self.iActivePlayer).canChangeReligion()):
iLink = 1
if (not self.canConvert(iLinkReligion) or iLinkReligion == self.iReligionOriginal):
screen.setText(self.CONVERT_NAME, "Background", self.EXIT_TEXT, CvUtil.FONT_RIGHT_JUSTIFY, self.X_EXIT, self.Y_EXIT, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, 1, 0)
screen.hide(self.CANCEL_NAME)
szAnarchyTime = CyGameTextMgr().setConvertHelp(self.iActivePlayer, iLinkReligion)
else:
screen.setText(self.CONVERT_NAME, "Background", self.CONVERT_TEXT, CvUtil.FONT_RIGHT_JUSTIFY, self.X_EXIT, self.Y_EXIT, self.Z_TEXT, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_CONVERT, iLinkReligion, 1)
screen.show(self.CANCEL_NAME)
szAnarchyTime = localText.getText("TXT_KEY_ANARCHY_TURNS", (gc.getPlayer(self.iActivePlayer).getReligionAnarchyLength(), ))
# Turns of Anarchy Text...
screen.setLabel(self.RELIGION_ANARCHY_WIDGET, "Background", u"<font=3>" + szAnarchyTime + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, self.X_ANARCHY, self.Y_ANARCHY, self.Z_TEXT, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
def getReligionButtonName(self, iReligion):
szName = self.BUTTON_NAME + str(iReligion)
return szName
def getReligionTextName(self, iReligion):
szName = self.RELIGION_NAME + str(iReligion)
return szName
def canConvert(self, iReligion):
iCurrentReligion = gc.getPlayer(self.iActivePlayer).getStateReligion()
if (iReligion == gc.getNumReligionInfos()):
iConvertReligion = -1
else:
iConvertReligion = iReligion
return (iConvertReligion != iCurrentReligion and gc.getPlayer(self.iActivePlayer).canConvert(iConvertReligion))
# Will handle the input for this screen...
def handleInput (self, inputClass):
if (inputClass.getNotifyCode() == NotifyCode.NOTIFY_LISTBOX_ITEM_SELECTED):
screen = self.getScreen()
iIndex = screen.getSelectedPullDownID(self.DEBUG_DROPDOWN_ID)
self.iActivePlayer = screen.getPullDownData(self.DEBUG_DROPDOWN_ID, iIndex)
self.drawReligionInfo()
self.drawCityInfo(self.iReligionSelected)
return 1
elif (self.ReligionScreenInputMap.has_key(inputClass.getFunctionName())):
'Calls function mapped in ReligionScreenInputMap'
# only get from the map if it has the key
# get bound function from map and call it
self.ReligionScreenInputMap.get(inputClass.getFunctionName())(inputClass)
return 1
return 0
def update(self, fDelta):
return
# Religion Button
def ReligionScreenButton( self, inputClass ):
if ( inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED ) :
if (inputClass.getID() == gc.getNumReligionInfos() or gc.getGame().getReligionGameTurnFounded(inputClass.getID()) >= 0) :
self.iReligionSelected = inputClass.getID()
self.iReligionExamined = self.iReligionSelected
self.drawCityInfo(self.iReligionSelected)
elif ( inputClass.getNotifyCode() == NotifyCode.NOTIFY_CURSOR_MOVE_ON ) :
if ( inputClass.getID() == gc.getNumReligionInfos() or gc.getGame().getReligionGameTurnFounded(inputClass.getID()) >= 0) :
self.iReligionExamined = inputClass.getID()
self.drawCityInfo(self.iReligionExamined)
elif ( inputClass.getNotifyCode() == NotifyCode.NOTIFY_CURSOR_MOVE_OFF ) :
self.iReligionExamined = self.iReligionSelected
self.drawCityInfo(self.iReligionSelected)
return 0
def ReligionConvert(self, inputClass):
screen = self.getScreen()
if (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED) :
screen.hideScreen()
def ReligionCancel(self, inputClass):
screen = self.getScreen()
if (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED) :
self.iReligionSelected = self.iReligionOriginal
if (-1 == self.iReligionSelected):
self.iReligionSelected = gc.getNumReligionInfos()
self.drawCityInfo(self.iReligionSelected)
| true | true |
1c34246ce637530967a2bee08f523a8a7a9369c3 | 320 | py | Python | fdk_client/platform/models/Closing.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/Closing.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/Closing.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class Closing(BaseSchema):
    """Marshmallow schema for the `Closing` model (Order swagger.json)."""
    # Order swagger.json
    # Both fields optional; presumably hour-of-day and minute of a
    # closing time -- confirm against the Order API spec.
    hour = fields.Int(required=False)
    minute = fields.Int(required=False)
| 14.545455 | 42 | 0.7 |
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class Closing(BaseSchema):
hour = fields.Int(required=False)
minute = fields.Int(required=False)
| true | true |
1c342489fb983dedab183c7013a31c7de3874fc7 | 456 | py | Python | thecut/pages/migrations/0003_auto_20160211_1514.py | exemplarysoftware/thecut-pagse | 94126622ec790721b0a05db0742bbc4c6733ba2c | [
"Apache-2.0"
] | null | null | null | thecut/pages/migrations/0003_auto_20160211_1514.py | exemplarysoftware/thecut-pagse | 94126622ec790721b0a05db0742bbc4c6733ba2c | [
"Apache-2.0"
] | null | null | null | thecut/pages/migrations/0003_auto_20160211_1514.py | exemplarysoftware/thecut-pagse | 94126622ec790721b0a05db0742bbc4c6733ba2c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_set_ondelete'),
]
operations = [
migrations.AlterField(
model_name='page',
name='url',
field=models.CharField(help_text='Example: /my-page', max_length=100, verbose_name='URL', db_index=True),
),
]
| 22.8 | 117 | 0.614035 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_set_ondelete'),
]
operations = [
migrations.AlterField(
model_name='page',
name='url',
field=models.CharField(help_text='Example: /my-page', max_length=100, verbose_name='URL', db_index=True),
),
]
| true | true |
1c3424aa4b61bdff7606f6972ad265b4e7b500d6 | 5,469 | py | Python | scripts/e271.py | JackKelly/neuralnilm_prototype | 2119292e7d5c8a137797ad3c9abf9f37e7f749af | [
"MIT"
] | 38 | 2015-08-14T14:38:52.000Z | 2021-12-15T03:21:04.000Z | scripts/e271.py | VidipG/neuralnilm_prototype | 2119292e7d5c8a137797ad3c9abf9f37e7f749af | [
"MIT"
] | null | null | null | scripts/e271.py | VidipG/neuralnilm_prototype | 2119292e7d5c8a137797ad3c9abf9f37e7f749af | [
"MIT"
] | 26 | 2015-09-24T20:55:26.000Z | 2021-12-07T15:42:09.000Z | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from neuralnilm.net import BidirectionalRecurrentLayer
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
# Experiment name is derived from this script's own filename (e.g. 'e271').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
# Base directory for per-experiment output paths (joined in main()).
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
# Passed as 'gradient_steps' to the recurrent layers in exp_a().
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
260
standardise inputs and outputs.
261
trying just 3 appliances. Standardisation
263
conv1d between layers
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
    """Mean squared error balanced across target values above/below THRESHOLD.

    The MSE is computed separately for targets above and at-or-below
    THRESHOLD and the two means are averaged, so both groups contribute
    equally to the loss regardless of how many elements each contains.
    """
    sq_error = (x - t) ** 2
    def mask_and_mean_sq_error(mask):
        # Mean squared error over the masked elements; an empty mask
        # would yield NaN, which is replaced by 0 symbolically.
        masked_sq_error = sq_error[mask.nonzero()]
        mean = masked_sq_error.mean()
        mean = ifelse(T.isnan(mean), 0.0, mean)
        return mean
    above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
    below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
    return (above_thresh_mean + below_thresh_mean) / 2.0
# RealApplianceSource configuration: UK-DALE data file, target appliances
# and their limits, and how training/validation sequences are sampled.
# NOTE(review): the per-appliance lists below have 5 entries but only 3
# appliances are enabled -- presumably the extra entries line up with the
# commented-out appliances; confirm before re-enabling them.
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television'
        #'dish washer',
        #['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[300, 500, 200, 2500, 2400],
    on_power_thresholds=[5] * 5,
    max_input_power=5900,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=1500,
    output_one_appliance=False,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    # skip_probability=0.0,
    n_seq_per_batch=50,
    # subsample_target=5,
    include_diff=False,
    clip_appliance_power=True,
    target_is_prediction=False,
    # Standardise both inputs and targets.
    standardise_input=True,
    standardise_targets=True,
    input_padding=0,
    lag=0
)
def change_learning_rate(net, epoch):
    """Epoch callback: raise the learning rate to 0.01 and recompile the net.

    NOTE(review): only used if re-enabled via net_dict['epoch_callbacks'].
    """
    net.updates = partial(nesterov_momentum, learning_rate=0.01)
    net.compile()
def change_subsample(net, epoch):
    """Epoch callback: switch the target to 5x subsampling and rebuild shapes.

    NOTE(review): not referenced anywhere in this script as written.
    """
    net.source.subsample_target = 5
    net.generate_validation_data_and_set_shapes()
# Net keyword arguments shared by all experiments; exp_* functions copy
# this dict and add an experiment-specific 'layers_config'.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    loss_function=scaled_cost,
    updates=partial(nesterov_momentum, learning_rate=0.001),
    do_save_activations=True
    # epoch_callbacks={250: change_learning_rate}
)
def exp_a(name):
    """Build experiment 'a': two bidirectional recurrent layers with a
    mean-pool (ds=5) over the time axis between them, and a linear
    dense output layer.

    NOTE(review): `source` is declared global but its creation is
    commented out below, so it must already exist at module level when
    this runs -- confirm.
    """
    global source
    # source_dict_copy = deepcopy(source_dict)
    # source = RealApplianceSource(**source_dict_copy)
    # Downsample the target 5x to match the pooling layer below.
    source.subsample_target = 5
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(experiment_name=name, source=source))
    net_dict_copy['layers_config'] = [
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 25,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=1.),
            'nonlinearity': tanh
        },
        {
            'type': FeaturePoolLayer,
            'ds': 5,    # number of feature maps to be pooled together
            'axis': 1,  # pool over the time axis
            'pool_function': T.mean
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 25,
            'gradient_steps': GRADIENT_STEPS,
            # Init scale shrunk by fan-in (25 incoming units).
            'W_in_to_hid': Normal(std=1/sqrt(25)),
            'nonlinearity': tanh
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1/sqrt(25)))
        }
    ]
    net = Net(**net_dict_copy)
    return net
def init_experiment(experiment):
    """Build and return the Net for experiment suffix *experiment* (e.g. 'a').

    Looks up the builder function ``exp_<experiment>`` in this module's
    globals instead of eval()-ing a constructed code string.
    """
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    # globals() lookup is safer and clearer than eval() on a string.
    exp_func = globals()['exp_' + experiment]
    net = exp_func(full_exp_name)
    return net
def main():
    """Run each configured experiment in turn.

    KeyboardInterrupt stops the whole batch; TrainingError is logged and
    the loop continues; any other exception is logged and re-raised.
    """
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=None)
        except KeyboardInterrupt:
            # Ctrl-C aborts the remaining experiments too.
            break
        except TrainingError as exception:
            # Known training failures do not abort later experiments.
            print("EXCEPTION:", exception)
        except Exception as exception:
            # Unexpected failures are logged, then propagated.
            print("EXCEPTION:", exception)
            raise


if __name__ == "__main__":
    main()
| 26.678049 | 109 | 0.674163 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg')
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from neuralnilm.net import BidirectionalRecurrentLayer
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=50,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0
)
def change_learning_rate(net, epoch):
net.updates = partial(nesterov_momentum, learning_rate=0.01)
net.compile()
def change_subsample(net, epoch):
net.source.subsample_target = 5
net.generate_validation_data_and_set_shapes()
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.001),
do_save_activations=True
)
def exp_a(name):
global source
source.subsample_target = 5
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': 25,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 5,
'axis': 1,
'pool_function': T.mean
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 25,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(25)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(25)))
}
]
net = Net(**net_dict_copy)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
raise
if __name__ == "__main__":
main()
| true | true |
1c3425cee571a808d9c84539b9ebe5165cfce67e | 1,134 | py | Python | tests/test_set_attr.py | joinee0208/manim | 8e1407e7fd5c01258f75748cc947d31d67ffd92b | [
"MIT"
] | null | null | null | tests/test_set_attr.py | joinee0208/manim | 8e1407e7fd5c01258f75748cc947d31d67ffd92b | [
"MIT"
] | null | null | null | tests/test_set_attr.py | joinee0208/manim | 8e1407e7fd5c01258f75748cc947d31d67ffd92b | [
"MIT"
] | null | null | null | from __future__ import annotations
import numpy as np
from manim import config
from manim.constants import RIGHT
from manim.mobject.geometry import Square
def test_Data():
config.renderer = "opengl"
a = Square().move_to(RIGHT)
data_bb = a.data["bounding_box"]
assert np.array_equal(
data_bb,
np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [2.0, 1.0, 0.0]]),
)
# test that calling the attribute equals calling it from self.data
assert np.array_equal(a.bounding_box, data_bb)
# test that the array can be indexed
assert np.array_equal(
a.bounding_box[1],
np.array(
[1.0, 0.0, 0.0],
),
)
# test that a value can be set
a.bounding_box[1] = 300
# test that both the attr and self.data arrays match after adjusting a value
data_bb = a.data["bounding_box"]
assert np.array_equal(
data_bb,
np.array([[0.0, -1.0, 0.0], [300.0, 300.0, 300.0], [2.0, 1.0, 0.0]]),
)
assert np.array_equal(a.bounding_box, data_bb)
config.renderer = "cairo" # needs to be here or else the following cairo tests fail
| 26.372093 | 88 | 0.62522 | from __future__ import annotations
import numpy as np
from manim import config
from manim.constants import RIGHT
from manim.mobject.geometry import Square
def test_Data():
config.renderer = "opengl"
a = Square().move_to(RIGHT)
data_bb = a.data["bounding_box"]
assert np.array_equal(
data_bb,
np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [2.0, 1.0, 0.0]]),
)
assert np.array_equal(a.bounding_box, data_bb)
assert np.array_equal(
a.bounding_box[1],
np.array(
[1.0, 0.0, 0.0],
),
)
a.bounding_box[1] = 300
data_bb = a.data["bounding_box"]
assert np.array_equal(
data_bb,
np.array([[0.0, -1.0, 0.0], [300.0, 300.0, 300.0], [2.0, 1.0, 0.0]]),
)
assert np.array_equal(a.bounding_box, data_bb)
config.renderer = "cairo"
| true | true |
1c3426d04076f8ffbda91a465f8b7007e6e7c23c | 13,436 | py | Python | userbot/plugins/stickers.py | midhunkm1294-bit/TeleBot | b4309fb662e834d9d3826172b69fd07d42ef83a2 | [
"MIT"
] | null | null | null | userbot/plugins/stickers.py | midhunkm1294-bit/TeleBot | b4309fb662e834d9d3826172b69fd07d42ef83a2 | [
"MIT"
] | null | null | null | userbot/plugins/stickers.py | midhunkm1294-bit/TeleBot | b4309fb662e834d9d3826172b69fd07d42ef83a2 | [
"MIT"
] | null | null | null | """Make / Download Telegram Sticker Packs without installing Third Party applications
Available Commands:
.kangsticker [Optional Emoji]
.packinfo
.getsticker"""
from telethon import events
from io import BytesIO
from PIL import Image
import asyncio
import datetime
from collections import defaultdict
import math
import os
import requests
import zipfile
from telethon.errors.rpcerrorlist import StickersetInvalidError
from telethon.errors import MessageNotModifiedError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputMediaUploadedDocument,
InputPeerNotifySettings,
InputStickerSetID,
InputStickerSetShortName,
MessageMediaPhoto
)
from userbot.utils import admin_cmd
from userbot import ALIVE_NAME
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "No name set yet nibba, check pinned in @TeleBotHelp"
@borg.on(admin_cmd(pattern="kang ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to a photo to add to my personal sticker pack..")
return
reply_message = await event.get_reply_message()
sticker_emoji = "🔥"
input_str = event.pattern_match.group(1)
if input_str:
sticker_emoji = input_str
me = borg.me
userid = event.from_id
packname = f"@TeleBotHelp kang by {userid}"
packshortname = f" My Master {userid} Awesome Pack By Friday " # format: Uni_Borg_userid
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "@UniBorg_Sticker.png"
file = await borg.download_file(reply_message.media)
uploaded_sticker = None
if is_a_s:
file_ext_ns_ion = "AnimatedSticker.tgs"
uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion)
packname = f"{userid}'s @AnimatedStickersGroup"
if userid == 719877937:
packshortname = "TeleBot_Animated"
else:
packshortname = f"Uni_Borg_{userid}_as" # format: Uni_Borg_userid
elif not is_message_image(reply_message):
await event.edit("Invalid message type")
return
else:
with BytesIO(file) as mem_file, BytesIO() as sticker:
resize_image(mem_file, sticker)
sticker.seek(0)
uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion)
await event.edit("`Sticker is being kanged...... `")
async with borg.conversation("@Stickers") as bot_conv:
now = datetime.datetime.now()
dt = now + datetime.timedelta(minutes=1)
if not await stickerset_exists(bot_conv, packshortname):
await silently_send_message(bot_conv, "/cancel")
if is_a_s:
response = await silently_send_message(bot_conv, "/newanimated")
else:
response = await silently_send_message(bot_conv, "/newpack")
if "Yay!" not in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
response = await silently_send_message(bot_conv, packname)
if not response.text.startswith("Alright!"):
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
w = await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/publish")
response = await silently_send_message(bot_conv, f"<{packname}>")
await silently_send_message(bot_conv, "/skip")
response = await silently_send_message(bot_conv, packshortname)
if response.text == "Sorry, this short name is already taken.":
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
else:
await silently_send_message(bot_conv, "/cancel")
await silently_send_message(bot_conv, "/addsticker")
await silently_send_message(bot_conv, packshortname)
await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, response)
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/done")
await event.edit(f"**BOOM**\n`Sticker added! This sticker has been Kanged And Can Be Found` [here](t.me/addstickers/{packshortname})"
f" by {DEFAULTUSER}")
@borg.on(admin_cmd(pattern="packinfo"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to any sticker to get it's pack info.")
return
rep_msg = await event.get_reply_message()
if not rep_msg.document:
await event.edit("Reply to any sticker to get it's pack info.")
return
stickerset_attr_s = rep_msg.document.attributes
stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker)
if not stickerset_attr.stickerset:
await event.edit("sticker does not belong to a pack.")
return
get_stickerset = await borg(
GetStickerSetRequest(
InputStickerSetID(
id=stickerset_attr.stickerset.id,
access_hash=stickerset_attr.stickerset.access_hash
)
)
)
pack_emojis = []
for document_sticker in get_stickerset.packs:
if document_sticker.emoticon not in pack_emojis:
pack_emojis.append(document_sticker.emoticon)
await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`"
f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n"
f"**Official:** `{get_stickerset.set.official}`\n"
f"**Archived:** `{get_stickerset.set.archived}`\n"
f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n"
f"**Emojis In Pack:** {' '.join(pack_emojis)}")
@borg.on(admin_cmd(pattern="getsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
if not reply_message.sticker:
return
sticker = reply_message.sticker
sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker)
if not sticker_attrib.stickerset:
await event.reply("This sticker is not part of a pack")
return
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "webp"
file_caption = "https://t.me/RoseSupport/33801"
if is_a_s:
file_ext_ns_ion = "tgs"
file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get lottIE JSON containing the vector information."
sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))
pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt")
if os.path.isfile(pack_file):
os.remove(pack_file)
# Sticker emojis are retrieved as a mapping of
# <emoji>: <list of document ids that have this emoji>
# So we need to build a mapping of <document id>: <list of emoji>
# Thanks, Durov
emojis = defaultdict(str)
for pack in sticker_set.packs:
for document_id in pack.documents:
emojis[document_id] += pack.emoticon
async def download(sticker, emojis, path, file):
await borg.download_media(sticker, file=os.path.join(path, file))
with open(pack_file, "a") as f:
f.write(f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},")
pending_tasks = [
asyncio.ensure_future(
download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}")
) for i, document in enumerate(sticker_set.documents)
]
await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...")
num_tasks = len(pending_tasks)
while 1:
done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5,
return_when=asyncio.FIRST_COMPLETED)
try:
await event.edit(
f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}")
except MessageNotModifiedError:
pass
if not pending_tasks:
break
await event.edit("Downloading to my local completed")
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name
zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED)
zipdir(directory_name, zipf)
zipf.close()
await borg.send_file(
event.chat_id,
directory_name + ".zip",
caption=file_caption,
force_document=True,
allow_cache=False,
reply_to=event.message.id,
progress_callback=progress
)
try:
os.remove(directory_name + ".zip")
os.remove(directory_name)
except:
pass
await event.edit("task Completed")
await asyncio.sleep(3)
await event.delete()
else:
await event.edit("TODO: Not Implemented")
# Helpers
def is_it_animated_sticker(message):
try:
if message.media and message.media.document:
mime_type = message.media.document.mime_type
if "tgsticker" in mime_type:
return True
else:
return False
else:
return False
except:
return False
def is_message_image(message):
if message.media:
if isinstance(message.media, MessageMediaPhoto):
return True
if message.media.document:
if message.media.document.mime_type.split("/")[0] == "image":
return True
return False
return False
async def silently_send_message(conv, text):
await conv.send_message(text)
response = await conv.get_response()
await conv.mark_read(message=response)
return response
async def stickerset_exists(conv, setname):
try:
await borg(GetStickerSetRequest(InputStickerSetShortName(setname)))
response = await silently_send_message(conv, "/addsticker")
if response.text == "Invalid pack selected.":
await silently_send_message(conv, "/cancel")
return False
await silently_send_message(conv, "/cancel")
return True
except StickersetInvalidError:
return False
def resize_image(image, save_locaton):
""" Copyright Rhyse Simpson:
https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py
"""
im = Image.open(image)
maxsize = (512, 512)
if (im.width and im.height) < 512:
size1 = im.width
size2 = im.height
if im.width > im.height:
scale = 512 / size1
size1new = 512
size2new = size2 * scale
else:
scale = 512 / size2
size1new = size1 * scale
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
im = im.resize(sizenew)
else:
im.thumbnail(maxsize)
im.save(save_locaton, "PNG")
def progress(current, total):
logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100))
def find_instance(items, class_or_tuple):
for item in items:
if isinstance(item, class_or_tuple):
return item
return None
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
os.remove(os.path.join(root, file))
| 39.751479 | 143 | 0.620572 | from telethon import events
from io import BytesIO
from PIL import Image
import asyncio
import datetime
from collections import defaultdict
import math
import os
import requests
import zipfile
from telethon.errors.rpcerrorlist import StickersetInvalidError
from telethon.errors import MessageNotModifiedError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputMediaUploadedDocument,
InputPeerNotifySettings,
InputStickerSetID,
InputStickerSetShortName,
MessageMediaPhoto
)
from userbot.utils import admin_cmd
from userbot import ALIVE_NAME
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "No name set yet nibba, check pinned in @TeleBotHelp"
@borg.on(admin_cmd(pattern="kang ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to a photo to add to my personal sticker pack..")
return
reply_message = await event.get_reply_message()
sticker_emoji = "🔥"
input_str = event.pattern_match.group(1)
if input_str:
sticker_emoji = input_str
me = borg.me
userid = event.from_id
packname = f"@TeleBotHelp kang by {userid}"
packshortname = f" My Master {userid} Awesome Pack By Friday "
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "@UniBorg_Sticker.png"
file = await borg.download_file(reply_message.media)
uploaded_sticker = None
if is_a_s:
file_ext_ns_ion = "AnimatedSticker.tgs"
uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion)
packname = f"{userid}'s @AnimatedStickersGroup"
if userid == 719877937:
packshortname = "TeleBot_Animated"
else:
packshortname = f"Uni_Borg_{userid}_as" # format: Uni_Borg_userid
elif not is_message_image(reply_message):
await event.edit("Invalid message type")
return
else:
with BytesIO(file) as mem_file, BytesIO() as sticker:
resize_image(mem_file, sticker)
sticker.seek(0)
uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion)
await event.edit("`Sticker is being kanged...... `")
async with borg.conversation("@Stickers") as bot_conv:
now = datetime.datetime.now()
dt = now + datetime.timedelta(minutes=1)
if not await stickerset_exists(bot_conv, packshortname):
await silently_send_message(bot_conv, "/cancel")
if is_a_s:
response = await silently_send_message(bot_conv, "/newanimated")
else:
response = await silently_send_message(bot_conv, "/newpack")
if "Yay!" not in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
response = await silently_send_message(bot_conv, packname)
if not response.text.startswith("Alright!"):
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
w = await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/publish")
response = await silently_send_message(bot_conv, f"<{packname}>")
await silently_send_message(bot_conv, "/skip")
response = await silently_send_message(bot_conv, packshortname)
if response.text == "Sorry, this short name is already taken.":
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
else:
await silently_send_message(bot_conv, "/cancel")
await silently_send_message(bot_conv, "/addsticker")
await silently_send_message(bot_conv, packshortname)
await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, response)
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/done")
await event.edit(f"**BOOM**\n`Sticker added! This sticker has been Kanged And Can Be Found` [here](t.me/addstickers/{packshortname})"
f" by {DEFAULTUSER}")
@borg.on(admin_cmd(pattern="packinfo"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to any sticker to get it's pack info.")
return
rep_msg = await event.get_reply_message()
if not rep_msg.document:
await event.edit("Reply to any sticker to get it's pack info.")
return
stickerset_attr_s = rep_msg.document.attributes
stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker)
if not stickerset_attr.stickerset:
await event.edit("sticker does not belong to a pack.")
return
get_stickerset = await borg(
GetStickerSetRequest(
InputStickerSetID(
id=stickerset_attr.stickerset.id,
access_hash=stickerset_attr.stickerset.access_hash
)
)
)
pack_emojis = []
for document_sticker in get_stickerset.packs:
if document_sticker.emoticon not in pack_emojis:
pack_emojis.append(document_sticker.emoticon)
await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`"
f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n"
f"**Official:** `{get_stickerset.set.official}`\n"
f"**Archived:** `{get_stickerset.set.archived}`\n"
f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n"
f"**Emojis In Pack:** {' '.join(pack_emojis)}")
@borg.on(admin_cmd(pattern="getsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
if not reply_message.sticker:
return
sticker = reply_message.sticker
sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker)
if not sticker_attrib.stickerset:
await event.reply("This sticker is not part of a pack")
return
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "webp"
file_caption = "https://t.me/RoseSupport/33801"
if is_a_s:
file_ext_ns_ion = "tgs"
file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get lottIE JSON containing the vector information."
sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))
pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt")
if os.path.isfile(pack_file):
os.remove(pack_file)
# Sticker emojis are retrieved as a mapping of
# <emoji>: <list of document ids that have this emoji>
# So we need to build a mapping of <document id>: <list of emoji>
# Thanks, Durov
emojis = defaultdict(str)
for pack in sticker_set.packs:
for document_id in pack.documents:
emojis[document_id] += pack.emoticon
async def download(sticker, emojis, path, file):
await borg.download_media(sticker, file=os.path.join(path, file))
with open(pack_file, "a") as f:
f.write(f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},")
pending_tasks = [
asyncio.ensure_future(
download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}")
) for i, document in enumerate(sticker_set.documents)
]
await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...")
num_tasks = len(pending_tasks)
while 1:
done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5,
return_when=asyncio.FIRST_COMPLETED)
try:
await event.edit(
f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}")
except MessageNotModifiedError:
pass
if not pending_tasks:
break
await event.edit("Downloading to my local completed")
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name
zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED)
zipdir(directory_name, zipf)
zipf.close()
await borg.send_file(
event.chat_id,
directory_name + ".zip",
caption=file_caption,
force_document=True,
allow_cache=False,
reply_to=event.message.id,
progress_callback=progress
)
try:
os.remove(directory_name + ".zip")
os.remove(directory_name)
except:
pass
await event.edit("task Completed")
await asyncio.sleep(3)
await event.delete()
else:
await event.edit("TODO: Not Implemented")
# Helpers
def is_it_animated_sticker(message):
try:
if message.media and message.media.document:
mime_type = message.media.document.mime_type
if "tgsticker" in mime_type:
return True
else:
return False
else:
return False
except:
return False
def is_message_image(message):
if message.media:
if isinstance(message.media, MessageMediaPhoto):
return True
if message.media.document:
if message.media.document.mime_type.split("/")[0] == "image":
return True
return False
return False
async def silently_send_message(conv, text):
await conv.send_message(text)
response = await conv.get_response()
await conv.mark_read(message=response)
return response
async def stickerset_exists(conv, setname):
try:
await borg(GetStickerSetRequest(InputStickerSetShortName(setname)))
response = await silently_send_message(conv, "/addsticker")
if response.text == "Invalid pack selected.":
await silently_send_message(conv, "/cancel")
return False
await silently_send_message(conv, "/cancel")
return True
except StickersetInvalidError:
return False
def resize_image(image, save_locaton):
im = Image.open(image)
maxsize = (512, 512)
if (im.width and im.height) < 512:
size1 = im.width
size2 = im.height
if im.width > im.height:
scale = 512 / size1
size1new = 512
size2new = size2 * scale
else:
scale = 512 / size2
size1new = size1 * scale
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
im = im.resize(sizenew)
else:
im.thumbnail(maxsize)
im.save(save_locaton, "PNG")
def progress(current, total):
logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100))
def find_instance(items, class_or_tuple):
for item in items:
if isinstance(item, class_or_tuple):
return item
return None
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
os.remove(os.path.join(root, file))
| true | true |
1c3427370eeee82128768c3f43bb6fff1f493fe1 | 20,617 | py | Python | keystoneclient/shell.py | citrix-openstack-build/python-keystoneclient | e170955d6de5cbf521d54105bdefaf606ccdb356 | [
"Apache-1.1"
] | null | null | null | keystoneclient/shell.py | citrix-openstack-build/python-keystoneclient | e170955d6de5cbf521d54105bdefaf606ccdb356 | [
"Apache-1.1"
] | null | null | null | keystoneclient/shell.py | citrix-openstack-build/python-keystoneclient | e170955d6de5cbf521d54105bdefaf606ccdb356 | [
"Apache-1.1"
] | null | null | null | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Pending deprecation: Command-line interface to the OpenStack Identity API.
This CLI is pending deprecation in favor of python-openstackclient. For a
Python library, continue using python-keystoneclient.
"""
from __future__ import print_function
import argparse
import getpass
import os
import sys
import keystoneclient
from keystoneclient import access
from keystoneclient.contrib.bootstrap import shell as shell_bootstrap
from keystoneclient import exceptions as exc
from keystoneclient.generic import shell as shell_generic
from keystoneclient import utils
from keystoneclient.v2_0 import shell as shell_v2_0
def positive_non_zero_float(argument_value):
if argument_value is None:
return None
try:
value = float(argument_value)
except ValueError:
msg = "%s must be a float" % argument_value
raise argparse.ArgumentTypeError(msg)
if value <= 0:
msg = "%s must be greater than 0" % argument_value
raise argparse.ArgumentTypeError(msg)
return value
def env(*vars, **kwargs):
"""Search for the first defined of possibly many env vars
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
class OpenStackIdentityShell(object):
def __init__(self, parser_class=argparse.ArgumentParser):
self.parser_class = parser_class
def get_base_parser(self):
parser = self.parser_class(
prog='keystone',
description=__doc__.strip(),
epilog='See "keystone help COMMAND" '
'for help on a specific command.',
add_help=False,
formatter_class=OpenStackHelpFormatter,
)
# Global arguments
parser.add_argument('-h',
'--help',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--version',
action='version',
version=keystoneclient.__version__,
help="Shows the client version and exits")
parser.add_argument('--debug',
default=False,
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--timeout',
default=600,
type=positive_non_zero_float,
metavar='<seconds>',
help="Set request timeout (in seconds)")
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=env('OS_USERNAME'),
help='Name used for authentication with the '
'OpenStack Identity service. '
'Defaults to env[OS_USERNAME]')
parser.add_argument('--os_username',
help=argparse.SUPPRESS)
parser.add_argument('--os-password',
metavar='<auth-password>',
default=env('OS_PASSWORD'),
help='Password used for authentication with the '
'OpenStack Identity service. '
'Defaults to env[OS_PASSWORD]')
parser.add_argument('--os_password',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help='Tenant to request authorization on. '
'Defaults to env[OS_TENANT_NAME]')
parser.add_argument('--os_tenant_name',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-id',
metavar='<tenant-id>',
default=env('OS_TENANT_ID'),
help='Tenant to request authorization on. '
'Defaults to env[OS_TENANT_ID]')
parser.add_argument('--os_tenant_id',
help=argparse.SUPPRESS)
parser.add_argument('--os-auth-url',
metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help='Specify the Identity endpoint to use for '
'authentication. '
'Defaults to env[OS_AUTH_URL]')
parser.add_argument('--os_auth_url',
help=argparse.SUPPRESS)
parser.add_argument('--os-region-name',
metavar='<region-name>',
default=env('OS_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME]')
parser.add_argument('--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument('--os-identity-api-version',
metavar='<identity-api-version>',
default=env('OS_IDENTITY_API_VERSION',
'KEYSTONE_VERSION'),
help='Defaults to env[OS_IDENTITY_API_VERSION]'
' or 2.0')
parser.add_argument('--os_identity_api_version',
help=argparse.SUPPRESS)
parser.add_argument('--os-token',
metavar='<service-token>',
default=env('OS_SERVICE_TOKEN'),
help='Specify an existing token to use instead of '
'retrieving one via authentication (e.g. '
'with username & password). '
'Defaults to env[OS_SERVICE_TOKEN]')
parser.add_argument('--os-endpoint',
metavar='<service-endpoint>',
default=env('OS_SERVICE_ENDPOINT'),
help='Specify an endpoint to use instead of '
'retrieving one from the service catalog '
'(via authentication). '
'Defaults to env[OS_SERVICE_ENDPOINT]')
parser.add_argument('--os-cacert',
metavar='<ca-certificate>',
default=env('OS_CACERT', default=None),
help='Specify a CA bundle file to use in '
'verifying a TLS (https) server certificate. '
'Defaults to env[OS_CACERT]')
parser.add_argument('--os_cacert',
help=argparse.SUPPRESS)
parser.add_argument('--insecure',
default=False,
action="store_true",
help='Explicitly allow keystoneclient to perform '
'"insecure" TLS (https) requests. The '
'server\'s certificate will not be verified '
'against any certificate authorities. This '
'option should be used with caution.')
parser.add_argument('--os-cert',
metavar='<certificate>',
default=env('OS_CERT'),
help='Defaults to env[OS_CERT]')
parser.add_argument('--os_cert',
help=argparse.SUPPRESS)
parser.add_argument('--os-key',
metavar='<key>',
default=env('OS_KEY'),
help='Defaults to env[OS_KEY]')
parser.add_argument('--os_key',
help=argparse.SUPPRESS)
parser.add_argument('--os-cache',
default=env('OS_CACHE', default=False),
action='store_true',
help='Use the auth token cache. '
'Defaults to env[OS_CACHE]')
parser.add_argument('--os_cache',
help=argparse.SUPPRESS)
parser.add_argument('--force-new-token',
default=False,
action="store_true",
dest='force_new_token',
help="If the keyring is available and in use, "
"token will always be stored and fetched "
"from the keyring until the token has "
"expired. Use this option to request a "
"new token and replace the existing one "
"in the keyring.")
parser.add_argument('--stale-duration',
metavar='<seconds>',
default=access.STALE_TOKEN_DURATION,
dest='stale_duration',
help="Stale duration (in seconds) used to "
"determine whether a token has expired "
"when retrieving it from keyring. This "
"is useful in mitigating process or "
"network delays. Default is %s seconds." % (
access.STALE_TOKEN_DURATION))
#FIXME(heckj):
# deprecated command line options for essex compatibility. To be
# removed in Grizzly release cycle.
parser.add_argument('--token',
metavar='<service-token>',
dest='os_token',
default=env('SERVICE_TOKEN'),
help=argparse.SUPPRESS)
parser.add_argument('--endpoint',
dest='os_endpoint',
metavar='<service-endpoint>',
default=env('SERVICE_ENDPOINT'),
help=argparse.SUPPRESS)
return parser
def get_subcommand_parser(self, version):
        """Build the complete CLI parser with all subcommands registered.

        *version* selects which versioned action module contributes
        subcommands; unknown versions fall back to the v2.0 action set.
        """
        parser = self.get_base_parser()
        self.subcommands = {}
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        versioned_actions = {'2.0': shell_v2_0}
        actions_module = versioned_actions.get(version, shell_v2_0)
        # Register actions from the versioned module, the generic/bootstrap
        # helpers, and this shell object itself (do_help, etc.).
        for source in (actions_module, shell_generic, shell_bootstrap, self):
            self._find_actions(subparsers, source)
        self._add_bash_completion_subparser(subparsers)
        return parser
def _add_bash_completion_subparser(self, subparsers):
        """Register the hidden 'bash_completion' subcommand.

        It has no help entry of its own; it exists so the shell-completion
        script can query available commands (see do_bash_completion).
        """
        subparser = subparsers.add_parser('bash_completion',
                                          add_help=False,
                                          formatter_class=OpenStackHelpFormatter)
        self.subcommands['bash_completion'] = subparser
        subparser.set_defaults(func=self.do_bash_completion)
def _find_actions(self, subparsers, actions_module):
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
# I prefer to be hypen-separated instead of underscores.
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(
command,
help=help,
description=desc,
add_help=False,
formatter_class=OpenStackHelpFormatter)
subparser.add_argument('-h', '--help', action='help',
help=argparse.SUPPRESS)
self.subcommands[command] = subparser
group = subparser.add_argument_group(title='Arguments')
for (args, kwargs) in arguments:
group.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
def auth_check(self, args):
        """Validate the credential combination carried on *args*.

        Two mutually exclusive modes are accepted:
        token + endpoint (bypasses authentication; prints a warning if
        username/password/auth-url are also supplied), or
        username/password against an auth URL (prompting for the password
        on an interactive tty when it is missing).
        Raises exc.CommandError when neither combination is usable.
        May mutate args.os_password with the prompted value.
        """
        if args.os_token or args.os_endpoint:
            # Token mode: both halves must be present.
            if not args.os_token:
                raise exc.CommandError(
                    'Expecting a token provided via either --os-token or '
                    'env[OS_SERVICE_TOKEN]')
            if not args.os_endpoint:
                raise exc.CommandError(
                    'Expecting an endpoint provided via either '
                    '--os-endpoint or env[OS_SERVICE_ENDPOINT]')
            # user supplied a token and endpoint and at least one other cred
            if args.os_username or args.os_password or args.os_auth_url:
                msg = ('WARNING: Bypassing authentication using a token & '
                       'endpoint (authentication credentials are being '
                       'ignored).')
                print(msg)
        else:
            # Credential mode: an auth URL is mandatory.
            if not args.os_auth_url:
                raise exc.CommandError(
                    'Expecting an auth URL via either --os-auth-url or '
                    'env[OS_AUTH_URL]')
            if args.os_username or args.os_password:
                if not args.os_username:
                    raise exc.CommandError(
                        'Expecting a username provided via either '
                        '--os-username or env[OS_USERNAME]')
                if not args.os_password:
                    # No password. If we've got a tty, try prompting for it.
                    if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
                        # Check for Ctrl-D
                        try:
                            args.os_password = getpass.getpass('OS Password: ')
                        except EOFError:
                            pass
                    # No password because we didn't have a tty or the
                    # user hit Ctrl-D when prompted?
                    if not args.os_password:
                        raise exc.CommandError(
                            'Expecting a password provided via either '
                            '--os-password, env[OS_PASSWORD], or '
                            'prompted response')
            else:
                raise exc.CommandError('Expecting authentication method via'
                                       '\n either a service token, '
                                       '--os-token or env[OS_SERVICE_TOKEN], '
                                       '\n credentials, '
                                       '--os-username or env[OS_USERNAME]')
def main(self, argv):
        """Top-level dispatch: parse *argv*, build a client, run a subcommand.

        Returns 0 for the help / bash-completion short-circuits; otherwise
        invokes the selected subcommand callback for its side effects.
        Stores the built client on self.cs and the parser on self.parser.
        """
        # Parse args once to find version
        parser = self.get_base_parser()
        (options, args) = parser.parse_known_args(argv)
        # build available subcommands based on version
        api_version = options.os_identity_api_version
        subcommand_parser = self.get_subcommand_parser(api_version)
        self.parser = subcommand_parser
        # Handle top-level --help/-h before attempting to parse
        # a command off the command line
        if not argv or options.help:
            self.do_help(options)
            return 0
        # Parse args again and call whatever callback was selected
        args = subcommand_parser.parse_args(argv)
        # Short-circuit and deal with help command right away.
        if args.func == self.do_help:
            self.do_help(args)
            return 0
        elif args.func == self.do_bash_completion:
            self.do_bash_completion(args)
            return 0
        # TODO(heckj): supporting backwards compatibility with environment
        # variables. To be removed after DEVSTACK is updated, ideally in
        # the Grizzly release cycle.
        args.os_token = args.os_token or env('SERVICE_TOKEN')
        args.os_endpoint = args.os_endpoint or env('SERVICE_ENDPOINT')
        if utils.isunauthenticated(args.func):
            # Commands marked unauthenticated (e.g. discovery) get a bare
            # generic client pointed at the auth URL.
            self.cs = shell_generic.CLIENT_CLASS(endpoint=args.os_auth_url,
                                                 cacert=args.os_cacert,
                                                 key=args.os_key,
                                                 cert=args.os_cert,
                                                 insecure=args.insecure,
                                                 debug=args.debug,
                                                 timeout=args.timeout)
        else:
            self.auth_check(args)
            # Only use the raw token when an endpoint was also supplied;
            # otherwise normal authentication resolves one.
            token = None
            if args.os_token and args.os_endpoint:
                token = args.os_token
            api_version = options.os_identity_api_version
            self.cs = self.get_api_class(api_version)(
                username=args.os_username,
                tenant_name=args.os_tenant_name,
                tenant_id=args.os_tenant_id,
                token=token,
                endpoint=args.os_endpoint,
                password=args.os_password,
                auth_url=args.os_auth_url,
                region_name=args.os_region_name,
                cacert=args.os_cacert,
                key=args.os_key,
                cert=args.os_cert,
                insecure=args.insecure,
                debug=args.debug,
                use_keyring=args.os_cache,
                force_new_token=args.force_new_token,
                stale_duration=args.stale_duration,
                timeout=args.timeout)
        try:
            args.func(self.cs, args)
        except exc.Unauthorized:
            raise exc.CommandError("Invalid OpenStack Identity credentials.")
        except exc.AuthorizationFailure:
            raise exc.CommandError("Unable to authorize user")
def get_api_class(self, version):
        """Return the client class for *version*, defaulting to v2.0."""
        known_versions = {"2.0": shell_v2_0.CLIENT_CLASS}
        return known_versions.get(version, shell_v2_0.CLIENT_CLASS)
def do_bash_completion(self, args):
        """Print every subcommand name and option string on one line.

        The keystone.bash_completion script reads this output instead of
        hard-coding the command list.
        """
        commands = set(self.subcommands)
        options = set()
        for subparser in self.subcommands.values():
            options.update(subparser._optionals._option_string_actions)
        # The completion helpers themselves are not completion candidates.
        commands.remove('bash-completion')
        commands.remove('bash_completion')
        print(' '.join(commands | options))
@utils.arg('command', metavar='<subcommand>', nargs='?',
help='Display help for <subcommand>')
def do_help(self, args):
"""Display help about this program or one of its subcommands."""
if getattr(args, 'command', None):
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exc.CommandError("'%s' is not a valid subcommand" %
args.command)
else:
self.parser.print_help()
# Help formatter that capitalizes argparse section headings.
class OpenStackHelpFormatter(argparse.HelpFormatter):
    def start_section(self, heading):
        """Open a help section with the heading's first letter upper-cased."""
        capitalized = heading[0].upper() + heading[1:]
        super(OpenStackHelpFormatter, self).start_section(capitalized)
def main():
    """CLI entry point: run the Identity shell, report failures on stderr."""
    try:
        shell = OpenStackIdentityShell()
        shell.main(sys.argv[1:])
    except Exception as err:
        print(err, file=sys.stderr)
        sys.exit(1)
if __name__ == "__main__":
    sys.exit(main())
| 41.482897 | 79 | 0.51962 |
from __future__ import print_function
import argparse
import getpass
import os
import sys
import keystoneclient
from keystoneclient import access
from keystoneclient.contrib.bootstrap import shell as shell_bootstrap
from keystoneclient import exceptions as exc
from keystoneclient.generic import shell as shell_generic
from keystoneclient import utils
from keystoneclient.v2_0 import shell as shell_v2_0
def positive_non_zero_float(argument_value):
    """argparse type: parse *argument_value* as a float strictly greater than 0.

    ``None`` passes through unchanged; an unparsable value or one <= 0
    raises ``argparse.ArgumentTypeError``.
    """
    if argument_value is None:
        return None
    try:
        parsed = float(argument_value)
    except ValueError:
        raise argparse.ArgumentTypeError(
            "%s must be a float" % argument_value)
    if parsed <= 0:
        raise argparse.ArgumentTypeError(
            "%s must be greater than 0" % argument_value)
    return parsed
def env(*var_names, **kwargs):
    """Return the first non-empty environment variable among *var_names*.

    Falls back to ``kwargs['default']`` (or ``''``) when none is set or all
    are empty. (Parameter renamed from ``vars`` to avoid shadowing the
    builtin; call sites are unaffected since it is positional-variadic.)
    """
    for name in var_names:
        value = os.environ.get(name)
        if value:
            return value
    return kwargs.get('default', '')
class OpenStackIdentityShell(object):
def __init__(self, parser_class=argparse.ArgumentParser):
self.parser_class = parser_class
def get_base_parser(self):
parser = self.parser_class(
prog='keystone',
description=__doc__.strip(),
epilog='See "keystone help COMMAND" '
'for help on a specific command.',
add_help=False,
formatter_class=OpenStackHelpFormatter,
)
parser.add_argument('-h',
'--help',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--version',
action='version',
version=keystoneclient.__version__,
help="Shows the client version and exits")
parser.add_argument('--debug',
default=False,
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--timeout',
default=600,
type=positive_non_zero_float,
metavar='<seconds>',
help="Set request timeout (in seconds)")
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=env('OS_USERNAME'),
help='Name used for authentication with the '
'OpenStack Identity service. '
'Defaults to env[OS_USERNAME]')
parser.add_argument('--os_username',
help=argparse.SUPPRESS)
parser.add_argument('--os-password',
metavar='<auth-password>',
default=env('OS_PASSWORD'),
help='Password used for authentication with the '
'OpenStack Identity service. '
'Defaults to env[OS_PASSWORD]')
parser.add_argument('--os_password',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help='Tenant to request authorization on. '
'Defaults to env[OS_TENANT_NAME]')
parser.add_argument('--os_tenant_name',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-id',
metavar='<tenant-id>',
default=env('OS_TENANT_ID'),
help='Tenant to request authorization on. '
'Defaults to env[OS_TENANT_ID]')
parser.add_argument('--os_tenant_id',
help=argparse.SUPPRESS)
parser.add_argument('--os-auth-url',
metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help='Specify the Identity endpoint to use for '
'authentication. '
'Defaults to env[OS_AUTH_URL]')
parser.add_argument('--os_auth_url',
help=argparse.SUPPRESS)
parser.add_argument('--os-region-name',
metavar='<region-name>',
default=env('OS_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME]')
parser.add_argument('--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument('--os-identity-api-version',
metavar='<identity-api-version>',
default=env('OS_IDENTITY_API_VERSION',
'KEYSTONE_VERSION'),
help='Defaults to env[OS_IDENTITY_API_VERSION]'
' or 2.0')
parser.add_argument('--os_identity_api_version',
help=argparse.SUPPRESS)
parser.add_argument('--os-token',
metavar='<service-token>',
default=env('OS_SERVICE_TOKEN'),
help='Specify an existing token to use instead of '
'retrieving one via authentication (e.g. '
'with username & password). '
'Defaults to env[OS_SERVICE_TOKEN]')
parser.add_argument('--os-endpoint',
metavar='<service-endpoint>',
default=env('OS_SERVICE_ENDPOINT'),
help='Specify an endpoint to use instead of '
'retrieving one from the service catalog '
'(via authentication). '
'Defaults to env[OS_SERVICE_ENDPOINT]')
parser.add_argument('--os-cacert',
metavar='<ca-certificate>',
default=env('OS_CACERT', default=None),
help='Specify a CA bundle file to use in '
'verifying a TLS (https) server certificate. '
'Defaults to env[OS_CACERT]')
parser.add_argument('--os_cacert',
help=argparse.SUPPRESS)
parser.add_argument('--insecure',
default=False,
action="store_true",
help='Explicitly allow keystoneclient to perform '
'"insecure" TLS (https) requests. The '
'server\'s certificate will not be verified '
'against any certificate authorities. This '
'option should be used with caution.')
parser.add_argument('--os-cert',
metavar='<certificate>',
default=env('OS_CERT'),
help='Defaults to env[OS_CERT]')
parser.add_argument('--os_cert',
help=argparse.SUPPRESS)
parser.add_argument('--os-key',
metavar='<key>',
default=env('OS_KEY'),
help='Defaults to env[OS_KEY]')
parser.add_argument('--os_key',
help=argparse.SUPPRESS)
parser.add_argument('--os-cache',
default=env('OS_CACHE', default=False),
action='store_true',
help='Use the auth token cache. '
'Defaults to env[OS_CACHE]')
parser.add_argument('--os_cache',
help=argparse.SUPPRESS)
parser.add_argument('--force-new-token',
default=False,
action="store_true",
dest='force_new_token',
help="If the keyring is available and in use, "
"token will always be stored and fetched "
"from the keyring until the token has "
"expired. Use this option to request a "
"new token and replace the existing one "
"in the keyring.")
parser.add_argument('--stale-duration',
metavar='<seconds>',
default=access.STALE_TOKEN_DURATION,
dest='stale_duration',
help="Stale duration (in seconds) used to "
"determine whether a token has expired "
"when retrieving it from keyring. This "
"is useful in mitigating process or "
"network delays. Default is %s seconds." % (
access.STALE_TOKEN_DURATION))
#FIXME(heckj):
# deprecated command line options for essex compatibility. To be
# removed in Grizzly release cycle.
parser.add_argument('--token',
metavar='<service-token>',
dest='os_token',
default=env('SERVICE_TOKEN'),
help=argparse.SUPPRESS)
parser.add_argument('--endpoint',
dest='os_endpoint',
metavar='<service-endpoint>',
default=env('SERVICE_ENDPOINT'),
help=argparse.SUPPRESS)
return parser
def get_subcommand_parser(self, version):
parser = self.get_base_parser()
self.subcommands = {}
subparsers = parser.add_subparsers(metavar='<subcommand>')
try:
actions_module = {
'2.0': shell_v2_0,
}[version]
except KeyError:
actions_module = shell_v2_0
self._find_actions(subparsers, actions_module)
self._find_actions(subparsers, shell_generic)
self._find_actions(subparsers, shell_bootstrap)
self._find_actions(subparsers, self)
self._add_bash_completion_subparser(subparsers)
return parser
def _add_bash_completion_subparser(self, subparsers):
subparser = subparsers.add_parser(
'bash_completion',
add_help=False,
formatter_class=OpenStackHelpFormatter
)
self.subcommands['bash_completion'] = subparser
subparser.set_defaults(func=self.do_bash_completion)
def _find_actions(self, subparsers, actions_module):
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
# I prefer to be hypen-separated instead of underscores.
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(
command,
help=help,
description=desc,
add_help=False,
formatter_class=OpenStackHelpFormatter)
subparser.add_argument('-h', '--help', action='help',
help=argparse.SUPPRESS)
self.subcommands[command] = subparser
group = subparser.add_argument_group(title='Arguments')
for (args, kwargs) in arguments:
group.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
def auth_check(self, args):
if args.os_token or args.os_endpoint:
if not args.os_token:
raise exc.CommandError(
'Expecting a token provided via either --os-token or '
'env[OS_SERVICE_TOKEN]')
if not args.os_endpoint:
raise exc.CommandError(
'Expecting an endpoint provided via either '
'--os-endpoint or env[OS_SERVICE_ENDPOINT]')
# user supplied a token and endpoint and at least one other cred
if args.os_username or args.os_password or args.os_auth_url:
msg = ('WARNING: Bypassing authentication using a token & '
'endpoint (authentication credentials are being '
'ignored).')
print(msg)
else:
if not args.os_auth_url:
raise exc.CommandError(
'Expecting an auth URL via either --os-auth-url or '
'env[OS_AUTH_URL]')
if args.os_username or args.os_password:
if not args.os_username:
raise exc.CommandError(
'Expecting a username provided via either '
'--os-username or env[OS_USERNAME]')
if not args.os_password:
# No password, If we've got a tty, try prompting for it
if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
try:
args.os_password = getpass.getpass('OS Password: ')
except EOFError:
pass
# user Ctl-D when prompted?
if not args.os_password:
raise exc.CommandError(
'Expecting a password provided via either '
'--os-password, env[OS_PASSWORD], or '
'prompted response')
else:
raise exc.CommandError('Expecting authentication method via'
'\n either a service token, '
'--os-token or env[OS_SERVICE_TOKEN], '
'\n credentials, '
'--os-username or env[OS_USERNAME]')
def main(self, argv):
# Parse args once to find version
parser = self.get_base_parser()
(options, args) = parser.parse_known_args(argv)
# build available subcommands based on version
api_version = options.os_identity_api_version
subcommand_parser = self.get_subcommand_parser(api_version)
self.parser = subcommand_parser
# Handle top-level --help/-h before attempting to parse
# a command off the command line
if not argv or options.help:
self.do_help(options)
return 0
# Parse args again and call whatever callback was selected
args = subcommand_parser.parse_args(argv)
# Short-circuit and deal with help command right away.
if args.func == self.do_help:
self.do_help(args)
return 0
elif args.func == self.do_bash_completion:
self.do_bash_completion(args)
return 0
# TODO(heckj): supporting backwards compatibility with environment
# variables. To be removed after DEVSTACK is updated, ideally in
# the Grizzly release cycle.
args.os_token = args.os_token or env('SERVICE_TOKEN')
args.os_endpoint = args.os_endpoint or env('SERVICE_ENDPOINT')
if utils.isunauthenticated(args.func):
self.cs = shell_generic.CLIENT_CLASS(endpoint=args.os_auth_url,
cacert=args.os_cacert,
key=args.os_key,
cert=args.os_cert,
insecure=args.insecure,
debug=args.debug,
timeout=args.timeout)
else:
self.auth_check(args)
token = None
if args.os_token and args.os_endpoint:
token = args.os_token
api_version = options.os_identity_api_version
self.cs = self.get_api_class(api_version)(
username=args.os_username,
tenant_name=args.os_tenant_name,
tenant_id=args.os_tenant_id,
token=token,
endpoint=args.os_endpoint,
password=args.os_password,
auth_url=args.os_auth_url,
region_name=args.os_region_name,
cacert=args.os_cacert,
key=args.os_key,
cert=args.os_cert,
insecure=args.insecure,
debug=args.debug,
use_keyring=args.os_cache,
force_new_token=args.force_new_token,
stale_duration=args.stale_duration,
timeout=args.timeout)
try:
args.func(self.cs, args)
except exc.Unauthorized:
raise exc.CommandError("Invalid OpenStack Identity credentials.")
except exc.AuthorizationFailure:
raise exc.CommandError("Unable to authorize user")
def get_api_class(self, version):
try:
return {
"2.0": shell_v2_0.CLIENT_CLASS,
}[version]
except KeyError:
return shell_v2_0.CLIENT_CLASS
def do_bash_completion(self, args):
commands = set()
options = set()
for sc_str, sc in self.subcommands.items():
commands.add(sc_str)
for option in sc._optionals._option_string_actions.keys():
options.add(option)
commands.remove('bash-completion')
commands.remove('bash_completion')
print(' '.join(commands | options))
@utils.arg('command', metavar='<subcommand>', nargs='?',
help='Display help for <subcommand>')
def do_help(self, args):
if getattr(args, 'command', None):
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exc.CommandError("'%s' is not a valid subcommand" %
args.command)
else:
self.parser.print_help()
# I'm picky about my shell help.
class OpenStackHelpFormatter(argparse.HelpFormatter):
def start_section(self, heading):
heading = '%s%s' % (heading[0].upper(), heading[1:])
super(OpenStackHelpFormatter, self).start_section(heading)
def main():
try:
OpenStackIdentityShell().main(sys.argv[1:])
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
| true | true |
1c3427fc0b2f89af9bf052f3f2803cfd4c032dfd | 7,351 | py | Python | bccs_dump.py | sasquatch-jr/bcldb_wholesale_cannabis_price_list | c39b6172a1ed34945361774a6cc0024e7a1393c1 | [
"MIT"
] | 8 | 2021-02-12T03:00:08.000Z | 2021-12-19T21:18:17.000Z | bccs_dump.py | sasquatch-jr/bcldb_wholesale_cannabis_price_list | c39b6172a1ed34945361774a6cc0024e7a1393c1 | [
"MIT"
] | null | null | null | bccs_dump.py | sasquatch-jr/bcldb_wholesale_cannabis_price_list | c39b6172a1ed34945361774a6cc0024e7a1393c1 | [
"MIT"
] | 6 | 2021-02-12T03:00:10.000Z | 2021-02-24T03:05:53.000Z | import csv
import time
import jinja2
import requests
from datetime import datetime
# Jinja2 template for the generated index.html price-list page; rendered in
# main() with `products` (newest first) and `now` (generation timestamp).
template = """<html>
<head><title>BCLDB Wholesale Cannabis Price List</title></head>
<body>
<h1>BCLDB Wholesale Cannabis Price List</h1>
<h2><a href="https://github.com/sasquatch-jr/bcldb_wholesale_cannabis_price_list/blob/main/README.md">FAQ</a>
<a href="https://github.com/sasquatch-jr/bcldb_wholesale_cannabis_price_list/blob/main/bccs_dump.py">Source Code</a>
<a href="https://raw.githubusercontent.com/sasquatch-jr/bcldb_wholesale_cannabis_price_list/main/dump.csv">CSV</a>
</h2>
<h4>The "last activity" on a SKU is the last time there was a sale or a restock of the item. I am actively working on new features as I find cool things that I can do with a growing archive of this data that I am collecting. I want this resource to help the legal cannabis industry and am happy to chat with people who have feature requests (unfortunately the public product API does not include stock levels) or wanted to share ideas. Feel free to email me at sasquatch__jr@outlook.com.</h4>
<h3>Generated {{now}} PST - This page will be re-generated every 10 minutes.</h3>
<table>
{% for item in products %}
<tr>
<td><img src={{item['thumb']}} width=150/></td>
<td><a href={{item['url']}}>{{item['name']}}</a></td>
<td>{{item['brand']}} - {{item['lp']}}</td>
<td><table>{% for size in item['sizes'] %}
<tr>
<td>{{size['name']}}
${{size['price']}}
(${{'%0.2f' % size['price_per_item']}} each) -
{{size['in_stock']}}
{% if size['retail_price'] %}
<br />
<a href=https://www.bccannabisstores.com/products/{{item['id']}}>Retail price ${{size['retail_price']}} ({{size['retail_markup']}}% markup)</a>
{% endif %}
{% if size['order_limit'] %}
<br />
Retailers may only purchase {{size['order_limit']}} of this item!
{% endif %}
<br />
LP cut: ${{size['lp_cut']}}, BCLDB Cut: ${{size['bcldb_cut']}}
<br />
Last activity on SKU: {{size['updated']}} PST
</td>
</tr>
{% endfor %}
</table></td>
</tr>
{% endfor %}
</table>
</body>
</html>"""
def fetch_products_from_base_url(base_url):
    """Fetch and normalize all products from a Shopify products.json endpoint.

    Pages through ``<base_url>/products.json`` 250 items at a time until an
    empty page is returned, then flattens each product into a plain dict with
    name/brand/thumbnail/url/sizes fields used by the report generator.

    NOTE: a non-200 response is retried forever after a 5 s pause; there is
    no timeout or retry cap.
    """
    page_number = 1
    products = []
    prods = []
    more_pages = True
    while more_pages:
        req = requests.get(base_url + "/products.json?limit=250&page=" + str(page_number))
        if req.status_code == 200:
            new_products = req.json()['products']
            if len(new_products) == 0:
                more_pages = False
            else:
                # Reuse the already-parsed page instead of re-parsing the body.
                products += new_products
                page_number += 1
                time.sleep(0.5)  # be polite between page requests
        else:
            time.sleep(5)  # transient error: back off, retry the same page
    # Parse combined products.json into more usable data structure
    for p in products:
        if p['title'] == "Container Deposit Fee":
            continue
        try:
            thumb = p['images'][0]['src']
        except IndexError:
            thumb = None
        # Reset per product: previously `brand` leaked from the prior
        # iteration when a product had no 'brand::' tag (and raised
        # NameError if the very first product lacked one).
        brand = None
        sku_order_limits = {}
        for tag in p['tags']:
            if tag.startswith('brand::'):
                brand = tag.split('::')[-1]
            elif tag.startswith('b2b_order_limit'):
                # Tag format: b2b_order_limit::SKU1=N|SKU2=M|...
                for sku_limit in tag.split('::')[-1].split('|'):
                    sku, limit = sku_limit.split('=')
                    sku_order_limits[sku] = limit
        sizes = []
        for v in p['variants']:
            in_stock = "In Stock" if v['available'] else "Out of Stock"
            try:
                # Variant titles end in a pack count, e.g. "(Case of 12)";
                # derive a per-item price from it.
                items_per_pack = int(v['title'].split(' ')[-1].split(')')[0])
                price_per_item = round(float(v['price']) / items_per_pack, 2)
            except (ValueError, TypeError, IndexError,
                    ZeroDivisionError, AttributeError):
                # No parseable pack count (or malformed price): no unit price.
                # (Narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit.)
                items_per_pack = None
                price_per_item = None
            # Wholesale price splits 85/15 between producer and BCLDB.
            lp_cut = '%0.2f' % round(float(v['price']) * 0.85, 2)
            bcldb_cut = '%0.2f' % round(float(v['price']) * 0.15, 2)
            updated = datetime.fromisoformat(v['updated_at']).strftime("%b %d %Y %H:%M:%S")
            sizes.append({'name': v['title'].replace('\n', ''),
                          'price': v['price'],
                          'in_stock': in_stock,
                          'price_per_item': price_per_item,
                          'order_limit': sku_order_limits.get(v['sku']),
                          'lp_cut': lp_cut,
                          'bcldb_cut': bcldb_cut,
                          'updated': updated,
                          'updated_sortable': v['updated_at']})
        prods.append({'name': p['title'],
                      'lp': p['vendor'],
                      'brand': brand,
                      'thumb': thumb,
                      'url': base_url + '/products/' + p['handle'],
                      'created': p['created_at'],
                      'sizes': sizes,
                      'id': p['handle']})
    return prods
def main():
    """Generate index.html and dump.csv comparing wholesale vs retail prices.

    Fetches both the wholesale and retail product catalogs, matches variants
    by product handle and size name, annotates each wholesale size with the
    matching retail price/markup, then renders the HTML report and CSV dump.
    """
    products = fetch_products_from_base_url('https://www.bccannabiswholesale.com')
    retail_products = fetch_products_from_base_url('https://www.bccannabisstores.com')
    # Attempt to find the same products in the retail list to compare
    for i, prod in enumerate(products):
        retail = [x for x in retail_products if x['id'] == prod['id']]
        for s_idx, s in enumerate(prod['sizes']):
            # Wholesale size names carry a pack suffix, e.g. "3.5 g (Case of 12)".
            retail_name = s['name'].split(' (')[0]
            retail_price = None
            retail_markup = None
            for r in retail:
                retail_var = [x for x in r['sizes'] if x['name'] == retail_name]
                # Sometimes the retail version has or does not have a space before the g
                if len(retail_var) == 0:
                    if retail_name.find(' g') != -1:
                        retail_name = retail_name.replace(' g', 'g')
                    elif retail_name.find('g'):
                        # NOTE(review): find() returns -1 (truthy) when 'g'
                        # is absent, so this branch also runs for names with
                        # no 'g' at all -- the replace is then a no-op, but
                        # confirm the intent before changing the condition.
                        retail_name = retail_name.replace('g', ' g')
                    retail_var = [x for x in r['sizes'] if x['name'] == retail_name]
                if len(retail_var) != 0:
                    retail_price = float(retail_var[0]['price'])
                    # NOTE(review): price_per_item can be None for variants
                    # without a parseable pack count; that would raise
                    # TypeError here -- confirm such variants never match.
                    retail_markup = round(((float(retail_price) / s['price_per_item']) - 1) * 100)
            prod['sizes'][s_idx]['retail_price'] = retail_price
            prod['sizes'][s_idx]['retail_markup'] = retail_markup
        products[i] = prod
    # Newest products first.
    prods = sorted(products, key=lambda k: k['created'], reverse=True)
    t = jinja2.Template(template)
    # Close the handle deterministically (was a leaked open().write()).
    with open('index.html', 'w') as htmlfile:
        htmlfile.write(t.render(products=prods, now=datetime.now()))
    with open('dump.csv', 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',')
        csvwriter.writerow(["name", "lp", "brand", "size", "price", "price_per_item",
                            "in_stock", "retail_price", "retail_markup", "order_limit",
                            "lp_cut", "bcldb_cut", "last_updated"])
        for p in prods:
            for s in p['sizes']:
                csvwriter.writerow([p["name"], p["lp"], p["brand"], s["name"], s["price"],
                                    s["price_per_item"], s["in_stock"], s["retail_price"],
                                    s["retail_markup"], s["order_limit"], s["lp_cut"],
                                    s["bcldb_cut"], s["updated_sortable"]])
if __name__ == '__main__':
    main()
| 41.067039 | 492 | 0.543736 | import csv
import time
import jinja2
import requests
from datetime import datetime
template = """<html>
<head><title>BCLDB Wholesale Cannabis Price List</title></head>
<body>
<h1>BCLDB Wholesale Cannabis Price List</h1>
<h2><a href="https://github.com/sasquatch-jr/bcldb_wholesale_cannabis_price_list/blob/main/README.md">FAQ</a>
<a href="https://github.com/sasquatch-jr/bcldb_wholesale_cannabis_price_list/blob/main/bccs_dump.py">Source Code</a>
<a href="https://raw.githubusercontent.com/sasquatch-jr/bcldb_wholesale_cannabis_price_list/main/dump.csv">CSV</a>
</h2>
<h4>The "last activity" on a SKU is the last time there was a sale or a restock of the item. I am actively working on new features as I find cool things that I can do with a growing archive of this data that I am collecting. I want this resource to help the legal cannabis industry and am happy to chat with people who have feature requests (unfortunately the public product API does not include stock levels) or wanted to share ideas. Feel free to email me at sasquatch__jr@outlook.com.</h4>
<h3>Generated {{now}} PST - This page will be re-generated every 10 minutes.</h3>
<table>
{% for item in products %}
<tr>
<td><img src={{item['thumb']}} width=150/></td>
<td><a href={{item['url']}}>{{item['name']}}</a></td>
<td>{{item['brand']}} - {{item['lp']}}</td>
<td><table>{% for size in item['sizes'] %}
<tr>
<td>{{size['name']}}
${{size['price']}}
(${{'%0.2f' % size['price_per_item']}} each) -
{{size['in_stock']}}
{% if size['retail_price'] %}
<br />
<a href=https://www.bccannabisstores.com/products/{{item['id']}}>Retail price ${{size['retail_price']}} ({{size['retail_markup']}}% markup)</a>
{% endif %}
{% if size['order_limit'] %}
<br />
Retailers may only purchase {{size['order_limit']}} of this item!
{% endif %}
<br />
LP cut: ${{size['lp_cut']}}, BCLDB Cut: ${{size['bcldb_cut']}}
<br />
Last activity on SKU: {{size['updated']}} PST
</td>
</tr>
{% endfor %}
</table></td>
</tr>
{% endfor %}
</table>
</body>
</html>"""
def fetch_products_from_base_url(base_url):
page_number = 1
products = []
prods = []
more_pages = True
while more_pages:
req = requests.get(base_url + "/products.json?limit=250&page=" + str(page_number))
if req.status_code == 200:
new_products = req.json()['products']
if len(new_products) == 0:
more_pages = False
else:
products += req.json()['products']
page_number += 1
time.sleep(0.5)
else:
time.sleep(5)
for p in products:
if p['title'] == "Container Deposit Fee":
continue
try:
thumb = p['images'][0]['src']
except IndexError:
thumb = None
sku_order_limits = {}
for tag in p['tags']:
if tag.startswith('brand::'):
brand = tag.split('::')[-1]
elif tag.startswith('b2b_order_limit'):
for sku_limit in tag.split('::')[-1].split('|'):
sku, limit = sku_limit.split('=')
sku_order_limits[sku] = limit
sizes = []
for v in p['variants']:
if v['available']:
in_stock = "In Stock"
else:
in_stock = "Out of Stock"
try:
items_per_pack = int(v['title'].split(' ')[-1].split(')')[0])
price_per_item = round(float(v['price']) / items_per_pack, 2)
except:
items_per_pack = None
price_per_item = None
lp_cut = '%0.2f' % round(float(v['price']) * 0.85, 2)
bcldb_cut = '%0.2f' % round(float(v['price']) * 0.15, 2)
updated = datetime.fromisoformat(v['updated_at']).strftime("%b %d %Y %H:%M:%S")
sizes.append({'name': v['title'].replace('\n', ''),
'price': v['price'],
'in_stock':in_stock,
'price_per_item': price_per_item,
'order_limit': sku_order_limits.get(v['sku']),
'lp_cut': lp_cut,
'bcldb_cut': bcldb_cut,
'updated': updated,
'updated_sortable': v['updated_at']})
prods.append({'name': p['title'],
'lp': p['vendor'],
'brand': brand,
'thumb': thumb,
'url': base_url + '/products/' + p['handle'],
'created': p['created_at'],
'sizes': sizes,
'id': p['handle']})
return prods
def main():
products = fetch_products_from_base_url('https://www.bccannabiswholesale.com')
retail_products = fetch_products_from_base_url('https://www.bccannabisstores.com')
for i, prod in enumerate(products):
retail = [x for x in filter(lambda x: x['id'] == prod['id'], retail_products)]
for s_idx, s in enumerate(prod['sizes']):
retail_name = s['name'].split(' (')[0]
retail_price = None
retail_markup = None
for r in retail:
retail_var = [x for x in filter(lambda x: x['name'] == retail_name, r['sizes'])]
if len(retail_var) == 0:
if retail_name.find(' g') != -1:
retail_name = retail_name.replace(' g', 'g')
elif retail_name.find('g'):
retail_name = retail_name.replace('g', ' g')
retail_var = [x for x in filter(lambda x: x['name'] == retail_name, r['sizes'])]
if len(retail_var) != 0:
retail_price = float(retail_var[0]['price'])
retail_markup = round(((float(retail_price) / s['price_per_item']) - 1) * 100)
prod['sizes'][s_idx]['retail_price'] = retail_price
prod['sizes'][s_idx]['retail_markup'] = retail_markup
products[i] = prod
prods = sorted(products, key=lambda k: k['created'], reverse=True)
t = jinja2.Template(template)
open('index' + '.html', 'w').write(t.render(products=prods, now=datetime.now()))
with open('dump.csv', 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(["name", "lp", "brand", "size", "price", "price_per_item", "in_stock", "retail_price", "retail_markup", "order_limit", "lp_cut", "bcldb_cut", "last_updated"])
for p in prods:
for s in p['sizes']:
csvwriter.writerow([p["name"], p["lp"], p["brand"], s["name"], s["price"], s["price_per_item"], s["in_stock"], s["retail_price"], s["retail_markup"], s["order_limit"], s["lp_cut"], s["bcldb_cut"], s["updated_sortable"]])
if __name__ == '__main__':
main()
| true | true |
1c3429d36012e6fbbc3345c7e7d976ddbaa9eef1 | 1,966 | py | Python | waveconverter.py | SMMoseley/Background_Colony_Noise | eb88ccf725939d501f02854b4c82659e8f6d5a3a | [
"MIT"
] | null | null | null | waveconverter.py | SMMoseley/Background_Colony_Noise | eb88ccf725939d501f02854b4c82659e8f6d5a3a | [
"MIT"
] | null | null | null | waveconverter.py | SMMoseley/Background_Colony_Noise | eb88ccf725939d501f02854b4c82659e8f6d5a3a | [
"MIT"
] | null | null | null | ##Packages##
import h5py as h5
import numpy as np
import pandas as pd
import os
import ewave
##Functions##
def wav_intervals(csv, sample_rate, hour_to_sample_rate):
    """Read the recording specification from the csv and convert to samples.

    Each row holds (arf file path in the index, start time in hours,
    duration in seconds).  The loop simply overwrites its locals on every
    iteration, so only the *last* row of the table is effective.

    Returns a tuple of (arf file path, start offset in samples,
    duration in samples).
    """
    for spec in csv.itertuples():
        path = spec[0]                          # index column: arf file path
        begin = spec[1] * hour_to_sample_rate   # hours -> sample offset
        length = spec[2] * sample_rate          # seconds -> sample count
    return path, begin, length
def wavdata(sample, start_time, duration):
    """Extract ``duration`` samples from ``sample`` starting at ``start_time``.

    Parameters
    ----------
    sample : indexable sequence (e.g. an h5py dataset)
        The full recording, indexable by sample number.
    start_time : int
        Index (in samples) of the first sample to extract.
    duration : int
        Number of samples to extract.

    Returns
    -------
    numpy.ndarray
        1-D float64 array of length ``duration`` holding the extracted data.
    """
    # One vectorized slice replaces the per-sample Python copy loop; for an
    # h5py dataset this also turns `duration` tiny reads into a single bulk
    # read.  Filling a float64 buffer keeps the original output dtype.
    data = np.zeros(duration)
    data[:] = sample[start_time:start_time + duration]
    return data
##Script##
# Convert one interval of an arf (HDF5) recording into a .wav file.
sample_rate = 48000  # samples per second of the source recording
hour_to_sample_rate = 60*60*sample_rate  # samples per hour
csv = pd.read_csv('Hour.csv',index_col = 0) #first column (arf file path) becomes the index
arf_file, start_time, duration = wav_intervals(csv, sample_rate, hour_to_sample_rate)
with h5.File(arf_file, 'r') as R1: # open the arf file read-only
    ls = list(R1.keys()) # top-level group names
    name = ls[0]
    data = R1.get(ls[0]) # first group, which holds the recording
    lsdata = list(data.keys()) # dataset names inside that group
    sample = data.get(lsdata[0]) # the raw sample dataset
    # must read inside the `with` block while the file is still open
    data = wavdata(sample, start_time, duration)
start_time_hour = int(start_time/hour_to_sample_rate)  # back to hours for the filename
duration_seconds = int(duration/sample_rate)           # back to seconds for the filename
# Write <group>_<startHour>_<durationSeconds>.wav as 32-bit float at the source rate.
with ewave.open("{}_{}_{}.wav".format(name, start_time_hour, duration_seconds),"w+",sampling_rate=sample_rate,dtype='f') as fp: #sets the name of the file, sets the mode to open a file, the sampling rate, and the float datatype
    fp.write(data)
| 50.410256 | 227 | 0.725331 | s h5
import numpy as np
import pandas as pd
import os
import ewave
als(csv, sample_rate, hour_to_sample_rate):
for row in csv.itertuples():
arf_file = row[0]
start_time = row[1]*hour_to_sample_rate
duration = row[2]*sample_rate
return arf_file, start_time, duration
def wavdata (sample, start_time, duration):
subsample = np.zeros(duration)
for i in range(start_time,start_time + duration):
subsample[i - start_time] = sample[i]
data = subsample
return data
= 48000
hour_to_sample_rate = 60*60*sample_rate
csv = pd.read_csv('Hour.csv',index_col = 0)
arf_file, start_time, duration = wav_intervals(csv, sample_rate, hour_to_sample_rate)
with h5.File(arf_file, 'r') as R1:
ls = list(R1.keys())
name = ls[0]
data = R1.get(ls[0])
lsdata = list(data.keys())
sample = data.get(lsdata[0])
data = wavdata(sample, start_time, duration)
start_time_hour = int(start_time/hour_to_sample_rate)
duration_seconds = int(duration/sample_rate)
with ewave.open("{}_{}_{}.wav".format(name, start_time_hour, duration_seconds),"w+",sampling_rate=sample_rate,dtype='f') as fp:
fp.write(data)
| true | true |
1c342a98c9f0dd3f9df730fa05eb9f4315d5f656 | 503 | py | Python | leetcode/algorithm/roman_int.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 1 | 2018-12-19T22:07:56.000Z | 2018-12-19T22:07:56.000Z | leetcode/algorithm/roman_int.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 12 | 2020-03-14T05:32:26.000Z | 2022-03-12T00:08:49.000Z | leetcode/algorithm/roman_int.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 1 | 2018-12-19T22:08:00.000Z | 2018-12-19T22:08:00.000Z | """
@author: magician
@date: 2019/12/18
@file: roman_int.py
"""
def roman_to_int(s: str) -> int:
    """
    Convert a Roman numeral string to its integer value.

    :param s: Roman numeral composed of the symbols I, V, X, L, C, D, M.
    :return: the integer value of the numeral (0 for an empty string).
    """
    # Bug fix: the original built a lookup table and then `return True`,
    # never performing any conversion.
    roman_dict = {
        'I': 1,
        'V': 5,
        'X': 10,
        'L': 50,
        'C': 100,
        'D': 500,
        'M': 1000,
    }
    total = 0
    for i, ch in enumerate(s):
        value = roman_dict[ch]
        # Subtractive notation: a smaller symbol before a larger one
        # (e.g. IV, XL, CM) is subtracted instead of added.
        if i + 1 < len(s) and roman_dict[s[i + 1]] > value:
            total -= value
        else:
            total += value
    return total
if __name__ == '__main__':
    # Placeholder left over from another exercise; no checks run here.
    # assert is_palindrome(121) is True
    pass
| 14.371429 | 39 | 0.399602 |
def roman_to_int(s: str) -> int:
roman_dict = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000,
'IV': 4,
'XL': 40,
'XC': 90,
'CD': 400,
'CM': 900,
}
return True
if __name__ == '__main__':
pass
| true | true |
1c342aefb37a2171edc1cc7cf09da1834a0464a8 | 2,461 | py | Python | examples/bank_reserves/bank_reserves/server.py | DoofCoder/mesa | b290439e4f68a1a5a4906246546b69e7d783dcfb | [
"Apache-2.0"
] | 2 | 2017-05-30T02:46:20.000Z | 2017-07-18T19:42:33.000Z | examples/bank_reserves/bank_reserves/server.py | DoofCoder/mesa | b290439e4f68a1a5a4906246546b69e7d783dcfb | [
"Apache-2.0"
] | 40 | 2019-08-07T13:57:52.000Z | 2022-03-18T05:21:42.000Z | examples/bank_reserves/bank_reserves/server.py | DoofCoder/mesa | b290439e4f68a1a5a4906246546b69e7d783dcfb | [
"Apache-2.0"
] | 2 | 2017-07-17T15:25:41.000Z | 2022-03-31T07:00:41.000Z | from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import CanvasGrid, ChartModule
from mesa.visualization.UserParam import UserSettableParameter
from bank_reserves.agents import Person
from bank_reserves.model import BankReserves
"""
Citation:
The following code was adapted from server.py at
https://github.com/projectmesa/mesa/blob/master/examples/wolf_sheep/wolf_sheep/server.py
Accessed on: November 2, 2017
Author of original code: Taylor Mutch
"""
# Green
RICH_COLOR = "#46FF33"
# Red
POOR_COLOR = "#FF3C33"
# Blue
MID_COLOR = "#3349FF"
def person_portrayal(agent):
    """Build the Mesa portrayal dict for one agent on the canvas grid.

    Returns None for a missing agent; for a Person, returns a filled
    circle whose color reflects the agent's wealth.
    """
    if agent is None:
        return

    portrayal = {}
    if isinstance(agent, Person):
        portrayal["Shape"] = "circle"
        portrayal["r"] = 0.5
        portrayal["Layer"] = 0
        portrayal["Filled"] = "true"
        # Priority order matches the original sequential overwrites:
        # heavy debt wins, then low savings/loans, then rich threshold.
        if agent.loans > 10:
            color = POOR_COLOR
        elif agent.savings < 10 and agent.loans < 10:
            color = MID_COLOR
        elif agent.savings > agent.model.rich_threshold:
            color = RICH_COLOR
        else:
            color = MID_COLOR
        portrayal["Color"] = color
    return portrayal
# dictionary of user settable parameters - these map to the model __init__ parameters
# Each entry renders as a slider in the Mesa browser UI: (widget, label,
# default, min, max).
model_params = {
    "init_people": UserSettableParameter(
        "slider", "People", 25, 1, 200, description="Initial Number of People"
    ),
    "rich_threshold": UserSettableParameter(
        "slider",
        "Rich Threshold",
        10,
        1,
        20,
        description="Upper End of Random Initial Wallet Amount",
    ),
    "reserve_percent": UserSettableParameter(
        "slider",
        "Reserves",
        50,
        1,
        100,
        description="Percent of deposits the bank has to hold in reserve",
    ),
}
# set the portrayal function and size of the canvas for visualization:
# a 20x20 cell grid drawn at 500x500 pixels
canvas_element = CanvasGrid(person_portrayal, 20, 20, 500, 500)
# map each model-level data series to a line in the ChartModule, reusing
# the same colors as the agent portrayal
chart_element = ChartModule(
    [
        {"Label": "Rich", "Color": RICH_COLOR},
        {"Label": "Poor", "Color": POOR_COLOR},
        {"Label": "Middle Class", "Color": MID_COLOR},
    ]
)
# create instance of Mesa ModularServer hosting the model with both
# visualization elements and the slider parameters defined above
server = ModularServer(
    BankReserves,
    [canvas_element, chart_element],
    "Bank Reserves Model",
    model_params=model_params,
)
| 26.462366 | 88 | 0.663145 | from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import CanvasGrid, ChartModule
from mesa.visualization.UserParam import UserSettableParameter
from bank_reserves.agents import Person
from bank_reserves.model import BankReserves
RICH_COLOR = "#46FF33"
POOR_COLOR = "#FF3C33"
MID_COLOR = "#3349FF"
def person_portrayal(agent):
if agent is None:
return
portrayal = {}
if isinstance(agent, Person):
portrayal["Shape"] = "circle"
portrayal["r"] = 0.5
portrayal["Layer"] = 0
portrayal["Filled"] = "true"
color = MID_COLOR
if agent.savings > agent.model.rich_threshold:
color = RICH_COLOR
if agent.savings < 10 and agent.loans < 10:
color = MID_COLOR
if agent.loans > 10:
color = POOR_COLOR
portrayal["Color"] = color
return portrayal
model_params = {
"init_people": UserSettableParameter(
"slider", "People", 25, 1, 200, description="Initial Number of People"
),
"rich_threshold": UserSettableParameter(
"slider",
"Rich Threshold",
10,
1,
20,
description="Upper End of Random Initial Wallet Amount",
),
"reserve_percent": UserSettableParameter(
"slider",
"Reserves",
50,
1,
100,
description="Percent of deposits the bank has to hold in reserve",
),
}
canvas_element = CanvasGrid(person_portrayal, 20, 20, 500, 500)
chart_element = ChartModule(
[
{"Label": "Rich", "Color": RICH_COLOR},
{"Label": "Poor", "Color": POOR_COLOR},
{"Label": "Middle Class", "Color": MID_COLOR},
]
)
server = ModularServer(
BankReserves,
[canvas_element, chart_element],
"Bank Reserves Model",
model_params=model_params,
)
| true | true |
1c342be00019af11edb90f23f188a1168dbdeff5 | 91 | py | Python | thirdparty/his_evaluators/his_evaluators/__init__.py | Puneet-G/Impersonator-NNProject | 980cfc260feebbc873b4150326791340f6526c42 | [
"MIT"
] | 1 | 2020-05-11T19:10:27.000Z | 2020-05-11T19:10:27.000Z | thirdparty/his_evaluators/his_evaluators/__init__.py | Puneet-G/Impersonator-NNProject | 980cfc260feebbc873b4150326791340f6526c42 | [
"MIT"
] | 4 | 2020-05-11T19:12:18.000Z | 2021-10-12T22:52:12.000Z | thirdparty/his_evaluators/his_evaluators/__init__.py | Puneet-G/Impersonator-NNProject | 980cfc260feebbc873b4150326791340f6526c42 | [
"MIT"
] | 1 | 2020-05-27T01:59:41.000Z | 2020-05-27T01:59:41.000Z | from .evaluators.motion_imitation import MotionImitationModel, IPERMotionImitationEvaluator | 91 | 91 | 0.923077 | from .evaluators.motion_imitation import MotionImitationModel, IPERMotionImitationEvaluator | true | true |
1c342c58860d1a2185286318dccd14166af7af0f | 94,847 | py | Python | plugins/modules/oci_compute_management_instance_configuration_actions.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_compute_management_instance_configuration_actions.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_compute_management_instance_configuration_actions.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: schema version, maturity status, and who
# supports the module (community rather than core/partner).
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_compute_management_instance_configuration_actions
short_description: Perform actions on an InstanceConfiguration resource in Oracle Cloud Infrastructure
description:
- Perform actions on an InstanceConfiguration resource in Oracle Cloud Infrastructure
- "For I(action=change_compartment), moves an instance configuration into a different compartment within the same tenancy.
For information about moving resources between compartments, see
L(Moving Resources to a Different Compartment,https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
When you move an instance configuration to a different compartment, associated resources such as
instance pools are not moved.
**Important:** Most of the properties for an existing instance configuration, including the compartment,
cannot be modified after you create the instance configuration. Although you can move an instance configuration
to a different compartment, you will not be able to use the instance configuration to manage instance pools
in the new compartment. If you want to update an instance configuration to point to a different compartment,
you should instead create a new instance configuration in the target compartment using
L(CreateInstanceConfiguration,https://docs.cloud.oracle.com/iaas/api/#/en/iaas/20160918/InstanceConfiguration/CreateInstanceConfiguration)."
- For I(action=launch), launches an instance from an instance configuration.
If the instance configuration does not include all of the parameters that are
required to launch an instance, such as the availability domain and subnet ID, you must
provide these parameters when you launch an instance from the instance configuration.
For more information, see the L(InstanceConfiguration,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/InstanceConfiguration/)
resource.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
instance_configuration_id:
description:
- The OCID of the instance configuration.
type: str
aliases: ["id"]
required: true
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to
move the instance configuration to.
- Required for I(action=change_compartment).
type: str
instance_type:
description:
- The type of instance details. Supported instanceType is compute
- Required for I(action=launch).
type: str
choices:
- "compute"
block_volumes:
description:
- ""
- Applicable only for I(action=launch).
type: list
elements: dict
suboptions:
attach_details:
description:
- ""
type: dict
suboptions:
display_name:
description:
- A user-friendly name. Does not have to be unique, and it cannot be changed. Avoid entering confidential information.
type: str
aliases: ["name"]
is_read_only:
description:
- Whether the attachment should be created in read-only mode.
type: bool
device:
description:
- The device name.
type: str
is_shareable:
description:
- Whether the attachment should be created in shareable mode. If an attachment
is created in shareable mode, then other instances can attach the same volume, provided
that they also create their attachments in shareable mode. Only certain volume types can
be attached in shareable mode. Defaults to false if not specified.
type: bool
type:
description:
- "The type of volume. The only supported values are \\"iscsi\\" and \\"paravirtualized\\"."
type: str
choices:
- "iscsi"
- "paravirtualized"
required: true
use_chap:
description:
- Whether to use CHAP authentication for the volume attachment. Defaults to false.
- Applicable when type is 'iscsi'
type: bool
is_pv_encryption_in_transit_enabled:
description:
- Whether to enable in-transit encryption for the data volume's paravirtualized attachment. The default value is false.
- Applicable when type is 'paravirtualized'
type: bool
create_details:
description:
- ""
type: dict
suboptions:
availability_domain:
description:
- The availability domain of the volume.
- "Example: `Uocm:PHX-AD-1`"
type: str
backup_policy_id:
description:
- If provided, specifies the ID of the volume backup policy to assign to the newly
created volume. If omitted, no policy will be assigned.
type: str
compartment_id:
description:
- The OCID of the compartment that contains the volume.
type: str
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
type: str
aliases: ["name"]
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
kms_key_id:
description:
- The OCID of the Key Management key to assign as the master encryption key
for the volume.
type: str
vpus_per_gb:
description:
- The number of volume performance units (VPUs) that will be applied to this volume per GB,
representing the Block Volume service's elastic performance options.
See L(Block Volume Elastic
Performance,https://docs.cloud.oracle.com/iaas/Content/Block/Concepts/blockvolumeelasticperformance.htm) for more information.
- "Allowed values:"
- " * `0`: Represents Lower Cost option."
- " * `10`: Represents Balanced option."
- " * `20`: Represents Higher Performance option."
type: int
size_in_gbs:
description:
- The size of the volume in GBs.
type: int
source_details:
description:
- ""
type: dict
suboptions:
type:
description:
- ""
type: str
choices:
- "volumeBackup"
- "volume"
required: true
id:
description:
- The OCID of the volume backup.
type: str
volume_id:
description:
- The OCID of the volume.
type: str
launch_details:
description:
- ""
- Applicable only for I(action=launch).
type: dict
suboptions:
availability_domain:
description:
- The availability domain of the instance.
- "Example: `Uocm:PHX-AD-1`"
type: str
capacity_reservation_id:
description:
- The OCID of the compute capacity reservation this instance is launched under.
type: str
compartment_id:
description:
- The OCID of the compartment.
type: str
create_vnic_details:
description:
- ""
type: dict
suboptions:
assign_public_ip:
description:
- Whether the VNIC should be assigned a public IP address. See the `assignPublicIp` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
assign_private_dns_record:
description:
- Whether the VNIC should be assigned a private DNS record. See the `assignPrivateDnsRecord` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name for the VNIC. Does not have to be unique.
Avoid entering confidential information.
type: str
aliases: ["name"]
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
hostname_label:
description:
- The hostname for the VNIC's primary private IP.
See the `hostnameLabel` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
nsg_ids:
description:
- A list of the OCIDs of the network security groups (NSGs) to add the VNIC to. For more
information about NSGs, see
L(NetworkSecurityGroup,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/NetworkSecurityGroup/).
type: list
elements: str
private_ip:
description:
- A private IP address of your choice to assign to the VNIC.
See the `privateIp` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
skip_source_dest_check:
description:
- Whether the source/destination check is disabled on the VNIC.
See the `skipSourceDestCheck` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: bool
subnet_id:
description:
- The OCID of the subnet to create the VNIC in.
See the `subnetId` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
- "Example: `My bare metal instance`"
type: str
aliases: ["name"]
extended_metadata:
description:
- Additional metadata key/value pairs that you provide. They serve the same purpose and
functionality as fields in the `metadata` object.
- They are distinguished from `metadata` fields in that these can be nested JSON objects
(whereas `metadata` fields are string/string maps only).
- The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of
32,000 bytes.
type: dict
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
ipxe_script:
description:
- This is an advanced option.
- When a bare metal or virtual machine
instance boots, the iPXE firmware that runs on the instance is
configured to run an iPXE script to continue the boot process.
- If you want more control over the boot process, you can provide
your own custom iPXE script that will run when the instance boots;
however, you should be aware that the same iPXE script will run
every time an instance boots; not only after the initial
LaunchInstance call.
- "The default iPXE script connects to the instance's local boot
volume over iSCSI and performs a network boot. If you use a custom iPXE
script and want to network-boot from the instance's local boot volume
over iSCSI the same way as the default iPXE script, you should use the
following iSCSI IP address: 169.254.0.2, and boot volume IQN:
iqn.2015-02.oracle.boot."
- For more information about the Bring Your Own Image feature of
Oracle Cloud Infrastructure, see
L(Bring Your Own Image,https://docs.cloud.oracle.com/iaas/Content/Compute/References/bringyourownimage.htm).
- For more information about iPXE, see http://ipxe.org.
type: str
metadata:
description:
- Custom metadata key/value pairs that you provide, such as the SSH public key
required to connect to the instance.
- "A metadata service runs on every launched instance. The service is an HTTP
endpoint listening on 169.254.169.254. You can use the service to:"
- "* Provide information to L(Cloud-Init,https://cloudinit.readthedocs.org/en/latest/)
to be used for various system initialization tasks."
- "* Get information about the instance, including the custom metadata that you
provide when you launch the instance."
- "**Providing Cloud-Init Metadata**"
- "You can use the following metadata key names to provide information to
Cloud-Init:"
- "**\\"ssh_authorized_keys\\"** - Provide one or more public SSH keys to be
included in the `~/.ssh/authorized_keys` file for the default user on the
instance. Use a newline character to separate multiple keys. The SSH
keys must be in the format necessary for the `authorized_keys` file, as shown
in the example below."
- "**\\"user_data\\"** - Provide your own base64-encoded data to be used by
Cloud-Init to run custom scripts or provide custom Cloud-Init configuration. For
information about how to take advantage of user data, see the
L(Cloud-Init Documentation,http://cloudinit.readthedocs.org/en/latest/topics/format.html)."
- "**Metadata Example**"
- " \\"metadata\\" : {
\\"quake_bot_level\\" : \\"Severe\\",
\\"ssh_authorized_keys\\" : \\"ssh-rsa <your_public_SSH_key>== rsa-key-20160227\\",
\\"user_data\\" : \\"<your_public_SSH_key>==\\"
}
**Getting Metadata on the Instance**"
- "To get information about your instance, connect to the instance using SSH and issue any of the
following GET requests:"
- " curl -H \\"Authorization: Bearer Oracle\\" http://169.254.169.254/opc/v2/instance/
curl -H \\"Authorization: Bearer Oracle\\" http://169.254.169.254/opc/v2/instance/metadata/
curl -H \\"Authorization: Bearer Oracle\\" http://169.254.169.254/opc/v2/instance/metadata/<any-key-name>"
- You'll get back a response that includes all the instance information; only the metadata information; or
the metadata information for the specified key name, respectively.
- The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of 32,000 bytes.
type: dict
shape:
description:
- The shape of an instance. The shape determines the number of CPUs, amount of memory,
and other resources allocated to the instance.
- You can enumerate all available shapes by calling L(ListShapes,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/Shape/ListShapes).
type: str
shape_config:
description:
- ""
type: dict
suboptions:
ocpus:
description:
- The total number of OCPUs available to the instance.
type: float
memory_in_gbs:
description:
- The total amount of memory available to the instance, in gigabytes.
type: float
baseline_ocpu_utilization:
description:
- The baseline OCPU utilization for a subcore burstable VM instance. Leave this attribute blank for a
non-burstable instance, or explicitly specify non-burstable with `BASELINE_1_1`.
- "The following values are supported:
- `BASELINE_1_8` - baseline usage is 1/8 of an OCPU.
- `BASELINE_1_2` - baseline usage is 1/2 of an OCPU.
- `BASELINE_1_1` - baseline usage is an entire OCPU. This represents a non-burstable instance."
type: str
choices:
- "BASELINE_1_8"
- "BASELINE_1_2"
- "BASELINE_1_1"
platform_config:
description:
- ""
type: dict
suboptions:
type:
description:
- The type of platform being configured.
type: str
choices:
- "AMD_MILAN_BM"
- "INTEL_VM"
- "AMD_ROME_BM"
- "INTEL_SKYLAKE_BM"
- "AMD_VM"
required: true
is_secure_boot_enabled:
description:
- Whether Secure Boot is enabled on the instance.
type: bool
is_trusted_platform_module_enabled:
description:
- Whether the Trusted Platform Module (TPM) is enabled on the instance.
type: bool
is_measured_boot_enabled:
description:
- Whether the Measured Boot feature is enabled on the instance.
type: bool
numa_nodes_per_socket:
description:
- The number of NUMA nodes per socket.
- Applicable when type is 'AMD_MILAN_BM'
type: str
choices:
- "NPS0"
- "NPS1"
- "NPS2"
- "NPS4"
source_details:
description:
- ""
type: dict
suboptions:
source_type:
description:
- The source type for the instance.
Use `image` when specifying the image OCID. Use `bootVolume` when specifying
the boot volume OCID.
type: str
choices:
- "image"
- "bootVolume"
required: true
boot_volume_size_in_gbs:
description:
- The size of the boot volume in GBs. The minimum value is 50 GB and the maximum
value is 32,768 GB (32 TB).
- Applicable when source_type is 'image'
type: int
image_id:
description:
- The OCID of the image used to boot the instance.
- Applicable when source_type is 'image'
type: str
boot_volume_id:
description:
- The OCID of the boot volume used to boot the instance.
- Applicable when source_type is 'bootVolume'
type: str
fault_domain:
description:
- A fault domain is a grouping of hardware and infrastructure within an availability domain.
Each availability domain contains three fault domains. Fault domains let you distribute your
instances so that they are not on the same physical hardware within a single availability domain.
A hardware failure or Compute hardware maintenance that affects one fault domain does not affect
instances in other fault domains.
- If you do not specify the fault domain, the system selects one for you.
- To get a list of fault domains, use the
L(ListFaultDomains,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/identity/20160918/FaultDomain/ListFaultDomains) operation in the
Identity and Access Management Service API.
- "Example: `FAULT-DOMAIN-1`"
type: str
dedicated_vm_host_id:
description:
- The OCID of dedicated VM host.
- Dedicated VM hosts can be used when launching individual instances from an instance configuration. They
cannot be used to launch instance pools.
type: str
launch_mode:
description:
- "Specifies the configuration mode for launching virtual machine (VM) instances. The configuration modes are:
* `NATIVE` - VM instances launch with iSCSI boot and VFIO devices. The default value for platform images.
* `EMULATED` - VM instances launch with emulated devices, such as the E1000 network driver and emulated SCSI disk controller.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers.
* `CUSTOM` - VM instances launch with custom configuration settings specified in the `LaunchOptions` parameter."
type: str
choices:
- "NATIVE"
- "EMULATED"
- "PARAVIRTUALIZED"
- "CUSTOM"
launch_options:
description:
- ""
type: dict
suboptions:
boot_volume_type:
description:
- "Emulation type for the boot volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
type: str
choices:
- "ISCSI"
- "SCSI"
- "IDE"
- "VFIO"
- "PARAVIRTUALIZED"
firmware:
description:
- "Firmware used to boot VM. Select the option that matches your operating system.
* `BIOS` - Boot VM using BIOS style firmware. This is compatible with both 32 bit and 64 bit operating
systems that boot using MBR style bootloaders.
* `UEFI_64` - Boot VM using UEFI style firmware compatible with 64 bit operating systems. This is the
default for platform images."
type: str
choices:
- "BIOS"
- "UEFI_64"
network_type:
description:
- "Emulation type for the physical network interface card (NIC).
* `E1000` - Emulated Gigabit ethernet controller. Compatible with Linux e1000 network driver.
* `VFIO` - Direct attached Virtual Function network controller. This is the networking type
when you launch an instance using hardware-assisted (SR-IOV) networking.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers."
type: str
choices:
- "E1000"
- "VFIO"
- "PARAVIRTUALIZED"
remote_data_volume_type:
description:
- "Emulation type for volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
type: str
choices:
- "ISCSI"
- "SCSI"
- "IDE"
- "VFIO"
- "PARAVIRTUALIZED"
is_pv_encryption_in_transit_enabled:
description:
- Deprecated. Instead use `isPvEncryptionInTransitEnabled` in
L(InstanceConfigurationLaunchInstanceDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/datatypes/InstanceConfigurationLaunchInstanceDetails).
type: bool
is_consistent_volume_naming_enabled:
description:
- Whether to enable consistent volume naming feature. Defaults to false.
type: bool
agent_config:
description:
- ""
type: dict
suboptions:
is_monitoring_disabled:
description:
- Whether Oracle Cloud Agent can gather performance metrics and monitor the instance using the
monitoring plugins. Default value is false (monitoring plugins are enabled).
- "These are the monitoring plugins: Compute Instance Monitoring
and Custom Logs Monitoring."
- The monitoring plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isMonitoringDisabled` is true, all of the monitoring plugins are disabled, regardless of
the per-plugin configuration.
- If `isMonitoringDisabled` is false, all of the monitoring plugins are enabled. You
can optionally disable individual monitoring plugins by providing a value in the `pluginsConfig`
object."
type: bool
is_management_disabled:
description:
- Whether Oracle Cloud Agent can run all the available management plugins.
Default value is false (management plugins are enabled).
- "These are the management plugins: OS Management Service Agent and Compute Instance
Run Command."
- The management plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isManagementDisabled` is true, all of the management plugins are disabled, regardless of
the per-plugin configuration.
- If `isManagementDisabled` is false, all of the management plugins are enabled. You
can optionally disable individual management plugins by providing a value in the `pluginsConfig`
object."
type: bool
are_all_plugins_disabled:
description:
- Whether Oracle Cloud Agent can run all the available plugins.
This includes the management and monitoring plugins.
- To get a list of available plugins, use the
L(ListInstanceagentAvailablePlugins,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/instanceagent/20180530/Plugin/ListInstanceagentAvailablePlugins)
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
type: bool
plugins_config:
description:
- The configuration of plugins associated with this instance.
type: list
elements: dict
suboptions:
name:
description:
- The plugin name. To get a list of available plugins, use the
L(ListInstanceagentAvailablePlugins,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/instanceagent/20180530/Plugin/ListInstanceagentAvailablePlugins)
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
type: str
required: true
desired_state:
description:
- Whether the plugin should be enabled or disabled.
- To enable the monitoring and management plugins, the `isMonitoringDisabled` and
`isManagementDisabled` attributes must also be set to false.
type: str
choices:
- "ENABLED"
- "DISABLED"
required: true
is_pv_encryption_in_transit_enabled:
description:
- Whether to enable in-transit encryption for the data volume's paravirtualized attachment. The default value is false.
type: bool
preferred_maintenance_action:
description:
- "The preferred maintenance action for an instance. The default is LIVE_MIGRATE, if live migration is supported.
* `LIVE_MIGRATE` - Run maintenance using a live migration.
* `REBOOT` - Run maintenance using a reboot."
type: str
choices:
- "LIVE_MIGRATE"
- "REBOOT"
instance_options:
description:
- ""
type: dict
suboptions:
are_legacy_imds_endpoints_disabled:
description:
- Whether to disable the legacy (/v1) instance metadata service endpoints.
Customers who have migrated to /v2 should set this to true for added security.
Default is false.
type: bool
availability_config:
description:
- ""
type: dict
suboptions:
recovery_action:
description:
- "The lifecycle state for an instance when it is recovered after infrastructure maintenance.
* `RESTORE_INSTANCE` - The instance is restored to the lifecycle state it was in before the maintenance event.
If the instance was running, it is automatically rebooted. This is the default action when a value is not set.
* `STOP_INSTANCE` - The instance is recovered in the stopped state."
type: str
choices:
- "RESTORE_INSTANCE"
- "STOP_INSTANCE"
preemptible_instance_config:
description:
- ""
type: dict
suboptions:
preemption_action:
description:
- ""
type: dict
required: true
suboptions:
type:
description:
- The type of action to run when the instance is interrupted for eviction.
type: str
choices:
- "TERMINATE"
required: true
preserve_boot_volume:
description:
- Whether to preserve the boot volume that was used to launch the preemptible instance when the instance is terminated.
Defaults to false if not specified.
type: bool
secondary_vnics:
description:
- ""
- Applicable only for I(action=launch).
type: list
elements: dict
suboptions:
create_vnic_details:
description:
- ""
type: dict
suboptions:
assign_public_ip:
description:
- Whether the VNIC should be assigned a public IP address. See the `assignPublicIp` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
assign_private_dns_record:
description:
- Whether the VNIC should be assigned a private DNS record. See the `assignPrivateDnsRecord` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name for the VNIC. Does not have to be unique.
Avoid entering confidential information.
type: str
aliases: ["name"]
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
hostname_label:
description:
- The hostname for the VNIC's primary private IP.
See the `hostnameLabel` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
nsg_ids:
description:
- A list of the OCIDs of the network security groups (NSGs) to add the VNIC to. For more
information about NSGs, see
L(NetworkSecurityGroup,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/NetworkSecurityGroup/).
type: list
elements: str
private_ip:
description:
- A private IP address of your choice to assign to the VNIC.
See the `privateIp` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
skip_source_dest_check:
description:
- Whether the source/destination check is disabled on the VNIC.
See the `skipSourceDestCheck` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: bool
subnet_id:
description:
- The OCID of the subnet to create the VNIC in.
See the `subnetId` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
display_name:
description:
- A user-friendly name for the attachment. Does not have to be unique, and it cannot be changed.
type: str
aliases: ["name"]
nic_index:
description:
- Which physical network interface card (NIC) the VNIC will use. Defaults to 0.
Certain bare metal instance shapes have two active physical NICs (0 and 1). If
you add a secondary VNIC to one of these instances, you can specify which NIC
the VNIC will use. For more information, see
L(Virtual Network Interface Cards (VNICs),https://docs.cloud.oracle.com/iaas/Content/Network/Tasks/managingVNICs.htm).
type: int
action:
description:
- The action to perform on the InstanceConfiguration.
type: str
required: true
choices:
- "change_compartment"
- "launch"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action change_compartment on instance_configuration
oci_compute_management_instance_configuration_actions:
compartment_id: "ocid1.compartment.oc1..unique_ID"
instance_configuration_id: "ocid1.instanceconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
action: "change_compartment"
- name: Perform action launch on instance_configuration
oci_compute_management_instance_configuration_actions:
instance_configuration_id: "ocid1.instanceconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
instance_type: compute
action: launch
"""
RETURN = """
instance:
description:
- Details of the InstanceConfiguration resource acted upon by the current operation
returned: on success
type: complex
contains:
availability_domain:
description:
- The availability domain the instance is running in.
- "Example: `Uocm:PHX-AD-1`"
returned: on success
type: str
sample: Uocm:PHX-AD-1
capacity_reservation_id:
description:
- The OCID of the compute capacity reservation this instance is launched under.
When this field contains an empty string or is null, the instance is not currently in a capacity reservation.
For more information, see L(Capacity Reservations,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/reserve-capacity.htm#default).
returned: on success
type: str
sample: "ocid1.capacityreservation.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The OCID of the compartment that contains the instance.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
dedicated_vm_host_id:
description:
- The OCID of dedicated VM host.
returned: on success
type: str
sample: "ocid1.dedicatedvmhost.oc1..xxxxxxEXAMPLExxxxxx"
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
- "Example: `My bare metal instance`"
returned: on success
type: str
sample: My bare metal instance
extended_metadata:
description:
- Additional metadata key/value pairs that you provide. They serve the same purpose and functionality
as fields in the `metadata` object.
- They are distinguished from `metadata` fields in that these can be nested JSON objects (whereas `metadata`
fields are string/string maps only).
returned: on success
type: dict
sample: {}
fault_domain:
description:
- The name of the fault domain the instance is running in.
- A fault domain is a grouping of hardware and infrastructure within an availability domain.
Each availability domain contains three fault domains. Fault domains let you distribute your
instances so that they are not on the same physical hardware within a single availability domain.
A hardware failure or Compute hardware maintenance that affects one fault domain does not affect
instances in other fault domains.
- If you do not specify the fault domain, the system selects one for you.
- "Example: `FAULT-DOMAIN-1`"
returned: on success
type: str
sample: FAULT-DOMAIN-1
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The OCID of the instance.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
image_id:
description:
- Deprecated. Use `sourceDetails` instead.
returned: on success
type: str
sample: "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx"
ipxe_script:
description:
- When a bare metal or virtual machine
instance boots, the iPXE firmware that runs on the instance is
configured to run an iPXE script to continue the boot process.
- If you want more control over the boot process, you can provide
your own custom iPXE script that will run when the instance boots;
however, you should be aware that the same iPXE script will run
every time an instance boots; not only after the initial
LaunchInstance call.
- "The default iPXE script connects to the instance's local boot
volume over iSCSI and performs a network boot. If you use a custom iPXE
script and want to network-boot from the instance's local boot volume
over iSCSI the same way as the default iPXE script, you should use the
following iSCSI IP address: 169.254.0.2, and boot volume IQN:
iqn.2015-02.oracle.boot."
- For more information about the Bring Your Own Image feature of
Oracle Cloud Infrastructure, see
L(Bring Your Own Image,https://docs.cloud.oracle.com/iaas/Content/Compute/References/bringyourownimage.htm).
- For more information about iPXE, see http://ipxe.org.
returned: on success
type: str
sample: ipxe_script_example
launch_mode:
description:
- "Specifies the configuration mode for launching virtual machine (VM) instances. The configuration modes are:
* `NATIVE` - VM instances launch with iSCSI boot and VFIO devices. The default value for platform images.
* `EMULATED` - VM instances launch with emulated devices, such as the E1000 network driver and emulated SCSI disk controller.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers.
* `CUSTOM` - VM instances launch with custom configuration settings specified in the `LaunchOptions` parameter."
returned: on success
type: str
sample: NATIVE
launch_options:
description:
- ""
returned: on success
type: complex
contains:
boot_volume_type:
description:
- "Emulation type for the boot volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
returned: on success
type: str
sample: ISCSI
firmware:
description:
- "Firmware used to boot VM. Select the option that matches your operating system.
* `BIOS` - Boot VM using BIOS style firmware. This is compatible with both 32 bit and 64 bit operating
systems that boot using MBR style bootloaders.
* `UEFI_64` - Boot VM using UEFI style firmware compatible with 64 bit operating systems. This is the
default for platform images."
returned: on success
type: str
sample: BIOS
network_type:
description:
- "Emulation type for the physical network interface card (NIC).
* `E1000` - Emulated Gigabit ethernet controller. Compatible with Linux e1000 network driver.
* `VFIO` - Direct attached Virtual Function network controller. This is the networking type
when you launch an instance using hardware-assisted (SR-IOV) networking.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers."
returned: on success
type: str
sample: E1000
remote_data_volume_type:
description:
- "Emulation type for volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
returned: on success
type: str
sample: ISCSI
is_pv_encryption_in_transit_enabled:
description:
- Deprecated. Instead use `isPvEncryptionInTransitEnabled` in
L(LaunchInstanceDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/datatypes/LaunchInstanceDetails).
returned: on success
type: bool
sample: true
is_consistent_volume_naming_enabled:
description:
- Whether to enable consistent volume naming feature. Defaults to false.
returned: on success
type: bool
sample: true
instance_options:
description:
- ""
returned: on success
type: complex
contains:
are_legacy_imds_endpoints_disabled:
description:
- Whether to disable the legacy (/v1) instance metadata service endpoints.
Customers who have migrated to /v2 should set this to true for added security.
Default is false.
returned: on success
type: bool
sample: true
availability_config:
description:
- ""
returned: on success
type: complex
contains:
is_live_migration_preferred:
description:
- Whether to live migrate supported VM instances to a healthy physical VM host without
disrupting running instances during infrastructure maintenance events. If null, Oracle
chooses the best option for migrating the VM during infrastructure maintenance events.
returned: on success
type: bool
sample: true
recovery_action:
description:
- "The lifecycle state for an instance when it is recovered after infrastructure maintenance.
* `RESTORE_INSTANCE` - The instance is restored to the lifecycle state it was in before the maintenance event.
If the instance was running, it is automatically rebooted. This is the default action when a value is not set.
* `STOP_INSTANCE` - The instance is recovered in the stopped state."
returned: on success
type: str
sample: RESTORE_INSTANCE
preemptible_instance_config:
description:
- ""
returned: on success
type: complex
contains:
preemption_action:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of action to run when the instance is interrupted for eviction.
returned: on success
type: str
sample: TERMINATE
preserve_boot_volume:
description:
- Whether to preserve the boot volume that was used to launch the preemptible instance when the instance is terminated. Defaults
to false if not specified.
returned: on success
type: bool
sample: true
lifecycle_state:
description:
- The current state of the instance.
returned: on success
type: str
sample: MOVING
metadata:
description:
- Custom metadata that you provide.
returned: on success
type: dict
sample: {}
region:
description:
- The region that contains the availability domain the instance is running in.
- For the us-phoenix-1 and us-ashburn-1 regions, `phx` and `iad` are returned, respectively.
For all other regions, the full region name is returned.
- "Examples: `phx`, `eu-frankfurt-1`"
returned: on success
type: str
sample: region_example
shape:
description:
- The shape of the instance. The shape determines the number of CPUs and the amount of memory
allocated to the instance. You can enumerate all available shapes by calling
L(ListShapes,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/Shape/ListShapes).
returned: on success
type: str
sample: shape_example
shape_config:
description:
- ""
returned: on success
type: complex
contains:
ocpus:
description:
- The total number of OCPUs available to the instance.
returned: on success
type: float
sample: 3.4
memory_in_gbs:
description:
- The total amount of memory available to the instance, in gigabytes.
returned: on success
type: float
sample: 3.4
baseline_ocpu_utilization:
description:
- The baseline OCPU utilization for a subcore burstable VM instance. Leave this attribute blank for a
non-burstable instance, or explicitly specify non-burstable with `BASELINE_1_1`.
- "The following values are supported:
- `BASELINE_1_8` - baseline usage is 1/8 of an OCPU.
- `BASELINE_1_2` - baseline usage is 1/2 of an OCPU.
- `BASELINE_1_1` - baseline usage is the entire OCPU. This represents a non-burstable instance."
returned: on success
type: str
sample: BASELINE_1_8
processor_description:
description:
- A short description of the instance's processor (CPU).
returned: on success
type: str
sample: processor_description_example
networking_bandwidth_in_gbps:
description:
- The networking bandwidth available to the instance, in gigabits per second.
returned: on success
type: float
sample: 3.4
max_vnic_attachments:
description:
- The maximum number of VNIC attachments for the instance.
returned: on success
type: int
sample: 56
gpus:
description:
- The number of GPUs available to the instance.
returned: on success
type: int
sample: 56
gpu_description:
description:
- A short description of the instance's graphics processing unit (GPU).
- If the instance does not have any GPUs, this field is `null`.
returned: on success
type: str
sample: gpu_description_example
local_disks:
description:
- The number of local disks available to the instance.
returned: on success
type: int
sample: 56
local_disks_total_size_in_gbs:
description:
- The aggregate size of all local disks, in gigabytes.
- If the instance does not have any local disks, this field is `null`.
returned: on success
type: float
sample: 3.4
local_disk_description:
description:
- A short description of the local disks available to this instance.
- If the instance does not have any local disks, this field is `null`.
returned: on success
type: str
sample: local_disk_description_example
source_details:
description:
- ""
returned: on success
type: complex
contains:
source_type:
description:
- The source type for the instance.
Use `image` when specifying the image OCID. Use `bootVolume` when specifying
the boot volume OCID.
returned: on success
type: str
sample: bootVolume
boot_volume_id:
description:
- The OCID of the boot volume used to boot the instance.
returned: on success
type: str
sample: "ocid1.bootvolume.oc1..xxxxxxEXAMPLExxxxxx"
boot_volume_size_in_gbs:
description:
- The size of the boot volume in GBs. Minimum value is 50 GB and maximum value is 32,768 GB (32 TB).
returned: on success
type: int
sample: 56
image_id:
description:
- The OCID of the image used to boot the instance.
returned: on success
type: str
sample: "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx"
kms_key_id:
description:
- The OCID of the Key Management key to assign as the master encryption key for the boot volume.
returned: on success
type: str
sample: "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx"
system_tags:
description:
- "System tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {}
time_created:
description:
- The date and time the instance was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2016-08-25T21:10:29.600Z"
agent_config:
description:
- ""
returned: on success
type: complex
contains:
is_monitoring_disabled:
description:
- Whether Oracle Cloud Agent can gather performance metrics and monitor the instance using the
monitoring plugins.
- "These are the monitoring plugins: Compute Instance Monitoring
and Custom Logs Monitoring."
- The monitoring plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isMonitoringDisabled` is true, all of the monitoring plugins are disabled, regardless of
the per-plugin configuration.
- If `isMonitoringDisabled` is false, all of the monitoring plugins are enabled. You
can optionally disable individual monitoring plugins by providing a value in the `pluginsConfig`
object."
returned: on success
type: bool
sample: true
is_management_disabled:
description:
- Whether Oracle Cloud Agent can run all the available management plugins.
- "These are the management plugins: OS Management Service Agent and Compute Instance
Run Command."
- The management plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isManagementDisabled` is true, all of the management plugins are disabled, regardless of
the per-plugin configuration.
- If `isManagementDisabled` is false, all of the management plugins are enabled. You
can optionally disable individual management plugins by providing a value in the `pluginsConfig`
object."
returned: on success
type: bool
sample: true
are_all_plugins_disabled:
description:
- Whether Oracle Cloud Agent can run all of the available plugins.
This includes the management and monitoring plugins.
- For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
returned: on success
type: bool
sample: true
plugins_config:
description:
- The configuration of plugins associated with this instance.
returned: on success
type: complex
contains:
name:
description:
- The plugin name. To get a list of available plugins, use the
L(ListInstanceagentAvailablePlugins,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/instanceagent/20180530/Plugin/ListInstanceagentAvailablePlugins)
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
returned: on success
type: str
sample: name_example
desired_state:
description:
- Whether the plugin should be enabled or disabled.
- To enable the monitoring and management plugins, the `isMonitoringDisabled` and
`isManagementDisabled` attributes must also be set to false.
returned: on success
type: str
sample: ENABLED
time_maintenance_reboot_due:
description:
- "The date and time the instance is expected to be stopped / started, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
After that time if instance hasn't been rebooted, Oracle will reboot the instance within 24 hours of the due time.
Regardless of how the instance was stopped, the flag will be reset to empty as soon as instance reaches Stopped state.
Example: `2018-05-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2018-05-25T21:10:29.600Z"
platform_config:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of platform being configured.
returned: on success
type: str
sample: AMD_MILAN_BM
is_secure_boot_enabled:
description:
- Whether Secure Boot is enabled on the instance.
returned: on success
type: bool
sample: true
is_trusted_platform_module_enabled:
description:
- Whether the Trusted Platform Module (TPM) is enabled on the instance.
returned: on success
type: bool
sample: true
is_measured_boot_enabled:
description:
- Whether the Measured Boot feature is enabled on the instance.
returned: on success
type: bool
sample: true
numa_nodes_per_socket:
description:
- The number of NUMA nodes per socket (NPS).
returned: on success
type: str
sample: NPS0
sample: {
"availability_domain": "Uocm:PHX-AD-1",
"capacity_reservation_id": "ocid1.capacityreservation.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"dedicated_vm_host_id": "ocid1.dedicatedvmhost.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"display_name": "My bare metal instance",
"extended_metadata": {},
"fault_domain": "FAULT-DOMAIN-1",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"image_id": "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx",
"ipxe_script": "ipxe_script_example",
"launch_mode": "NATIVE",
"launch_options": {
"boot_volume_type": "ISCSI",
"firmware": "BIOS",
"network_type": "E1000",
"remote_data_volume_type": "ISCSI",
"is_pv_encryption_in_transit_enabled": true,
"is_consistent_volume_naming_enabled": true
},
"instance_options": {
"are_legacy_imds_endpoints_disabled": true
},
"availability_config": {
"is_live_migration_preferred": true,
"recovery_action": "RESTORE_INSTANCE"
},
"preemptible_instance_config": {
"preemption_action": {
"type": "TERMINATE",
"preserve_boot_volume": true
}
},
"lifecycle_state": "MOVING",
"metadata": {},
"region": "region_example",
"shape": "shape_example",
"shape_config": {
"ocpus": 3.4,
"memory_in_gbs": 3.4,
"baseline_ocpu_utilization": "BASELINE_1_8",
"processor_description": "processor_description_example",
"networking_bandwidth_in_gbps": 3.4,
"max_vnic_attachments": 56,
"gpus": 56,
"gpu_description": "gpu_description_example",
"local_disks": 56,
"local_disks_total_size_in_gbs": 3.4,
"local_disk_description": "local_disk_description_example"
},
"source_details": {
"source_type": "bootVolume",
"boot_volume_id": "ocid1.bootvolume.oc1..xxxxxxEXAMPLExxxxxx",
"boot_volume_size_in_gbs": 56,
"image_id": "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx",
"kms_key_id": "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx"
},
"system_tags": {},
"time_created": "2016-08-25T21:10:29.600Z",
"agent_config": {
"is_monitoring_disabled": true,
"is_management_disabled": true,
"are_all_plugins_disabled": true,
"plugins_config": [{
"name": "name_example",
"desired_state": "ENABLED"
}]
},
"time_maintenance_reboot_due": "2018-05-25T21:10:29.600Z",
"platform_config": {
"type": "AMD_MILAN_BM",
"is_secure_boot_enabled": true,
"is_trusted_platform_module_enabled": true,
"is_measured_boot_enabled": true,
"numa_nodes_per_socket": "NPS0"
}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
# The OCI Python SDK is an optional runtime dependency: import what this module
# needs and record availability in HAS_OCI_PY_SDK instead of letting an
# ImportError escape at import time. NOTE(review): the flag's consumer is not
# visible in this chunk -- presumably main() uses it to fail gracefully when
# the SDK is absent; confirm against the rest of the file.
try:
    from oci.work_requests import WorkRequestClient
    from oci.core import ComputeManagementClient
    from oci.core.models import ChangeInstanceConfigurationCompartmentDetails
    from oci.core.models import InstanceConfigurationInstanceDetails
    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class InstanceConfigurationActionsHelperGen(OCIActionsHelperBase):
    """Helper implementing the actions supported on an InstanceConfiguration.

    Supported actions:
        change_compartment
        launch
    """

    def __init__(self, *args, **kwargs):
        super(InstanceConfigurationActionsHelperGen, self).__init__(*args, **kwargs)
        # launch() completes asynchronously on the service side, so build a
        # work-request client from the same config/kwargs as the compute client
        # to track that operation to completion.
        client_config = self.client._config
        client_kwargs = self.client._kwargs
        self.work_request_client = WorkRequestClient(client_config, **client_kwargs)

    @staticmethod
    def get_module_resource_id_param():
        # Name of the module parameter that carries the resource OCID.
        return "instance_configuration_id"

    def get_module_resource_id(self):
        # OCID of the instance configuration this invocation targets.
        return self.module.params.get("instance_configuration_id")

    def get_get_fn(self):
        # Bound SDK function used to fetch the current resource state.
        return self.client.get_instance_configuration

    def get_resource(self):
        # Fetch the resource with the shared retry/backoff policy.
        resource_id = self.module.params.get("instance_configuration_id")
        return oci_common_utils.call_with_backoff(
            self.client.get_instance_configuration,
            instance_configuration_id=resource_id,
        )

    def get_response_field_name(self, action):
        # Every supported action reports its result under the "instance" key.
        return "instance"

    def change_compartment(self):
        """Move the instance configuration to another compartment (synchronous)."""
        details = oci_common_utils.convert_input_data_to_model_class(
            self.module.params, ChangeInstanceConfigurationCompartmentDetails
        )
        resource_id = self.module.params.get("instance_configuration_id")
        action = self.module.params.get("action")
        operation_name = "{0}_{1}".format(
            action.upper(), oci_common_utils.ACTION_OPERATION_KEY
        )
        # No work request is produced for this action, hence the NONE waiter.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.change_instance_configuration_compartment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                instance_configuration_id=resource_id,
                change_instance_configuration_compartment_details=details,
            ),
            waiter_type=oci_wait_utils.NONE_WAITER_KEY,
            operation=operation_name,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_action_desired_states(action),
        )

    def launch(self):
        """Launch an instance from this configuration and wait on its work request."""
        details = oci_common_utils.convert_input_data_to_model_class(
            self.module.params, InstanceConfigurationInstanceDetails
        )
        resource_id = self.module.params.get("instance_configuration_id")
        operation_name = "{0}_{1}".format(
            self.module.params.get("action").upper(),
            oci_common_utils.ACTION_OPERATION_KEY,
        )
        # Launching is asynchronous: poll the work request created in __init__'s
        # dedicated client until it reaches a completed state.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.launch_instance_configuration,
            call_fn_args=(),
            call_fn_kwargs=dict(
                instance_configuration_id=resource_id,
                instance_configuration=details,
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=operation_name,
            waiter_client=self.work_request_client,
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )
# Look up an optional hand-written customization class by name.
# NOTE(review): presumably get_custom_class returns a no-op base when no
# customization is registered -- confirm in oci_resource_utils.
InstanceConfigurationActionsHelperCustom = get_custom_class(
    "InstanceConfigurationActionsHelperCustom"
)
class ResourceHelper(
    InstanceConfigurationActionsHelperCustom, InstanceConfigurationActionsHelperGen
):
    # The custom class precedes the generated one in the MRO, so any
    # customization overrides take priority over the generated behavior.
    pass
def main():
    """Module entry point: parse arguments and perform the requested action."""
    # Start from the common OCI argument spec; this module does not create
    # resources and does not expose its own wait options here.
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=False, supports_wait=False
    )
    module_args.update(
        dict(
            # OCID of the instance configuration the action targets.
            instance_configuration_id=dict(aliases=["id"], type="str", required=True),
            # Destination compartment (used by the change_compartment action).
            compartment_id=dict(type="str"),
            # The options below describe the instance to create for the
            # launch action.
            instance_type=dict(type="str", choices=["compute"]),
            block_volumes=dict(
                type="list",
                elements="dict",
                options=dict(
                    attach_details=dict(
                        type="dict",
                        options=dict(
                            display_name=dict(aliases=["name"], type="str"),
                            is_read_only=dict(type="bool"),
                            device=dict(type="str"),
                            is_shareable=dict(type="bool"),
                            type=dict(
                                type="str",
                                required=True,
                                choices=["iscsi", "paravirtualized"],
                            ),
                            use_chap=dict(type="bool"),
                            is_pv_encryption_in_transit_enabled=dict(type="bool"),
                        ),
                    ),
                    create_details=dict(
                        type="dict",
                        options=dict(
                            availability_domain=dict(type="str"),
                            backup_policy_id=dict(type="str"),
                            compartment_id=dict(type="str"),
                            defined_tags=dict(type="dict"),
                            display_name=dict(aliases=["name"], type="str"),
                            freeform_tags=dict(type="dict"),
                            kms_key_id=dict(type="str"),
                            vpus_per_gb=dict(type="int"),
                            size_in_gbs=dict(type="int"),
                            source_details=dict(
                                type="dict",
                                options=dict(
                                    type=dict(
                                        type="str",
                                        required=True,
                                        choices=["volumeBackup", "volume"],
                                    ),
                                    id=dict(type="str"),
                                ),
                            ),
                        ),
                    ),
                    volume_id=dict(type="str"),
                ),
            ),
            # Per-instance launch parameters (shape, VNIC, boot source, ...).
            launch_details=dict(
                type="dict",
                options=dict(
                    availability_domain=dict(type="str"),
                    capacity_reservation_id=dict(type="str"),
                    compartment_id=dict(type="str"),
                    create_vnic_details=dict(
                        type="dict",
                        options=dict(
                            assign_public_ip=dict(type="bool"),
                            assign_private_dns_record=dict(type="bool"),
                            defined_tags=dict(type="dict"),
                            display_name=dict(aliases=["name"], type="str"),
                            freeform_tags=dict(type="dict"),
                            hostname_label=dict(type="str"),
                            nsg_ids=dict(type="list", elements="str"),
                            private_ip=dict(type="str"),
                            skip_source_dest_check=dict(type="bool"),
                            subnet_id=dict(type="str"),
                        ),
                    ),
                    defined_tags=dict(type="dict"),
                    display_name=dict(aliases=["name"], type="str"),
                    extended_metadata=dict(type="dict"),
                    freeform_tags=dict(type="dict"),
                    ipxe_script=dict(type="str"),
                    metadata=dict(type="dict"),
                    shape=dict(type="str"),
                    shape_config=dict(
                        type="dict",
                        options=dict(
                            ocpus=dict(type="float"),
                            memory_in_gbs=dict(type="float"),
                            baseline_ocpu_utilization=dict(
                                type="str",
                                choices=[
                                    "BASELINE_1_8",
                                    "BASELINE_1_2",
                                    "BASELINE_1_1",
                                ],
                            ),
                        ),
                    ),
                    platform_config=dict(
                        type="dict",
                        options=dict(
                            type=dict(
                                type="str",
                                required=True,
                                choices=[
                                    "AMD_MILAN_BM",
                                    "INTEL_VM",
                                    "AMD_ROME_BM",
                                    "INTEL_SKYLAKE_BM",
                                    "AMD_VM",
                                ],
                            ),
                            is_secure_boot_enabled=dict(type="bool"),
                            is_trusted_platform_module_enabled=dict(type="bool"),
                            is_measured_boot_enabled=dict(type="bool"),
                            numa_nodes_per_socket=dict(
                                type="str", choices=["NPS0", "NPS1", "NPS2", "NPS4"]
                            ),
                        ),
                    ),
                    source_details=dict(
                        type="dict",
                        options=dict(
                            source_type=dict(
                                type="str",
                                required=True,
                                choices=["image", "bootVolume"],
                            ),
                            boot_volume_size_in_gbs=dict(type="int"),
                            image_id=dict(type="str"),
                            boot_volume_id=dict(type="str"),
                        ),
                    ),
                    fault_domain=dict(type="str"),
                    dedicated_vm_host_id=dict(type="str"),
                    launch_mode=dict(
                        type="str",
                        choices=["NATIVE", "EMULATED", "PARAVIRTUALIZED", "CUSTOM"],
                    ),
                    launch_options=dict(
                        type="dict",
                        options=dict(
                            boot_volume_type=dict(
                                type="str",
                                choices=[
                                    "ISCSI",
                                    "SCSI",
                                    "IDE",
                                    "VFIO",
                                    "PARAVIRTUALIZED",
                                ],
                            ),
                            firmware=dict(type="str", choices=["BIOS", "UEFI_64"]),
                            network_type=dict(
                                type="str", choices=["E1000", "VFIO", "PARAVIRTUALIZED"]
                            ),
                            remote_data_volume_type=dict(
                                type="str",
                                choices=[
                                    "ISCSI",
                                    "SCSI",
                                    "IDE",
                                    "VFIO",
                                    "PARAVIRTUALIZED",
                                ],
                            ),
                            is_pv_encryption_in_transit_enabled=dict(type="bool"),
                            is_consistent_volume_naming_enabled=dict(type="bool"),
                        ),
                    ),
                    agent_config=dict(
                        type="dict",
                        options=dict(
                            is_monitoring_disabled=dict(type="bool"),
                            is_management_disabled=dict(type="bool"),
                            are_all_plugins_disabled=dict(type="bool"),
                            plugins_config=dict(
                                type="list",
                                elements="dict",
                                options=dict(
                                    name=dict(type="str", required=True),
                                    desired_state=dict(
                                        type="str",
                                        required=True,
                                        choices=["ENABLED", "DISABLED"],
                                    ),
                                ),
                            ),
                        ),
                    ),
                    is_pv_encryption_in_transit_enabled=dict(type="bool"),
                    preferred_maintenance_action=dict(
                        type="str", choices=["LIVE_MIGRATE", "REBOOT"]
                    ),
                    instance_options=dict(
                        type="dict",
                        options=dict(
                            are_legacy_imds_endpoints_disabled=dict(type="bool")
                        ),
                    ),
                    availability_config=dict(
                        type="dict",
                        options=dict(
                            recovery_action=dict(
                                type="str",
                                choices=["RESTORE_INSTANCE", "STOP_INSTANCE"],
                            )
                        ),
                    ),
                    preemptible_instance_config=dict(
                        type="dict",
                        options=dict(
                            preemption_action=dict(
                                type="dict",
                                required=True,
                                options=dict(
                                    type=dict(
                                        type="str", required=True, choices=["TERMINATE"]
                                    ),
                                    preserve_boot_volume=dict(type="bool"),
                                ),
                            )
                        ),
                    ),
                ),
            ),
            # Additional VNICs to attach to the launched instance.
            secondary_vnics=dict(
                type="list",
                elements="dict",
                options=dict(
                    create_vnic_details=dict(
                        type="dict",
                        options=dict(
                            assign_public_ip=dict(type="bool"),
                            assign_private_dns_record=dict(type="bool"),
                            defined_tags=dict(type="dict"),
                            display_name=dict(aliases=["name"], type="str"),
                            freeform_tags=dict(type="dict"),
                            hostname_label=dict(type="str"),
                            nsg_ids=dict(type="list", elements="str"),
                            private_ip=dict(type="str"),
                            skip_source_dest_check=dict(type="bool"),
                            subnet_id=dict(type="str"),
                        ),
                    ),
                    display_name=dict(aliases=["name"], type="str"),
                    nic_index=dict(type="int"),
                ),
            ),
            # Which action to perform on the instance configuration.
            action=dict(
                type="str", required=True, choices=["change_compartment", "launch"]
            ),
        )
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    # Fail early if the OCI python SDK could not be imported.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_helper = ResourceHelper(
        module=module,
        resource_type="instance_configuration",
        service_client_class=ComputeManagementClient,
        namespace="core",
    )
    # Dispatch to the requested action and exit with its result payload.
    result = resource_helper.perform_action(module.params.get("action"))
    module.exit_json(**result)
if __name__ == "__main__":
    # Standard Ansible module entry point.
    main()
| 52.056531 | 160 | 0.49155 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible metadata for this module (legacy pre-collection format).
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_compute_management_instance_configuration_actions
short_description: Perform actions on an InstanceConfiguration resource in Oracle Cloud Infrastructure
description:
- Perform actions on an InstanceConfiguration resource in Oracle Cloud Infrastructure
- "For I(action=change_compartment), moves an instance configuration into a different compartment within the same tenancy.
For information about moving resources between compartments, see
L(Moving Resources to a Different Compartment,https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
When you move an instance configuration to a different compartment, associated resources such as
instance pools are not moved.
**Important:** Most of the properties for an existing instance configuration, including the compartment,
cannot be modified after you create the instance configuration. Although you can move an instance configuration
to a different compartment, you will not be able to use the instance configuration to manage instance pools
in the new compartment. If you want to update an instance configuration to point to a different compartment,
you should instead create a new instance configuration in the target compartment using
L(CreateInstanceConfiguration,https://docs.cloud.oracle.com/iaas/api/#/en/iaas/20160918/InstanceConfiguration/CreateInstanceConfiguration)."
- For I(action=launch), launches an instance from an instance configuration.
If the instance configuration does not include all of the parameters that are
required to launch an instance, such as the availability domain and subnet ID, you must
provide these parameters when you launch an instance from the instance configuration.
For more information, see the L(InstanceConfiguration,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/InstanceConfiguration/)
resource.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
instance_configuration_id:
description:
- The OCID of the instance configuration.
type: str
aliases: ["id"]
required: true
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to
move the instance configuration to.
- Required for I(action=change_compartment).
type: str
instance_type:
description:
- The type of instance details. Supported instanceType is compute
- Required for I(action=launch).
type: str
choices:
- "compute"
block_volumes:
description:
- ""
- Applicable only for I(action=launch).
type: list
elements: dict
suboptions:
attach_details:
description:
- ""
type: dict
suboptions:
display_name:
description:
- A user-friendly name. Does not have to be unique, and it cannot be changed. Avoid entering confidential information.
type: str
aliases: ["name"]
is_read_only:
description:
- Whether the attachment should be created in read-only mode.
type: bool
device:
description:
- The device name.
type: str
is_shareable:
description:
- Whether the attachment should be created in shareable mode. If an attachment
is created in shareable mode, then other instances can attach the same volume, provided
that they also create their attachments in shareable mode. Only certain volume types can
be attached in shareable mode. Defaults to false if not specified.
type: bool
type:
description:
- "The type of volume. The only supported values are \\"iscsi\\" and \\"paravirtualized\\"."
type: str
choices:
- "iscsi"
- "paravirtualized"
required: true
use_chap:
description:
- Whether to use CHAP authentication for the volume attachment. Defaults to false.
- Applicable when type is 'iscsi'
type: bool
is_pv_encryption_in_transit_enabled:
description:
- Whether to enable in-transit encryption for the data volume's paravirtualized attachment. The default value is false.
- Applicable when type is 'paravirtualized'
type: bool
create_details:
description:
- ""
type: dict
suboptions:
availability_domain:
description:
- The availability domain of the volume.
- "Example: `Uocm:PHX-AD-1`"
type: str
backup_policy_id:
description:
- If provided, specifies the ID of the volume backup policy to assign to the newly
created volume. If omitted, no policy will be assigned.
type: str
compartment_id:
description:
- The OCID of the compartment that contains the volume.
type: str
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
type: str
aliases: ["name"]
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
kms_key_id:
description:
- The OCID of the Key Management key to assign as the master encryption key
for the volume.
type: str
vpus_per_gb:
description:
- The number of volume performance units (VPUs) that will be applied to this volume per GB,
representing the Block Volume service's elastic performance options.
See L(Block Volume Elastic
Performance,https://docs.cloud.oracle.com/iaas/Content/Block/Concepts/blockvolumeelasticperformance.htm) for more information.
- "Allowed values:"
- " * `0`: Represents Lower Cost option."
- " * `10`: Represents Balanced option."
- " * `20`: Represents Higher Performance option."
type: int
size_in_gbs:
description:
- The size of the volume in GBs.
type: int
source_details:
description:
- ""
type: dict
suboptions:
type:
description:
- ""
type: str
choices:
- "volumeBackup"
- "volume"
required: true
id:
description:
- The OCID of the volume backup.
type: str
volume_id:
description:
- The OCID of the volume.
type: str
launch_details:
description:
- ""
- Applicable only for I(action=launch).
type: dict
suboptions:
availability_domain:
description:
- The availability domain of the instance.
- "Example: `Uocm:PHX-AD-1`"
type: str
capacity_reservation_id:
description:
- The OCID of the compute capacity reservation this instance is launched under.
type: str
compartment_id:
description:
- The OCID of the compartment.
type: str
create_vnic_details:
description:
- ""
type: dict
suboptions:
assign_public_ip:
description:
- Whether the VNIC should be assigned a public IP address. See the `assignPublicIp` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
assign_private_dns_record:
description:
- Whether the VNIC should be assigned a private DNS record. See the `assignPrivateDnsRecord` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name for the VNIC. Does not have to be unique.
Avoid entering confidential information.
type: str
aliases: ["name"]
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
hostname_label:
description:
- The hostname for the VNIC's primary private IP.
See the `hostnameLabel` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
nsg_ids:
description:
- A list of the OCIDs of the network security groups (NSGs) to add the VNIC to. For more
information about NSGs, see
L(NetworkSecurityGroup,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/NetworkSecurityGroup/).
type: list
elements: str
private_ip:
description:
- A private IP address of your choice to assign to the VNIC.
See the `privateIp` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
skip_source_dest_check:
description:
- Whether the source/destination check is disabled on the VNIC.
See the `skipSourceDestCheck` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: bool
subnet_id:
description:
- The OCID of the subnet to create the VNIC in.
See the `subnetId` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
- "Example: `My bare metal instance`"
type: str
aliases: ["name"]
extended_metadata:
description:
- Additional metadata key/value pairs that you provide. They serve the same purpose and
functionality as fields in the `metadata` object.
- They are distinguished from `metadata` fields in that these can be nested JSON objects
(whereas `metadata` fields are string/string maps only).
- The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of
32,000 bytes.
type: dict
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
ipxe_script:
description:
- This is an advanced option.
- When a bare metal or virtual machine
instance boots, the iPXE firmware that runs on the instance is
configured to run an iPXE script to continue the boot process.
- If you want more control over the boot process, you can provide
your own custom iPXE script that will run when the instance boots;
however, you should be aware that the same iPXE script will run
every time an instance boots; not only after the initial
LaunchInstance call.
- "The default iPXE script connects to the instance's local boot
volume over iSCSI and performs a network boot. If you use a custom iPXE
script and want to network-boot from the instance's local boot volume
over iSCSI the same way as the default iPXE script, you should use the
following iSCSI IP address: 169.254.0.2, and boot volume IQN:
iqn.2015-02.oracle.boot."
- For more information about the Bring Your Own Image feature of
Oracle Cloud Infrastructure, see
L(Bring Your Own Image,https://docs.cloud.oracle.com/iaas/Content/Compute/References/bringyourownimage.htm).
- For more information about iPXE, see http://ipxe.org.
type: str
metadata:
description:
- Custom metadata key/value pairs that you provide, such as the SSH public key
required to connect to the instance.
- "A metadata service runs on every launched instance. The service is an HTTP
endpoint listening on 169.254.169.254. You can use the service to:"
- "* Provide information to L(Cloud-Init,https://cloudinit.readthedocs.org/en/latest/)
to be used for various system initialization tasks."
- "* Get information about the instance, including the custom metadata that you
provide when you launch the instance."
- "**Providing Cloud-Init Metadata**"
- "You can use the following metadata key names to provide information to
Cloud-Init:"
- "**\\"ssh_authorized_keys\\"** - Provide one or more public SSH keys to be
included in the `~/.ssh/authorized_keys` file for the default user on the
instance. Use a newline character to separate multiple keys. The SSH
keys must be in the format necessary for the `authorized_keys` file, as shown
in the example below."
- "**\\"user_data\\"** - Provide your own base64-encoded data to be used by
Cloud-Init to run custom scripts or provide custom Cloud-Init configuration. For
information about how to take advantage of user data, see the
L(Cloud-Init Documentation,http://cloudinit.readthedocs.org/en/latest/topics/format.html)."
- "**Metadata Example**"
- " \\"metadata\\" : {
\\"quake_bot_level\\" : \\"Severe\\",
\\"ssh_authorized_keys\\" : \\"ssh-rsa <your_public_SSH_key>== rsa-key-20160227\\",
\\"user_data\\" : \\"<your_public_SSH_key>==\\"
}
**Getting Metadata on the Instance**"
- "To get information about your instance, connect to the instance using SSH and issue any of the
following GET requests:"
- " curl -H \\"Authorization: Bearer Oracle\\" http://169.254.169.254/opc/v2/instance/
curl -H \\"Authorization: Bearer Oracle\\" http://169.254.169.254/opc/v2/instance/metadata/
curl -H \\"Authorization: Bearer Oracle\\" http://169.254.169.254/opc/v2/instance/metadata/<any-key-name>"
- You'll get back a response that includes all the instance information; only the metadata information; or
the metadata information for the specified key name, respectively.
- The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of 32,000 bytes.
type: dict
shape:
description:
- The shape of an instance. The shape determines the number of CPUs, amount of memory,
and other resources allocated to the instance.
- You can enumerate all available shapes by calling L(ListShapes,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/Shape/ListShapes).
type: str
shape_config:
description:
- ""
type: dict
suboptions:
ocpus:
description:
- The total number of OCPUs available to the instance.
type: float
memory_in_gbs:
description:
- The total amount of memory available to the instance, in gigabytes.
type: float
baseline_ocpu_utilization:
description:
- The baseline OCPU utilization for a subcore burstable VM instance. Leave this attribute blank for a
non-burstable instance, or explicitly specify non-burstable with `BASELINE_1_1`.
- "The following values are supported:
- `BASELINE_1_8` - baseline usage is 1/8 of an OCPU.
- `BASELINE_1_2` - baseline usage is 1/2 of an OCPU.
- `BASELINE_1_1` - baseline usage is an entire OCPU. This represents a non-burstable instance."
type: str
choices:
- "BASELINE_1_8"
- "BASELINE_1_2"
- "BASELINE_1_1"
platform_config:
description:
- ""
type: dict
suboptions:
type:
description:
- The type of platform being configured.
type: str
choices:
- "AMD_MILAN_BM"
- "INTEL_VM"
- "AMD_ROME_BM"
- "INTEL_SKYLAKE_BM"
- "AMD_VM"
required: true
is_secure_boot_enabled:
description:
- Whether Secure Boot is enabled on the instance.
type: bool
is_trusted_platform_module_enabled:
description:
- Whether the Trusted Platform Module (TPM) is enabled on the instance.
type: bool
is_measured_boot_enabled:
description:
- Whether the Measured Boot feature is enabled on the instance.
type: bool
numa_nodes_per_socket:
description:
- The number of NUMA nodes per socket.
- Applicable when type is 'AMD_MILAN_BM'
type: str
choices:
- "NPS0"
- "NPS1"
- "NPS2"
- "NPS4"
source_details:
description:
- ""
type: dict
suboptions:
source_type:
description:
- The source type for the instance.
Use `image` when specifying the image OCID. Use `bootVolume` when specifying
the boot volume OCID.
type: str
choices:
- "image"
- "bootVolume"
required: true
boot_volume_size_in_gbs:
description:
- The size of the boot volume in GBs. The minimum value is 50 GB and the maximum
value is 32,768 GB (32 TB).
- Applicable when source_type is 'image'
type: int
image_id:
description:
- The OCID of the image used to boot the instance.
- Applicable when source_type is 'image'
type: str
boot_volume_id:
description:
- The OCID of the boot volume used to boot the instance.
- Applicable when source_type is 'bootVolume'
type: str
fault_domain:
description:
- A fault domain is a grouping of hardware and infrastructure within an availability domain.
Each availability domain contains three fault domains. Fault domains let you distribute your
instances so that they are not on the same physical hardware within a single availability domain.
A hardware failure or Compute hardware maintenance that affects one fault domain does not affect
instances in other fault domains.
- If you do not specify the fault domain, the system selects one for you.
- To get a list of fault domains, use the
L(ListFaultDomains,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/identity/20160918/FaultDomain/ListFaultDomains) operation in the
Identity and Access Management Service API.
- "Example: `FAULT-DOMAIN-1`"
type: str
dedicated_vm_host_id:
description:
- The OCID of dedicated VM host.
- Dedicated VM hosts can be used when launching individual instances from an instance configuration. They
cannot be used to launch instance pools.
type: str
launch_mode:
description:
- "Specifies the configuration mode for launching virtual machine (VM) instances. The configuration modes are:
* `NATIVE` - VM instances launch with iSCSI boot and VFIO devices. The default value for platform images.
* `EMULATED` - VM instances launch with emulated devices, such as the E1000 network driver and emulated SCSI disk controller.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers.
* `CUSTOM` - VM instances launch with custom configuration settings specified in the `LaunchOptions` parameter."
type: str
choices:
- "NATIVE"
- "EMULATED"
- "PARAVIRTUALIZED"
- "CUSTOM"
launch_options:
description:
- ""
type: dict
suboptions:
boot_volume_type:
description:
- "Emulation type for the boot volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
type: str
choices:
- "ISCSI"
- "SCSI"
- "IDE"
- "VFIO"
- "PARAVIRTUALIZED"
firmware:
description:
- "Firmware used to boot VM. Select the option that matches your operating system.
* `BIOS` - Boot VM using BIOS style firmware. This is compatible with both 32 bit and 64 bit operating
systems that boot using MBR style bootloaders.
* `UEFI_64` - Boot VM using UEFI style firmware compatible with 64 bit operating systems. This is the
default for platform images."
type: str
choices:
- "BIOS"
- "UEFI_64"
network_type:
description:
- "Emulation type for the physical network interface card (NIC).
* `E1000` - Emulated Gigabit ethernet controller. Compatible with Linux e1000 network driver.
* `VFIO` - Direct attached Virtual Function network controller. This is the networking type
when you launch an instance using hardware-assisted (SR-IOV) networking.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers."
type: str
choices:
- "E1000"
- "VFIO"
- "PARAVIRTUALIZED"
remote_data_volume_type:
description:
- "Emulation type for volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
type: str
choices:
- "ISCSI"
- "SCSI"
- "IDE"
- "VFIO"
- "PARAVIRTUALIZED"
is_pv_encryption_in_transit_enabled:
description:
- Deprecated. Instead use `isPvEncryptionInTransitEnabled` in
L(InstanceConfigurationLaunchInstanceDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/datatypes/InstanceConfigurationLaunchInstanceDetails).
type: bool
is_consistent_volume_naming_enabled:
description:
- Whether to enable consistent volume naming feature. Defaults to false.
type: bool
agent_config:
description:
- ""
type: dict
suboptions:
is_monitoring_disabled:
description:
- Whether Oracle Cloud Agent can gather performance metrics and monitor the instance using the
monitoring plugins. Default value is false (monitoring plugins are enabled).
- "These are the monitoring plugins: Compute Instance Monitoring
and Custom Logs Monitoring."
- The monitoring plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isMonitoringDisabled` is true, all of the monitoring plugins are disabled, regardless of
the per-plugin configuration.
- If `isMonitoringDisabled` is false, all of the monitoring plugins are enabled. You
can optionally disable individual monitoring plugins by providing a value in the `pluginsConfig`
object."
type: bool
is_management_disabled:
description:
- Whether Oracle Cloud Agent can run all the available management plugins.
Default value is false (management plugins are enabled).
- "These are the management plugins: OS Management Service Agent and Compute Instance
Run Command."
- The management plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isManagementDisabled` is true, all of the management plugins are disabled, regardless of
the per-plugin configuration.
- If `isManagementDisabled` is false, all of the management plugins are enabled. You
can optionally disable individual management plugins by providing a value in the `pluginsConfig`
object."
type: bool
are_all_plugins_disabled:
description:
- Whether Oracle Cloud Agent can run all the available plugins.
This includes the management and monitoring plugins.
- To get a list of available plugins, use the
L(ListInstanceagentAvailablePlugins,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/instanceagent/20180530/Plugin/ListInstanceagentAvailablePlugins)
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
type: bool
plugins_config:
description:
- The configuration of plugins associated with this instance.
type: list
elements: dict
suboptions:
name:
description:
- The plugin name. To get a list of available plugins, use the
L(ListInstanceagentAvailablePlugins,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/instanceagent/20180530/Plugin/ListInstanceagentAvailablePlugins)
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
type: str
required: true
desired_state:
description:
- Whether the plugin should be enabled or disabled.
- To enable the monitoring and management plugins, the `isMonitoringDisabled` and
`isManagementDisabled` attributes must also be set to false.
type: str
choices:
- "ENABLED"
- "DISABLED"
required: true
is_pv_encryption_in_transit_enabled:
description:
- Whether to enable in-transit encryption for the data volume's paravirtualized attachment. The default value is false.
type: bool
preferred_maintenance_action:
description:
- "The preferred maintenance action for an instance. The default is LIVE_MIGRATE, if live migration is supported.
* `LIVE_MIGRATE` - Run maintenance using a live migration.
* `REBOOT` - Run maintenance using a reboot."
type: str
choices:
- "LIVE_MIGRATE"
- "REBOOT"
instance_options:
description:
- ""
type: dict
suboptions:
are_legacy_imds_endpoints_disabled:
description:
- Whether to disable the legacy (/v1) instance metadata service endpoints.
Customers who have migrated to /v2 should set this to true for added security.
Default is false.
type: bool
availability_config:
description:
- ""
type: dict
suboptions:
recovery_action:
description:
- "The lifecycle state for an instance when it is recovered after infrastructure maintenance.
* `RESTORE_INSTANCE` - The instance is restored to the lifecycle state it was in before the maintenance event.
If the instance was running, it is automatically rebooted. This is the default action when a value is not set.
* `STOP_INSTANCE` - The instance is recovered in the stopped state."
type: str
choices:
- "RESTORE_INSTANCE"
- "STOP_INSTANCE"
preemptible_instance_config:
description:
- ""
type: dict
suboptions:
preemption_action:
description:
- ""
type: dict
required: true
suboptions:
type:
description:
- The type of action to run when the instance is interrupted for eviction.
type: str
choices:
- "TERMINATE"
required: true
preserve_boot_volume:
description:
- Whether to preserve the boot volume that was used to launch the preemptible instance when the instance is terminated.
Defaults to false if not specified.
type: bool
secondary_vnics:
description:
- ""
- Applicable only for I(action=launch).
type: list
elements: dict
suboptions:
create_vnic_details:
description:
- ""
type: dict
suboptions:
assign_public_ip:
description:
- Whether the VNIC should be assigned a public IP address. See the `assignPublicIp` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
assign_private_dns_record:
description:
- Whether the VNIC should be assigned a private DNS record. See the `assignPrivateDnsRecord` attribute of
L(CreateVnicDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/CreateVnicDetails/)
for more information.
type: bool
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
type: dict
display_name:
description:
- A user-friendly name for the VNIC. Does not have to be unique.
Avoid entering confidential information.
type: str
aliases: ["name"]
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
type: dict
hostname_label:
description:
- The hostname for the VNIC's primary private IP.
See the `hostnameLabel` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
nsg_ids:
description:
- A list of the OCIDs of the network security groups (NSGs) to add the VNIC to. For more
information about NSGs, see
L(NetworkSecurityGroup,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/NetworkSecurityGroup/).
type: list
elements: str
private_ip:
description:
- A private IP address of your choice to assign to the VNIC.
See the `privateIp` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
skip_source_dest_check:
description:
- Whether the source/destination check is disabled on the VNIC.
See the `skipSourceDestCheck` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: bool
subnet_id:
description:
- The OCID of the subnet to create the VNIC in.
See the `subnetId` attribute of L(CreateVnicDetails,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/latest/CreateVnicDetails/) for more information.
type: str
display_name:
description:
- A user-friendly name for the attachment. Does not have to be unique, and it cannot be changed.
type: str
aliases: ["name"]
nic_index:
description:
- Which physical network interface card (NIC) the VNIC will use. Defaults to 0.
Certain bare metal instance shapes have two active physical NICs (0 and 1). If
you add a secondary VNIC to one of these instances, you can specify which NIC
the VNIC will use. For more information, see
L(Virtual Network Interface Cards (VNICs),https://docs.cloud.oracle.com/iaas/Content/Network/Tasks/managingVNICs.htm).
type: int
action:
description:
- The action to perform on the InstanceConfiguration.
type: str
required: true
choices:
- "change_compartment"
- "launch"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action change_compartment on instance_configuration
oci_compute_management_instance_configuration_actions:
compartment_id: "ocid1.compartment.oc1..unique_ID"
instance_configuration_id: "ocid1.instanceconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
action: "change_compartment"
- name: Perform action launch on instance_configuration
oci_compute_management_instance_configuration_actions:
instance_configuration_id: "ocid1.instanceconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
instance_type: compute
action: launch
"""
RETURN = """
instance:
description:
- Details of the InstanceConfiguration resource acted upon by the current operation
returned: on success
type: complex
contains:
availability_domain:
description:
- The availability domain the instance is running in.
- "Example: `Uocm:PHX-AD-1`"
returned: on success
type: str
sample: Uocm:PHX-AD-1
capacity_reservation_id:
description:
- The OCID of the compute capacity reservation this instance is launched under.
When this field contains an empty string or is null, the instance is not currently in a capacity reservation.
For more information, see L(Capacity Reservations,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/reserve-capacity.htm#default).
returned: on success
type: str
sample: "ocid1.capacityreservation.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The OCID of the compartment that contains the instance.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
dedicated_vm_host_id:
description:
- The OCID of dedicated VM host.
returned: on success
type: str
sample: "ocid1.dedicatedvmhost.oc1..xxxxxxEXAMPLExxxxxx"
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
- "Example: `My bare metal instance`"
returned: on success
type: str
sample: My bare metal instance
extended_metadata:
description:
- Additional metadata key/value pairs that you provide. They serve the same purpose and functionality
as fields in the `metadata` object.
- They are distinguished from `metadata` fields in that these can be nested JSON objects (whereas `metadata`
fields are string/string maps only).
returned: on success
type: dict
sample: {}
fault_domain:
description:
- The name of the fault domain the instance is running in.
- A fault domain is a grouping of hardware and infrastructure within an availability domain.
Each availability domain contains three fault domains. Fault domains let you distribute your
instances so that they are not on the same physical hardware within a single availability domain.
A hardware failure or Compute hardware maintenance that affects one fault domain does not affect
instances in other fault domains.
- If you do not specify the fault domain, the system selects one for you.
- "Example: `FAULT-DOMAIN-1`"
returned: on success
type: str
sample: FAULT-DOMAIN-1
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The OCID of the instance.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
image_id:
description:
- Deprecated. Use `sourceDetails` instead.
returned: on success
type: str
sample: "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx"
ipxe_script:
description:
- When a bare metal or virtual machine
instance boots, the iPXE firmware that runs on the instance is
configured to run an iPXE script to continue the boot process.
- If you want more control over the boot process, you can provide
your own custom iPXE script that will run when the instance boots;
however, you should be aware that the same iPXE script will run
every time an instance boots; not only after the initial
LaunchInstance call.
- "The default iPXE script connects to the instance's local boot
volume over iSCSI and performs a network boot. If you use a custom iPXE
script and want to network-boot from the instance's local boot volume
over iSCSI the same way as the default iPXE script, you should use the
following iSCSI IP address: 169.254.0.2, and boot volume IQN:
iqn.2015-02.oracle.boot."
- For more information about the Bring Your Own Image feature of
Oracle Cloud Infrastructure, see
L(Bring Your Own Image,https://docs.cloud.oracle.com/iaas/Content/Compute/References/bringyourownimage.htm).
- For more information about iPXE, see http://ipxe.org.
returned: on success
type: str
sample: ipxe_script_example
launch_mode:
description:
- "Specifies the configuration mode for launching virtual machine (VM) instances. The configuration modes are:
* `NATIVE` - VM instances launch with iSCSI boot and VFIO devices. The default value for platform images.
* `EMULATED` - VM instances launch with emulated devices, such as the E1000 network driver and emulated SCSI disk controller.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers.
* `CUSTOM` - VM instances launch with custom configuration settings specified in the `LaunchOptions` parameter."
returned: on success
type: str
sample: NATIVE
launch_options:
description:
- ""
returned: on success
type: complex
contains:
boot_volume_type:
description:
- "Emulation type for the boot volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
returned: on success
type: str
sample: ISCSI
firmware:
description:
- "Firmware used to boot VM. Select the option that matches your operating system.
* `BIOS` - Boot VM using BIOS style firmware. This is compatible with both 32 bit and 64 bit operating
systems that boot using MBR style bootloaders.
* `UEFI_64` - Boot VM using UEFI style firmware compatible with 64 bit operating systems. This is the
default for platform images."
returned: on success
type: str
sample: BIOS
network_type:
description:
- "Emulation type for the physical network interface card (NIC).
* `E1000` - Emulated Gigabit ethernet controller. Compatible with Linux e1000 network driver.
* `VFIO` - Direct attached Virtual Function network controller. This is the networking type
when you launch an instance using hardware-assisted (SR-IOV) networking.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers."
returned: on success
type: str
sample: E1000
remote_data_volume_type:
description:
- "Emulation type for volume.
* `ISCSI` - ISCSI attached block storage device.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for local data
volumes on platform images.
* `PARAVIRTUALIZED` - Paravirtualized disk. This is the default for boot volumes and remote block
storage volumes on platform images."
returned: on success
type: str
sample: ISCSI
is_pv_encryption_in_transit_enabled:
description:
- Deprecated. Instead use `isPvEncryptionInTransitEnabled` in
L(LaunchInstanceDetails,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/datatypes/LaunchInstanceDetails).
returned: on success
type: bool
sample: true
is_consistent_volume_naming_enabled:
description:
- Whether to enable consistent volume naming feature. Defaults to false.
returned: on success
type: bool
sample: true
instance_options:
description:
- ""
returned: on success
type: complex
contains:
are_legacy_imds_endpoints_disabled:
description:
- Whether to disable the legacy (/v1) instance metadata service endpoints.
Customers who have migrated to /v2 should set this to true for added security.
Default is false.
returned: on success
type: bool
sample: true
availability_config:
description:
- ""
returned: on success
type: complex
contains:
is_live_migration_preferred:
description:
- Whether to live migrate supported VM instances to a healthy physical VM host without
disrupting running instances during infrastructure maintenance events. If null, Oracle
chooses the best option for migrating the VM during infrastructure maintenance events.
returned: on success
type: bool
sample: true
recovery_action:
description:
- "The lifecycle state for an instance when it is recovered after infrastructure maintenance.
* `RESTORE_INSTANCE` - The instance is restored to the lifecycle state it was in before the maintenance event.
If the instance was running, it is automatically rebooted. This is the default action when a value is not set.
* `STOP_INSTANCE` - The instance is recovered in the stopped state."
returned: on success
type: str
sample: RESTORE_INSTANCE
preemptible_instance_config:
description:
- ""
returned: on success
type: complex
contains:
preemption_action:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of action to run when the instance is interrupted for eviction.
returned: on success
type: str
sample: TERMINATE
preserve_boot_volume:
description:
- Whether to preserve the boot volume that was used to launch the preemptible instance when the instance is terminated. Defaults
to false if not specified.
returned: on success
type: bool
sample: true
lifecycle_state:
description:
- The current state of the instance.
returned: on success
type: str
sample: MOVING
metadata:
description:
- Custom metadata that you provide.
returned: on success
type: dict
sample: {}
region:
description:
- The region that contains the availability domain the instance is running in.
- For the us-phoenix-1 and us-ashburn-1 regions, `phx` and `iad` are returned, respectively.
For all other regions, the full region name is returned.
- "Examples: `phx`, `eu-frankfurt-1`"
returned: on success
type: str
sample: region_example
shape:
description:
- The shape of the instance. The shape determines the number of CPUs and the amount of memory
allocated to the instance. You can enumerate all available shapes by calling
L(ListShapes,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/Shape/ListShapes).
returned: on success
type: str
sample: shape_example
shape_config:
description:
- ""
returned: on success
type: complex
contains:
ocpus:
description:
- The total number of OCPUs available to the instance.
returned: on success
type: float
sample: 3.4
memory_in_gbs:
description:
- The total amount of memory available to the instance, in gigabytes.
returned: on success
type: float
sample: 3.4
baseline_ocpu_utilization:
description:
- The baseline OCPU utilization for a subcore burstable VM instance. Leave this attribute blank for a
non-burstable instance, or explicitly specify non-burstable with `BASELINE_1_1`.
- "The following values are supported:
- `BASELINE_1_8` - baseline usage is 1/8 of an OCPU.
- `BASELINE_1_2` - baseline usage is 1/2 of an OCPU.
- `BASELINE_1_1` - baseline usage is the entire OCPU. This represents a non-burstable instance."
returned: on success
type: str
sample: BASELINE_1_8
processor_description:
description:
- A short description of the instance's processor (CPU).
returned: on success
type: str
sample: processor_description_example
networking_bandwidth_in_gbps:
description:
- The networking bandwidth available to the instance, in gigabits per second.
returned: on success
type: float
sample: 3.4
max_vnic_attachments:
description:
- The maximum number of VNIC attachments for the instance.
returned: on success
type: int
sample: 56
gpus:
description:
- The number of GPUs available to the instance.
returned: on success
type: int
sample: 56
gpu_description:
description:
- A short description of the instance's graphics processing unit (GPU).
- If the instance does not have any GPUs, this field is `null`.
returned: on success
type: str
sample: gpu_description_example
local_disks:
description:
- The number of local disks available to the instance.
returned: on success
type: int
sample: 56
local_disks_total_size_in_gbs:
description:
- The aggregate size of all local disks, in gigabytes.
- If the instance does not have any local disks, this field is `null`.
returned: on success
type: float
sample: 3.4
local_disk_description:
description:
- A short description of the local disks available to this instance.
- If the instance does not have any local disks, this field is `null`.
returned: on success
type: str
sample: local_disk_description_example
source_details:
description:
- ""
returned: on success
type: complex
contains:
source_type:
description:
- The source type for the instance.
Use `image` when specifying the image OCID. Use `bootVolume` when specifying
the boot volume OCID.
returned: on success
type: str
sample: bootVolume
boot_volume_id:
description:
- The OCID of the boot volume used to boot the instance.
returned: on success
type: str
sample: "ocid1.bootvolume.oc1..xxxxxxEXAMPLExxxxxx"
boot_volume_size_in_gbs:
description:
- The size of the boot volume in GBs. Minimum value is 50 GB and maximum value is 32,768 GB (32 TB).
returned: on success
type: int
sample: 56
image_id:
description:
- The OCID of the image used to boot the instance.
returned: on success
type: str
sample: "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx"
kms_key_id:
description:
- The OCID of the Key Management key to assign as the master encryption key for the boot volume.
returned: on success
type: str
sample: "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx"
system_tags:
description:
- "System tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {}
time_created:
description:
- The date and time the instance was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2016-08-25T21:10:29.600Z"
agent_config:
description:
- ""
returned: on success
type: complex
contains:
is_monitoring_disabled:
description:
- Whether Oracle Cloud Agent can gather performance metrics and monitor the instance using the
monitoring plugins.
- "These are the monitoring plugins: Compute Instance Monitoring
and Custom Logs Monitoring."
- The monitoring plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isMonitoringDisabled` is true, all of the monitoring plugins are disabled, regardless of
the per-plugin configuration.
- If `isMonitoringDisabled` is false, all of the monitoring plugins are enabled. You
can optionally disable individual monitoring plugins by providing a value in the `pluginsConfig`
object."
returned: on success
type: bool
sample: true
is_management_disabled:
description:
- Whether Oracle Cloud Agent can run all the available management plugins.
- "These are the management plugins: OS Management Service Agent and Compute Instance
Run Command."
- The management plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- "- If `isManagementDisabled` is true, all of the management plugins are disabled, regardless of
the per-plugin configuration.
- If `isManagementDisabled` is false, all of the management plugins are enabled. You
can optionally disable individual management plugins by providing a value in the `pluginsConfig`
object."
returned: on success
type: bool
sample: true
are_all_plugins_disabled:
description:
- Whether Oracle Cloud Agent can run all of the available plugins.
This includes the management and monitoring plugins.
- For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
returned: on success
type: bool
sample: true
plugins_config:
description:
- The configuration of plugins associated with this instance.
returned: on success
type: complex
contains:
name:
description:
- The plugin name. To get a list of available plugins, use the
L(ListInstanceagentAvailablePlugins,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/instanceagent/20180530/Plugin/ListInstanceagentAvailablePlugins)
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
L(Managing Plugins with Oracle Cloud Agent,https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm).
returned: on success
type: str
sample: name_example
desired_state:
description:
- Whether the plugin should be enabled or disabled.
- To enable the monitoring and management plugins, the `isMonitoringDisabled` and
`isManagementDisabled` attributes must also be set to false.
returned: on success
type: str
sample: ENABLED
time_maintenance_reboot_due:
description:
- "The date and time the instance is expected to be stopped / started, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
After that time if instance hasn't been rebooted, Oracle will reboot the instance within 24 hours of the due time.
Regardless of how the instance was stopped, the flag will be reset to empty as soon as instance reaches Stopped state.
Example: `2018-05-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2018-05-25T21:10:29.600Z"
platform_config:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of platform being configured.
returned: on success
type: str
sample: AMD_MILAN_BM
is_secure_boot_enabled:
description:
- Whether Secure Boot is enabled on the instance.
returned: on success
type: bool
sample: true
is_trusted_platform_module_enabled:
description:
- Whether the Trusted Platform Module (TPM) is enabled on the instance.
returned: on success
type: bool
sample: true
is_measured_boot_enabled:
description:
- Whether the Measured Boot feature is enabled on the instance.
returned: on success
type: bool
sample: true
numa_nodes_per_socket:
description:
- The number of NUMA nodes per socket (NPS).
returned: on success
type: str
sample: NPS0
sample: {
"availability_domain": "Uocm:PHX-AD-1",
"capacity_reservation_id": "ocid1.capacityreservation.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"dedicated_vm_host_id": "ocid1.dedicatedvmhost.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"display_name": "My bare metal instance",
"extended_metadata": {},
"fault_domain": "FAULT-DOMAIN-1",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"image_id": "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx",
"ipxe_script": "ipxe_script_example",
"launch_mode": "NATIVE",
"launch_options": {
"boot_volume_type": "ISCSI",
"firmware": "BIOS",
"network_type": "E1000",
"remote_data_volume_type": "ISCSI",
"is_pv_encryption_in_transit_enabled": true,
"is_consistent_volume_naming_enabled": true
},
"instance_options": {
"are_legacy_imds_endpoints_disabled": true
},
"availability_config": {
"is_live_migration_preferred": true,
"recovery_action": "RESTORE_INSTANCE"
},
"preemptible_instance_config": {
"preemption_action": {
"type": "TERMINATE",
"preserve_boot_volume": true
}
},
"lifecycle_state": "MOVING",
"metadata": {},
"region": "region_example",
"shape": "shape_example",
"shape_config": {
"ocpus": 3.4,
"memory_in_gbs": 3.4,
"baseline_ocpu_utilization": "BASELINE_1_8",
"processor_description": "processor_description_example",
"networking_bandwidth_in_gbps": 3.4,
"max_vnic_attachments": 56,
"gpus": 56,
"gpu_description": "gpu_description_example",
"local_disks": 56,
"local_disks_total_size_in_gbs": 3.4,
"local_disk_description": "local_disk_description_example"
},
"source_details": {
"source_type": "bootVolume",
"boot_volume_id": "ocid1.bootvolume.oc1..xxxxxxEXAMPLExxxxxx",
"boot_volume_size_in_gbs": 56,
"image_id": "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx",
"kms_key_id": "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx"
},
"system_tags": {},
"time_created": "2016-08-25T21:10:29.600Z",
"agent_config": {
"is_monitoring_disabled": true,
"is_management_disabled": true,
"are_all_plugins_disabled": true,
"plugins_config": [{
"name": "name_example",
"desired_state": "ENABLED"
}]
},
"time_maintenance_reboot_due": "2018-05-25T21:10:29.600Z",
"platform_config": {
"type": "AMD_MILAN_BM",
"is_secure_boot_enabled": true,
"is_trusted_platform_module_enabled": true,
"is_measured_boot_enabled": true,
"numa_nodes_per_socket": "NPS0"
}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.work_requests import WorkRequestClient
from oci.core import ComputeManagementClient
from oci.core.models import ChangeInstanceConfigurationCompartmentDetails
from oci.core.models import InstanceConfigurationInstanceDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class InstanceConfigurationActionsHelperGen(OCIActionsHelperBase):
    """
    Generated helper implementing the actions supported on an
    InstanceConfiguration resource:
        change_compartment, launch
    """

    def __init__(self, *args, **kwargs):
        super(InstanceConfigurationActionsHelperGen, self).__init__(*args, **kwargs)
        # The launch action is tracked through the work-requests service, so a
        # dedicated WorkRequestClient is created from the same configuration
        # as the compute-management client.
        self.work_request_client = WorkRequestClient(
            self.client._config, **self.client._kwargs
        )

    @staticmethod
    def get_module_resource_id_param():
        # Name of the module parameter that carries the resource OCID.
        return "instance_configuration_id"

    def get_module_resource_id(self):
        # OCID of the instance configuration this task operates on.
        return self.module.params.get("instance_configuration_id")

    def get_get_fn(self):
        # SDK function used to re-read the resource's current state.
        return self.client.get_instance_configuration

    def get_resource(self):
        """Fetch the instance configuration, retrying with backoff."""
        resource_id = self.module.params.get("instance_configuration_id")
        return oci_common_utils.call_with_backoff(
            self.client.get_instance_configuration,
            instance_configuration_id=resource_id,
        )

    def get_response_field_name(self, action):
        # Both supported actions report their result under the "instance" key.
        return "instance"

    def _operation_name(self):
        # Operation identifier of the form "<ACTION>_<suffix>", e.g.
        # "LAUNCH_<ACTION_OPERATION_KEY>", used by the wait utilities.
        return "{0}_{1}".format(
            self.module.params.get("action").upper(),
            oci_common_utils.ACTION_OPERATION_KEY,
        )

    def change_compartment(self):
        """Move the instance configuration to another compartment."""
        action = self.module.params.get("action")
        details = oci_common_utils.convert_input_data_to_model_class(
            self.module.params, ChangeInstanceConfigurationCompartmentDetails
        )
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.change_instance_configuration_compartment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                instance_configuration_id=self.module.params.get(
                    "instance_configuration_id"
                ),
                change_instance_configuration_compartment_details=details,
            ),
            # Compartment moves complete synchronously; no waiter needed.
            waiter_type=oci_wait_utils.NONE_WAITER_KEY,
            operation=self._operation_name(),
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_action_desired_states(action),
        )

    def launch(self):
        """Launch an instance from this instance configuration."""
        details = oci_common_utils.convert_input_data_to_model_class(
            self.module.params, InstanceConfigurationInstanceDetails
        )
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.launch_instance_configuration,
            call_fn_args=(),
            call_fn_kwargs=dict(
                instance_configuration_id=self.module.params.get(
                    "instance_configuration_id"
                ),
                instance_configuration=details,
            ),
            # Launch is asynchronous: completion is observed via the
            # work-request created for the operation.
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=self._operation_name(),
            waiter_client=self.work_request_client,
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )
# Resolve an optional user-supplied customization class by name; when no
# customization is registered this returns a no-op base, so the generated
# helper's behaviour is used unchanged.
InstanceConfigurationActionsHelperCustom = get_custom_class(
    "InstanceConfigurationActionsHelperCustom"
)


class ResourceHelper(
    InstanceConfigurationActionsHelperCustom, InstanceConfigurationActionsHelperGen
):
    """Concrete helper used by main(); the MRO lets customization hooks
    override the generated implementation."""

    pass
def main():
    """Entry point: build the argument spec, dispatch the requested action
    (change_compartment or launch) and exit with its result JSON."""
    # Common module options (auth, region, ...) shared by all OCI modules;
    # this actions module neither creates resources nor exposes wait options.
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=False, supports_wait=False
    )
    # Module-specific options. The nested dict structure mirrors the OCI SDK
    # model classes (see convert_input_data_to_model_class in the helpers).
    module_args.update(
        dict(
            instance_configuration_id=dict(aliases=["id"], type="str", required=True),
            compartment_id=dict(type="str"),
            instance_type=dict(type="str", choices=["compute"]),
            # Block volumes to create/attach when launching an instance.
            block_volumes=dict(
                type="list",
                elements="dict",
                options=dict(
                    attach_details=dict(
                        type="dict",
                        options=dict(
                            display_name=dict(aliases=["name"], type="str"),
                            is_read_only=dict(type="bool"),
                            device=dict(type="str"),
                            is_shareable=dict(type="bool"),
                            type=dict(
                                type="str",
                                required=True,
                                choices=["iscsi", "paravirtualized"],
                            ),
                            use_chap=dict(type="bool"),
                            is_pv_encryption_in_transit_enabled=dict(type="bool"),
                        ),
                    ),
                    create_details=dict(
                        type="dict",
                        options=dict(
                            availability_domain=dict(type="str"),
                            backup_policy_id=dict(type="str"),
                            compartment_id=dict(type="str"),
                            defined_tags=dict(type="dict"),
                            display_name=dict(aliases=["name"], type="str"),
                            freeform_tags=dict(type="dict"),
                            kms_key_id=dict(type="str"),
                            vpus_per_gb=dict(type="int"),
                            size_in_gbs=dict(type="int"),
                            source_details=dict(
                                type="dict",
                                options=dict(
                                    type=dict(
                                        type="str",
                                        required=True,
                                        choices=["volumeBackup", "volume"],
                                    ),
                                    id=dict(type="str"),
                                ),
                            ),
                        ),
                    ),
                    volume_id=dict(type="str"),
                ),
            ),
            # Instance launch parameters (shape, VNIC, image/boot volume, ...).
            launch_details=dict(
                type="dict",
                options=dict(
                    availability_domain=dict(type="str"),
                    capacity_reservation_id=dict(type="str"),
                    compartment_id=dict(type="str"),
                    create_vnic_details=dict(
                        type="dict",
                        options=dict(
                            assign_public_ip=dict(type="bool"),
                            assign_private_dns_record=dict(type="bool"),
                            defined_tags=dict(type="dict"),
                            display_name=dict(aliases=["name"], type="str"),
                            freeform_tags=dict(type="dict"),
                            hostname_label=dict(type="str"),
                            nsg_ids=dict(type="list", elements="str"),
                            private_ip=dict(type="str"),
                            skip_source_dest_check=dict(type="bool"),
                            subnet_id=dict(type="str"),
                        ),
                    ),
                    defined_tags=dict(type="dict"),
                    display_name=dict(aliases=["name"], type="str"),
                    extended_metadata=dict(type="dict"),
                    freeform_tags=dict(type="dict"),
                    ipxe_script=dict(type="str"),
                    metadata=dict(type="dict"),
                    shape=dict(type="str"),
                    shape_config=dict(
                        type="dict",
                        options=dict(
                            ocpus=dict(type="float"),
                            memory_in_gbs=dict(type="float"),
                            baseline_ocpu_utilization=dict(
                                type="str",
                                choices=[
                                    "BASELINE_1_8",
                                    "BASELINE_1_2",
                                    "BASELINE_1_1",
                                ],
                            ),
                        ),
                    ),
                    platform_config=dict(
                        type="dict",
                        options=dict(
                            type=dict(
                                type="str",
                                required=True,
                                choices=[
                                    "AMD_MILAN_BM",
                                    "INTEL_VM",
                                    "AMD_ROME_BM",
                                    "INTEL_SKYLAKE_BM",
                                    "AMD_VM",
                                ],
                            ),
                            is_secure_boot_enabled=dict(type="bool"),
                            is_trusted_platform_module_enabled=dict(type="bool"),
                            is_measured_boot_enabled=dict(type="bool"),
                            numa_nodes_per_socket=dict(
                                type="str", choices=["NPS0", "NPS1", "NPS2", "NPS4"]
                            ),
                        ),
                    ),
                    source_details=dict(
                        type="dict",
                        options=dict(
                            source_type=dict(
                                type="str",
                                required=True,
                                choices=["image", "bootVolume"],
                            ),
                            boot_volume_size_in_gbs=dict(type="int"),
                            image_id=dict(type="str"),
                            boot_volume_id=dict(type="str"),
                        ),
                    ),
                    fault_domain=dict(type="str"),
                    dedicated_vm_host_id=dict(type="str"),
                    launch_mode=dict(
                        type="str",
                        choices=["NATIVE", "EMULATED", "PARAVIRTUALIZED", "CUSTOM"],
                    ),
                    launch_options=dict(
                        type="dict",
                        options=dict(
                            boot_volume_type=dict(
                                type="str",
                                choices=[
                                    "ISCSI",
                                    "SCSI",
                                    "IDE",
                                    "VFIO",
                                    "PARAVIRTUALIZED",
                                ],
                            ),
                            firmware=dict(type="str", choices=["BIOS", "UEFI_64"]),
                            network_type=dict(
                                type="str", choices=["E1000", "VFIO", "PARAVIRTUALIZED"]
                            ),
                            remote_data_volume_type=dict(
                                type="str",
                                choices=[
                                    "ISCSI",
                                    "SCSI",
                                    "IDE",
                                    "VFIO",
                                    "PARAVIRTUALIZED",
                                ],
                            ),
                            is_pv_encryption_in_transit_enabled=dict(type="bool"),
                            is_consistent_volume_naming_enabled=dict(type="bool"),
                        ),
                    ),
                    agent_config=dict(
                        type="dict",
                        options=dict(
                            is_monitoring_disabled=dict(type="bool"),
                            is_management_disabled=dict(type="bool"),
                            are_all_plugins_disabled=dict(type="bool"),
                            plugins_config=dict(
                                type="list",
                                elements="dict",
                                options=dict(
                                    name=dict(type="str", required=True),
                                    desired_state=dict(
                                        type="str",
                                        required=True,
                                        choices=["ENABLED", "DISABLED"],
                                    ),
                                ),
                            ),
                        ),
                    ),
                    is_pv_encryption_in_transit_enabled=dict(type="bool"),
                    preferred_maintenance_action=dict(
                        type="str", choices=["LIVE_MIGRATE", "REBOOT"]
                    ),
                    instance_options=dict(
                        type="dict",
                        options=dict(
                            are_legacy_imds_endpoints_disabled=dict(type="bool")
                        ),
                    ),
                    availability_config=dict(
                        type="dict",
                        options=dict(
                            recovery_action=dict(
                                type="str",
                                choices=["RESTORE_INSTANCE", "STOP_INSTANCE"],
                            )
                        ),
                    ),
                    preemptible_instance_config=dict(
                        type="dict",
                        options=dict(
                            preemption_action=dict(
                                type="dict",
                                required=True,
                                options=dict(
                                    type=dict(
                                        type="str", required=True, choices=["TERMINATE"]
                                    ),
                                    preserve_boot_volume=dict(type="bool"),
                                ),
                            )
                        ),
                    ),
                ),
            ),
            # Additional VNICs attached after launch.
            secondary_vnics=dict(
                type="list",
                elements="dict",
                options=dict(
                    create_vnic_details=dict(
                        type="dict",
                        options=dict(
                            assign_public_ip=dict(type="bool"),
                            assign_private_dns_record=dict(type="bool"),
                            defined_tags=dict(type="dict"),
                            display_name=dict(aliases=["name"], type="str"),
                            freeform_tags=dict(type="dict"),
                            hostname_label=dict(type="str"),
                            nsg_ids=dict(type="list", elements="str"),
                            private_ip=dict(type="str"),
                            skip_source_dest_check=dict(type="bool"),
                            subnet_id=dict(type="str"),
                        ),
                    ),
                    display_name=dict(aliases=["name"], type="str"),
                    nic_index=dict(type="int"),
                ),
            ),
            # Which helper method to run; names map to ResourceHelper methods.
            action=dict(
                type="str", required=True, choices=["change_compartment", "launch"]
            ),
        )
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_helper = ResourceHelper(
        module=module,
        resource_type="instance_configuration",
        service_client_class=ComputeManagementClient,
        namespace="core",
    )
    # perform_action dispatches to the method named by the "action" parameter
    # and returns the dict that becomes the module's JSON output.
    result = resource_helper.perform_action(module.params.get("action"))
    module.exit_json(**result)
# Standard Ansible module entry point: invoke main() only when run directly.
if __name__ == "__main__":
    main()
| true | true |
1c342deba119b31588f50bdd551a7d92b6156a77 | 9,947 | py | Python | huskar_api/api/webhook.py | mowangdk/huskar | 7692fbc5672a5ae6e2a33616c493466a7137f8cd | [
"MIT"
] | 59 | 2019-10-31T10:50:10.000Z | 2021-11-26T04:32:25.000Z | huskar_api/api/webhook.py | mowangdk/huskar | 7692fbc5672a5ae6e2a33616c493466a7137f8cd | [
"MIT"
] | 5 | 2019-10-31T10:37:30.000Z | 2020-03-02T06:45:46.000Z | huskar_api/api/webhook.py | mowangdk/huskar | 7692fbc5672a5ae6e2a33616c493466a7137f8cd | [
"MIT"
] | 9 | 2019-10-31T10:35:00.000Z | 2019-12-01T14:13:58.000Z | from __future__ import absolute_import
import logging
import itertools
from operator import attrgetter
from flask import request, abort, g
from flask.views import MethodView
from huskar_api.models.webhook import Webhook
from huskar_api.models.audit import action_types
from huskar_api.models.auth import Application, Authority
from huskar_api.service.admin.application_auth import (
check_application_auth, check_application)
from .utils import login_required, api_response, minimal_mode_incompatible
from .schema import webhook_schema, validate_fields
logger = logging.getLogger(__name__)
class WebhookView(MethodView):
    @login_required
    @minimal_mode_incompatible
    def get(self):
        """List all webhooks registered in Huskar.
        The response looks like::
            {
                "status": "SUCCESS",
                "message": "",
                "data": {
                    "webhook_list": [
                        {
                            "webhook_id": 1,
                            "webhook_url": "http://www.example.com",
                            "webhook_type": 0
                        }
                    ]
                }
            }
        :status 200: The request is successful.
        :status 404: The application not found.
        """
        webhooks = Webhook.get_all()
        # Serialize every webhook, both application-level and universal.
        webhook_list = [{
            'webhook_id': webhook.id,
            'webhook_url': webhook.url,
            'webhook_type': webhook.hook_type
        } for webhook in webhooks]
        return api_response(data={'webhook_list': webhook_list})
    @login_required
    @minimal_mode_incompatible
    def post(self):
        """Create a new webhook.
        The request accepting a JSON body, the schema likes::
            {
                "webhook_url": "http://www.example.com",
                "event_list": [
                    "CREATE_CONFIG_CLUSTER",
                    "DELETE_CONFIG_CLUSTER"
                ]
            }
        The content of ``event_list`` should be a list of action that
        already defined in Huskar.
        The ``application_name`` is only required when the ``webhook_type``
        is 0, it means the webhook want to subscribe some events of specified
        application. If the ``webhook_type`` value specified with 1,
        a universal webhook will be registered which will receive all
        the events of Huskar site, so the ``event_list`` will be ignored
        because that is unnecessary.
        :param webhook_type: default 0, set ``site`` level with 1.
        :param application_name: The name of application, optional.
        :form webhook_url: the webhook url.
        :form event_list: event list want subscribed
        :status 404: The application not found.
        :status 200: successful request.
        """
        webhook_type = request.args.get('webhook_type', default=0, type=int)
        # Admin-only for universal hooks; WRITE authority otherwise.
        self._check_authority(webhook_type)
        data = request.get_json() or {}
        validate_fields(webhook_schema, data, partial=False)
        if webhook_type == Webhook.TYPE_UNIVERSAL:
            # Universal webhooks receive every event; no subscriptions made.
            webhook = Webhook.create(data['webhook_url'], webhook_type)
            return api_response()
        application_name = request.args['application_name']
        application = Application.get_by_name(application_name)
        webhook = Webhook.create(data['webhook_url'], webhook_type)
        for action_name in data.get('event_list', []):
            # Translate each event name into its numeric action type.
            action_type = getattr(action_types, action_name)
            webhook.subscribe(application.id, action_type)
        return api_response()
    def _check_authority(self, webhook_type):
        # Universal hooks may only be created by site admins; normal hooks
        # require WRITE authority on the target application.
        if webhook_type == Webhook.TYPE_UNIVERSAL:
            g.auth.require_admin('only admin can add universal webhook')
        else:
            application_name = request.args['application_name']
            check_application_auth(application_name, Authority.WRITE)
class WebhookInstanceView(MethodView):
    @login_required
    @minimal_mode_incompatible
    def get(self, webhook_id):
        """Get the webhook subscription list of the specified application.
        The ``read`` authority is required.
        The response looks like::
            {
                "status": "SUCCESS",
                "message": "",
                "data": {
                    "webhook_id": 1,
                    "webhook_url": "http://www.example.com",
                    "webhook_type": 0,
                    "event_list": [
                        "CREATE_CONFIG_CLUSTER",
                        "DELETE_CONFIG_CLUSTER",
                        ...
                    ]
                }
            }
        The content of ``event_list`` is a list of action that
        already defined in Huskar.
        :param application_name: The name of application.
        :status 200: The request is successful.
        :status 404: The application not found.
        """
        webhook = self._get_webhook_or_404(webhook_id)
        # Universal (site-level) webhooks receive every event, so they carry
        # no per-application subscription records to report.
        if not webhook.is_normal:
            return api_response(data={
                'webhook_id': webhook.id,
                'webhook_url': webhook.url,
                'webhook_type': webhook.hook_type,
                'event_list': []
            })
        application_name = request.args['application_name']
        check_application_auth(application_name, Authority.READ)
        application = Application.get_by_name(application_name)
        subscriptions = webhook.get_multi_subscriptions(application.id)
        data = {
            'webhook_id': webhook.id,
            'webhook_url': webhook.url,
            'webhook_type': webhook.hook_type,
            'event_list': [action_types[x.action_type] for x in subscriptions]
        }
        return api_response(data=data)

    @login_required
    @minimal_mode_incompatible
    def put(self, webhook_id):
        """To update subscription settings of an application.
        Request body schema same to ``POST`` method.
        :param application_name: The name of application.
        :param webhook_id: The id of webhook.
        :param webhook_type: default 0, set universal with 1
        :status 200: The request is successful.
        :status 404: The application or webhook not found.
        """
        webhook = self._get_webhook_or_404(webhook_id)
        # Bug fix: _check_authority expects the hook *type*, not the webhook
        # object. Passing the object made every hook compare unequal to
        # Webhook.TYPE_UNIVERSAL, so updating a universal webhook wrongly
        # demanded an ``application_name`` query argument.
        self._check_authority(webhook.hook_type)
        data = request.get_json() or {}
        validate_fields(webhook_schema, data, partial=False)
        if not webhook.is_normal:
            # Universal webhooks have no subscriptions; only the URL changes.
            webhook.update_url(data['webhook_url'])
            return api_response()
        application_name = request.args['application_name']
        application = Application.get_by_name(application_name)
        # Replace the whole subscription set: drop the old records, then
        # subscribe to each event named in the request payload.
        webhook.batch_unsubscribe(application.id)
        webhook.update_url(data['webhook_url'])
        for action_name in data.get('event_list', []):
            action_type = getattr(action_types, action_name)
            webhook.subscribe(application.id, action_type)
        return api_response()

    @login_required
    @minimal_mode_incompatible
    def delete(self, webhook_id):
        """Unsubscribe all subscriptions of the webhook with
        specified ``webhook_id``, and delete the webhook.
        The ``application_name`` is required when the webhook subscribe
        application level events.
        :param application_name: The name of application, optional.
        :param webhook_id: The id of webhook.
        :status 200: The request is successful.
        :status 404: The application or webhook not found.
        """
        webhook = self._get_webhook_or_404(webhook_id)
        self._check_authority(webhook.hook_type)
        if webhook.is_normal:
            # (A redundant re-fetch of the same webhook was removed here.)
            webhook.batch_unsubscribe()
        webhook.delete()
        return api_response()

    def _get_webhook_or_404(self, webhook_id):
        """Return the :class:`Webhook` with ``webhook_id`` or abort with 404."""
        instance = Webhook.get(webhook_id)
        if not instance:
            abort(404, 'Webhook not registered.')
        return instance

    def _check_authority(self, webhook_type):
        # TODO: fix that duplicated code (same check lives in WebhookView)
        if webhook_type == Webhook.TYPE_UNIVERSAL:
            g.auth.require_admin('only admin can update universal webhook')
        else:
            application_name = request.args['application_name']
            check_application_auth(application_name, Authority.WRITE)
class ApplicationWebhookView(MethodView):
    @login_required
    @minimal_mode_incompatible
    def get(self, application_name):
        """List the subscriptions of an application specified with
        the ``application_name``.
        The response looks like::
            {
                "status": "SUCCESS",
                "message": "",
                "data": {
                    "webhook_list": [
                        {
                            "webhook_id": 1,
                            "webhook_url": "http://www.example.com",
                            "webhook_type": 0,
                            "event_list": [
                                "CREATE_CONFIG_CLUSTER",
                                "DELETE_CONFIG_CLUSTER",
                                ...
                            ]
                        },
                        ...
                    ]
                }
            }
        :param application_name: The name of application.
        :status 200: The request is successful.
        """
        application = check_application(application_name)
        subscriptions = Webhook.search_subscriptions(
            application_id=application.id)
        # NOTE(review): itertools.groupby only groups *consecutive* items,
        # so this relies on search_subscriptions returning rows ordered by
        # webhook_id -- confirm that ordering guarantee in the model layer.
        groups = itertools.groupby(subscriptions, key=attrgetter('webhook_id'))
        webhook_list = []
        for webhook_id, group in groups:
            webhook = Webhook.get(webhook_id)
            webhook_list.append({
                'webhook_id': webhook.id,
                'webhook_url': webhook.url,
                'webhook_type': webhook.hook_type,
                'event_list': [action_types[x.action_type] for x in group]
            })
        return api_response(data={'webhook_list': webhook_list})
| 35.909747 | 79 | 0.602493 | from __future__ import absolute_import
import logging
import itertools
from operator import attrgetter
from flask import request, abort, g
from flask.views import MethodView
from huskar_api.models.webhook import Webhook
from huskar_api.models.audit import action_types
from huskar_api.models.auth import Application, Authority
from huskar_api.service.admin.application_auth import (
check_application_auth, check_application)
from .utils import login_required, api_response, minimal_mode_incompatible
from .schema import webhook_schema, validate_fields
logger = logging.getLogger(__name__)
class WebhookView(MethodView):
@login_required
@minimal_mode_incompatible
def get(self):
webhooks = Webhook.get_all()
webhook_list = [{
'webhook_id': webhook.id,
'webhook_url': webhook.url,
'webhook_type': webhook.hook_type
} for webhook in webhooks]
return api_response(data={'webhook_list': webhook_list})
@login_required
@minimal_mode_incompatible
def post(self):
webhook_type = request.args.get('webhook_type', default=0, type=int)
self._check_authority(webhook_type)
data = request.get_json() or {}
validate_fields(webhook_schema, data, partial=False)
if webhook_type == Webhook.TYPE_UNIVERSAL:
webhook = Webhook.create(data['webhook_url'], webhook_type)
return api_response()
application_name = request.args['application_name']
application = Application.get_by_name(application_name)
webhook = Webhook.create(data['webhook_url'], webhook_type)
for action_name in data.get('event_list', []):
action_type = getattr(action_types, action_name)
webhook.subscribe(application.id, action_type)
return api_response()
def _check_authority(self, webhook_type):
if webhook_type == Webhook.TYPE_UNIVERSAL:
g.auth.require_admin('only admin can add universal webhook')
else:
application_name = request.args['application_name']
check_application_auth(application_name, Authority.WRITE)
class WebhookInstanceView(MethodView):
@login_required
@minimal_mode_incompatible
def get(self, webhook_id):
webhook = self._get_webhook_or_404(webhook_id)
if not webhook.is_normal:
return api_response(data={
'webhook_id': webhook.id,
'webhook_url': webhook.url,
'webhook_type': webhook.hook_type,
'event_list': []
})
application_name = request.args['application_name']
check_application_auth(application_name, Authority.READ)
application = Application.get_by_name(application_name)
subscriptions = webhook.get_multi_subscriptions(application.id)
data = {
'webhook_id': webhook.id,
'webhook_url': webhook.url,
'webhook_type': webhook.hook_type,
'event_list': [action_types[x.action_type] for x in subscriptions]
}
return api_response(data=data)
@login_required
@minimal_mode_incompatible
def put(self, webhook_id):
webhook = self._get_webhook_or_404(webhook_id)
self._check_authority(webhook)
data = request.get_json() or {}
validate_fields(webhook_schema, data, partial=False)
if not webhook.is_normal:
webhook.update_url(data['webhook_url'])
return api_response()
application_name = request.args['application_name']
application = Application.get_by_name(application_name)
webhook.batch_unsubscribe(application.id)
webhook.update_url(data['webhook_url'])
for action_name in data.get('event_list', []):
action_type = getattr(action_types, action_name)
webhook.subscribe(application.id, action_type)
return api_response()
@login_required
@minimal_mode_incompatible
def delete(self, webhook_id):
webhook = self._get_webhook_or_404(webhook_id)
self._check_authority(webhook.hook_type)
if webhook.is_normal:
webhook = self._get_webhook_or_404(webhook_id)
webhook.batch_unsubscribe()
webhook.delete()
return api_response()
def _get_webhook_or_404(self, webhook_id):
instance = Webhook.get(webhook_id)
if not instance:
abort(404, 'Webhook not registered.')
return instance
def _check_authority(self, webhook_type):
if webhook_type == Webhook.TYPE_UNIVERSAL:
g.auth.require_admin('only admin can update universal webhook')
else:
application_name = request.args['application_name']
check_application_auth(application_name, Authority.WRITE)
class ApplicationWebhookView(MethodView):
@login_required
@minimal_mode_incompatible
def get(self, application_name):
application = check_application(application_name)
subscriptions = Webhook.search_subscriptions(
application_id=application.id)
groups = itertools.groupby(subscriptions, key=attrgetter('webhook_id'))
webhook_list = []
for webhook_id, group in groups:
webhook = Webhook.get(webhook_id)
webhook_list.append({
'webhook_id': webhook.id,
'webhook_url': webhook.url,
'webhook_type': webhook.hook_type,
'event_list': [action_types[x.action_type] for x in group]
})
return api_response(data={'webhook_list': webhook_list})
| true | true |
1c342e2d9df2872b510289962bb26ccf755e777c | 36,357 | py | Python | ios/dateparser/lib/python2.7/site-packages/jdatetime/__init__.py | mpercich/Calendarize | d658962d5d205f878afd9876cb64b5381964f112 | [
"MIT"
] | null | null | null | ios/dateparser/lib/python2.7/site-packages/jdatetime/__init__.py | mpercich/Calendarize | d658962d5d205f878afd9876cb64b5381964f112 | [
"MIT"
] | null | null | null | ios/dateparser/lib/python2.7/site-packages/jdatetime/__init__.py | mpercich/Calendarize | d658962d5d205f878afd9876cb64b5381964f112 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# jdatetime is (c) 2010-2011 Milad Rastian <eslashmili at gmail.com>.
# The jdatetime module was contributed to Python as of Python 2.7 and thus
# was licensed under the Python license. Same license applies to all files in
# the jdatetime package project.
from __future__ import unicode_literals
import datetime as py_datetime
import sys
from jdatetime.jalali import \
GregorianToJalali, JalaliToGregorian, j_days_in_month
import re as _re
import locale as _locale
__VERSION__ = "1.8.1"
# Bounds of the Jalali calendar years this library accepts.
MINYEAR = 1
MAXYEAR = 9377
# Re-export the stdlib helpers so callers can use jdatetime.timedelta etc.
timedelta = py_datetime.timedelta
tzinfo = py_datetime.tzinfo
if sys.version_info[0] >= 3:  # py3
    _int_types = (int,)
else:
    # Python 2: plain integers may also be ``long``.
    _int_types = (int, long)
# Locale name that switches month/weekday display names to Persian.
FA_LOCALE = 'fa_IR'
class time(py_datetime.time):
    """Thin subclass of :class:`datetime.time` whose repr carries the
    ``jdatetime`` prefix; all behavior is inherited from the stdlib."""

    def __repr__(self):
        # Same three fields, same order and %s formatting as before.
        fields = (self.hour, self.minute, self.second)
        return "jdatetime.time(%s, %s, %s)" % fields
class date(object):
    """date(year, month, day) --> Jalali (Iranian) calendar date object."""
    # English month / weekday display names (index 0 == Farvardin / Saturday).
    j_months_en = ['Farvardin',
                   'Ordibehesht',
                   'Khordad',
                   'Tir',
                   'Mordad',
                   'Shahrivar',
                   'Mehr',
                   'Aban',
                   'Azar',
                   'Dey',
                   'Bahman',
                   'Esfand']
    j_months_short_en = ['Far',
                         'Ord',
                         'Kho',
                         'Tir',
                         'Mor',
                         'Sha',
                         'Meh',
                         'Aba',
                         'Aza',
                         'Dey',
                         'Bah',
                         'Esf']
    j_weekdays_en = ['Saturday',
                     'Sunday',
                     'Monday',
                     'Tuesday',
                     'Wednesday',
                     'Thursday',
                     'Friday']
    j_weekdays_short_en = ['Sat',
                           'Sun',
                           'Mon',
                           'Tue',
                           'Wed',
                           'Thu',
                           'Fri']
    j_ampm_en = {'PM': 'PM', 'AM': 'AM'}
    # Persian variants; __init__ selects them when the locale is fa_IR.
    j_months_fa = [u'فروردین',
                   u'اردیبهشت',
                   u'خرداد',
                   u'تیر',
                   u'مرداد',
                   u'شهریور',
                   u'مهر',
                   u'آبان',
                   u'آذر',
                   u'دی',
                   u'بهمن',
                   u'اسفند']
    j_weekdays_fa = [u'شنبه',
                     u'یکشنبه',
                     u'دوشنبه',
                     u'سه شنبه',
                     u'چهارشنبه',
                     u'پنجشنبه',
                     u'جمعه']
    j_ampm_fa = {'PM': u'بعد از ظهر', 'AM': u'قبل از ظهر'}
    # Read-only accessors over the name-mangled backing fields set in __init__.
    @property
    def year(self):
        return self.__year
    @property
    def month(self):
        return self.__month
    @property
    def day(self):
        return self.__day
    # Class-level defaults for the private (name-mangled) date components.
    __year = 0
    __month = 0
    __day = 0
    def _check_arg(self, value):
        # True when *value* is an integer (int, or long on Python 2).
        if isinstance(value, _int_types):
            return True
        return False
    def __init__(self, year, month, day):
        """date(year, month, day) --> date object
        :raises TypeError: when a component is not an integer.
        :raises ValueError: when a component is outside its valid range.
        """
        if not (self._check_arg(year) and
                self._check_arg(month) and
                self._check_arg(day)):
            raise TypeError("an integer is required" + repr(type(year)))
        if year < MINYEAR or year > MAXYEAR:
            raise ValueError("year is out of range")
        self.__year = year
        if month < 1 or month > 12:
            raise ValueError("month must be in 1..12")
        self.__month = month
        if day < 1:
            raise ValueError("day is out of range for month")
        if self.__month == 12 and day == 30 and self.isleap():
            # for leap years it's ok to have 30 days in Esfand
            pass
        elif self.__month == 12 and day == 30 and not self.isleap():
            raise ValueError("day is out of range for month")
        elif day > j_days_in_month[self.__month - 1]:
            raise ValueError("day is out of range for month")
        self.__day = day
        # Bind the display-name tables per instance, honoring the process
        # locale at construction time.
        if self._is_fa_locale():
            self.j_months = self.j_months_fa
            self.j_months_short = self.j_months_fa
            self.j_weekdays = self.j_weekdays_fa
            self.j_weekdays_short = self.j_weekdays_fa
            self.j_ampm = self.j_ampm_fa
        else:
            self.j_months = self.j_months_en
            self.j_months_short = self.j_months_short_en
            self.j_weekdays = self.j_weekdays_en
            self.j_weekdays_short = self.j_weekdays_short_en
            self.j_ampm = self.j_ampm_en
    def _is_fa_locale(self):
        # Prefer the configured locale; consult the default locale only when
        # the current locale is completely unset (both fields None).
        if FA_LOCALE in _locale.getlocale():
            return True
        if None not in _locale.getlocale():
            return False
        if FA_LOCALE in _locale.getdefaultlocale():
            return True
        return False
    """The smallest possible difference between
    non-equal date objects, timedelta(days=1)."""
    resolution = timedelta(1)
    """The earliest representable date, date(MINYEAR, 1, 1)"""
    # min = date(MINYEAR, 1, 1)
    # TODO: fix error -- the name 'date' is not defined while the class body
    # is still executing, so min/max cannot be assigned here.
    """The latest representable date, date(MAXYEAR, 12, 31)."""
    # max = date(MAXYEAR, 12,29)
    def isleap(self):
        """check if year is leap year
        algorithm is based on http://en.wikipedia.org/wiki/Leap_year"""
        # 33-year cycle rule for the Jalali calendar.
        return self.year % 33 in (1, 5, 9, 13, 17, 22, 26, 30)
    def togregorian(self):
        """Convert current jalali date to gregorian and return datetime.date"""
        (y, m, d) = JalaliToGregorian(self.year,
                                      self.month,
                                      self.day).getGregorianList()
        return py_datetime.date(y, m, d)
    @staticmethod
    def fromgregorian(**kw):
        """Convert gregorian to jalali and return jdatetime.date
        jdatetime.date.fromgregorian(day=X,month=X,year=X)
        jdatetime.date.fromgregorian(date=datetime.date)
        """
        if 'date' in kw and isinstance(kw['date'], py_datetime.date):
            d = kw['date']
            (y, m, d) = GregorianToJalali(d.year,
                                          d.month,
                                          d.day).getJalaliList()
            return date(y, m, d)
        if 'day' in kw and 'month' in kw and 'year' in kw:
            (year, month, day) = (kw['year'], kw['month'], kw['day'])
            (y, m, d) = GregorianToJalali(year, month, day).getJalaliList()
            return date(y, m, d)
        error_msg = ["fromgregorian have to be be called"]
        error_msg += ["fromgregorian(day=X,month=X,year=X)"]
        error_msg += ["or"]
        error_msg += ["fromgregorian(date=datetime.date)"]
        raise ValueError(" ".join(error_msg))
    @staticmethod
    def today():
        """Current date or datetime: same as self.__class__.fromtimestamp(time.time())."""
        to = py_datetime.date.today()
        (y, m, d) = GregorianToJalali(to.year,
                                      to.month,
                                      to.day).getJalaliList()
        return date(y, m, d)
    @staticmethod
    def fromtimestamp(timestamp):
        # POSIX timestamp -> local Gregorian date -> Jalali date.
        d = py_datetime.date.fromtimestamp(timestamp)
        (y, m, d) = GregorianToJalali(d.year, d.month, d.day).getJalaliList()
        return date(y, m, d)
    def toordinal(self):
        """Return proleptic jalali ordinal. Farvardin 1 of year 1 which is equal to 622-3-21 of Gregorian."""
        d = self.togregorian()
        # 226894 shifts the Gregorian ordinal so that Farvardin 1 of year 1
        # (622-3-21 Gregorian, per the docstring) becomes ordinal 1.
        return d.toordinal() - 226894
    @staticmethod
    def fromordinal(ordinal):
        """int -> date corresponding to a proleptic Jalali ordinal. it starts from Farvardin 1 of year 1, which is equal to 622-3-21 of Gregorian"""
        if ordinal < 1:
            raise ValueError("ordinal must be >= 1")
        # Inverse of toordinal(): re-apply the 226894-day offset.
        d = py_datetime.date.fromordinal(226894 + ordinal)
        (y, m, d) = GregorianToJalali(d.year, d.month, d.day).getJalaliList()
        return date(y, m, d)
    def __repr__(self):
        return "jdatetime.date(%s, %s, %s)" % (self.year,
                                               self.month,
                                               self.day)
    def __str__(self):
        return self.strftime("%Y-%m-%d")
    def __add__(self, timedelta):
        """x.__add__(y) <==> x+y"""
        # NOTE(review): the parameter name shadows the module-level
        # ``timedelta`` alias inside this method.
        if not isinstance(timedelta, py_datetime.timedelta):
            raise TypeError(
                "unsupported operand type(s) for +: '%s' and '%s'" %
                (type(self), type(timedelta)))
        # Arithmetic is delegated to the Gregorian equivalent.
        gd = self.togregorian() + timedelta
        return date.fromgregorian(date=gd)
    def __sub__(self, other):
        """x.__sub__(y) <==> x-y"""
        if isinstance(other, py_datetime.timedelta):
            gd = self.togregorian() - other
            return date.fromgregorian(date=gd)
        if isinstance(other, date):
            # date - date yields a datetime.timedelta.
            return self.togregorian() - other.togregorian()
        # NOTE(review): this message interpolates the module-level
        # ``timedelta`` alias rather than ``type(other)`` -- looks like a bug.
        raise TypeError(
            "unsupported operand type(s) for -: '%s' and '%s'" %
            (type(self), type(timedelta)))
    def __radd__(self, timedelta):
        """x.__radd__(y) <==> y+x"""
        if not isinstance(timedelta, py_datetime.timedelta):
            raise TypeError(
                "unsupported operand type for +: '%s'" %
                (type(timedelta)))
        return self.__add__(timedelta)
    def __rsub__(self, other):
        """x.__rsub__(y) <==> y-x"""
        if isinstance(other, date):
            return self.__sub__(other)
        raise TypeError(
            "unsupported operand type for -: '%s'" %
            (type(other)))
    def __eq__(self, other_date):
        """x.__eq__(y) <==> x==y"""
        if other_date is None:
            return False
        if not isinstance(other_date, date):
            return False
        if self.year == other_date.year and \
                self.month == other_date.month and \
                self.day == other_date.day:
            return True
        return False
    def __ge__(self, other_date):
        """x.__ge__(y) <==> x>=y"""
        if not isinstance(other_date, date):
            raise TypeError(
                "unsupported operand type for >=: '%s'" %
                (type(other_date)))
        if self.year > other_date.year:
            return True
        elif self.year == other_date.year:
            if self.month > other_date.month:
                return True
            elif self.month == other_date.month and self.day >= other_date.day:
                return True
        return False
    def __gt__(self, other_date):
        """x.__gt__(y) <==> x>y"""
        if not isinstance(other_date, date):
            raise TypeError(
                "unsupported operand type for >: '%s'" %
                (type(other_date)))
        if self.year > other_date.year:
            return True
        elif self.year == other_date.year:
            if self.month > other_date.month:
                return True
            elif self.month >= other_date.month and self.day > other_date.day:
                return True
        return False
    def __le__(self, other_date):
        """x.__le__(y) <==> x<=y"""
        if not isinstance(other_date, date):
            raise TypeError(
                "unsupported operand type for <=: '%s'" %
                (type(other_date)))
        return not self.__gt__(other_date)
    def __lt__(self, other_date):
        """x.__lt__(y) <==> x<y"""
        if not isinstance(other_date, date):
            raise TypeError(
                "unsupported operand type for <: '%s'" %
                (type(other_date)))
        return not self.__ge__(other_date)
    def __ne__(self, other_date):
        """x.__ne__(y) <==> x!=y"""
        if other_date is None:
            return True
        if not isinstance(other_date, date):
            return True
        return not self.__eq__(other_date)
    def __hash__(self):
        """x.__hash__() <==> hash(x)"""
        # Hash via the Gregorian equivalent so equal dates hash equally.
        gd = self.togregorian()
        return gd.__hash__()
    def ctime(self):
        """Return ctime() style string."""
        return self.strftime("%c")
    def replace(self, year=0, month=0, day=0):
        """Return date with new specified fields."""
        # 0 acts as the "keep current value" sentinel; 0 is never a valid
        # year/month/day in this calendar.
        new_year = self.year
        new_month = self.month
        new_day = self.day
        if year != 0:
            new_year = year
        if month != 0:
            new_month = month
        if day != 0:
            new_day = day
        return date(new_year, new_month, new_day)
    def yday(self):
        """return day of year"""
        # 1-based: sum the lengths of the preceding months, then add day.
        day = 0
        for i in range(0, self.month - 1):
            day = day + j_days_in_month[i]
        day = day + self.day
        return day
    def weekday(self):
        """Return the day of the week represented by the date.
        Shanbeh == 0 ... Jomeh == 6"""
        # Python's date.weekday() has Monday == 0 ... Sunday == 6; shift the
        # mapping so Saturday (Shanbeh) becomes 0.
        gd = self.togregorian()
        if gd.weekday() == 5:
            return 0
        if gd.weekday() == 6:
            return 1
        if gd.weekday() == 0:
            return 2
        if gd.weekday() == 1:
            return 3
        if gd.weekday() == 2:
            return 4
        if gd.weekday() == 3:
            return 5
        if gd.weekday() == 4:
            return 6
    def isoweekday(self):
        """Return the day of the week as an integer, where Shanbeh is 1 and Jomeh is 7"""
        return self.weekday() + 1
    def weeknumber(self):
        """Return week number """
        # NOTE(review): floor-divides the 1-based day-of-year by 7; this is a
        # 0-based week index, not ISO-8601 week numbering.
        return self.yday() // 7
    def isocalendar(self):
        """Return a 3-tuple, (ISO year, ISO week number, ISO weekday)."""
        return (self.year, self.weeknumber(), self.isoweekday())
    def isoformat(self):
        """Return a string representing the date in ISO 8601 format, 'YYYY-MM-DD'"""
        return self.strftime("%Y-%m-%d")
    def __format__(self, format):
        """
        PEP-3101
        Make string formatting (format()/str.format) work.
        """
        return self.strftime(format)
    # TODO: create jtime !
    # def timetuple(self):
    #     pass
    def strftime(self, format):
        """format -> strftime() style string."""
        # TODO: change stupid str.replace
        # formats = {
        #     '%a': lambda: self.j_weekdays_short[self.weekday()]
        # }
        # find all %[a-zA-Z] and call method if it in formats
        format = format.replace("%a", self.j_weekdays_short[self.weekday()])
        format = format.replace("%A", self.j_weekdays[self.weekday()])
        format = format.replace("%b", self.j_months_short[self.month - 1])
        format = format.replace("%B", self.j_months[self.month - 1])
        if '%c' in format:
            format = format.replace(
                "%c", self.strftime("%a %b %d %H:%M:%S %Y"))
        format = format.replace("%d", '%02.d' % (self.day))
        # NOTE(review): the bare ``except:`` clauses below let plain date
        # objects (which have no time fields) reuse this method from the
        # datetime subclass, but they also hide unrelated errors -- consider
        # narrowing them to AttributeError.
        try:
            format = format.replace("%f", '%06.d' % (self.microsecond))
        except:
            format = format.replace("%f", "000000")
        try:
            format = format.replace("%H", '%02.d' % (self.hour))
        except:
            format = format.replace("%H", '00')
        try:
            # NOTE(review): %I does not map hour 0 -> 12 or hour 12 -> 12;
            # midnight renders as "00" -- confirm against CPython strftime.
            if self.hour > 12:
                format = format.replace("%I", '%02.d' % (self.hour - 12))
            else:
                format = format.replace("%I", '%02.d' % (self.hour))
        except:
            format = format.replace("%I", '00')
        format = format.replace("%j", '%03.d' % (self.yday()))
        format = format.replace("%m", '%02.d' % (self.month))
        try:
            format = format.replace("%M", '%02.d' % (self.minute))
        except:
            format = format.replace("%M", '00')
        try:
            if self.hour > 12:
                format = format.replace("%p", self.j_ampm['PM'])
            else:
                format = format.replace("%p", self.j_ampm['AM'])
        except:
            format = format.replace("%p", self.j_ampm['AM'])
        try:
            format = format.replace("%S", '%02.d' % (self.second))
        except:
            format = format.replace("%S", '00')
        format = format.replace("%w", str(self.weekday()))
        format = format.replace("%W", str(self.weeknumber()))
        if '%x' in format:
            format = format.replace("%x", self.strftime("%m/%d/%y"))
        if '%X' in format:
            format = format.replace("%X", self.strftime('%H:%I:%S'))
        format = format.replace("%Y", str(self.year))
        format = format.replace("%y", str(self.year)[2:])
        format = format.replace("%Y", str(self.year))
        try:
            sign = "+"
            diff = self.tzinfo.utcoffset(self.tzinfo)
            diff_sec = diff.seconds
            if diff.days > 0 or diff.days < -1:
                raise ValueError(
                    "tzinfo.utcoffset() returned big time delta! ; must be in -1439 .. 1439")
            if diff.days != 0:
                sign = "-"
                diff_sec = (1 * 24 * 60 * 60) - diff_sec
            # NOTE(review): ``/`` is true division on Python 3, so these are
            # floats; ``%02.d`` still truncates them, but ``//`` would be
            # clearer.
            tmp_min = diff_sec / 60
            diff_hour = tmp_min / 60
            diff_min = tmp_min % 60
            format = format.replace(
                "%z", '%s%02.d%02.d' %
                (sign, diff_hour, diff_min))
        except AttributeError:
            # No tzinfo attached: %z renders as the empty string.
            format = format.replace("%z", '')
        try:
            format = format.replace("%Z", self.tzinfo.tzname(self.tzinfo))
        except AttributeError:
            format = format.replace("%Z", '')
        return format
class datetime(date):
"""datetime(year, month, day, [hour, [minute, [seconds, [microsecond, [tzinfo]]]]]) --> datetime objects"""
__time = None
def time(self):
"""Return time object with same time but with tzinfo=None."""
return time(self.hour, self.minute, self.second, self.microsecond)
def date(self):
"""Return date object with same year, month and day."""
return date(self.year, self.month, self.day)
def __init__(
self,
year,
month,
day,
hour=None,
minute=None,
second=None,
microsecond=None,
tzinfo=None):
date.__init__(self, year, month, day)
tmp_hour = 0
tmp_min = 0
tmp_sec = 0
tmp_micr = 0
if hour is not None:
tmp_hour = hour
if minute is not None:
tmp_min = minute
if second is not None:
tmp_sec = second
if microsecond is not None:
tmp_micr = microsecond
if not (self._check_arg(tmp_hour) and self._check_arg(tmp_min) and
self._check_arg(tmp_sec) and self._check_arg(tmp_micr)):
raise TypeError("an integer is required")
self.__time = time(tmp_hour, tmp_min, tmp_sec, tmp_micr, tzinfo)
def __repr__(self):
if self.__time.tzinfo is not None:
return "jdatetime.datetime(%s, %s, %s, %s, %s, %s, %s, tzinfo=%s)" % (
self.year,
self.month,
self.day, self.hour,
self.minute,
self.second,
self.microsecond,
self.tzinfo)
if self.__time.microsecond != 0:
return "jdatetime.datetime(%s, %s, %s, %s, %s, %s, %s)" % (
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond)
if self.__time.second != 0:
return "jdatetime.datetime(%s, %s, %s, %s, %s, %s)" % (
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second)
return "jdatetime.datetime(%s, %s, %s, %s, %s)" % (
self.year, self.month, self.day, self.hour, self.minute)
@staticmethod
def today():
"""Current date or datetime"""
return datetime.now()
@staticmethod
def now(tz=None):
"""[tz] -> new datetime with tz's local day and time."""
now_datetime = py_datetime.datetime.now(tz)
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond,
tz)
@staticmethod
def utcnow():
"""Return a new datetime representing UTC day and time."""
now_datetime = py_datetime.datetime.utcnow()
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond)
@staticmethod
def fromtimestamp(timestamp, tz=None):
"""timestamp[, tz] -> tz's local time from POSIX timestamp."""
now_datetime = py_datetime.datetime.fromtimestamp(timestamp, tz)
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond,
tz)
@staticmethod
def utcfromtimestamp(timestamp):
"""timestamp -> UTC datetime from a POSIX timestamp (like time.time())."""
now_datetime = py_datetime.datetime.fromtimestamp(timestamp)
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond)
@staticmethod
def combine(d=None, t=None, **kw):
"""date, time -> datetime with same date and time fields"""
c_date = None
if d is not None:
c_date = d
elif 'date' in kw:
c_date = kw['date']
c_time = None
if t is not None:
c_time = t
elif 'time' in kw:
c_time = kw['time']
if c_date is None:
raise TypeError("Required argument 'date' (pos 1) not found")
if c_time is None:
raise TypeError("Required argument 'date' (pos 2) not found")
if not isinstance(c_date, date):
raise TypeError(
"combine() argument 1 must be jdatetime.date, not %s" %
(type(c_date)))
if not isinstance(c_time, time):
raise TypeError(
"combine() argument 2 must be jdatetime.time, not %s" %
(type(c_time)))
return datetime(
c_date.year,
c_date.month,
c_date.day,
c_time.hour,
c_time.minute,
c_time.second,
c_time.microsecond,
c_time.tzinfo)
@staticmethod
def fromordinal(ordinal):
"""int -> date corresponding to a proleptic Jalali ordinal. it starts from Farvardin 1 of year 1, which is equal to 622-3-21 of Gregorian"""
if ordinal < 1:
raise ValueError("ordinal must be >= 1")
d = py_datetime.date.fromordinal(226894 + ordinal)
j_date = date.fromgregorian(date=d)
return datetime(j_date.year, j_date.month, j_date.day, 0, 0)
@property
def hour(self):
return self.__time.hour
@property
def minute(self):
return self.__time.minute
@property
def second(self):
return self.__time.second
@property
def microsecond(self):
return self.__time.microsecond
@property
def tzinfo(self):
return self.__time.tzinfo
@staticmethod
def strptime(date_string, format):
"""string, format -> new datetime parsed from a string (like time.strptime())"""
if '*' in format:
format = format.replace("*", "\*")
if '+' in format:
format = format.replace("+", "\+")
if '(' in format or ')' in format:
format = format.replace("(", "\(")
format = format.replace(")", "\)")
if '[' in format or ']' in format:
format = format.replace("[", "\[")
format = format.replace("]", "\]")
result_date = {
'day': 1,
'month': 1,
'year': 1279,
'microsecond': 0,
'second': 0,
'minute': 0,
'hour': 0}
apply_order = []
format_map = {
'%d': ['[0-9]{1,2}', 'day'],
'%f': ['[0-9]{1,6}', 'microsecond'],
'%H': ['[0-9]{1,2}', 'hour'],
'%m': ['[0-9]{1,2}', 'month'],
'%M': ['[0-9]{1,2}', 'minute'],
'%S': ['[0-9]{1,2}', 'second'],
'%Y': ['[0-9]{4,5}', 'year'],
}
regex = format
find = _re.compile("([%a-zA-Z]{2})")
for form in find.findall(format):
if form in format_map:
regex = regex.replace(form, "(" + format_map[form][0] + ")")
apply_order.append(format_map[form][1])
try:
p = _re.compile(regex)
if not p.match(date_string):
raise ValueError()
for i, el in enumerate(p.match(date_string).groups()):
result_date[apply_order[i]] = int(el)
return datetime(
result_date['year'],
result_date['month'],
result_date['day'],
result_date['hour'],
result_date['minute'],
result_date['second'])
except:
raise ValueError(
"time data '%s' does not match format '%s'" %
(date_string, format))
def replace(
self,
year=None,
month=None,
day=None,
hour=None,
minute=None,
second=None,
microsecond=None,
tzinfo=None):
"""Return datetime with new specified fields."""
t_year = self.year
if year is not None:
t_year = year
t_month = self.month
if month is not None:
t_month = month
t_day = self.day
if day is not None:
t_day = day
t_hour = self.hour
if hour is not None:
t_hour = hour
t_min = self.minute
if minute is not None:
t_min = minute
t_sec = self.second
if second is not None:
t_sec = second
t_mic = self.microsecond
if microsecond is not None:
t_mic = microsecond
t_tz = self.tzinfo
if tzinfo is not None:
t_tz = tzinfo
return datetime(
t_year,
t_month,
t_day,
t_hour,
t_min,
t_sec,
t_mic,
t_tz)
def __add__(self, timedelta):
"""x.__add__(y) <==> x+y"""
if isinstance(timedelta, py_datetime.timedelta):
return self.__calculation_on_timedelta('__add__', timedelta)
raise TypeError(
"unsupported operand type(s) for +: '%s' and '%s'" %
(type(self), type(timedelta)))
def __sub__(self, other):
"""x.__sub__(y) <==> x-y"""
if isinstance(other, py_datetime.timedelta):
return self.__calculation_on_timedelta('__sub__', other)
if isinstance(other, datetime):
return self.__calculation_on_datetime('__sub__', other)
raise TypeError(
"unsupported operand type(s) for -: '%s' and '%s'" %
(type(self), type(other)))
def __radd__(self, timedelta):
"""x.__radd__(y) <==> y+x"""
if isinstance(timedelta, py_datetime.timedelta):
return self.__add__(timedelta)
raise TypeError(
"unsupported operand type for +: '%s' and '%s'" %
(type(py_datetime.timedelta), type(self)))
def __rsub__(self, other):
"""x.__rsub__(y) <==> y-x"""
if isinstance(other, datetime):
return self.__calculation_on_datetime('__rsub__', other)
raise TypeError(
"unsupported operand type for -: '%s' and '%s'" %
(type(other), type(self)))
    def __calculation_on_timedelta(self, action, timedelta):
        """Apply the operator named by *action* (e.g. '__add__') with a
        timedelta operand: convert to Gregorian, operate there, convert back."""
        gdatetime = self.togregorian()
        new_gdatetime = getattr(gdatetime, action)(timedelta)
        return datetime.fromgregorian(datetime=new_gdatetime)
    def __calculation_on_datetime(self, action, another_datetime):
        """Apply the operator named by *action* between two jdatetime
        instances via their Gregorian equivalents.  The raw Gregorian result
        is returned (a datetime.timedelta for '__sub__'/'__rsub__')."""
        self_gdatetime = self.togregorian()
        another_gdatetime = another_datetime.togregorian()
        return getattr(self_gdatetime, action)(another_gdatetime)
def __eq__(self, other_datetime):
"""x.__eq__(y) <==> x==y"""
if other_datetime is None:
return False
if not isinstance(other_datetime, datetime):
return False
if self.year == other_datetime.year and \
self.month == other_datetime.month and \
self.day == other_datetime.day:
return self.timetz() == other_datetime.timetz(
) and self.microsecond == other_datetime.microsecond
return False
def __ge__(self, other_datetime):
"""x.__ge__(y) <==> x>=y"""
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for >=: '%s'" %
(type(other_datetime)))
return (self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond) >= \
(other_datetime.year,
other_datetime.month,
other_datetime.day,
other_datetime.hour,
other_datetime.minute,
other_datetime.second,
other_datetime.microsecond)
def __gt__(self, other_datetime):
"""x.__gt__(y) <==> x>y"""
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for >: '%s'" %
(type(other_datetime)))
return (self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond) > \
(other_datetime.year,
other_datetime.month,
other_datetime.day,
other_datetime.hour,
other_datetime.minute,
other_datetime.second,
other_datetime.microsecond)
    def __hash__(self):
        """x.__hash__() <==> hash(x)"""
        # Hash via the Gregorian equivalent so equal instants hash equally.
        gdt = self.togregorian()
        return gdt.__hash__()
def __le__(self, other_datetime):
"""x.__le__(y) <==> x<=y"""
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for <=: '%s'" %
(type(other_datetime)))
return not self.__gt__(other_datetime)
def __lt__(self, other_datetime):
"""x.__lt__(y) <==> x<y"""
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for <: '%s'" %
(type(other_datetime)))
return not self.__ge__(other_datetime)
def __ne__(self, other_datetime):
"""x.__ne__(y) <==> x!=y"""
if other_datetime is None:
return True
if not isinstance(other_datetime, datetime):
return True
return not self.__eq__(other_datetime)
@staticmethod
def fromgregorian(**kw):
"""Convert gregorian to jalali and return jdatetime.datetime
jdatetime.date.fromgregorian(day=X,month=X,year=X,[hour=X, [minute=X, [second=X, [tzinfo=X]]]])
jdatetime.date.fromgregorian(date=datetime.date)
jdatetime.date.fromgregorian(datetime=datetime.datetime)
"""
if 'date' in kw and isinstance(kw['date'], py_datetime.date):
d = kw['date']
(y, m, d) = GregorianToJalali(d.year,
d.month,
d.day).getJalaliList()
return datetime(y, m, d)
if 'datetime' in kw and isinstance(
kw['datetime'], py_datetime.datetime):
dt = kw['datetime']
(y, m, d) = GregorianToJalali(
dt.year, dt.month, dt.day).getJalaliList()
return datetime(
y,
m,
d,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo)
if 'day' in kw and 'month' in kw and 'year' in kw:
(year, month, day) = (kw['year'], kw['month'], kw['day'])
(y, m, d) = GregorianToJalali(year, month, day).getJalaliList()
hour = None
minute = None
second = None
microsecond = None
tzinfo = None
if 'hour' in kw:
hour = kw['hour']
if 'minute' in kw:
minute = kw['minute']
if 'second' in kw:
second = kw['second']
if 'microsecond' in kw:
microsecond = kw['microsecond']
if 'tzinfo' in kw:
tzinfo = kw['tzinfo']
return datetime(y, m, d, hour, minute, second, microsecond, tzinfo)
raise ValueError(
"fromgregorian have to called fromgregorian(day=X,month=X,year=X, [hour=X, [minute=X, [second=X, [tzinfo=X]]]]) or fromgregorian(date=datetime.date) or fromgregorian(datetime=datetime.datetime)")
    def togregorian(self):
        """Convert current jalali date to gregorian and return datetime.datetime"""
        # Convert the date part via the base-class conversion, then re-attach
        # the wrapped time-of-day (including tzinfo) unchanged.
        gdate = date.togregorian(self)
        return py_datetime.datetime.combine(gdate, self.__time)
def astimezone(self, tz):
"""tz -> convert to local time in new timezone tz"""
gdt = self.togregorian()
gdt = gdt.astimezone(tz)
return datetime.fromgregorian(datetime=gdt)
    def ctime(self):
        """Return ctime() style string."""
        # Delegates to strftime's "%c" composite directive.
        return self.strftime("%c")
# TODO: check what this def does !
def dst(self):
"""Return self.tzinfo.dst(self)"""
if self.tzinfo:
return self.tzinfo.dst(self)
return None
def isoformat(self):
"""[sep] -> string in ISO 8601 format, YYYY-MM-DDTHH:MM:SS[.mmmmmm][+HH:MM]."""
mil = self.strftime("%f")
if int(mil) == 0:
mil = ""
else:
mil = "." + mil
tz = self.strftime("%z")
return self.strftime("%Y-%m-%dT%H:%M:%S") + "%s%s" % (mil, tz)
    def timetuple(self):
        """Return time tuple, compatible with time.localtime().
        It returns Gregorian object!
        """
        # NOTE: the struct_time fields are Gregorian, not Jalali.
        dt = self.togregorian()
        return dt.timetuple()
    def timetz(self):
        """Return time object with same time and tzinfo."""
        # Exposes the internally stored jdatetime.time instance directly.
        return self.__time
def tzname(self):
"""Return self.tzinfo.tzname(self)"""
if self.tzinfo:
return self.tzinfo.tzname(self)
return None
def utcoffset(self):
"""Return self.tzinfo.utcoffset(self)."""
if self.tzinfo:
return self.tzinfo.utcoffset(self)
    def utctimetuple(self):
        """Return UTC time tuple, compatible with time.localtime().
        It returns Gregorian object !
        """
        # NOTE: the struct_time fields are Gregorian, not Jalali.
        dt = self.togregorian()
        return dt.utctimetuple()
def __str__(self):
mil = self.strftime("%f")
if int(mil) == 0:
mil = ""
else:
mil = "." + mil
tz = self.strftime("%z")
return self.strftime("%Y-%m-%d %H:%M:%S") + "%s%s" % (mil, tz)
| 32.724572 | 207 | 0.510191 |
from __future__ import unicode_literals
import datetime as py_datetime
import sys
from jdatetime.jalali import \
GregorianToJalali, JalaliToGregorian, j_days_in_month
import re as _re
import locale as _locale
__VERSION__ = "1.8.1"
MINYEAR = 1
MAXYEAR = 9377
timedelta = py_datetime.timedelta
tzinfo = py_datetime.tzinfo
if sys.version_info[0] >= 3:
_int_types = (int,)
else:
_int_types = (int, long)
FA_LOCALE = 'fa_IR'
class time(py_datetime.time):
    """Thin subclass of datetime.time whose repr names the jdatetime module."""
    def __repr__(self):
        return "jdatetime.time({0}, {1}, {2})".format(
            self.hour, self.minute, self.second)
class date(object):
j_months_en = ['Farvardin',
'Ordibehesht',
'Khordad',
'Tir',
'Mordad',
'Shahrivar',
'Mehr',
'Aban',
'Azar',
'Dey',
'Bahman',
'Esfand']
j_months_short_en = ['Far',
'Ord',
'Kho',
'Tir',
'Mor',
'Sha',
'Meh',
'Aba',
'Aza',
'Dey',
'Bah',
'Esf']
j_weekdays_en = ['Saturday',
'Sunday',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday']
j_weekdays_short_en = ['Sat',
'Sun',
'Mon',
'Tue',
'Wed',
'Thu',
'Fri']
j_ampm_en = {'PM': 'PM', 'AM': 'AM'}
j_months_fa = [u'فروردین',
u'اردیبهشت',
u'خرداد',
u'تیر',
u'مرداد',
u'شهریور',
u'مهر',
u'آبان',
u'آذر',
u'دی',
u'بهمن',
u'اسفند']
j_weekdays_fa = [u'شنبه',
u'یکشنبه',
u'دوشنبه',
u'سه شنبه',
u'چهارشنبه',
u'پنجشنبه',
u'جمعه']
j_ampm_fa = {'PM': u'بعد از ظهر', 'AM': u'قبل از ظهر'}
@property
def year(self):
return self.__year
@property
def month(self):
return self.__month
@property
def day(self):
return self.__day
__year = 0
__month = 0
__day = 0
def _check_arg(self, value):
if isinstance(value, _int_types):
return True
return False
def __init__(self, year, month, day):
if not (self._check_arg(year) and
self._check_arg(month) and
self._check_arg(day)):
raise TypeError("an integer is required" + repr(type(year)))
if year < MINYEAR or year > MAXYEAR:
raise ValueError("year is out of range")
self.__year = year
if month < 1 or month > 12:
raise ValueError("month must be in 1..12")
self.__month = month
if day < 1:
raise ValueError("day is out of range for month")
if self.__month == 12 and day == 30 and self.isleap():
pass
elif self.__month == 12 and day == 30 and not self.isleap():
raise ValueError("day is out of range for month")
elif day > j_days_in_month[self.__month - 1]:
raise ValueError("day is out of range for month")
self.__day = day
if self._is_fa_locale():
self.j_months = self.j_months_fa
self.j_months_short = self.j_months_fa
self.j_weekdays = self.j_weekdays_fa
self.j_weekdays_short = self.j_weekdays_fa
self.j_ampm = self.j_ampm_fa
else:
self.j_months = self.j_months_en
self.j_months_short = self.j_months_short_en
self.j_weekdays = self.j_weekdays_en
self.j_weekdays_short = self.j_weekdays_short_en
self.j_ampm = self.j_ampm_en
def _is_fa_locale(self):
if FA_LOCALE in _locale.getlocale():
return True
if None not in _locale.getlocale():
return False
if FA_LOCALE in _locale.getdefaultlocale():
return True
return False
resolution = timedelta(1)
# min = date(MINYEAR, 1, 1)
# TODO fixed errror: name 'date' is not defined
# max = date(MAXYEAR, 12,29)
def isleap(self):
return self.year % 33 in (1, 5, 9, 13, 17, 22, 26, 30)
def togregorian(self):
(y, m, d) = JalaliToGregorian(self.year,
self.month,
self.day).getGregorianList()
return py_datetime.date(y, m, d)
@staticmethod
def fromgregorian(**kw):
if 'date' in kw and isinstance(kw['date'], py_datetime.date):
d = kw['date']
(y, m, d) = GregorianToJalali(d.year,
d.month,
d.day).getJalaliList()
return date(y, m, d)
if 'day' in kw and 'month' in kw and 'year' in kw:
(year, month, day) = (kw['year'], kw['month'], kw['day'])
(y, m, d) = GregorianToJalali(year, month, day).getJalaliList()
return date(y, m, d)
error_msg = ["fromgregorian have to be be called"]
error_msg += ["fromgregorian(day=X,month=X,year=X)"]
error_msg += ["or"]
error_msg += ["fromgregorian(date=datetime.date)"]
raise ValueError(" ".join(error_msg))
@staticmethod
def today():
to = py_datetime.date.today()
(y, m, d) = GregorianToJalali(to.year,
to.month,
to.day).getJalaliList()
return date(y, m, d)
@staticmethod
def fromtimestamp(timestamp):
d = py_datetime.date.fromtimestamp(timestamp)
(y, m, d) = GregorianToJalali(d.year, d.month, d.day).getJalaliList()
return date(y, m, d)
def toordinal(self):
d = self.togregorian()
return d.toordinal() - 226894
@staticmethod
def fromordinal(ordinal):
if ordinal < 1:
raise ValueError("ordinal must be >= 1")
d = py_datetime.date.fromordinal(226894 + ordinal)
(y, m, d) = GregorianToJalali(d.year, d.month, d.day).getJalaliList()
return date(y, m, d)
def __repr__(self):
return "jdatetime.date(%s, %s, %s)" % (self.year,
self.month,
self.day)
def __str__(self):
return self.strftime("%Y-%m-%d")
def __add__(self, timedelta):
if not isinstance(timedelta, py_datetime.timedelta):
raise TypeError(
"unsupported operand type(s) for +: '%s' and '%s'" %
(type(self), type(timedelta)))
gd = self.togregorian() + timedelta
return date.fromgregorian(date=gd)
def __sub__(self, other):
if isinstance(other, py_datetime.timedelta):
gd = self.togregorian() - other
return date.fromgregorian(date=gd)
if isinstance(other, date):
return self.togregorian() - other.togregorian()
raise TypeError(
"unsupported operand type(s) for -: '%s' and '%s'" %
(type(self), type(timedelta)))
def __radd__(self, timedelta):
if not isinstance(timedelta, py_datetime.timedelta):
raise TypeError(
"unsupported operand type for +: '%s'" %
(type(timedelta)))
return self.__add__(timedelta)
def __rsub__(self, other):
if isinstance(other, date):
return self.__sub__(other)
raise TypeError(
"unsupported operand type for -: '%s'" %
(type(other)))
def __eq__(self, other_date):
if other_date is None:
return False
if not isinstance(other_date, date):
return False
if self.year == other_date.year and \
self.month == other_date.month and \
self.day == other_date.day:
return True
return False
def __ge__(self, other_date):
if not isinstance(other_date, date):
raise TypeError(
"unsupported operand type for >=: '%s'" %
(type(other_date)))
if self.year > other_date.year:
return True
elif self.year == other_date.year:
if self.month > other_date.month:
return True
elif self.month == other_date.month and self.day >= other_date.day:
return True
return False
def __gt__(self, other_date):
if not isinstance(other_date, date):
raise TypeError(
"unsupported operand type for >: '%s'" %
(type(other_date)))
if self.year > other_date.year:
return True
elif self.year == other_date.year:
if self.month > other_date.month:
return True
elif self.month >= other_date.month and self.day > other_date.day:
return True
return False
def __le__(self, other_date):
if not isinstance(other_date, date):
raise TypeError(
"unsupported operand type for <=: '%s'" %
(type(other_date)))
return not self.__gt__(other_date)
def __lt__(self, other_date):
if not isinstance(other_date, date):
raise TypeError(
"unsupported operand type for <: '%s'" %
(type(other_date)))
return not self.__ge__(other_date)
def __ne__(self, other_date):
if other_date is None:
return True
if not isinstance(other_date, date):
return True
return not self.__eq__(other_date)
def __hash__(self):
gd = self.togregorian()
return gd.__hash__()
def ctime(self):
return self.strftime("%c")
def replace(self, year=0, month=0, day=0):
new_year = self.year
new_month = self.month
new_day = self.day
if year != 0:
new_year = year
if month != 0:
new_month = month
if day != 0:
new_day = day
return date(new_year, new_month, new_day)
def yday(self):
day = 0
for i in range(0, self.month - 1):
day = day + j_days_in_month[i]
day = day + self.day
return day
def weekday(self):
gd = self.togregorian()
if gd.weekday() == 5:
return 0
if gd.weekday() == 6:
return 1
if gd.weekday() == 0:
return 2
if gd.weekday() == 1:
return 3
if gd.weekday() == 2:
return 4
if gd.weekday() == 3:
return 5
if gd.weekday() == 4:
return 6
    def isoweekday(self):
        """Return the day of week as an int: Saturday is 1 ... Friday is 7."""
        return self.weekday() + 1
    def weeknumber(self):
        """Week number within the year: simply day-of-year // 7 (0-based),
        not ISO week numbering."""
        return self.yday() // 7
def isocalendar(self):
return (self.year, self.weeknumber(), self.isoweekday())
def isoformat(self):
return self.strftime("%Y-%m-%d")
def __format__(self, format):
return self.strftime(format)
# TODO: create jtime !
# def timetuple(self):
# pass
def strftime(self, format):
# TODO: change stupid str.replace
# formats = {
# '%a': lambda: self.j_weekdays_short[self.weekday()]
# }
# find all %[a-zA-Z] and call method if it in formats
format = format.replace("%a", self.j_weekdays_short[self.weekday()])
format = format.replace("%A", self.j_weekdays[self.weekday()])
format = format.replace("%b", self.j_months_short[self.month - 1])
format = format.replace("%B", self.j_months[self.month - 1])
if '%c' in format:
format = format.replace(
"%c", self.strftime("%a %b %d %H:%M:%S %Y"))
format = format.replace("%d", '%02.d' % (self.day))
try:
format = format.replace("%f", '%06.d' % (self.microsecond))
except:
format = format.replace("%f", "000000")
try:
format = format.replace("%H", '%02.d' % (self.hour))
except:
format = format.replace("%H", '00')
try:
if self.hour > 12:
format = format.replace("%I", '%02.d' % (self.hour - 12))
else:
format = format.replace("%I", '%02.d' % (self.hour))
except:
format = format.replace("%I", '00')
format = format.replace("%j", '%03.d' % (self.yday()))
format = format.replace("%m", '%02.d' % (self.month))
try:
format = format.replace("%M", '%02.d' % (self.minute))
except:
format = format.replace("%M", '00')
try:
if self.hour > 12:
format = format.replace("%p", self.j_ampm['PM'])
else:
format = format.replace("%p", self.j_ampm['AM'])
except:
format = format.replace("%p", self.j_ampm['AM'])
try:
format = format.replace("%S", '%02.d' % (self.second))
except:
format = format.replace("%S", '00')
format = format.replace("%w", str(self.weekday()))
format = format.replace("%W", str(self.weeknumber()))
if '%x' in format:
format = format.replace("%x", self.strftime("%m/%d/%y"))
if '%X' in format:
format = format.replace("%X", self.strftime('%H:%I:%S'))
format = format.replace("%Y", str(self.year))
format = format.replace("%y", str(self.year)[2:])
format = format.replace("%Y", str(self.year))
try:
sign = "+"
diff = self.tzinfo.utcoffset(self.tzinfo)
diff_sec = diff.seconds
if diff.days > 0 or diff.days < -1:
raise ValueError(
"tzinfo.utcoffset() returned big time delta! ; must be in -1439 .. 1439")
if diff.days != 0:
sign = "-"
diff_sec = (1 * 24 * 60 * 60) - diff_sec
tmp_min = diff_sec / 60
diff_hour = tmp_min / 60
diff_min = tmp_min % 60
format = format.replace(
"%z", '%s%02.d%02.d' %
(sign, diff_hour, diff_min))
except AttributeError:
format = format.replace("%z", '')
try:
format = format.replace("%Z", self.tzinfo.tzname(self.tzinfo))
except AttributeError:
format = format.replace("%Z", '')
return format
class datetime(date):
__time = None
def time(self):
return time(self.hour, self.minute, self.second, self.microsecond)
def date(self):
return date(self.year, self.month, self.day)
def __init__(
self,
year,
month,
day,
hour=None,
minute=None,
second=None,
microsecond=None,
tzinfo=None):
date.__init__(self, year, month, day)
tmp_hour = 0
tmp_min = 0
tmp_sec = 0
tmp_micr = 0
if hour is not None:
tmp_hour = hour
if minute is not None:
tmp_min = minute
if second is not None:
tmp_sec = second
if microsecond is not None:
tmp_micr = microsecond
if not (self._check_arg(tmp_hour) and self._check_arg(tmp_min) and
self._check_arg(tmp_sec) and self._check_arg(tmp_micr)):
raise TypeError("an integer is required")
self.__time = time(tmp_hour, tmp_min, tmp_sec, tmp_micr, tzinfo)
def __repr__(self):
if self.__time.tzinfo is not None:
return "jdatetime.datetime(%s, %s, %s, %s, %s, %s, %s, tzinfo=%s)" % (
self.year,
self.month,
self.day, self.hour,
self.minute,
self.second,
self.microsecond,
self.tzinfo)
if self.__time.microsecond != 0:
return "jdatetime.datetime(%s, %s, %s, %s, %s, %s, %s)" % (
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond)
if self.__time.second != 0:
return "jdatetime.datetime(%s, %s, %s, %s, %s, %s)" % (
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second)
return "jdatetime.datetime(%s, %s, %s, %s, %s)" % (
self.year, self.month, self.day, self.hour, self.minute)
@staticmethod
def today():
return datetime.now()
@staticmethod
def now(tz=None):
now_datetime = py_datetime.datetime.now(tz)
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond,
tz)
@staticmethod
def utcnow():
now_datetime = py_datetime.datetime.utcnow()
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond)
@staticmethod
def fromtimestamp(timestamp, tz=None):
now_datetime = py_datetime.datetime.fromtimestamp(timestamp, tz)
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond,
tz)
@staticmethod
def utcfromtimestamp(timestamp):
now_datetime = py_datetime.datetime.fromtimestamp(timestamp)
now = date.fromgregorian(date=now_datetime.date())
return datetime(
now.year,
now.month,
now.day,
now_datetime.hour,
now_datetime.minute,
now_datetime.second,
now_datetime.microsecond)
@staticmethod
def combine(d=None, t=None, **kw):
c_date = None
if d is not None:
c_date = d
elif 'date' in kw:
c_date = kw['date']
c_time = None
if t is not None:
c_time = t
elif 'time' in kw:
c_time = kw['time']
if c_date is None:
raise TypeError("Required argument 'date' (pos 1) not found")
if c_time is None:
raise TypeError("Required argument 'date' (pos 2) not found")
if not isinstance(c_date, date):
raise TypeError(
"combine() argument 1 must be jdatetime.date, not %s" %
(type(c_date)))
if not isinstance(c_time, time):
raise TypeError(
"combine() argument 2 must be jdatetime.time, not %s" %
(type(c_time)))
return datetime(
c_date.year,
c_date.month,
c_date.day,
c_time.hour,
c_time.minute,
c_time.second,
c_time.microsecond,
c_time.tzinfo)
@staticmethod
def fromordinal(ordinal):
if ordinal < 1:
raise ValueError("ordinal must be >= 1")
d = py_datetime.date.fromordinal(226894 + ordinal)
j_date = date.fromgregorian(date=d)
return datetime(j_date.year, j_date.month, j_date.day, 0, 0)
@property
def hour(self):
return self.__time.hour
@property
def minute(self):
return self.__time.minute
@property
def second(self):
return self.__time.second
@property
def microsecond(self):
return self.__time.microsecond
@property
def tzinfo(self):
return self.__time.tzinfo
@staticmethod
def strptime(date_string, format):
if '*' in format:
format = format.replace("*", "\*")
if '+' in format:
format = format.replace("+", "\+")
if '(' in format or ')' in format:
format = format.replace("(", "\(")
format = format.replace(")", "\)")
if '[' in format or ']' in format:
format = format.replace("[", "\[")
format = format.replace("]", "\]")
result_date = {
'day': 1,
'month': 1,
'year': 1279,
'microsecond': 0,
'second': 0,
'minute': 0,
'hour': 0}
apply_order = []
format_map = {
'%d': ['[0-9]{1,2}', 'day'],
'%f': ['[0-9]{1,6}', 'microsecond'],
'%H': ['[0-9]{1,2}', 'hour'],
'%m': ['[0-9]{1,2}', 'month'],
'%M': ['[0-9]{1,2}', 'minute'],
'%S': ['[0-9]{1,2}', 'second'],
'%Y': ['[0-9]{4,5}', 'year'],
}
regex = format
find = _re.compile("([%a-zA-Z]{2})")
for form in find.findall(format):
if form in format_map:
regex = regex.replace(form, "(" + format_map[form][0] + ")")
apply_order.append(format_map[form][1])
try:
p = _re.compile(regex)
if not p.match(date_string):
raise ValueError()
for i, el in enumerate(p.match(date_string).groups()):
result_date[apply_order[i]] = int(el)
return datetime(
result_date['year'],
result_date['month'],
result_date['day'],
result_date['hour'],
result_date['minute'],
result_date['second'])
except:
raise ValueError(
"time data '%s' does not match format '%s'" %
(date_string, format))
def replace(
self,
year=None,
month=None,
day=None,
hour=None,
minute=None,
second=None,
microsecond=None,
tzinfo=None):
t_year = self.year
if year is not None:
t_year = year
t_month = self.month
if month is not None:
t_month = month
t_day = self.day
if day is not None:
t_day = day
t_hour = self.hour
if hour is not None:
t_hour = hour
t_min = self.minute
if minute is not None:
t_min = minute
t_sec = self.second
if second is not None:
t_sec = second
t_mic = self.microsecond
if microsecond is not None:
t_mic = microsecond
t_tz = self.tzinfo
if tzinfo is not None:
t_tz = tzinfo
return datetime(
t_year,
t_month,
t_day,
t_hour,
t_min,
t_sec,
t_mic,
t_tz)
def __add__(self, timedelta):
if isinstance(timedelta, py_datetime.timedelta):
return self.__calculation_on_timedelta('__add__', timedelta)
raise TypeError(
"unsupported operand type(s) for +: '%s' and '%s'" %
(type(self), type(timedelta)))
def __sub__(self, other):
if isinstance(other, py_datetime.timedelta):
return self.__calculation_on_timedelta('__sub__', other)
if isinstance(other, datetime):
return self.__calculation_on_datetime('__sub__', other)
raise TypeError(
"unsupported operand type(s) for -: '%s' and '%s'" %
(type(self), type(other)))
def __radd__(self, timedelta):
if isinstance(timedelta, py_datetime.timedelta):
return self.__add__(timedelta)
raise TypeError(
"unsupported operand type for +: '%s' and '%s'" %
(type(py_datetime.timedelta), type(self)))
def __rsub__(self, other):
if isinstance(other, datetime):
return self.__calculation_on_datetime('__rsub__', other)
raise TypeError(
"unsupported operand type for -: '%s' and '%s'" %
(type(other), type(self)))
def __calculation_on_timedelta(self, action, timedelta):
gdatetime = self.togregorian()
new_gdatetime = getattr(gdatetime, action)(timedelta)
return datetime.fromgregorian(datetime=new_gdatetime)
def __calculation_on_datetime(self, action, another_datetime):
self_gdatetime = self.togregorian()
another_gdatetime = another_datetime.togregorian()
return getattr(self_gdatetime, action)(another_gdatetime)
def __eq__(self, other_datetime):
if other_datetime is None:
return False
if not isinstance(other_datetime, datetime):
return False
if self.year == other_datetime.year and \
self.month == other_datetime.month and \
self.day == other_datetime.day:
return self.timetz() == other_datetime.timetz(
) and self.microsecond == other_datetime.microsecond
return False
def __ge__(self, other_datetime):
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for >=: '%s'" %
(type(other_datetime)))
return (self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond) >= \
(other_datetime.year,
other_datetime.month,
other_datetime.day,
other_datetime.hour,
other_datetime.minute,
other_datetime.second,
other_datetime.microsecond)
def __gt__(self, other_datetime):
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for >: '%s'" %
(type(other_datetime)))
return (self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond) > \
(other_datetime.year,
other_datetime.month,
other_datetime.day,
other_datetime.hour,
other_datetime.minute,
other_datetime.second,
other_datetime.microsecond)
def __hash__(self):
gdt = self.togregorian()
return gdt.__hash__()
def __le__(self, other_datetime):
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for <=: '%s'" %
(type(other_datetime)))
return not self.__gt__(other_datetime)
def __lt__(self, other_datetime):
if not isinstance(other_datetime, datetime):
raise TypeError(
"unsupported operand type for <: '%s'" %
(type(other_datetime)))
return not self.__ge__(other_datetime)
def __ne__(self, other_datetime):
if other_datetime is None:
return True
if not isinstance(other_datetime, datetime):
return True
return not self.__eq__(other_datetime)
@staticmethod
def fromgregorian(**kw):
if 'date' in kw and isinstance(kw['date'], py_datetime.date):
d = kw['date']
(y, m, d) = GregorianToJalali(d.year,
d.month,
d.day).getJalaliList()
return datetime(y, m, d)
if 'datetime' in kw and isinstance(
kw['datetime'], py_datetime.datetime):
dt = kw['datetime']
(y, m, d) = GregorianToJalali(
dt.year, dt.month, dt.day).getJalaliList()
return datetime(
y,
m,
d,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo)
if 'day' in kw and 'month' in kw and 'year' in kw:
(year, month, day) = (kw['year'], kw['month'], kw['day'])
(y, m, d) = GregorianToJalali(year, month, day).getJalaliList()
hour = None
minute = None
second = None
microsecond = None
tzinfo = None
if 'hour' in kw:
hour = kw['hour']
if 'minute' in kw:
minute = kw['minute']
if 'second' in kw:
second = kw['second']
if 'microsecond' in kw:
microsecond = kw['microsecond']
if 'tzinfo' in kw:
tzinfo = kw['tzinfo']
return datetime(y, m, d, hour, minute, second, microsecond, tzinfo)
raise ValueError(
"fromgregorian have to called fromgregorian(day=X,month=X,year=X, [hour=X, [minute=X, [second=X, [tzinfo=X]]]]) or fromgregorian(date=datetime.date) or fromgregorian(datetime=datetime.datetime)")
def togregorian(self):
gdate = date.togregorian(self)
return py_datetime.datetime.combine(gdate, self.__time)
def astimezone(self, tz):
gdt = self.togregorian()
gdt = gdt.astimezone(tz)
return datetime.fromgregorian(datetime=gdt)
def ctime(self):
return self.strftime("%c")
# TODO: check what this def does !
def dst(self):
if self.tzinfo:
return self.tzinfo.dst(self)
return None
def isoformat(self):
mil = self.strftime("%f")
if int(mil) == 0:
mil = ""
else:
mil = "." + mil
tz = self.strftime("%z")
return self.strftime("%Y-%m-%dT%H:%M:%S") + "%s%s" % (mil, tz)
def timetuple(self):
dt = self.togregorian()
return dt.timetuple()
def timetz(self):
return self.__time
def tzname(self):
if self.tzinfo:
return self.tzinfo.tzname(self)
return None
def utcoffset(self):
if self.tzinfo:
return self.tzinfo.utcoffset(self)
def utctimetuple(self):
dt = self.togregorian()
return dt.utctimetuple()
def __str__(self):
mil = self.strftime("%f")
if int(mil) == 0:
mil = ""
else:
mil = "." + mil
tz = self.strftime("%z")
return self.strftime("%Y-%m-%d %H:%M:%S") + "%s%s" % (mil, tz)
| true | true |
1c342e64716b7de1c47b236f6f33e3aabef1b660 | 5,148 | py | Python | purity_fb/purity_fb_1dot10/models/smtp.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot10/models/smtp.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot10/models/smtp.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.10 Python SDK
Pure Storage FlashBlade REST 1.10 Python SDK. Compatible with REST API versions 1.0 - 1.10. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.10
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Smtp(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'relay_host': 'str',
'sender_domain': 'str'
}
attribute_map = {
'name': 'name',
'relay_host': 'relay_host',
'sender_domain': 'sender_domain'
}
def __init__(self, name=None, relay_host=None, sender_domain=None): # noqa: E501
"""Smtp - a model defined in Swagger""" # noqa: E501
self._name = None
self._relay_host = None
self._sender_domain = None
self.discriminator = None
if name is not None:
self.name = name
if relay_host is not None:
self.relay_host = relay_host
if sender_domain is not None:
self.sender_domain = sender_domain
@property
def name(self):
"""Gets the name of this Smtp. # noqa: E501
The name of the object (e.g., a file system or snapshot) # noqa: E501
:return: The name of this Smtp. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Smtp.
The name of the object (e.g., a file system or snapshot) # noqa: E501
:param name: The name of this Smtp. # noqa: E501
:type: str
"""
self._name = name
@property
def relay_host(self):
"""Gets the relay_host of this Smtp. # noqa: E501
Relay server used as a forwarding point for email sent from the array # noqa: E501
:return: The relay_host of this Smtp. # noqa: E501
:rtype: str
"""
return self._relay_host
@relay_host.setter
def relay_host(self, relay_host):
"""Sets the relay_host of this Smtp.
Relay server used as a forwarding point for email sent from the array # noqa: E501
:param relay_host: The relay_host of this Smtp. # noqa: E501
:type: str
"""
self._relay_host = relay_host
@property
def sender_domain(self):
"""Gets the sender_domain of this Smtp. # noqa: E501
Domain name appended to alert email messages # noqa: E501
:return: The sender_domain of this Smtp. # noqa: E501
:rtype: str
"""
return self._sender_domain
@sender_domain.setter
def sender_domain(self, sender_domain):
"""Sets the sender_domain of this Smtp.
Domain name appended to alert email messages # noqa: E501
:param sender_domain: The sender_domain of this Smtp. # noqa: E501
:type: str
"""
self._sender_domain = sender_domain
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Smtp, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Smtp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.759777 | 251 | 0.578671 |
import pprint
import re
import six
class Smtp(object):
__test__ = False
swagger_types = {
'name': 'str',
'relay_host': 'str',
'sender_domain': 'str'
}
attribute_map = {
'name': 'name',
'relay_host': 'relay_host',
'sender_domain': 'sender_domain'
}
def __init__(self, name=None, relay_host=None, sender_domain=None):
self._name = None
self._relay_host = None
self._sender_domain = None
self.discriminator = None
if name is not None:
self.name = name
if relay_host is not None:
self.relay_host = relay_host
if sender_domain is not None:
self.sender_domain = sender_domain
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def relay_host(self):
return self._relay_host
@relay_host.setter
def relay_host(self, relay_host):
self._relay_host = relay_host
@property
def sender_domain(self):
return self._sender_domain
@sender_domain.setter
def sender_domain(self, sender_domain):
self._sender_domain = sender_domain
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Smtp, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Smtp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c342f10cd788b23e6e565736edc76b12871c407 | 10,337 | py | Python | app/api/services/game_services/service.py | Glyphack/tortuga | 42444a850c7587a01b75a2b13fae4a37e38ab79b | [
"MIT"
] | 5 | 2020-03-14T13:22:40.000Z | 2021-07-12T11:55:18.000Z | app/api/services/game_services/service.py | Glyphack/tortuga | 42444a850c7587a01b75a2b13fae4a37e38ab79b | [
"MIT"
] | 10 | 2020-04-01T16:28:49.000Z | 2021-02-27T09:06:07.000Z | app/api/services/game_services/service.py | Glyphack/tortuga | 42444a850c7587a01b75a2b13fae4a37e38ab79b | [
"MIT"
] | null | null | null | import random
from typing import List, Dict
from app.api.services import lobby_service
from app.models.event_cards import EventCardsManager
from app.models.game import Game, Player, Chests
from app.api.services.game_services.event_card_handlers import \
event_card_handlers
from app.models import votes
from app.models.votes import Votes
from app.schemas import game_schema
from app.schemas.auth import User
from app.schemas.game_schema import Team, KeptEventCard
game_statuses: Dict[str, Game] = {}
players_game: Dict[str, str] = {}
def _setup_vote_cards(game: Game):
for player_info in game.players_info.values():
if player_info.vote_cards is None:
player_info.vote_cards = []
for _ in range(3):
player_info.vote_cards.append(
votes.generate_vote_card()
)
game.vote_deck = votes.generate_vote_card()
game.votes = Votes()
def _get_available_actions(player: Player, game: Game):
available_actions = []
player_position = game.players_position[player.id]
if game.last_action:
if game.last_action.action_type == game_schema.Action.ActionType.CALL_FOR_AN_ATTACK:
if player.id in game.last_action.action_data.participating_players:
available_actions = [game_schema.Action.ActionType.VOTE]
return available_actions
if (
player.id == game.last_action.action_data.which_captain
):
if game.last_action.action_data.state == game_schema.State.Success:
available_actions = [
game_schema.Action.ActionType.PUT_CHEST]
elif game.last_action.action_data.state == game_schema.State.InProgress:
available_actions = []
return available_actions
elif game.last_action.action_type == game_schema.Action.ActionType.CALL_FOR_BRAWL:
if player.id in game.last_action.action_data.participating_players:
available_actions = [game_schema.Action.ActionType.VOTE]
return available_actions
elif game.last_action.action_type == game_schema.Action.ActionType.CALL_FOR_A_MUTINY:
if player.id in game.last_action.action_data.participating_players:
available_actions = [game_schema.Action.ActionType.VOTE]
return available_actions
elif (
game.last_action.action_type == game_schema.Action.ActionType.REVEAL_EVENT_CARD and
game.last_action.action_data.player == player.id
):
if game.last_action.action_data.can_use:
available_actions.append(
game_schema.Action.ActionType.USE_EVENT_CARD
)
if game.last_action.action_data.can_keep:
available_actions.append(
game_schema.Action.ActionType.KEEP_EVENT_CARD
)
if available_actions:
return available_actions
elif (
game.last_action.action_type == game_schema.Action.ActionType.FORCE_ANOTHER_PLAYER_TO_CHOOSE_CARD
):
if game.last_action.action_data.forced_player == player.id:
return [game_schema.Action.ActionType.REVEAL_EVENT_CARD]
else:
return []
if player.id != game.turn:
return available_actions
if player.chests > 0:
available_actions = [game_schema.Action.ActionType.PUT_CHEST]
return available_actions
global_actions = [
game_schema.Action.ActionType.MOVE,
game_schema.Action.ActionType.VIEW_TWO_EVENT_CARDS,
game_schema.Action.ActionType.REVEAL_EVENT_CARD,
game_schema.Action.ActionType.FORCE_ANOTHER_PLAYER_TO_CHOOSE_CARD,
]
available_actions.extend(global_actions)
if player_position in [
game_schema.Positions.FD1, game_schema.Positions.JR1
]:
available_actions.extend([
game_schema.Action.ActionType.CALL_FOR_AN_ATTACK,
game_schema.Action.ActionType.MAROON_ANY_CREW_MATE_TO_TORTUGA,
])
if player_position in game.cabin_boy_slots:
available_actions.append(
game_schema.Action.ActionType.MOVE_TREASURE
)
if player_position == game_schema.Positions.TR1:
available_actions.append(
game_schema.Action.ActionType.CALL_FOR_BRAWL
)
if player_position in [game_schema.Positions.JR2,
game_schema.Positions.FD2]:
available_actions.append(
game_schema.Action.ActionType.CALL_FOR_A_MUTINY
)
if player.event_cards:
available_actions.append(
game_schema.Action.ActionType.USE_EVENT_CARD
)
return available_actions
def _generate_map(players: List[str]):
players_position: Dict[str, game_schema.Positions] = {}
shuffled_players = players.copy()
random.shuffle(shuffled_players)
positions = [e.value for e in game_schema.Positions]
jr_head = 10
fd_head = 1
for index, player in enumerate(shuffled_players):
if index % 2 == 0:
players_position[player] = positions[jr_head]
jr_head += 1
else:
players_position[player] = positions[fd_head]
fd_head += 1
return players_position
def _generate_chests_positions() -> Chests:
return Chests(
tr_en=1,
tr_fr=1,
sg_nt=4,
fd_en=0,
fd_fr=0,
jr_en=0,
jr_fr=0
)
def _give_treasure_to_captains(players_info: Dict[str, Player],
positions: Dict[str, game_schema.Positions]):
updated_players_info = players_info.copy()
for player, position in positions.items():
if position in [
game_schema.Positions.FD1.value,
game_schema.Positions.JR1.value,
]:
updated_players_info[player].chests += 1
return updated_players_info
def setup_event_cards_deck():
event_cards = EventCardsManager.get_all_slugs().copy()
random.shuffle(event_cards)
event_cards.remove("spanish-armada")
spanish_armada_place = random.randint(-4, -1)
event_cards.insert(spanish_armada_place, "spanish-armada")
return event_cards
def create_new_game(game_id: str, players: List[str], host: str) -> Game:
players_info: Dict[str, Player] = {}
players_copy = players.copy()
random.shuffle(players_copy)
if len(players_copy) % 2 != 0:
dutch = players_copy[0]
players_copy.remove(dutch)
dutch = Player(id=dutch, team=Team.DUTCH.value)
players_info[dutch.id] = dutch
players_game[dutch.id] = game_id
for index, player in enumerate(players_copy):
if index % 2 != 0:
player_team = Team.BRITAIN.value
else:
player_team = Team.FRANCE.value
players_info[player] = Player(id=player, team=player_team)
players_game[player] = game_id
players_positions = _generate_map(players)
chests_position = _generate_chests_positions()
players_info = _give_treasure_to_captains(players_info, players_positions)
event_cards = setup_event_cards_deck()
new_game = Game(
id=game_id,
players_info=players_info,
chests_position=chests_position,
players_position=players_positions,
event_cards=event_cards,
last_action=None,
is_over=False,
winner=None,
host=host,
)
_setup_vote_cards(new_game)
new_game.turn = new_game.get_jr_caption()
game_statuses[game_id] = new_game
return new_game
def get_player_game(username) -> Game:
game_id = players_game.get(username)
return game_statuses.get(game_id)
def leave_current_game(username):
game = get_player_game(username)
del players_game[username]
del game.players_position[username]
lobby_service.leave_current_lobby(
game.id, game_schema.User(username=username)
)
if len(game.players_position) == 0:
remove_game(game.id)
def get_player_info_in_game(game: Game, player_id: str) -> Player:
return game.players_info[player_id]
def remove_game(game_id: str):
del game_statuses[game_id]
def is_game_host(game: Game, player: str):
return game.host == player
def generate_game_schema_from_game(username: str):
game = get_player_game(username)
player_info = get_player_info_in_game(game, username)
game_status = game_schema.GameStatus(
players_position=game.players_position,
chests_position=game_schema.Chests(
tr_en=game.chests_position.tr_en,
tr_fr=game.chests_position.tr_fr,
fd_fr=game.chests_position.fd_fr,
fd_en=game.chests_position.fd_en,
jr_fr=game.chests_position.jr_fr,
jr_en=game.chests_position.jr_en,
sg_nt=game.chests_position.sg_nt,
),
player_game_info=game_schema.PlayerGameInfo(
team=player_info.team,
vote_cards=player_info.vote_cards,
event_cards=get_kept_event_cards(player_info, game),
seen_event_cards=player_info.seen_event_cards,
role=None,
available_actions=_get_available_actions(player_info, game),
chests=player_info.chests
),
event_cards_deck=game_schema.EventCardDeck(
count=game.get_event_cards_deck_count(),
selectable_cards=game.get_event_card_deck_selectable_cards()
),
last_action=game.last_action,
activities=game.activities,
is_over=game.is_over,
turn=User(username=game.turn),
)
if game.winner:
game_status.winner = game_schema.WinState(
winner_team=game.winner,
players_teams={
player: info.team for player, info in game.players_info.items()
}
)
return game_status
def get_kept_event_cards(player: Player, game: Game):
return [
KeptEventCard(
event_card=EventCardsManager.get(event_card_slug),
options=event_card_handlers[event_card_slug](game, player).options,
can_use=event_card_handlers[event_card_slug](game, player).can_use
) for event_card_slug in player.event_cards
]
| 36.017422 | 113 | 0.666054 | import random
from typing import List, Dict
from app.api.services import lobby_service
from app.models.event_cards import EventCardsManager
from app.models.game import Game, Player, Chests
from app.api.services.game_services.event_card_handlers import \
event_card_handlers
from app.models import votes
from app.models.votes import Votes
from app.schemas import game_schema
from app.schemas.auth import User
from app.schemas.game_schema import Team, KeptEventCard
game_statuses: Dict[str, Game] = {}
players_game: Dict[str, str] = {}
def _setup_vote_cards(game: Game):
for player_info in game.players_info.values():
if player_info.vote_cards is None:
player_info.vote_cards = []
for _ in range(3):
player_info.vote_cards.append(
votes.generate_vote_card()
)
game.vote_deck = votes.generate_vote_card()
game.votes = Votes()
def _get_available_actions(player: Player, game: Game):
available_actions = []
player_position = game.players_position[player.id]
if game.last_action:
if game.last_action.action_type == game_schema.Action.ActionType.CALL_FOR_AN_ATTACK:
if player.id in game.last_action.action_data.participating_players:
available_actions = [game_schema.Action.ActionType.VOTE]
return available_actions
if (
player.id == game.last_action.action_data.which_captain
):
if game.last_action.action_data.state == game_schema.State.Success:
available_actions = [
game_schema.Action.ActionType.PUT_CHEST]
elif game.last_action.action_data.state == game_schema.State.InProgress:
available_actions = []
return available_actions
elif game.last_action.action_type == game_schema.Action.ActionType.CALL_FOR_BRAWL:
if player.id in game.last_action.action_data.participating_players:
available_actions = [game_schema.Action.ActionType.VOTE]
return available_actions
elif game.last_action.action_type == game_schema.Action.ActionType.CALL_FOR_A_MUTINY:
if player.id in game.last_action.action_data.participating_players:
available_actions = [game_schema.Action.ActionType.VOTE]
return available_actions
elif (
game.last_action.action_type == game_schema.Action.ActionType.REVEAL_EVENT_CARD and
game.last_action.action_data.player == player.id
):
if game.last_action.action_data.can_use:
available_actions.append(
game_schema.Action.ActionType.USE_EVENT_CARD
)
if game.last_action.action_data.can_keep:
available_actions.append(
game_schema.Action.ActionType.KEEP_EVENT_CARD
)
if available_actions:
return available_actions
elif (
game.last_action.action_type == game_schema.Action.ActionType.FORCE_ANOTHER_PLAYER_TO_CHOOSE_CARD
):
if game.last_action.action_data.forced_player == player.id:
return [game_schema.Action.ActionType.REVEAL_EVENT_CARD]
else:
return []
if player.id != game.turn:
return available_actions
if player.chests > 0:
available_actions = [game_schema.Action.ActionType.PUT_CHEST]
return available_actions
global_actions = [
game_schema.Action.ActionType.MOVE,
game_schema.Action.ActionType.VIEW_TWO_EVENT_CARDS,
game_schema.Action.ActionType.REVEAL_EVENT_CARD,
game_schema.Action.ActionType.FORCE_ANOTHER_PLAYER_TO_CHOOSE_CARD,
]
available_actions.extend(global_actions)
if player_position in [
game_schema.Positions.FD1, game_schema.Positions.JR1
]:
available_actions.extend([
game_schema.Action.ActionType.CALL_FOR_AN_ATTACK,
game_schema.Action.ActionType.MAROON_ANY_CREW_MATE_TO_TORTUGA,
])
if player_position in game.cabin_boy_slots:
available_actions.append(
game_schema.Action.ActionType.MOVE_TREASURE
)
if player_position == game_schema.Positions.TR1:
available_actions.append(
game_schema.Action.ActionType.CALL_FOR_BRAWL
)
if player_position in [game_schema.Positions.JR2,
game_schema.Positions.FD2]:
available_actions.append(
game_schema.Action.ActionType.CALL_FOR_A_MUTINY
)
if player.event_cards:
available_actions.append(
game_schema.Action.ActionType.USE_EVENT_CARD
)
return available_actions
def _generate_map(players: List[str]):
players_position: Dict[str, game_schema.Positions] = {}
shuffled_players = players.copy()
random.shuffle(shuffled_players)
positions = [e.value for e in game_schema.Positions]
jr_head = 10
fd_head = 1
for index, player in enumerate(shuffled_players):
if index % 2 == 0:
players_position[player] = positions[jr_head]
jr_head += 1
else:
players_position[player] = positions[fd_head]
fd_head += 1
return players_position
def _generate_chests_positions() -> Chests:
return Chests(
tr_en=1,
tr_fr=1,
sg_nt=4,
fd_en=0,
fd_fr=0,
jr_en=0,
jr_fr=0
)
def _give_treasure_to_captains(players_info: Dict[str, Player],
positions: Dict[str, game_schema.Positions]):
updated_players_info = players_info.copy()
for player, position in positions.items():
if position in [
game_schema.Positions.FD1.value,
game_schema.Positions.JR1.value,
]:
updated_players_info[player].chests += 1
return updated_players_info
def setup_event_cards_deck():
event_cards = EventCardsManager.get_all_slugs().copy()
random.shuffle(event_cards)
event_cards.remove("spanish-armada")
spanish_armada_place = random.randint(-4, -1)
event_cards.insert(spanish_armada_place, "spanish-armada")
return event_cards
def create_new_game(game_id: str, players: List[str], host: str) -> Game:
players_info: Dict[str, Player] = {}
players_copy = players.copy()
random.shuffle(players_copy)
if len(players_copy) % 2 != 0:
dutch = players_copy[0]
players_copy.remove(dutch)
dutch = Player(id=dutch, team=Team.DUTCH.value)
players_info[dutch.id] = dutch
players_game[dutch.id] = game_id
for index, player in enumerate(players_copy):
if index % 2 != 0:
player_team = Team.BRITAIN.value
else:
player_team = Team.FRANCE.value
players_info[player] = Player(id=player, team=player_team)
players_game[player] = game_id
players_positions = _generate_map(players)
chests_position = _generate_chests_positions()
players_info = _give_treasure_to_captains(players_info, players_positions)
event_cards = setup_event_cards_deck()
new_game = Game(
id=game_id,
players_info=players_info,
chests_position=chests_position,
players_position=players_positions,
event_cards=event_cards,
last_action=None,
is_over=False,
winner=None,
host=host,
)
_setup_vote_cards(new_game)
new_game.turn = new_game.get_jr_caption()
game_statuses[game_id] = new_game
return new_game
def get_player_game(username) -> Game:
game_id = players_game.get(username)
return game_statuses.get(game_id)
def leave_current_game(username):
game = get_player_game(username)
del players_game[username]
del game.players_position[username]
lobby_service.leave_current_lobby(
game.id, game_schema.User(username=username)
)
if len(game.players_position) == 0:
remove_game(game.id)
def get_player_info_in_game(game: Game, player_id: str) -> Player:
return game.players_info[player_id]
def remove_game(game_id: str):
del game_statuses[game_id]
def is_game_host(game: Game, player: str):
return game.host == player
def generate_game_schema_from_game(username: str):
game = get_player_game(username)
player_info = get_player_info_in_game(game, username)
game_status = game_schema.GameStatus(
players_position=game.players_position,
chests_position=game_schema.Chests(
tr_en=game.chests_position.tr_en,
tr_fr=game.chests_position.tr_fr,
fd_fr=game.chests_position.fd_fr,
fd_en=game.chests_position.fd_en,
jr_fr=game.chests_position.jr_fr,
jr_en=game.chests_position.jr_en,
sg_nt=game.chests_position.sg_nt,
),
player_game_info=game_schema.PlayerGameInfo(
team=player_info.team,
vote_cards=player_info.vote_cards,
event_cards=get_kept_event_cards(player_info, game),
seen_event_cards=player_info.seen_event_cards,
role=None,
available_actions=_get_available_actions(player_info, game),
chests=player_info.chests
),
event_cards_deck=game_schema.EventCardDeck(
count=game.get_event_cards_deck_count(),
selectable_cards=game.get_event_card_deck_selectable_cards()
),
last_action=game.last_action,
activities=game.activities,
is_over=game.is_over,
turn=User(username=game.turn),
)
if game.winner:
game_status.winner = game_schema.WinState(
winner_team=game.winner,
players_teams={
player: info.team for player, info in game.players_info.items()
}
)
return game_status
def get_kept_event_cards(player: Player, game: Game):
return [
KeptEventCard(
event_card=EventCardsManager.get(event_card_slug),
options=event_card_handlers[event_card_slug](game, player).options,
can_use=event_card_handlers[event_card_slug](game, player).can_use
) for event_card_slug in player.event_cards
]
| true | true |
1c342ff6f76ccebd538d0b8709c2672cdf7a8d9d | 3,145 | py | Python | image2emoticon/prepare_data.py | Tobigs-team/EmoGET_tobigticon | 4b13d6a780bbe269a9c285cc603b16b09d459edf | [
"Apache-2.0"
] | 11 | 2021-01-21T12:53:30.000Z | 2022-03-04T17:03:08.000Z | image2emoticon/prepare_data.py | Tobigs-team/EmoGET_tobigticon | 4b13d6a780bbe269a9c285cc603b16b09d459edf | [
"Apache-2.0"
] | null | null | null | image2emoticon/prepare_data.py | Tobigs-team/EmoGET_tobigticon | 4b13d6a780bbe269a9c285cc603b16b09d459edf | [
"Apache-2.0"
] | 8 | 2021-01-25T07:30:41.000Z | 2021-11-05T08:53:26.000Z | import argparse
from io import BytesIO
import multiprocessing
from functools import partial
from PIL import Image
import lmdb
from tqdm import tqdm
from torchvision import datasets
from torchvision.transforms import functional as trans_fn
import numpy as np
def resize_and_convert(img, size, resample, quality=100):
img = trans_fn.resize(img, size, resample)
img = trans_fn.center_crop(img, size)
buffer = BytesIO()
img.save(buffer, format="jpeg", quality=quality)
val = buffer.getvalue()
return val
def resize_multiple(
img, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS, quality=100
):
imgs = []
for size in sizes:
imgs.append(resize_and_convert(img, size, resample, quality))
return imgs
def resize_worker(img_file, sizes, resample):
i, file = img_file
img = Image.open(file)
img = img.convert("RGB")
out = resize_multiple(img, sizes=sizes, resample=resample)
return i, out
def prepare(
env, dataset, n_worker, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS
):
resize_fn = partial(resize_worker, sizes=sizes, resample=resample)
files = sorted(dataset.imgs, key=lambda x: x[0])
labels = {i: label for i, (file, label) in enumerate(files)} # label 고려
np.save(env.path()+"/labels", labels) # label을 np 파일 형식으로 save 해준다.
files = [(i, file) for i, (file, label) in enumerate(files)]
total = 0
with multiprocessing.Pool(n_worker) as pool:
for i, imgs in tqdm(pool.imap_unordered(resize_fn, files)):
for size, img in zip(sizes, imgs):
key = f"{size}-{str(i).zfill(5)}-{labels[i]}".encode("utf-8") # key값에 label도 부여
with env.begin(write=True) as txn:
txn.put(key, img)
total += 1
with env.begin(write=True) as txn:
txn.put("length".encode("utf-8"), str(total).encode("utf-8"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Preprocess images for model training")
parser.add_argument("--out", type=str, help="filename of the result lmdb dataset")
parser.add_argument(
"--size",
type=str,
default="128,256,512,1024",
help="resolutions of images for the dataset",
)
parser.add_argument(
"--n_worker",
type=int,
default=8,
help="number of workers for preparing dataset",
)
parser.add_argument(
"--resample",
type=str,
default="lanczos",
help="resampling methods for resizing images",
)
parser.add_argument("path", type=str, help="path to the image dataset")
args = parser.parse_args()
resample_map = {"lanczos": Image.LANCZOS, "bilinear": Image.BILINEAR}
resample = resample_map[args.resample]
sizes = [int(s.strip()) for s in args.size.split(",")]
print(f"Make dataset of image sizes:", ", ".join(str(s) for s in sizes))
imgset = datasets.ImageFolder(args.path)
with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
prepare(env, imgset, args.n_worker, sizes=sizes, resample=resample)
| 29.669811 | 95 | 0.644197 | import argparse
from io import BytesIO
import multiprocessing
from functools import partial
from PIL import Image
import lmdb
from tqdm import tqdm
from torchvision import datasets
from torchvision.transforms import functional as trans_fn
import numpy as np
def resize_and_convert(img, size, resample, quality=100):
img = trans_fn.resize(img, size, resample)
img = trans_fn.center_crop(img, size)
buffer = BytesIO()
img.save(buffer, format="jpeg", quality=quality)
val = buffer.getvalue()
return val
def resize_multiple(
img, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS, quality=100
):
imgs = []
for size in sizes:
imgs.append(resize_and_convert(img, size, resample, quality))
return imgs
def resize_worker(img_file, sizes, resample):
i, file = img_file
img = Image.open(file)
img = img.convert("RGB")
out = resize_multiple(img, sizes=sizes, resample=resample)
return i, out
def prepare(
env, dataset, n_worker, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS
):
resize_fn = partial(resize_worker, sizes=sizes, resample=resample)
files = sorted(dataset.imgs, key=lambda x: x[0])
labels = {i: label for i, (file, label) in enumerate(files)}
np.save(env.path()+"/labels", labels)
files = [(i, file) for i, (file, label) in enumerate(files)]
total = 0
with multiprocessing.Pool(n_worker) as pool:
for i, imgs in tqdm(pool.imap_unordered(resize_fn, files)):
for size, img in zip(sizes, imgs):
key = f"{size}-{str(i).zfill(5)}-{labels[i]}".encode("utf-8")
with env.begin(write=True) as txn:
txn.put(key, img)
total += 1
with env.begin(write=True) as txn:
txn.put("length".encode("utf-8"), str(total).encode("utf-8"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Preprocess images for model training")
parser.add_argument("--out", type=str, help="filename of the result lmdb dataset")
parser.add_argument(
"--size",
type=str,
default="128,256,512,1024",
help="resolutions of images for the dataset",
)
parser.add_argument(
"--n_worker",
type=int,
default=8,
help="number of workers for preparing dataset",
)
parser.add_argument(
"--resample",
type=str,
default="lanczos",
help="resampling methods for resizing images",
)
parser.add_argument("path", type=str, help="path to the image dataset")
args = parser.parse_args()
resample_map = {"lanczos": Image.LANCZOS, "bilinear": Image.BILINEAR}
resample = resample_map[args.resample]
sizes = [int(s.strip()) for s in args.size.split(",")]
print(f"Make dataset of image sizes:", ", ".join(str(s) for s in sizes))
imgset = datasets.ImageFolder(args.path)
with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
prepare(env, imgset, args.n_worker, sizes=sizes, resample=resample)
| true | true |
1c3431e073f430407c8ba92511837ef293c7f6b3 | 1,097 | py | Python | utils/politeness.py | petarGitNik/reddit-image-downloader | e38ddaf225a47d85a0d91785eb22b80c42e886dc | [
"MIT"
] | 3 | 2019-03-29T22:09:13.000Z | 2019-05-24T07:58:52.000Z | utils/politeness.py | petarGitNik/reddit-image-downloader | e38ddaf225a47d85a0d91785eb22b80c42e886dc | [
"MIT"
] | null | null | null | utils/politeness.py | petarGitNik/reddit-image-downloader | e38ddaf225a47d85a0d91785eb22b80c42e886dc | [
"MIT"
] | 1 | 2020-07-13T14:56:14.000Z | 2020-07-13T14:56:14.000Z | #!/usr/bin/python3
from math import log
from math import exp
from domainparsers.common import Domains
def get_politeness_factor(domain):
"""
Return politeness factor for reddit.com domain. Calculated according
to instructions given here:
https://stackoverflow.com/questions/8236046/typical-politeness-factor-for-a-web-crawler
Archived versions:
http://archive.is/AlBg0
https://web.archive.org/web/20170730001425/https://stackoverflow.com/questions/8236046/typical-politeness-factor-for-a-web-crawler
"""
if not domain or domain not in Domains.domains():
domain = 'other'
DOMAIN_AUTHORITY = {
Domains.REDDIT : 99,
Domains.IMGUR : 93,
Domains.GFYCAT : 70,
Domains.TUMBLR : 100, # tumblr allows crawl time of 1s https://www.tumblr.com/robots.txt
Domains.BLOGSPOT : 97,
'other' : 0,
}
domain_size = DOMAIN_AUTHORITY[domain] // 10
if domain_size <= 5: domain_size = 5
minimal_crawl_time = min(exp(2.52166863221 + -0.530185027289 * log(domain_size)), 5)
return minimal_crawl_time
| 31.342857 | 134 | 0.692799 |
from math import log
from math import exp
from domainparsers.common import Domains
def get_politeness_factor(domain):
if not domain or domain not in Domains.domains():
domain = 'other'
DOMAIN_AUTHORITY = {
Domains.REDDIT : 99,
Domains.IMGUR : 93,
Domains.GFYCAT : 70,
Domains.TUMBLR : 100,
Domains.BLOGSPOT : 97,
'other' : 0,
}
domain_size = DOMAIN_AUTHORITY[domain] // 10
if domain_size <= 5: domain_size = 5
minimal_crawl_time = min(exp(2.52166863221 + -0.530185027289 * log(domain_size)), 5)
return minimal_crawl_time
| true | true |
1c343246da538f3a1d8854fff17357ff6666fcc3 | 7,733 | py | Python | tests/test_planner.py | tylernorth/public-transit | e2430078557adf9d2ad03d794ea551a7b06ce145 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_planner.py | tylernorth/public-transit | e2430078557adf9d2ad03d794ea551a7b06ce145 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_planner.py | tylernorth/public-transit | e2430078557adf9d2ad03d794ea551a7b06ce145 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2017-03-17T11:54:09.000Z | 2022-01-21T05:07:16.000Z | from trip_planner.client import TripPlanner
MOCK_BART_STATION_LIST = {
'stations': {
'station': [
{
"name": "West Oakland",
"abbr": "WOAK",
"gtfs_latitude": "37.804872",
"gtfs_longitude": "-122.295140",
"address": "1451 7th Street",
"city": "Oakland",
"county": "alameda",
"state": "CA",
"zipcode": "94607"
}
]
}
}
MOCK_BART_STATION_DEPARTURES = {
"station": [
{
"name": "West Oakland",
"abbr": "WOAK",
"etd": [
{
"destination": "Antioch",
"abbreviation": "ANTC",
"limited": "0",
"estimate": [
{
"minutes": "7",
"platform": "2",
"direction": "North",
"length": "10",
"color": "YELLOW",
"hexcolor": "#ffff33",
"bikeflag": "1",
"delay": "0"
},
{
"minutes": "22",
"platform": "2",
"direction": "North",
"length": "10",
"color": "YELLOW",
"hexcolor": "#ffff33",
"bikeflag": "1",
"delay": "0"
},
]
}
]
}
]
}
MOCK_BART_ALL_DEPARTURES = {
'station': [
{
"name": "Dublin/Pleasanton",
"abbr": "DUBL",
"etd": [
{
"destination": "Daly City",
"abbreviation": "DALY",
"limited": "0",
"estimate": [
{
"minutes": "23",
"platform": "2",
"direction": "South",
"length": "10",
"color": "BLUE",
"hexcolor": "#0099cc",
"bikeflag": "1",
"delay": "0"
}
]
}
]
},
{
"name": "West Oakland",
"abbr": "WOAK",
"etd": [
{
"destination": "Antioch",
"abbreviation": "ANTC",
"limited": "0",
"estimate": [
{
"minutes": "3",
"platform": "2",
"direction": "North",
"length": "10",
"color": "YELLOW",
"hexcolor": "#ffff33",
"bikeflag": "1",
"delay": "0"
},
]
}
]
}
]
}
MOCK_ACTRANSIT_STOP_DEPARTURES = {
"bustime-response": {
"prd": [
{
"tmstmp": "20211230 15:39",
"typ": "A",
"stpnm": "Mission Blvd & Central Blvd",
"stpid": "51303",
"vid": "1421",
"dstp": 10993,
"rt": "99",
"rtdd": "99",
"rtdir": "To Hayward BART",
"des": "Hayward BART",
# TODO make this relative
"prdtm": "20211230 15:48",
"tablockid": "99006",
"tatripid": "7633993",
"origtatripno": "8681894",
"dly": False,
"dyn": 0,
"prdctdn": "9",
"zone": "",
"rid": "9919",
"tripid": "2513020",
"tripdyn": 0,
"schdtm": "20211230 15:51",
"geoid": "5282",
"seq": 47,
"psgld": "",
"stst": 54000,
"stsd": "2021-12-30"
},
]
}
}
MOCK_NEXTBUS_STOP_PREDICTION = {
"predictions": [
{
"agencyTitle": "San Francisco Muni",
"routeTitle": "38-Geary",
"routeTag": "38",
"stopTitle": "Market St & Montgomery St",
"stopTag": "5684",
"direction": {
"title": "Outbound to V. A. Hospital",
"prediction": {
# TODO make this relative
"epochTime": "1640921531578",
"seconds": "148",
"minutes": "2",
"isDeparture": "false",
"dirTag": "38___O_F10",
"vehicle": "6510",
"block": "3801",
"tripTag": "10341551"
}
},
}
]
}
def test_leg_bart(mocker):
    """A BART leg can be created, listed, shown, and deleted."""
    mocker.patch('trip_planner.client.bart.station_list', return_value=MOCK_BART_STATION_LIST)
    mocker.patch('trip_planner.client.bart.station_departures', return_value=MOCK_BART_STATION_DEPARTURES)
    planner = TripPlanner('')
    planner.leg_create('bart', 'woak')

    legs = planner.leg_list()
    assert len(legs) == 1
    assert legs[0]['stop_id'] == 'woak'
    assert legs[0]['stop_title'] == 'West Oakland'

    agency, departures = planner.leg_show(1)
    assert agency == 'bart'
    assert len(departures) == 1

    planner.leg_delete(1)
def test_leg_actransit(mocker):
    """An AC Transit leg can be created, listed, and shown."""
    mocker.patch('trip_planner.client.actransit.stop_predictions', return_value=MOCK_ACTRANSIT_STOP_DEPARTURES)
    planner = TripPlanner('')
    planner.leg_create('actransit', '51303')

    legs = planner.leg_list()
    assert len(legs) == 1
    assert legs[0]['stop_id'] == '51303'

    agency, departures = planner.leg_show(1)
    assert agency == 'actransit'
    assert len(departures) > 0
def test_leg_nextbus(mocker):
    """An SF Muni (NextBus) leg can be created, listed, and shown."""
    mocker.patch('trip_planner.client.nextbus.stop_prediction', return_value=MOCK_NEXTBUS_STOP_PREDICTION)
    planner = TripPlanner('')
    planner.leg_create('sf-muni', '15684')

    legs = planner.leg_list()
    assert len(legs) == 1
    assert legs[0]['stop_id'] == '15684'

    agency, departures = planner.leg_show(1)
    assert agency == 'sf-muni'
    assert len(departures) > 0
def test_trip(mocker):
    """End to end: three legs are bundled into a trip, shown, then deleted."""
    mocker.patch('trip_planner.client.bart.station_list', return_value=MOCK_BART_STATION_LIST)
    mocker.patch('trip_planner.client.bart.station_departures', return_value=MOCK_BART_STATION_DEPARTURES)
    mocker.patch('trip_planner.client.actransit.stop_predictions', return_value=MOCK_ACTRANSIT_STOP_DEPARTURES)
    mocker.patch('trip_planner.client.nextbus.stop_prediction', return_value=MOCK_NEXTBUS_STOP_PREDICTION)
    client = TripPlanner('')
    client.leg_create('bart', 'woak')
    client.leg_create('actransit', '51303')
    client.leg_create('sf-muni', '15684')

    client.trip_create('testing', [1, 2, 3])
    trip_list = client.trip_list()
    assert len(trip_list) == 1
    assert len(trip_list[0]['legs']) == 3

    mocker.patch('trip_planner.client.bart.station_departures', return_value=MOCK_BART_ALL_DEPARTURES)
    mocker.patch('trip_planner.client.nextbus.stop_multiple_predictions', return_value=MOCK_NEXTBUS_STOP_PREDICTION)
    # Exercised for side effects only; the original bound the return value to
    # an unused local (`result`), which has been dropped.
    client.trip_show(1)
    client.trip_delete(trip_list[0]['id'])
MOCK_BART_STATION_LIST = {
'stations': {
'station': [
{
"name": "West Oakland",
"abbr": "WOAK",
"gtfs_latitude": "37.804872",
"gtfs_longitude": "-122.295140",
"address": "1451 7th Street",
"city": "Oakland",
"county": "alameda",
"state": "CA",
"zipcode": "94607"
}
]
}
}
MOCK_BART_STATION_DEPARTURES = {
"station": [
{
"name": "West Oakland",
"abbr": "WOAK",
"etd": [
{
"destination": "Antioch",
"abbreviation": "ANTC",
"limited": "0",
"estimate": [
{
"minutes": "7",
"platform": "2",
"direction": "North",
"length": "10",
"color": "YELLOW",
"hexcolor": "#ffff33",
"bikeflag": "1",
"delay": "0"
},
{
"minutes": "22",
"platform": "2",
"direction": "North",
"length": "10",
"color": "YELLOW",
"hexcolor": "#ffff33",
"bikeflag": "1",
"delay": "0"
},
]
}
]
}
]
}
MOCK_BART_ALL_DEPARTURES = {
'station': [
{
"name": "Dublin/Pleasanton",
"abbr": "DUBL",
"etd": [
{
"destination": "Daly City",
"abbreviation": "DALY",
"limited": "0",
"estimate": [
{
"minutes": "23",
"platform": "2",
"direction": "South",
"length": "10",
"color": "BLUE",
"hexcolor": "#0099cc",
"bikeflag": "1",
"delay": "0"
}
]
}
]
},
{
"name": "West Oakland",
"abbr": "WOAK",
"etd": [
{
"destination": "Antioch",
"abbreviation": "ANTC",
"limited": "0",
"estimate": [
{
"minutes": "3",
"platform": "2",
"direction": "North",
"length": "10",
"color": "YELLOW",
"hexcolor": "#ffff33",
"bikeflag": "1",
"delay": "0"
},
]
}
]
}
]
}
MOCK_ACTRANSIT_STOP_DEPARTURES = {
"bustime-response": {
"prd": [
{
"tmstmp": "20211230 15:39",
"typ": "A",
"stpnm": "Mission Blvd & Central Blvd",
"stpid": "51303",
"vid": "1421",
"dstp": 10993,
"rt": "99",
"rtdd": "99",
"rtdir": "To Hayward BART",
"des": "Hayward BART",
"prdtm": "20211230 15:48",
"tablockid": "99006",
"tatripid": "7633993",
"origtatripno": "8681894",
"dly": False,
"dyn": 0,
"prdctdn": "9",
"zone": "",
"rid": "9919",
"tripid": "2513020",
"tripdyn": 0,
"schdtm": "20211230 15:51",
"geoid": "5282",
"seq": 47,
"psgld": "",
"stst": 54000,
"stsd": "2021-12-30"
},
]
}
}
MOCK_NEXTBUS_STOP_PREDICTION = {
"predictions": [
{
"agencyTitle": "San Francisco Muni",
"routeTitle": "38-Geary",
"routeTag": "38",
"stopTitle": "Market St & Montgomery St",
"stopTag": "5684",
"direction": {
"title": "Outbound to V. A. Hospital",
"prediction": {
"epochTime": "1640921531578",
"seconds": "148",
"minutes": "2",
"isDeparture": "false",
"dirTag": "38___O_F10",
"vehicle": "6510",
"block": "3801",
"tripTag": "10341551"
}
},
}
]
}
def test_leg_bart(mocker):
mocker.patch('trip_planner.client.bart.station_list', return_value=MOCK_BART_STATION_LIST)
mocker.patch('trip_planner.client.bart.station_departures', return_value=MOCK_BART_STATION_DEPARTURES)
client = TripPlanner('')
client.leg_create('bart', 'woak')
leg_list = client.leg_list()
assert len(leg_list) == 1
assert leg_list[0]['stop_id'] == 'woak'
assert leg_list[0]['stop_title'] == 'West Oakland'
agency, departures = client.leg_show(1)
assert agency == 'bart'
assert len(departures) == 1
client.leg_delete(1)
def test_leg_actransit(mocker):
mocker.patch('trip_planner.client.actransit.stop_predictions', return_value=MOCK_ACTRANSIT_STOP_DEPARTURES)
client = TripPlanner('')
client.leg_create('actransit', '51303')
leg_list = client.leg_list()
assert len(leg_list) == 1
assert leg_list[0]['stop_id'] == '51303'
agency, departures = client.leg_show(1)
assert agency == 'actransit'
assert len(departures) > 0
def test_leg_nextbus(mocker):
mocker.patch('trip_planner.client.nextbus.stop_prediction', return_value=MOCK_NEXTBUS_STOP_PREDICTION)
client = TripPlanner('')
client.leg_create('sf-muni', '15684')
leg_list = client.leg_list()
assert len(leg_list) == 1
assert leg_list[0]['stop_id'] == '15684'
agency, departures = client.leg_show(1)
assert agency == 'sf-muni'
assert len(departures) > 0
def test_trip(mocker):
mocker.patch('trip_planner.client.bart.station_list', return_value=MOCK_BART_STATION_LIST)
mocker.patch('trip_planner.client.bart.station_departures', return_value=MOCK_BART_STATION_DEPARTURES)
mocker.patch('trip_planner.client.actransit.stop_predictions', return_value=MOCK_ACTRANSIT_STOP_DEPARTURES)
mocker.patch('trip_planner.client.nextbus.stop_prediction', return_value=MOCK_NEXTBUS_STOP_PREDICTION)
client = TripPlanner('')
client.leg_create('bart', 'woak')
client.leg_create('actransit', '51303')
client.leg_create('sf-muni', '15684')
client.trip_create('testing', [1, 2, 3])
trip_list = client.trip_list()
assert len(trip_list) == 1
assert len(trip_list[0]['legs']) == 3
mocker.patch('trip_planner.client.bart.station_departures', return_value=MOCK_BART_ALL_DEPARTURES)
mocker.patch('trip_planner.client.nextbus.stop_multiple_predictions', return_value=MOCK_NEXTBUS_STOP_PREDICTION)
result = client.trip_show(1)
client.trip_delete(trip_list[0]['id']) | true | true |
1c34327b9840461dfff1bdc4c54eb17e155d6d86 | 3,618 | py | Python | darrow/settings.py | grrdsoto/darrow | cdf818bbea9d2eb715f8834a93ca91c8dff72872 | [
"MIT"
] | null | null | null | darrow/settings.py | grrdsoto/darrow | cdf818bbea9d2eb715f8834a93ca91c8dff72872 | [
"MIT"
] | 6 | 2020-06-05T19:57:14.000Z | 2021-09-22T18:04:37.000Z | darrow/settings.py | grrdsoto/darrow | cdf818bbea9d2eb715f8834a93ca91c8dff72872 | [
"MIT"
] | null | null | null | """
Django settings for darrow project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): os.environ.get returns None when SECRET_KEY is unset; Django
# then fails at startup with a less obvious error -- consider failing fast.
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['darrow-gstr410.herokuapp.com', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
    'chat',
    'channels',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise serves static files straight from the WSGI app (Heroku setup).
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'darrow.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
WSGI_APPLICATION = 'darrow.wsgi.application'
# ASGI entry point used by Django Channels (websocket routing).
ASGI_APPLICATION = 'darrow.routing.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
# Channels layer backed by a local Redis instance -- presumably used by the
# 'chat' app for message fan-out; verify the host/port for deployment.
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            "hosts": [('127.0.0.1', 6379)],
        },
    },
}
| 26.217391 | 91 | 0.691266 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG = True
ALLOWED_HOSTS = ['darrow-gstr410.herokuapp.com', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'chat',
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'darrow.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
WSGI_APPLICATION = 'darrow.wsgi.application'
ASGI_APPLICATION = 'darrow.routing.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
| true | true |
1c34339b65ab717b297a70f6ed3dbebf86844de9 | 2,838 | py | Python | setup.py | vsajip/sarge | 6200d310b2a0173026554cd2a9a48fa9374985c5 | [
"BSD-3-Clause"
] | 35 | 2015-03-09T20:41:02.000Z | 2022-03-12T10:29:06.000Z | setup.py | vsajip/sarge | 6200d310b2a0173026554cd2a9a48fa9374985c5 | [
"BSD-3-Clause"
] | 7 | 2019-10-23T12:34:37.000Z | 2022-01-14T18:52:36.000Z | setup.py | vsajip/sarge | 6200d310b2a0173026554cd2a9a48fa9374985c5 | [
"BSD-3-Clause"
] | 5 | 2015-11-07T17:20:39.000Z | 2021-10-03T19:31:16.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2021 Vinay M. Sajip. See LICENSE for licensing information.
#
# sarge: Subprocess Allegedly Rewards Good Encapsulation :-)
#
from distutils.core import setup, Command
import os
from os.path import join, dirname, abspath
import re
import sarge
class TestCommand(Command):
    """distutils command that runs the sarge test suite (`setup.py test`)."""

    user_options = []

    def run(self):
        # Imported lazily so that building/installing the package does not
        # require the test module to be importable. The original also did
        # `import sys` here, which was unused and has been removed.
        import unittest
        import test_sarge

        suite = unittest.TestLoader().loadTestsFromModule(test_sarge)
        unittest.TextTestRunner().run(suite)

    def initialize_options(self):
        # This command takes no options.
        pass

    def finalize_options(self):
        pass
def description():
    """Build the PyPI long description from README.rst.

    Extracts the body of the 'Overview' section and everything after the
    'Availability & Documentation' heading, and concatenates them.

    Raises:
        ValueError: if either section is missing from the README (the
            single-element unpacking of re.findall fails).
    """
    # Context manager guarantees the handle is closed even if the regex
    # extraction below raises; the original leaked the file on that path.
    with open(join(dirname(__file__), 'README.rst')) as f:
        read_me = f.read()
    regexp = r'Overview\n========\n(.*)Requirements '
    requires, = re.findall(regexp, read_me, re.DOTALL)
    regexp = r'Availability & Documentation\s*\n-----+\s*\n(.*)'
    avail, = re.findall(regexp, read_me, re.DOTALL)
    return requires + avail
# Package metadata. The long description is assembled from README.rst by
# description() above; `python setup.py test` is wired to TestCommand.
setup(
    name='sarge',
    description=('A wrapper for subprocess which provides command '
                 'pipeline functionality.'),
    long_description=description(),
    version=sarge.__version__,
    license='BSD',
    author='Vinay Sajip',
    author_email='vinay_sajip@yahoo.co.uk',
    maintainer='Vinay Sajip',
    maintainer_email='vinay_sajip@yahoo.co.uk',
    url='http://sarge.readthedocs.org/',
    download_url=('http://pypi.python.org/packages/source/s/sarge/'
                  'sarge-%s.tar.gz' % sarge.__version__),
    packages=['sarge'],
    keywords=['subprocess', 'wrapper', 'external', 'command'],
    platforms=['Any'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: MacOS X',
        'Environment :: Win32 (MS Windows)',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Shells',
    ],
    cmdclass={ 'test': TestCommand },
)
| 32.62069 | 80 | 0.617689 |
from distutils.core import setup, Command
import os
from os.path import join, dirname, abspath
import re
import sarge
class TestCommand(Command):
user_options = []
def run(self):
import sys
import unittest
import test_sarge
loader = unittest.TestLoader()
runner = unittest.TextTestRunner()
runner.run(loader.loadTestsFromModule(test_sarge))
def initialize_options(self):
pass
def finalize_options(self):
pass
def description():
f = open(join(dirname(__file__), 'README.rst'))
read_me = f.read()
f.close()
regexp = r'Overview\n========\n(.*)Requirements '
requires, = re.findall(regexp, read_me, re.DOTALL)
regexp = r'Availability & Documentation\s*\n-----+\s*\n(.*)'
avail, = re.findall(regexp, read_me, re.DOTALL)
return requires + avail
setup(
name='sarge',
description=('A wrapper for subprocess which provides command '
'pipeline functionality.'),
long_description=description(),
version=sarge.__version__,
license='BSD',
author='Vinay Sajip',
author_email='vinay_sajip@yahoo.co.uk',
maintainer='Vinay Sajip',
maintainer_email='vinay_sajip@yahoo.co.uk',
url='http://sarge.readthedocs.org/',
download_url=('http://pypi.python.org/packages/source/s/sarge/'
'sarge-%s.tar.gz' % sarge.__version__),
packages=['sarge'],
keywords=['subprocess', 'wrapper', 'external', 'command'],
platforms=['Any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Shells',
],
cmdclass={ 'test': TestCommand },
)
| true | true |
1c3433b0433f82f09778b10d863693ac5a55692c | 2,193 | py | Python | filync/filync.py | xsyu94/fiLync | 96c20624371cd04d3922eda6d14eedfca9a59ab1 | [
"MIT"
] | null | null | null | filync/filync.py | xsyu94/fiLync | 96c20624371cd04d3922eda6d14eedfca9a59ab1 | [
"MIT"
] | null | null | null | filync/filync.py | xsyu94/fiLync | 96c20624371cd04d3922eda6d14eedfca9a59ab1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
main class of the filync
Author: Xiansheng Yu
Data : 2021.05.18
'''
import os
import shutil
import filync.tools as tools
class filync:
    """One-way file synchronizer that mirrors *source_path* into *goal_path*.

    Files present only in the source tree (or whose recorded value differs)
    are copied over; files present only in the goal tree are deleted, and
    emptied goal directories are pruned afterwards.
    """

    def __init__(self,
                 source_path='./',
                 goal_path='./',
                 ignores=None) -> None:
        """Record the two roots and scan both trees.

        Args:
            source_path: Root of the tree to mirror from.
            goal_path: Root of the tree to mirror into.
            ignores: Directory names to skip while walking; defaults to
                ['.git', '.vscode'].
        """
        self.source_path = source_path
        self.goal_path = goal_path
        # Normalize both roots to end with '/' so relative paths can be
        # obtained in diff() by slicing off the root prefix.
        # Bug fix: the original used `self.source_path + '/'`, an expression
        # statement with no effect, so paths were never actually normalized.
        if (self.source_path[-1] != '/'):
            self.source_path += '/'
        if (self.goal_path[-1] != '/'):
            self.goal_path += '/'
        # Avoid the shared-mutable-default-argument pitfall.
        self.ignores = ['.git', '.vscode'] if ignores is None else ignores
        self.update()

    def update(self):
        """Re-scan both trees, refreshing the file/folder inventories."""
        self.source_files, self.source_folders = tools.path_walker(
            self.source_path, self.ignores)
        self.goal_files, self.goal_folders = tools.path_walker(
            self.goal_path, self.ignores)

    def diff(self):
        """Compute the sync plan from the current inventories.

        Returns:
            (new_files, del_files): ``new_files`` maps each source file that
            is missing or stale in the goal tree to its goal-side path;
            ``del_files`` lists goal files with no source counterpart.
        """
        self.new_files = {}
        self.del_files = []
        for file in sorted(self.source_files.keys()):
            goal_according = self.goal_path + file[len(self.source_path):]
            if (goal_according not in self.goal_files.keys()
                    or self.source_files[file] !=
                    self.goal_files[goal_according]):
                self.new_files[file] = goal_according
        for file in sorted(self.goal_files.keys()):
            source_according = self.source_path + file[len(self.goal_path):]
            if (source_according not in self.source_files.keys()):
                self.del_files.append(file)
        return self.new_files, self.del_files

    def sync(self):
        """Apply the plan from diff(): copy new/changed files, delete extras."""
        self.diff()
        for new_file in self.new_files.keys():
            goal_file = self.new_files[new_file]
            try:
                shutil.copy(new_file, goal_file)
            except FileNotFoundError:
                # Destination directory does not exist yet: create it, retry.
                os.makedirs(goal_file.rsplit('/', 1)[0])
                shutil.copy(new_file, goal_file)
        for del_file in self.del_files:
            os.remove(del_file)
        self.clean_empty()

    def clean_empty(self):
        """Remove now-empty goal directories, deepest paths first."""
        for folder in sorted(self.goal_folders, reverse=True):
            try:
                os.rmdir(folder)
            except OSError:
                # Directory not empty (or already gone) -- leave it.
                continue
| 31.328571 | 76 | 0.561332 |
import os
import shutil
import filync.tools as tools
class filync:
def __init__(self,
source_path='./',
goal_path='./',
ignores=['.git', '.vscode']) -> None:
self.source_path = source_path
self.goal_path = goal_path
if (self.source_path[-1] != '/'):
self.source_path + '/'
if (self.goal_path[-1] != '/'):
self.goal_path + '/'
self.ignores = ignores
self.update()
def update(self):
self.source_files, self.source_folders = tools.path_walker(
self.source_path, self.ignores)
self.goal_files, self.goal_folders = tools.path_walker(
self.goal_path, self.ignores)
def diff(self):
self.new_files = {}
self.del_files = []
for file in sorted(self.source_files.keys()):
goal_according = self.goal_path + file[len(self.source_path):]
if (goal_according not in self.goal_files.keys()
or self.source_files[file] !=
self.goal_files[goal_according]):
self.new_files[file] = goal_according
for file in sorted(self.goal_files.keys()):
source_according = self.source_path + file[len(self.goal_path):]
if (source_according not in self.source_files.keys()):
self.del_files.append(file)
return self.new_files, self.del_files
def sync(self):
self.diff()
for new_file in self.new_files.keys():
goal_file = self.new_files[new_file]
try:
shutil.copy(new_file, goal_file)
except FileNotFoundError:
os.makedirs(goal_file.rsplit('/', 1)[0])
shutil.copy(new_file, goal_file)
for del_file in self.del_files:
os.remove(del_file)
self.clean_empty()
def clean_empty(self):
for folder in sorted(self.goal_folders, reverse=True):
try:
os.rmdir(folder)
except OSError:
continue
| true | true |
1c3434c8d5a9453f4f0be8b719dbb00378a8eb7b | 16,392 | py | Python | common/battor/battor/battor_wrapper.py | pavelfeldman/catapult | a62e07f8660650e000a8112c50bcb67258a201f2 | [
"BSD-3-Clause"
] | 1 | 2021-03-16T14:33:43.000Z | 2021-03-16T14:33:43.000Z | common/battor/battor/battor_wrapper.py | hikecoder/catapult | a62e07f8660650e000a8112c50bcb67258a201f2 | [
"BSD-3-Clause"
] | null | null | null | common/battor/battor/battor_wrapper.py | hikecoder/catapult | a62e07f8660650e000a8112c50bcb67258a201f2 | [
"BSD-3-Clause"
] | 1 | 2021-03-14T23:44:40.000Z | 2021-03-14T23:44:40.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import datetime
import os
import logging
import platform
import random
import subprocess
import sys
import tempfile
import time
from battor import battor_error
import py_utils
from py_utils import cloud_storage
import dependency_manager
from devil.utils import battor_device_mapping
from devil.utils import find_usb_devices
import serial
from serial.tools import list_ports
DEFAULT_SHELL_CLOSE_TIMEOUT_S = 60
def IsBattOrConnected(*args, **kwargs):
  """Returns True if BattOr is detected.

  Thin logging wrapper; see _IsBattOrConnected below for arguments.
  """
  connected = _IsBattOrConnected(*args, **kwargs)
  if connected:
    logging.info('BattOr power monitor is connected.')
  else:
    logging.info('BattOr power monitor is not connected.')
  return connected
def _IsBattOrConnected(test_platform, android_device=None,
                       android_device_map=None, android_device_file=None):
  """Returns True if BattOr is detected.

  Args:
    test_platform: One of 'android', 'win', 'mac', 'linux' (anything else
      falls through to False).
    android_device: Android device serial; required for 'android'.
    android_device_map: Optional precomputed {device serial: BattOr path} map.
    android_device_file: Optional path to a serial-map file to load instead.
  """
  if test_platform == 'android':
    if not android_device:
      raise ValueError('Must pass android device serial when determining '
                       'support on android platform')

    if not android_device_map:
      device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap()
      if device_tree:
        # NOTE: .iteritems() is Python 2 only.
        for _, node in sorted(device_tree.iteritems()):
          node.Display()
      # Exactly one BattOr on the bus: no serial map needed.
      if len(battor_device_mapping.GetBattOrList(device_tree)) == 1:
        return True
      if android_device_file:
        android_device_map = battor_device_mapping.ReadSerialMapFile(
            android_device_file)
      else:
        try:
          android_device_map = battor_device_mapping.GenerateSerialMap()
        except battor_error.BattOrError:
          logging.exception('Error generating serial map')
          return False

    # If neither if statement above is triggered, it means that an
    # android_device_map was passed in and will be used.
    return str(android_device) in android_device_map
  elif test_platform == 'win':
    # On Windows the BattOr enumerates as a generic 'USB Serial Port'.
    for (_1, desc, _2) in serial.tools.list_ports.comports():
      if 'USB Serial Port' in desc:
        return True
    logging.info('No usb serial port discovered. Available ones are: %s' %
                 list(serial.tools.list_ports.comports()))
    return False
  elif test_platform == 'mac':
    for (_1, desc, _2) in serial.tools.list_ports.comports():
      if 'BattOr' in desc:
        return True
    return False
  elif test_platform == 'linux':
    device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap(fast=True)
    return bool(battor_device_mapping.GetBattOrList(device_tree))
  # Unknown platform: report not connected.
  return False
class BattOrWrapper(object):
"""A class for communicating with a BattOr in python."""
_EXIT_CMD = 'Exit'
_GET_FIRMWARE_GIT_HASH_CMD = 'GetFirmwareGitHash'
_START_TRACING_CMD = 'StartTracing'
_STOP_TRACING_CMD = 'StopTracing'
_SUPPORTS_CLOCKSYNC_CMD = 'SupportsExplicitClockSync'
_RECORD_CLOCKSYNC_CMD = 'RecordClockSyncMarker'
_SUPPORTED_PLATFORMS = ['android', 'chromeos', 'linux', 'mac', 'win']
_BATTOR_PARTNO = 'x192a3u'
_BATTOR_PROGRAMMER = 'avr109'
_BATTOR_BAUDRATE = '115200'
  def __init__(self, target_platform, android_device=None, battor_path=None,
               battor_map_file=None, battor_map=None, serial_log_bucket=None,
               autoflash=True):
    """Constructor.

    Args:
      target_platform: Platform BattOr is attached to.
      android_device: Serial number of Android device.
      battor_path: Path to BattOr device.
      battor_map_file: File giving map of [device serial: BattOr path]
      battor_map: Map of [device serial: BattOr path]
      serial_log_bucket: The cloud storage bucket to which BattOr agent serial
        logs are uploaded on failure.
      autoflash: Whether firmware may be flashed automatically (stored on the
        instance; not consulted in the code visible in this file chunk --
        TODO confirm usage).

    Attributes:
      _battor_path: Path to BattOr. Typically similar to /tty/USB0.
      _battor_agent_binary: Path to the BattOr agent binary used to communicate
        with the BattOr.
      _tracing: A bool saying if tracing has been started.
      _battor_shell: A subprocess running the battor_agent_binary
      _trace_results_path: Path to BattOr trace results file.
      _serial_log_bucket: Cloud storage bucket to which BattOr agent serial logs
        are uploaded on failure.
      _serial_log_file: Temp file for the BattOr agent serial log.
    """
    self._battor_path = self._GetBattOrPath(target_platform, android_device,
        battor_path, battor_map_file, battor_map)
    # Binary dependencies (agent binary, firmware, avrdude config) are
    # resolved via a JSON config that sits next to this module.
    config = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'battor_binary_dependencies.json')
    self._dm = dependency_manager.DependencyManager(
        [dependency_manager.BaseConfig(config)])
    # Agent binary is keyed by host OS and architecture, e.g. 'linux2_x86_64'.
    self._battor_agent_binary = self._dm.FetchPath(
        'battor_agent_binary', '%s_%s' % (sys.platform, platform.machine()))

    self._autoflash = autoflash
    self._serial_log_bucket = serial_log_bucket
    self._tracing = False
    self._battor_shell = None
    self._trace_results_path = None
    self._start_tracing_time = None
    self._stop_tracing_time = None
    self._trace_results = None
    self._serial_log_file = None
    self._target_platform = target_platform
    self._git_hash = None

    # Make sure a stray agent process is killed even if the caller forgets
    # to stop the shell before interpreter exit.
    atexit.register(self.KillBattOrShell)
  def _FlashBattOr(self):
    """Reflash the BattOr firmware if it differs from the pinned version.

    Compares the git hash reported by the device against the hash of the
    firmware stored in cloud storage; if they differ, stops the shell and
    flashes. Returns the result of FlashFirmware when a flash happened,
    False when firmware was already current, or None on a ValueError
    (unexpected git hash). The finally block restarts the shell whenever it
    was stopped along the way.
    """
    assert self._battor_shell, (
        'Must start shell before attempting to flash BattOr')

    try:
      device_git_hash = self.GetFirmwareGitHash()
      battor_firmware, cs_git_hash = self._dm.FetchPathWithVersion(
          'battor_firmware', 'default')
      if cs_git_hash != device_git_hash:
        logging.info(
            'Flashing BattOr with old firmware version <%s> with new '
            'version <%s>.', device_git_hash, cs_git_hash)
        avrdude_config = self._dm.FetchPath('avrdude_config', 'default')
        # Flashing requires exclusive access to the serial port.
        self.StopShell()
        return self.FlashFirmware(battor_firmware, avrdude_config)
      return False
    except ValueError:
      logging.exception('Git hash returned from BattOr was not as expected: %s'
                        % self._git_hash)
      self.StopShell()
    finally:
      if not self._battor_shell:
        # TODO(charliea): Once we understand why BattOrs are crashing, remove
        # this log.
        # http://crbug.com/699581
        logging.info('_FlashBattOr serial log:')
        self._UploadSerialLogToCloudStorage()
        self._serial_log_file = None
        self.StartShell()
def KillBattOrShell(self):
if self._battor_shell:
logging.critical('BattOr shell was not properly closed. Killing now.')
self._battor_shell.kill()
def GetShellReturnCode(self):
"""Gets the return code of the BattOr agent shell."""
rc = self._battor_shell.poll()
return rc
  def StartShell(self):
    """Start BattOr binary shell.

    Builds the agent command line (optionally pointing it at a serial log
    file and an explicit BattOr device path) and spawns the process.
    """
    assert not self._battor_shell, 'Attempting to start running BattOr shell.'

    battor_cmd = [self._battor_agent_binary]
    if self._serial_log_bucket:
      # Create and immediately close a temp file in order to get a filename
      # for the serial log.
      self._serial_log_file = tempfile.NamedTemporaryFile(delete=False)
      self._serial_log_file.close()
      battor_cmd.append('--battor-serial-log=%s' % self._serial_log_file.name)
    if self._battor_path:
      battor_cmd.append('--battor-path=%s' % self._battor_path)
    self._battor_shell = self._StartShellImpl(battor_cmd)
    # poll() returning None means the process is alive.
    assert self.GetShellReturnCode() is None, 'Shell failed to start.'
def StopShell(self, timeout=None):
"""Stop BattOr binary shell."""
assert self._battor_shell, 'Attempting to stop a non-running BattOr shell.'
assert not self._tracing, 'Attempting to stop a BattOr shell while tracing.'
timeout = timeout if timeout else DEFAULT_SHELL_CLOSE_TIMEOUT_S
self._SendBattOrCommand(self._EXIT_CMD, check_return=False)
try:
py_utils.WaitFor(lambda: self.GetShellReturnCode() != None, timeout)
except py_utils.TimeoutException:
self.KillBattOrShell()
finally:
self._battor_shell = None
def StartTracing(self):
"""Start tracing on the BattOr."""
assert self._battor_shell, 'Must start shell before tracing'
assert not self._tracing, 'Tracing already started.'
self._FlashBattOr()
self._SendBattOrCommand(self._START_TRACING_CMD)
self._tracing = True
self._start_tracing_time = int(time.time())
def StopTracing(self):
"""Stop tracing on the BattOr."""
assert self._tracing, 'Must run StartTracing before StopTracing'
# Create temp file to reserve location for saving results.
temp_file = tempfile.NamedTemporaryFile(delete=False)
self._trace_results_path = temp_file.name
temp_file.close()
self._SendBattOrCommand(
'%s %s' % (self._STOP_TRACING_CMD, self._trace_results_path),
check_return=False)
self._tracing = False
self._stop_tracing_time = int(time.time())
def CollectTraceData(self, timeout=None):
"""Collect trace data from battor.
Args:
timeout: timeout for waiting on the BattOr process to terminate in
seconds.
Returns: Trace data in form of a list.
"""
# The BattOr shell terminates after returning the results.
if timeout is None:
timeout = self._stop_tracing_time - self._start_tracing_time
py_utils.WaitFor(lambda: self.GetShellReturnCode() != None, timeout)
# TODO(charliea): Once we understand why BattOrs are crashing, only do
# this on failure.
# http://crbug.com/699581
logging.info('CollectTraceData serial log:')
self._UploadSerialLogToCloudStorage()
with open(self._trace_results_path) as results:
self._trace_results = results.read()
self._battor_shell = None
self._serial_log_file = None
return self._trace_results
def SupportsExplicitClockSync(self):
"""Returns if BattOr supports Clock Sync events."""
return bool(int(self._SendBattOrCommand(self._SUPPORTS_CLOCKSYNC_CMD,
check_return=False)))
def RecordClockSyncMarker(self, sync_id):
"""Record clock sync event on BattOr."""
if not isinstance(sync_id, basestring):
raise TypeError('sync_id must be a string.')
self._SendBattOrCommand('%s %s' % (self._RECORD_CLOCKSYNC_CMD, sync_id))
def _GetBattOrPath(self, target_platform, android_device=None,
                   battor_path=None, battor_map_file=None, battor_map=None):
    """Determines most likely path to the correct BattOr.

    Returns a serial-port identifier (COM port on Windows, /dev path on
    Linux/Android, port name on Mac) or raises BattOrError if the device
    cannot be located on the given platform.
    """
    if target_platform not in self._SUPPORTED_PLATFORMS:
        raise battor_error.BattOrError(
            '%s is an unsupported platform.' % target_platform)
    if target_platform in ['win']:
        # Right now, the BattOr agent binary isn't able to automatically detect
        # the BattOr port on Windows. To get around this, we know that the BattOr
        # shows up with a name of 'USB Serial Port', so use the COM port that
        # corresponds to a device with that name.
        for (port, desc, _) in serial.tools.list_ports.comports():
            if 'USB Serial Port' in desc:
                return port
        raise battor_error.BattOrError(
            'Could not find BattOr attached to machine.')
    if target_platform in ['mac']:
        # On Mac the device advertises 'BattOr' in its port description.
        for (port, desc, _) in serial.tools.list_ports.comports():
            if 'BattOr' in desc:
                return port
        # NOTE(review): with no match on Mac, control falls through to the
        # NotImplementedError at the bottom -- confirm that is intended.
    if target_platform in ['android', 'linux']:
        # An explicitly supplied path wins over any auto-detection.
        if battor_path:
            if not isinstance(battor_path, basestring):
                raise battor_error.BattOrError(
                    'An invalid BattOr path was specified.')
            return battor_path
        if target_platform == 'android':
            if not android_device:
                raise battor_error.BattOrError(
                    'Must specify device for Android platform.')
            if not battor_map_file and not battor_map:
                # No map was passed, so must create one.
                battor_map = battor_device_mapping.GenerateSerialMap()
            return battor_device_mapping.GetBattOrPathFromPhoneSerial(
                str(android_device), serial_map_file=battor_map_file,
                serial_map=battor_map)
        # Not Android and no explicitly passed BattOr.
        device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap(fast=True)
        battors = battor_device_mapping.GetBattOrList(device_tree)
        if len(battors) != 1:
            raise battor_error.BattOrError(
                'For non-Android platforms, exactly one BattOr must be '
                'attached unless address is explicitly given.')
        return '/dev/%s' % battors.pop()
    raise NotImplementedError(
        'BattOr Wrapper not implemented for given platform')
def _SendBattOrCommandImpl(self, cmd):
    """Write one command line to the BattOr shell; return one reply line."""
    shell_stdin = self._battor_shell.stdin
    shell_stdin.write('%s\n' % cmd)
    shell_stdin.flush()
    return self._battor_shell.stdout.readline()
def _SendBattOrCommand(self, cmd, check_return=True):
    """Send cmd to the BattOr shell, optionally verifying it succeeded.

    Args:
      cmd: command string to send.
      check_return: if True, require 'Done.' in the reply; on failure the
        shell is killed, the serial log uploaded, and BattOrError raised.

    Returns:
      The status line returned by the shell.
    """
    status = self._SendBattOrCommandImpl(cmd)
    # Idiom fix: 'x not in y' rather than 'not x in y'.
    if check_return and 'Done.' not in status:
        self.KillBattOrShell()
        self._UploadSerialLogToCloudStorage()
        self._serial_log_file = None
        raise battor_error.BattOrError(
            'BattOr did not complete command \'%s\' correctly.\n'
            'Outputted: %s' % (cmd, status))
    return status
def _StartShellImpl(self, battor_cmd):
    """Spawn the BattOr agent with piped stdio (stderr folded into stdout)."""
    return subprocess.Popen(battor_cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            shell=False)
def _UploadSerialLogToCloudStorage(self):
    """Uploads the BattOr serial log to cloud storage."""
    if not self._serial_log_file or not cloud_storage.IsNetworkIOEnabled():
        return
    # Bug fix: the strftime format previously embedded a stray '.txt'
    # ('%Y-%m-%d_%H-%M.txt'), producing names like
    # 'battor-serial-log-2017-01-01_12-00.txt-42.txt'. The outer template
    # already supplies the extension exactly once.
    remote_path = ('battor-serial-log-%s-%d.txt' % (
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'),
        random.randint(1, 100000)))
    try:
        cloud_url = cloud_storage.Insert(
            self._serial_log_bucket, remote_path, self._serial_log_file.name)
        sys.stderr.write('View BattOr serial log at %s\n' % cloud_url)
    except cloud_storage.PermissionError as e:
        # Best effort: log and continue, the trace itself is unaffected.
        logging.error('Cannot upload BattOr serial log file to cloud storage due '
                      'to permission error: %s' % e.message)
def GetFirmwareGitHash(self):
    """Gets the git hash for the BattOr firmware.

    Returns: Git hash for firmware currently on the BattOr.
      Also sets self._git_hash to this value.

    Raises: ValueError if the git hash is not in hex.
    """
    assert self._battor_shell, ('Must start shell before getting firmware git '
                                'hash')
    self._git_hash = self._SendBattOrCommand(self._GET_FIRMWARE_GIT_HASH_CMD,
                                             check_return=False).strip()
    # We expect the git hash to be a valid 6 character hexstring. This will
    # throw a ValueError exception otherwise.
    int(self._git_hash, 16)
    return self._git_hash
def FlashFirmware(self, hex_path, avrdude_config_path):
    """Flashes the BattOr using an avrdude config at config_path with the new
    firmware at hex_path.

    Returns True on success; raises BattOrFlashError if avrdude fails.
    Invalidates the cached firmware git hash.
    """
    assert not self._battor_shell, 'Cannot flash BattOr with open shell'
    avrdude_binary = self._dm.FetchPath(
        'avrdude_binary', '%s_%s' % (sys.platform, platform.machine()))
    # Sanitize hex file path for windows. It contains <drive>:/ which avrdude
    # is not capable of handling.
    _, hex_path = os.path.splitdrive(hex_path)
    avr_cmd = [
        avrdude_binary,
        '-e',  # Specify to erase data on chip.
        '-p', self._BATTOR_PARTNO,  # Specify AVR device.
        # Specify which microcontroller programmer to use.
        '-c', self._BATTOR_PROGRAMMER,
        '-b', self._BATTOR_BAUDRATE,  # Specify the baud rate to communicate at.
        '-P', self._battor_path,  # Serial path to the battor.
        # Command to execute with hex file and path to hex file.
        '-U', 'flash:w:%s' % hex_path,
        '-C', avrdude_config_path,  # AVRdude config file path.
        '2>&1'  # All output goes to stderr for some reason.
    ]
    try:
        subprocess.check_output(avr_cmd)
    except subprocess.CalledProcessError as e:
        raise BattOrFlashError('BattOr flash failed with return code %s.'
                               % e.returncode)
    # Firmware changed: the cached git hash is stale until re-queried.
    self._git_hash = None
    return True
class BattOrFlashError(Exception):
    """Raised when flashing new firmware onto the BattOr fails."""
    pass
| 38.032483 | 80 | 0.693204 |
import atexit
import datetime
import os
import logging
import platform
import random
import subprocess
import sys
import tempfile
import time
from battor import battor_error
import py_utils
from py_utils import cloud_storage
import dependency_manager
from devil.utils import battor_device_mapping
from devil.utils import find_usb_devices
import serial
from serial.tools import list_ports
DEFAULT_SHELL_CLOSE_TIMEOUT_S = 60
def IsBattOrConnected(*args, **kwargs):
is_connected = _IsBattOrConnected(*args, **kwargs)
if is_connected:
logging.info('BattOr power monitor is connected.')
else:
logging.info('BattOr power monitor is not connected.')
return is_connected
def _IsBattOrConnected(test_platform, android_device=None,
android_device_map=None, android_device_file=None):
if test_platform == 'android':
if not android_device:
raise ValueError('Must pass android device serial when determining '
'support on android platform')
if not android_device_map:
device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap()
if device_tree:
for _, node in sorted(device_tree.iteritems()):
node.Display()
if len(battor_device_mapping.GetBattOrList(device_tree)) == 1:
return True
if android_device_file:
android_device_map = battor_device_mapping.ReadSerialMapFile(
android_device_file)
else:
try:
android_device_map = battor_device_mapping.GenerateSerialMap()
except battor_error.BattOrError:
logging.exception('Error generating serial map')
return False
return str(android_device) in android_device_map
elif test_platform == 'win':
for (_1, desc, _2) in serial.tools.list_ports.comports():
if 'USB Serial Port' in desc:
return True
logging.info('No usb serial port discovered. Available ones are: %s' %
list(serial.tools.list_ports.comports()))
return False
elif test_platform == 'mac':
for (_1, desc, _2) in serial.tools.list_ports.comports():
if 'BattOr' in desc:
return True
return False
elif test_platform == 'linux':
device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap(fast=True)
return bool(battor_device_mapping.GetBattOrList(device_tree))
return False
class BattOrWrapper(object):
_EXIT_CMD = 'Exit'
_GET_FIRMWARE_GIT_HASH_CMD = 'GetFirmwareGitHash'
_START_TRACING_CMD = 'StartTracing'
_STOP_TRACING_CMD = 'StopTracing'
_SUPPORTS_CLOCKSYNC_CMD = 'SupportsExplicitClockSync'
_RECORD_CLOCKSYNC_CMD = 'RecordClockSyncMarker'
_SUPPORTED_PLATFORMS = ['android', 'chromeos', 'linux', 'mac', 'win']
_BATTOR_PARTNO = 'x192a3u'
_BATTOR_PROGRAMMER = 'avr109'
_BATTOR_BAUDRATE = '115200'
def __init__(self, target_platform, android_device=None, battor_path=None,
battor_map_file=None, battor_map=None, serial_log_bucket=None,
autoflash=True):
self._battor_path = self._GetBattOrPath(target_platform, android_device,
battor_path, battor_map_file, battor_map)
config = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'battor_binary_dependencies.json')
self._dm = dependency_manager.DependencyManager(
[dependency_manager.BaseConfig(config)])
self._battor_agent_binary = self._dm.FetchPath(
'battor_agent_binary', '%s_%s' % (sys.platform, platform.machine()))
self._autoflash = autoflash
self._serial_log_bucket = serial_log_bucket
self._tracing = False
self._battor_shell = None
self._trace_results_path = None
self._start_tracing_time = None
self._stop_tracing_time = None
self._trace_results = None
self._serial_log_file = None
self._target_platform = target_platform
self._git_hash = None
atexit.register(self.KillBattOrShell)
def _FlashBattOr(self):
assert self._battor_shell, (
'Must start shell before attempting to flash BattOr')
try:
device_git_hash = self.GetFirmwareGitHash()
battor_firmware, cs_git_hash = self._dm.FetchPathWithVersion(
'battor_firmware', 'default')
if cs_git_hash != device_git_hash:
logging.info(
'Flashing BattOr with old firmware version <%s> with new '
'version <%s>.', device_git_hash, cs_git_hash)
avrdude_config = self._dm.FetchPath('avrdude_config', 'default')
self.StopShell()
return self.FlashFirmware(battor_firmware, avrdude_config)
return False
except ValueError:
logging.exception('Git hash returned from BattOr was not as expected: %s'
% self._git_hash)
self.StopShell()
finally:
if not self._battor_shell:
logging.info('_FlashBattOr serial log:')
self._UploadSerialLogToCloudStorage()
self._serial_log_file = None
self.StartShell()
def KillBattOrShell(self):
if self._battor_shell:
logging.critical('BattOr shell was not properly closed. Killing now.')
self._battor_shell.kill()
def GetShellReturnCode(self):
rc = self._battor_shell.poll()
return rc
def StartShell(self):
assert not self._battor_shell, 'Attempting to start running BattOr shell.'
battor_cmd = [self._battor_agent_binary]
if self._serial_log_bucket:
self._serial_log_file = tempfile.NamedTemporaryFile(delete=False)
self._serial_log_file.close()
battor_cmd.append('--battor-serial-log=%s' % self._serial_log_file.name)
if self._battor_path:
battor_cmd.append('--battor-path=%s' % self._battor_path)
self._battor_shell = self._StartShellImpl(battor_cmd)
assert self.GetShellReturnCode() is None, 'Shell failed to start.'
def StopShell(self, timeout=None):
assert self._battor_shell, 'Attempting to stop a non-running BattOr shell.'
assert not self._tracing, 'Attempting to stop a BattOr shell while tracing.'
timeout = timeout if timeout else DEFAULT_SHELL_CLOSE_TIMEOUT_S
self._SendBattOrCommand(self._EXIT_CMD, check_return=False)
try:
py_utils.WaitFor(lambda: self.GetShellReturnCode() != None, timeout)
except py_utils.TimeoutException:
self.KillBattOrShell()
finally:
self._battor_shell = None
def StartTracing(self):
assert self._battor_shell, 'Must start shell before tracing'
assert not self._tracing, 'Tracing already started.'
self._FlashBattOr()
self._SendBattOrCommand(self._START_TRACING_CMD)
self._tracing = True
self._start_tracing_time = int(time.time())
def StopTracing(self):
assert self._tracing, 'Must run StartTracing before StopTracing'
temp_file = tempfile.NamedTemporaryFile(delete=False)
self._trace_results_path = temp_file.name
temp_file.close()
self._SendBattOrCommand(
'%s %s' % (self._STOP_TRACING_CMD, self._trace_results_path),
check_return=False)
self._tracing = False
self._stop_tracing_time = int(time.time())
def CollectTraceData(self, timeout=None):
if timeout is None:
timeout = self._stop_tracing_time - self._start_tracing_time
py_utils.WaitFor(lambda: self.GetShellReturnCode() != None, timeout)
logging.info('CollectTraceData serial log:')
self._UploadSerialLogToCloudStorage()
with open(self._trace_results_path) as results:
self._trace_results = results.read()
self._battor_shell = None
self._serial_log_file = None
return self._trace_results
def SupportsExplicitClockSync(self):
return bool(int(self._SendBattOrCommand(self._SUPPORTS_CLOCKSYNC_CMD,
check_return=False)))
def RecordClockSyncMarker(self, sync_id):
if not isinstance(sync_id, basestring):
raise TypeError('sync_id must be a string.')
self._SendBattOrCommand('%s %s' % (self._RECORD_CLOCKSYNC_CMD, sync_id))
def _GetBattOrPath(self, target_platform, android_device=None,
battor_path=None, battor_map_file=None, battor_map=None):
if target_platform not in self._SUPPORTED_PLATFORMS:
raise battor_error.BattOrError(
'%s is an unsupported platform.' % target_platform)
if target_platform in ['win']:
# the BattOr port on Windows. To get around this, we know that the BattOr
# shows up with a name of 'USB Serial Port', so use the COM port that
# corresponds to a device with that name.
for (port, desc, _) in serial.tools.list_ports.comports():
if 'USB Serial Port' in desc:
return port
raise battor_error.BattOrError(
'Could not find BattOr attached to machine.')
if target_platform in ['mac']:
for (port, desc, _) in serial.tools.list_ports.comports():
if 'BattOr' in desc:
return port
if target_platform in ['android', 'linux']:
if battor_path:
if not isinstance(battor_path, basestring):
raise battor_error.BattOrError(
'An invalid BattOr path was specified.')
return battor_path
if target_platform == 'android':
if not android_device:
raise battor_error.BattOrError(
'Must specify device for Android platform.')
if not battor_map_file and not battor_map:
# No map was passed, so must create one.
battor_map = battor_device_mapping.GenerateSerialMap()
return battor_device_mapping.GetBattOrPathFromPhoneSerial(
str(android_device), serial_map_file=battor_map_file,
serial_map=battor_map)
# Not Android and no explicitly passed BattOr.
device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap(fast=True)
battors = battor_device_mapping.GetBattOrList(device_tree)
if len(battors) != 1:
raise battor_error.BattOrError(
'For non-Android platforms, exactly one BattOr must be '
'attached unless address is explicitly given.')
return '/dev/%s' % battors.pop()
raise NotImplementedError(
'BattOr Wrapper not implemented for given platform')
def _SendBattOrCommandImpl(self, cmd):
self._battor_shell.stdin.write('%s\n' % cmd)
self._battor_shell.stdin.flush()
return self._battor_shell.stdout.readline()
def _SendBattOrCommand(self, cmd, check_return=True):
status = self._SendBattOrCommandImpl(cmd)
if check_return and not 'Done.' in status:
self.KillBattOrShell()
self._UploadSerialLogToCloudStorage()
self._serial_log_file = None
raise battor_error.BattOrError(
'BattOr did not complete command \'%s\' correctly.\n'
'Outputted: %s' % (cmd, status))
return status
def _StartShellImpl(self, battor_cmd):
return subprocess.Popen(
battor_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=False)
def _UploadSerialLogToCloudStorage(self):
if not self._serial_log_file or not cloud_storage.IsNetworkIOEnabled():
return
remote_path = ('battor-serial-log-%s-%d.txt' % (
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M.txt'),
random.randint(1, 100000)))
try:
cloud_url = cloud_storage.Insert(
self._serial_log_bucket, remote_path, self._serial_log_file.name)
sys.stderr.write('View BattOr serial log at %s\n' % cloud_url)
except cloud_storage.PermissionError as e:
logging.error('Cannot upload BattOr serial log file to cloud storage due '
'to permission error: %s' % e.message)
def GetFirmwareGitHash(self):
assert self._battor_shell, ('Must start shell before getting firmware git '
'hash')
self._git_hash = self._SendBattOrCommand(self._GET_FIRMWARE_GIT_HASH_CMD,
check_return=False).strip()
# We expect the git hash to be a valid 6 character hexstring. This will
# throw a ValueError exception otherwise.
int(self._git_hash, 16)
return self._git_hash
def FlashFirmware(self, hex_path, avrdude_config_path):
assert not self._battor_shell, 'Cannot flash BattOr with open shell'
avrdude_binary = self._dm.FetchPath(
'avrdude_binary', '%s_%s' % (sys.platform, platform.machine()))
# Sanitize hex file path for windows. It contains <drive>:/ which avrdude
# is not capable of handling.
_, hex_path = os.path.splitdrive(hex_path)
avr_cmd = [
avrdude_binary,
'-e', # Specify to erase data on chip.
'-p', self._BATTOR_PARTNO, # Specify AVR device.
# Specify which microcontroller programmer to use.
'-c', self._BATTOR_PROGRAMMER,
'-b', self._BATTOR_BAUDRATE, # Specify the baud rate to communicate at.
'-P', self._battor_path, # Serial path to the battor.
# Command to execute with hex file and path to hex file.
'-U', 'flash:w:%s' % hex_path,
'-C', avrdude_config_path, # AVRdude config file path.
'2>&1' # All output goes to stderr for some reason.
]
try:
subprocess.check_output(avr_cmd)
except subprocess.CalledProcessError as e:
raise BattOrFlashError('BattOr flash failed with return code %s.'
% e.returncode)
self._git_hash = None
return True
class BattOrFlashError(Exception):
pass
| true | true |
1c3435a6cc855240de2c2d26e2b0ce4f8df5479a | 4,287 | py | Python | tests/python/relay/test_name_transforms.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | tests/python/relay/test_name_transforms.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 3,022 | 2020-11-24T14:02:31.000Z | 2022-03-31T23:55:31.000Z | tests/python/relay/test_name_transforms.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License" you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm import TVMError
from tvm.relay.backend.name_transforms import (
to_c_function_style,
to_c_variable_style,
to_c_constant_style,
prefix_name,
prefix_generated_name,
sanitize_name,
)
import pytest
def test_to_c_function_style():
    """TVM-prefixed snake_case names become PascalCase C function names."""
    assert to_c_function_style("TVM_Woof") == "TVMWoof"
    assert to_c_function_style("TVM_woof") == "TVMWoof"
    assert to_c_function_style("TVM_woof_woof") == "TVMWoofWoof"
    assert to_c_function_style("TVMGen_woof_woof") == "TVMGenWoofWoof"
    # Incorrect prefix
    with pytest.raises(TVMError, match="Function not TVM prefixed"):
        to_c_function_style("Cake_Bakery")
    # Empty names are rejected explicitly.
    with pytest.raises(TVMError, match="Function name is empty"):
        to_c_function_style("")
def test_to_c_variable_style():
    """TVM-prefixed names become lower_snake_case C variable names."""
    assert to_c_variable_style("TVM_Woof") == "tvm_woof"
    assert to_c_variable_style("TVM_woof") == "tvm_woof"
    assert to_c_variable_style("TVM_woof_Woof") == "tvm_woof_woof"
    # Incorrect prefix
    with pytest.raises(TVMError, match="Variable not TVM prefixed"):
        to_c_variable_style("Cake_Bakery")
    with pytest.raises(TVMError, match="Variable name is empty"):
        to_c_variable_style("")
def test_to_c_constant_style():
    """TVM-prefixed names become UPPER_SNAKE_CASE C constant names."""
    assert to_c_constant_style("TVM_Woof") == "TVM_WOOF"
    assert to_c_constant_style("TVM_woof") == "TVM_WOOF"
    assert to_c_constant_style("TVM_woof_Woof") == "TVM_WOOF_WOOF"
    with pytest.raises(TVMError, match="Constant not TVM prefixed"):
        to_c_constant_style("Cake_Bakery")
    # Empty name also raises (message unchecked here).
    with pytest.raises(TVMError):
        to_c_constant_style("")
def test_prefix_name():
    """prefix_name adds 'TVM_' and joins string-or-list segments with '_'."""
    assert prefix_name("Woof") == "TVM_Woof"
    assert prefix_name(["Woof"]) == "TVM_Woof"
    assert prefix_name(["woof"]) == "TVM_woof"
    assert prefix_name(["woof", "moo"]) == "TVM_woof_moo"
    # Empty inputs (string, list, or any list element) are rejected.
    with pytest.raises(TVMError, match="Name is empty"):
        prefix_name("")
    with pytest.raises(TVMError, match="Name segments empty"):
        prefix_name([])
    with pytest.raises(TVMError, match="Name segment is empty"):
        prefix_name([""])
def test_prefix_generated_name():
    """prefix_generated_name behaves like prefix_name with 'TVMGen_'."""
    assert prefix_generated_name("Woof") == "TVMGen_Woof"
    assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
    assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
    assert prefix_generated_name(["woof"]) == "TVMGen_woof"
    assert prefix_generated_name(["woof", "moo"]) == "TVMGen_woof_moo"
    # Empty inputs are rejected with the same messages as prefix_name.
    with pytest.raises(TVMError, match="Name is empty"):
        prefix_generated_name("")
    with pytest.raises(TVMError, match="Name segments empty"):
        prefix_generated_name([])
    with pytest.raises(TVMError, match="Name segment is empty"):
        prefix_generated_name([""])
def test_sanitize_name():
    """sanitize_name replaces non-identifier characters with underscores."""
    assert sanitize_name("+_+ ") == "____"
    assert sanitize_name("input+") == "input_"
    assert sanitize_name("input-") == "input_"
    assert sanitize_name("input++") == "input__"
    assert sanitize_name("woof:1") == "woof_1"
    with pytest.raises(TVMError, match="Name is empty"):
        sanitize_name("")
def test_combined_logic():
    """Prefixing composes with the C function/variable style transforms."""
    assert (
        to_c_function_style(prefix_name(["Device", "target", "Invoke"])) == "TVMDeviceTargetInvoke"
    )
    assert to_c_function_style(prefix_generated_name(["model", "Run"])) == "TVMGenModelRun"
    assert to_c_variable_style(prefix_name(["Device", "target", "t"])) == "tvm_device_target_t"
    assert (
        to_c_variable_style(prefix_generated_name(["model", "Devices"])) == "tvmgen_model_devices"
    )
| 37.278261 | 99 | 0.70982 |
from tvm import TVMError
from tvm.relay.backend.name_transforms import (
to_c_function_style,
to_c_variable_style,
to_c_constant_style,
prefix_name,
prefix_generated_name,
sanitize_name,
)
import pytest
def test_to_c_function_style():
assert to_c_function_style("TVM_Woof") == "TVMWoof"
assert to_c_function_style("TVM_woof") == "TVMWoof"
assert to_c_function_style("TVM_woof_woof") == "TVMWoofWoof"
assert to_c_function_style("TVMGen_woof_woof") == "TVMGenWoofWoof"
with pytest.raises(TVMError, match="Function not TVM prefixed"):
to_c_function_style("Cake_Bakery")
with pytest.raises(TVMError, match="Function name is empty"):
to_c_function_style("")
def test_to_c_variable_style():
assert to_c_variable_style("TVM_Woof") == "tvm_woof"
assert to_c_variable_style("TVM_woof") == "tvm_woof"
assert to_c_variable_style("TVM_woof_Woof") == "tvm_woof_woof"
with pytest.raises(TVMError, match="Variable not TVM prefixed"):
to_c_variable_style("Cake_Bakery")
with pytest.raises(TVMError, match="Variable name is empty"):
to_c_variable_style("")
def test_to_c_constant_style():
assert to_c_constant_style("TVM_Woof") == "TVM_WOOF"
assert to_c_constant_style("TVM_woof") == "TVM_WOOF"
assert to_c_constant_style("TVM_woof_Woof") == "TVM_WOOF_WOOF"
with pytest.raises(TVMError, match="Constant not TVM prefixed"):
to_c_constant_style("Cake_Bakery")
with pytest.raises(TVMError):
to_c_constant_style("")
def test_prefix_name():
assert prefix_name("Woof") == "TVM_Woof"
assert prefix_name(["Woof"]) == "TVM_Woof"
assert prefix_name(["woof"]) == "TVM_woof"
assert prefix_name(["woof", "moo"]) == "TVM_woof_moo"
with pytest.raises(TVMError, match="Name is empty"):
prefix_name("")
with pytest.raises(TVMError, match="Name segments empty"):
prefix_name([])
with pytest.raises(TVMError, match="Name segment is empty"):
prefix_name([""])
def test_prefix_generated_name():
assert prefix_generated_name("Woof") == "TVMGen_Woof"
assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
assert prefix_generated_name(["woof"]) == "TVMGen_woof"
assert prefix_generated_name(["woof", "moo"]) == "TVMGen_woof_moo"
with pytest.raises(TVMError, match="Name is empty"):
prefix_generated_name("")
with pytest.raises(TVMError, match="Name segments empty"):
prefix_generated_name([])
with pytest.raises(TVMError, match="Name segment is empty"):
prefix_generated_name([""])
def test_sanitize_name():
assert sanitize_name("+_+ ") == "____"
assert sanitize_name("input+") == "input_"
assert sanitize_name("input-") == "input_"
assert sanitize_name("input++") == "input__"
assert sanitize_name("woof:1") == "woof_1"
with pytest.raises(TVMError, match="Name is empty"):
sanitize_name("")
def test_combined_logic():
assert (
to_c_function_style(prefix_name(["Device", "target", "Invoke"])) == "TVMDeviceTargetInvoke"
)
assert to_c_function_style(prefix_generated_name(["model", "Run"])) == "TVMGenModelRun"
assert to_c_variable_style(prefix_name(["Device", "target", "t"])) == "tvm_device_target_t"
assert (
to_c_variable_style(prefix_generated_name(["model", "Devices"])) == "tvmgen_model_devices"
)
| true | true |
1c34364153b35a31a3417fb55f88b56115287023 | 1,111 | py | Python | datalad/support/cache.py | yarikoptic/datalad | c0cd538de2ed9a30c0f58256c7afa6e18d325505 | [
"MIT"
] | null | null | null | datalad/support/cache.py | yarikoptic/datalad | c0cd538de2ed9a30c0f58256c7afa6e18d325505 | [
"MIT"
] | 6 | 2015-11-20T21:41:13.000Z | 2018-06-12T14:27:32.000Z | datalad/support/cache.py | yarikoptic/datalad | c0cd538de2ed9a30c0f58256c7afa6e18d325505 | [
"MIT"
] | 1 | 2017-03-28T14:44:16.000Z | 2017-03-28T14:44:16.000Z | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Simple constructs to be used as caches
"""
from collections import OrderedDict
# based on http://stackoverflow.com/a/2437645/1265472
class DictCache(OrderedDict):
    """A size-bounded dictionary that expunges its oldest entries.

    Items are kept in insertion order; once ``size_limit`` is exceeded the
    oldest entries are evicted (FIFO).  ``size_limit=None`` (the default)
    means the cache grows without bound.
    """

    def __init__(self, *args, **kwds):
        # Pull our keyword out before handing the rest to OrderedDict.
        self.size_limit = kwds.pop("size_limit", None)
        OrderedDict.__init__(self, *args, **kwds)
        self._check_size_limit()

    def __setitem__(self, key, value):
        OrderedDict.__setitem__(self, key, value)
        self._check_size_limit()

    def _check_size_limit(self):
        # Evict from the front (oldest insertion) until within bounds.
        while self.size_limit is not None and len(self) > self.size_limit:
            self.popitem(last=False)
1c3436a93e156c9b163508894959b51b9b7b4eb6 | 1,427 | py | Python | pandashells/bin/p_linspace.py | timgates42/pandashells | 4b565435a25ac713eeeacf28c3e5b52fe94530d8 | [
"BSD-2-Clause-FreeBSD"
] | 878 | 2015-08-02T02:07:20.000Z | 2022-01-15T19:06:47.000Z | pandashells/bin/p_linspace.py | timgates42/pandashells | 4b565435a25ac713eeeacf28c3e5b52fe94530d8 | [
"BSD-2-Clause-FreeBSD"
] | 44 | 2015-05-12T15:56:57.000Z | 2021-01-13T20:58:29.000Z | pandashells/bin/p_linspace.py | timgates42/pandashells | 4b565435a25ac713eeeacf28c3e5b52fe94530d8 | [
"BSD-2-Clause-FreeBSD"
] | 31 | 2015-08-02T22:48:36.000Z | 2021-01-13T20:54:58.000Z | #! /usr/bin/env python
# standard library imports
import sys # NOQA import sys to allow for mocking sys.argv in tests
import argparse
import textwrap
from pandashells.lib import module_checker_lib, arg_lib
module_checker_lib.check_for_modules(['pandas'])
from pandashells.lib import io_lib
import numpy as np
import pandas as pd
def main():
    """CLI entry point: emit a DataFrame of N linearly spaced values.

    Parses three positional arguments (start, end, npoints), builds a
    single-column DataFrame 'c0' via numpy.linspace and writes it to the
    configured output.
    """
    # Bug fix: the original assigned a short description to ``msg`` and
    # immediately overwrote it with the dedented help text; the dead
    # assignment is removed.
    msg = textwrap.dedent(
        """
        Generate a linearly spaced set of data points.
        -----------------------------------------------------------------------
        Examples:
        * Generate 7 points between 1 and 10
        p.linspace 1 10 7
        -----------------------------------------------------------------------
        """
    )
    # read command line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
    arg_lib.add_args(parser, 'io_out')
    msg = 'start end npoints'
    parser.add_argument("numbers", help=msg, type=str, nargs=3, metavar='')
    # parse arguments
    args = parser.parse_args()
    min_val, max_val = float(args.numbers[0]), float(args.numbers[1])
    N = int(args.numbers[2])
    df = pd.DataFrame({'c0': np.linspace(min_val, max_val, N)})
    # write dataframe to output
    io_lib.df_to_output(args, df)


if __name__ == '__main__':  # pragma: no cover
    main()
| 25.945455 | 79 | 0.597758 |
import sys
import argparse
import textwrap
from pandashells.lib import module_checker_lib, arg_lib
module_checker_lib.check_for_modules(['pandas'])
from pandashells.lib import io_lib
import numpy as np
import pandas as pd
def main():
msg = "Generate a linearly spaced set of data points."
msg = textwrap.dedent(
"""
Generate a linearly spaced set of data points.
-----------------------------------------------------------------------
Examples:
* Generate 7 points between 1 and 10
p.linspace 1 10 7
-----------------------------------------------------------------------
"""
)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
arg_lib.add_args(parser, 'io_out')
msg = 'start end npoints'
parser.add_argument("numbers", help=msg, type=str, nargs=3, metavar='')
args = parser.parse_args()
min_val, max_val = float(args.numbers[0]), float(args.numbers[1])
N = int(args.numbers[2])
df = pd.DataFrame({'c0': np.linspace(min_val, max_val, N)})
io_lib.df_to_output(args, df)
if __name__ == '__main__':
main()
| true | true |
1c3438005bf0a2213fed7f01d5fce16c91644252 | 8,117 | py | Python | FlyTrackApp/flight_tracker_vis_class.py | jmmelis/FlyTrackApp | 7c03eb0aeda7b0bd4e0c6181bc776c92e4fbc582 | [
"MIT"
] | null | null | null | FlyTrackApp/flight_tracker_vis_class.py | jmmelis/FlyTrackApp | 7c03eb0aeda7b0bd4e0c6181bc776c92e4fbc582 | [
"MIT"
] | null | null | null | FlyTrackApp/flight_tracker_vis_class.py | jmmelis/FlyTrackApp | 7c03eb0aeda7b0bd4e0c6181bc776c92e4fbc582 | [
"MIT"
] | null | null | null | import vtk
import sys
import os
import time
import numpy as np
# flight tracker visualization class
class FlightTrackerVisualization:
def __init__(self):
    """Create an empty VTK renderer/window/interactor with default settings."""
    # window parameters
    self.window_name = "Model"
    self.background = (0.1,0.2,0.4)
    self.window_sz = (600, 600)
    # stl model parameters
    self.model_name = ""
    self.stl_list = []
    self.model_loc = ""
    self.stl_src = []       # one vtkSTLReader per loaded STL file
    self.stl_actors = []    # one vtkActor per loaded STL file
    # point parameters
    self.pointcloud_list = []
    # Create the Renderer, RenderWindow, and RenderWindowInteractor
    self.ren = vtk.vtkRenderer()
    self.ren_win = vtk.vtkRenderWindow()
    self.ren_win.AddRenderer(self.ren)
    self.iren = vtk.vtkRenderWindowInteractor()
    self.iren.SetRenderWindow(self.ren_win)
    # Set the background color and window size
    self.ren_win.SetWindowName(self.window_name)
    self.ren.SetBackground(*self.background)
    self.ren_win.SetSize(*self.window_sz)
    # Render once so the window exists before any geometry is added
    self.iren.Initialize()
    self.ren.ResetCameraClippingRange()
    self.ren_win.Render()
def load_model(self,model_name,model_loc,stl_list):
    """Load a model's STL meshes and add one Gouraud-shaded actor per mesh.

    model_name -- subdirectory (and window title) of the model
    model_loc  -- parent directory containing the model directory
    stl_list   -- STL file names to load, in part order

    NOTE(review): this chdir's into the model directory and never restores
    the previous working directory -- a process-wide side effect; confirm
    callers tolerate it.
    """
    self.model_name = model_name
    self.stl_list = stl_list
    self.model_loc = model_loc + '/' + model_name
    self.ren_win.SetWindowName(model_name)
    os.chdir(self.model_loc)
    for stl_file in stl_list:
        sr = vtk.vtkSTLReader()
        sr.SetFileName(stl_file)
        self.stl_src.append(sr)
        stl_mapper = vtk.vtkPolyDataMapper()
        stl_mapper.ScalarVisibilityOff()
        stl_mapper.SetInputConnection(sr.GetOutputPort())
        stl_actor = vtk.vtkActor()
        stl_actor.SetMapper(stl_mapper)
        self.stl_actors.append(stl_actor)
        stl_props = stl_actor.GetProperty()
        stl_actor.SetPosition(0,0,0)
        stl_props.SetInterpolationToGouraud()
        stl_mapper.Update()
        self.ren.AddActor(stl_actor)
    self.ren_win.Render()
def set_state_model(self,state,parents,scale):
    """Pose each STL part from a per-part state vector via a kinematic chain.

    state   -- 7xN array per part: rows 0-3 appear to be a unit quaternion
               (q0..q3) and rows 4-6 a translation -- verify convention.
    parents -- row i lists ancestor indices of part i in strictly increasing
               order; the walk stops at the first non-increasing value, so
               transforms are concatenated root-to-leaf.
    scale   -- per-part factor applied to the translation components.
    """
    for i in range(state.shape[1]):
        old_val = -1
        j = 0
        transformation = vtk.vtkTransform()
        # Walk the ancestor chain while indices keep increasing.
        while parents[i,j] > old_val:
            ind = parents[i,j]
            # Build the 4x4 homogeneous matrix: upper-left 3x3 is the
            # quaternion-derived rotation, last column the scaled translation.
            elem_mat = vtk.vtkMatrix4x4()
            elem_mat.SetElement(0,0,(2.0*state[0,ind]**2-1.0+2.0*state[1,ind]**2))
            elem_mat.SetElement(0,1,(2.0*state[1,ind]*state[2,ind]+2.0*state[0,ind]*state[3,ind]))
            elem_mat.SetElement(0,2,(2.0*state[1,ind]*state[3,ind]-2.0*state[0,ind]*state[2,ind]))
            elem_mat.SetElement(1,0,(2.0*state[1,ind]*state[2,ind]-2.0*state[0,ind]*state[3,ind]))
            elem_mat.SetElement(1,1,(2.0*state[0,ind]**2-1.0+2.0*state[2,ind]**2))
            elem_mat.SetElement(1,2,(2.0*state[2,ind]*state[3,ind]+2.0*state[0,ind]*state[1,ind]))
            elem_mat.SetElement(2,0,(2.0*state[1,ind]*state[3,ind]+2.0*state[0,ind]*state[2,ind]))
            elem_mat.SetElement(2,1,(2.0*state[2,ind]*state[3,ind]-2.0*state[0,ind]*state[1,ind]))
            elem_mat.SetElement(2,2,(2.0*state[0,ind]**2-1.0+2.0*state[3,ind]**2))
            elem_mat.SetElement(0,3,state[4,ind]*scale[ind])
            elem_mat.SetElement(1,3,state[5,ind]*scale[ind])
            elem_mat.SetElement(2,3,state[6,ind]*scale[ind])
            transformation.Concatenate(elem_mat)
            old_val = parents[i,j]
            j+=1
        self.stl_actors[i].SetUserMatrix(transformation.GetMatrix())
    self.ren_win.Render()
def add_pointcloud(self, pcl_in):
    """Add a (3, N) point set as a renderable actor.

    Fixes two defects of the previous version:
    * It called ``SetNumberOfPoints(N)`` and then ``InsertNextPoint`` N
      times; ``InsertNextPoint`` appends *after* the N pre-allocated
      (uninitialised) slots, so the polydata ended up with 2N points.
      ``SetPoint`` writes into the pre-allocated slots instead.
    * The polydata carried no cells, and a vtkPolyData without cells is not
      drawn by vtkPolyDataMapper; a one-vertex cell per point is added so
      the points actually render (same approach as VtkPointCloud below).

    :param pcl_in: array with at least 3 rows; column i is point i's
        (x, y, z).  Extra rows (e.g. intensity) are ignored here.
    """
    n_points = pcl_in.shape[1]
    points = vtk.vtkPoints()
    points.SetNumberOfPoints(n_points)
    verts = vtk.vtkCellArray()
    for i in range(n_points):
        points.SetPoint(i, pcl_in[0:3, i])
        verts.InsertNextCell(1)
        verts.InsertCellPoint(i)
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    polydata.SetVerts(verts)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(polydata)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    self.ren.AddActor(actor)
def start_interaction_window(self):
    """Render the scene and hand control to the VTK interactor (blocks)."""
    self.ren_win.Render()
    self.iren.Start()
def kill_interaction_window(self):
    """Drop the render window and interactor so fresh ones can be created."""
    del self.ren_win, self.iren
def load_pointcloud(self, pointCloud, pcl_in):
    """Feed every column of ``pcl_in`` into ``pointCloud`` and return it.

    :param pointCloud: object exposing ``addPoint(column)`` (a VtkPointCloud)
    :param pcl_in: 2-D array; each column is one point record
    """
    n_cols = pcl_in.shape[1]
    for col in range(n_cols):
        pointCloud.addPoint(pcl_in[:, col])
    return pointCloud
def show_pointcloud(self,pcl_in):
    """Add a scalar-colored point cloud actor.

    :param pcl_in: (4, N) array; rows 0-2 are x, y, z and row 3 is the
        scalar used for coloring (its max sets the mapper's scalar range).
    """
    pointCloud = self.VtkPointCloud(np.amax(pcl_in[3,:]))
    pointCloud = self.load_pointcloud(pointCloud,pcl_in)
    self.ren.AddActor(pointCloud.vtkActor)
def show_bboxes(self, corner_points):
    """Add one wireframe bounding-box actor per column of ``corner_points``.

    :param corner_points: array of shape (24, n_boxes); column i holds box
        i's eight corners flattened as (x0, y0, z0, ..., x7, y7, z7).

    The previous version unpacked each column with a manual double loop;
    a reshape of the column expresses the same un-flattening directly.
    """
    corner_points = np.asarray(corner_points)
    n_boxes = corner_points.shape[1]
    for i in range(n_boxes):
        # Un-flatten column i into an (8, 3) matrix of corner coordinates.
        corner_mat = corner_points[:24, i].reshape(8, 3).astype(float)
        box = self.BoundingBox()
        box.addBox(corner_mat)
        self.ren.AddActor(box.vtkActor)
class VtkPointCloud:
    """Scalar-colored point cloud backed by a vtkPolyData of vertex cells.

    Each stored point carries a scalar ('DepthArray') that the mapper maps
    onto colors over [0, scalar_range].
    """
    def __init__(self,scalar_range):
        self.vtkPolyData = vtk.vtkPolyData()
        self.clearPoints()
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputData(self.vtkPolyData)
        mapper.SetColorModeToDefault()
        mapper.SetScalarRange(0.0,scalar_range)
        mapper.SetScalarVisibility(1)
        self.vtkActor = vtk.vtkActor()
        self.vtkActor.SetMapper(mapper)
    def addPoint(self,point):
        """Append one point; ``point`` is (x, y, z, scalar)."""
        pointID = self.vtkPoints.InsertNextPoint(point[0:3])
        self.vtkDepth.InsertNextValue(point[3])
        # One-vertex cell per point so the mapper actually draws it.
        self.vtkCells.InsertNextCell(1)
        self.vtkCells.InsertCellPoint(pointID)
        self.vtkCells.Modified()
        self.vtkPoints.Modified()
        self.vtkDepth.Modified()
    def clearPoints(self):
        """Reset to an empty cloud: fresh points, cells and scalar arrays."""
        self.vtkPoints = vtk.vtkPoints()
        self.vtkCells = vtk.vtkCellArray()
        self.vtkDepth = vtk.vtkDoubleArray()
        self.vtkDepth.SetName('DepthArray')
        self.vtkPolyData.SetPoints(self.vtkPoints)
        self.vtkPolyData.SetVerts(self.vtkCells)
        self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth)
        self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray')
class BoundingBox:
    """Wireframe box rendered as six closed 4-corner polylines.

    The previous addBox unrolled 8 SetPoint calls and 30 cell-insertion
    calls by hand; the corner coordinates and face index lists are now
    looped over, producing the exact same cells in the same order.
    """

    # Each face is a loop of 4 corner indices; the drawing loop re-appends
    # the first index to close the polyline (5 points per cell), matching
    # the original cell sequences exactly.
    _FACES = (
        (0, 1, 2, 3),
        (4, 5, 6, 7),
        (0, 4, 7, 3),
        (1, 5, 6, 2),
        (0, 1, 5, 4),
        (3, 2, 6, 7),
    )

    def __init__(self):
        self.mapper = vtk.vtkPolyDataMapper()
        self.vtkActor = vtk.vtkActor()
        self.vtkActor.SetMapper(self.mapper)

    def addBox(self, corner_points):
        """Build the wireframe from an (8, 3) array of corner coordinates."""
        points = vtk.vtkPoints()
        points.SetNumberOfPoints(8)
        for idx in range(8):
            points.SetPoint(idx, corner_points[idx, 0],
                            corner_points[idx, 1], corner_points[idx, 2])
        lines = vtk.vtkCellArray()
        for face in self._FACES:
            lines.InsertNextCell(5)
            for corner in face:
                lines.InsertCellPoint(corner)
            lines.InsertCellPoint(face[0])  # close the loop
        polygon = vtk.vtkPolyData()
        polygon.SetPoints(points)
        polygon.SetLines(lines)
        self.mapper.SetInputData(polygon)
        self.mapper.Update()
| 30.287313 | 90 | 0.724036 | import vtk
import sys
import os
import time
import numpy as np
class FlightTrackerVisualization:
def __init__(self):
self.window_name = "Model"
self.background = (0.1,0.2,0.4)
self.window_sz = (600, 600)
self.model_name = ""
self.stl_list = []
self.model_loc = ""
self.stl_src = []
self.stl_actors = []
self.pointcloud_list = []
self.ren = vtk.vtkRenderer()
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
self.ren_win.SetWindowName(self.window_name)
self.ren.SetBackground(*self.background)
self.ren_win.SetSize(*self.window_sz)
self.iren.Initialize()
self.ren.ResetCameraClippingRange()
self.ren_win.Render()
def load_model(self,model_name,model_loc,stl_list):
self.model_name = model_name
self.stl_list = stl_list
self.model_loc = model_loc + '/' + model_name
self.ren_win.SetWindowName(model_name)
os.chdir(self.model_loc)
for stl_file in stl_list:
sr = vtk.vtkSTLReader()
sr.SetFileName(stl_file)
self.stl_src.append(sr)
stl_mapper = vtk.vtkPolyDataMapper()
stl_mapper.ScalarVisibilityOff()
stl_mapper.SetInputConnection(sr.GetOutputPort())
stl_actor = vtk.vtkActor()
stl_actor.SetMapper(stl_mapper)
self.stl_actors.append(stl_actor)
stl_props = stl_actor.GetProperty()
stl_actor.SetPosition(0,0,0)
stl_props.SetInterpolationToGouraud()
stl_mapper.Update()
self.ren.AddActor(stl_actor)
self.ren_win.Render()
def set_state_model(self,state,parents,scale):
for i in range(state.shape[1]):
old_val = -1
j = 0
transformation = vtk.vtkTransform()
while parents[i,j] > old_val:
ind = parents[i,j]
elem_mat = vtk.vtkMatrix4x4()
elem_mat.SetElement(0,0,(2.0*state[0,ind]**2-1.0+2.0*state[1,ind]**2))
elem_mat.SetElement(0,1,(2.0*state[1,ind]*state[2,ind]+2.0*state[0,ind]*state[3,ind]))
elem_mat.SetElement(0,2,(2.0*state[1,ind]*state[3,ind]-2.0*state[0,ind]*state[2,ind]))
elem_mat.SetElement(1,0,(2.0*state[1,ind]*state[2,ind]-2.0*state[0,ind]*state[3,ind]))
elem_mat.SetElement(1,1,(2.0*state[0,ind]**2-1.0+2.0*state[2,ind]**2))
elem_mat.SetElement(1,2,(2.0*state[2,ind]*state[3,ind]+2.0*state[0,ind]*state[1,ind]))
elem_mat.SetElement(2,0,(2.0*state[1,ind]*state[3,ind]+2.0*state[0,ind]*state[2,ind]))
elem_mat.SetElement(2,1,(2.0*state[2,ind]*state[3,ind]-2.0*state[0,ind]*state[1,ind]))
elem_mat.SetElement(2,2,(2.0*state[0,ind]**2-1.0+2.0*state[3,ind]**2))
elem_mat.SetElement(0,3,state[4,ind]*scale[ind])
elem_mat.SetElement(1,3,state[5,ind]*scale[ind])
elem_mat.SetElement(2,3,state[6,ind]*scale[ind])
transformation.Concatenate(elem_mat)
old_val = parents[i,j]
j+=1
self.stl_actors[i].SetUserMatrix(transformation.GetMatrix())
self.ren_win.Render()
def add_pointcloud(self,pcl_in):
N = pcl_in.shape[1]
points = vtk.vtkPoints()
points.SetNumberOfPoints(N)
polydata = vtk.vtkPolyData()
for i in range(N):
points.InsertNextPoint(pcl_in[:,i])
polydata.SetPoints(points)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self.ren.AddActor(actor)
ren_win.Render()
self.iren.Start()
def kill_interaction_window(self):
del self.ren_win, self.iren
def load_pointcloud(self,pointCloud,pcl_in):
for k in range(pcl_in.shape[1]):
point = pcl_in[:,k]
pointCloud.addPoint(point)
return pointCloud
def show_pointcloud(self,pcl_in):
pointCloud = self.VtkPointCloud(np.amax(pcl_in[3,:]))
pointCloud = self.load_pointcloud(pointCloud,pcl_in)
self.ren.AddActor(pointCloud.vtkActor)
def show_bboxes(self,corner_points):
N_boxes = corner_points.shape[1]
for i in range(N_boxes):
corner_mat = np.empty([8,3])
for j in range(8):
corner_mat[j,0] = corner_points[j*3,i]
corner_mat[j,1] = corner_points[j*3+1,i]
corner_mat[j,2] = corner_points[j*3+2,i]
box = self.BoundingBox()
box.addBox(corner_mat)
self.ren.AddActor(box.vtkActor)
class VtkPointCloud:
def __init__(self,scalar_range):
self.vtkPolyData = vtk.vtkPolyData()
self.clearPoints()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(self.vtkPolyData)
mapper.SetColorModeToDefault()
mapper.SetScalarRange(0.0,scalar_range)
mapper.SetScalarVisibility(1)
self.vtkActor = vtk.vtkActor()
self.vtkActor.SetMapper(mapper)
def addPoint(self,point):
pointID = self.vtkPoints.InsertNextPoint(point[0:3])
self.vtkDepth.InsertNextValue(point[3])
self.vtkCells.InsertNextCell(1)
self.vtkCells.InsertCellPoint(pointID)
self.vtkCells.Modified()
self.vtkPoints.Modified()
self.vtkDepth.Modified()
def clearPoints(self):
self.vtkPoints = vtk.vtkPoints()
self.vtkCells = vtk.vtkCellArray()
self.vtkDepth = vtk.vtkDoubleArray()
self.vtkDepth.SetName('DepthArray')
self.vtkPolyData.SetPoints(self.vtkPoints)
self.vtkPolyData.SetVerts(self.vtkCells)
self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth)
self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray')
class BoundingBox:
def __init__(self):
self.mapper = vtk.vtkPolyDataMapper()
self.vtkActor = vtk.vtkActor()
self.vtkActor.SetMapper(self.mapper)
def addBox(self,corner_points):
points = vtk.vtkPoints()
points.SetNumberOfPoints(8)
points.SetPoint(0,corner_points[0,0],corner_points[0,1],corner_points[0,2])
points.SetPoint(1,corner_points[1,0],corner_points[1,1],corner_points[1,2])
points.SetPoint(2,corner_points[2,0],corner_points[2,1],corner_points[2,2])
points.SetPoint(3,corner_points[3,0],corner_points[3,1],corner_points[3,2])
points.SetPoint(4,corner_points[4,0],corner_points[4,1],corner_points[4,2])
points.SetPoint(5,corner_points[5,0],corner_points[5,1],corner_points[5,2])
points.SetPoint(6,corner_points[6,0],corner_points[6,1],corner_points[6,2])
points.SetPoint(7,corner_points[7,0],corner_points[7,1],corner_points[7,2])
lines = vtk.vtkCellArray()
lines.InsertNextCell(5)
lines.InsertCellPoint(0)
lines.InsertCellPoint(1)
lines.InsertCellPoint(2)
lines.InsertCellPoint(3)
lines.InsertCellPoint(0)
lines.InsertNextCell(5)
lines.InsertCellPoint(4)
lines.InsertCellPoint(5)
lines.InsertCellPoint(6)
lines.InsertCellPoint(7)
lines.InsertCellPoint(4)
lines.InsertNextCell(5)
lines.InsertCellPoint(0)
lines.InsertCellPoint(4)
lines.InsertCellPoint(7)
lines.InsertCellPoint(3)
lines.InsertCellPoint(0)
lines.InsertNextCell(5)
lines.InsertCellPoint(1)
lines.InsertCellPoint(5)
lines.InsertCellPoint(6)
lines.InsertCellPoint(2)
lines.InsertCellPoint(1)
lines.InsertNextCell(5)
lines.InsertCellPoint(0)
lines.InsertCellPoint(1)
lines.InsertCellPoint(5)
lines.InsertCellPoint(4)
lines.InsertCellPoint(0)
lines.InsertNextCell(5)
lines.InsertCellPoint(3)
lines.InsertCellPoint(2)
lines.InsertCellPoint(6)
lines.InsertCellPoint(7)
lines.InsertCellPoint(3)
polygon = vtk.vtkPolyData()
polygon.SetPoints(points)
polygon.SetLines(lines)
self.mapper.SetInputData(polygon)
self.mapper.Update()
| true | true |
1c3438bcf49ef281259083f830ab90e75458a3b3 | 470 | py | Python | cirq-google/cirq_google/workflow/__init__.py | unaiic/Cirq | d137a7d69fb6e0f2afb676b179ffdf7a198363f5 | [
"Apache-2.0"
] | null | null | null | cirq-google/cirq_google/workflow/__init__.py | unaiic/Cirq | d137a7d69fb6e0f2afb676b179ffdf7a198363f5 | [
"Apache-2.0"
] | null | null | null | cirq-google/cirq_google/workflow/__init__.py | unaiic/Cirq | d137a7d69fb6e0f2afb676b179ffdf7a198363f5 | [
"Apache-2.0"
] | 2 | 2021-09-22T11:16:46.000Z | 2021-09-23T12:55:22.000Z | # pylint: disable=wrong-or-nonexistent-copyright-notice
from cirq_google.workflow.quantum_executable import (
ExecutableSpec,
KeyValueExecutableSpec,
QuantumExecutable,
QuantumExecutableGroup,
BitstringsMeasurement,
)
from cirq_google.workflow.quantum_runtime import (
SharedRuntimeInfo,
RuntimeInfo,
ExecutableResult,
ExecutableGroupResult,
ExecutableGroupResultFilesystemRecord,
QuantumRuntimeConfiguration,
execute,
)
| 24.736842 | 55 | 0.785106 |
from cirq_google.workflow.quantum_executable import (
ExecutableSpec,
KeyValueExecutableSpec,
QuantumExecutable,
QuantumExecutableGroup,
BitstringsMeasurement,
)
from cirq_google.workflow.quantum_runtime import (
SharedRuntimeInfo,
RuntimeInfo,
ExecutableResult,
ExecutableGroupResult,
ExecutableGroupResultFilesystemRecord,
QuantumRuntimeConfiguration,
execute,
)
| true | true |
1c34390d2bcd7b5fa6794c8b84106f4ee13a2ac5 | 6,917 | py | Python | autocomplete_light/tests/widgets.py | bburan/django-autocomplete-light | 064676061b101d5d47655e8598b21cbaf7716ae8 | [
"MIT"
] | 1 | 2015-07-01T02:26:51.000Z | 2015-07-01T02:26:51.000Z | autocomplete_light/tests/widgets.py | bburan/django-autocomplete-light | 064676061b101d5d47655e8598b21cbaf7716ae8 | [
"MIT"
] | null | null | null | autocomplete_light/tests/widgets.py | bburan/django-autocomplete-light | 064676061b101d5d47655e8598b21cbaf7716ae8 | [
"MIT"
] | null | null | null | from lxml.html import etree
from lxml.cssselect import CSSSelector
try:
from unittest import mock
except ImportError: # python2
import mock
from django.test import TestCase
import autocomplete_light
from ..example_apps.basic.models import FkModel
from ..example_apps.security_test.models import Item
class LazyAutocomplete(autocomplete_light.AutocompleteModelBase):
    # Deliberately NOT registered at import time: the lazy-resolution tests
    # below register it on a private registry only after widget construction.
    pass
class WidgetBaseTestCase(TestCase):
    """Rendering and registry-wiring checks for ``WidgetBase``.

    Subclasses swap ``widget_class`` (and override ``autocomplete_input``
    when the generated markup differs) to re-run the same suite against the
    other widget implementations.

    Fix: ``test_widget_attrs`` used to be defined twice in this class; the
    first definition was silently shadowed by the second and never ran.
    The two bodies are merged below (the shadowed body is a superset of the
    surviving one).  ``assertEquals`` is also replaced by the non-deprecated
    ``assertEqual``.
    """
    widget_class = autocomplete_light.WidgetBase
    fixtures = ['security_test.json']

    def autocomplete_input(self, et):
        """Return the text input the JS autocomplete binds to."""
        return CSSSelector('input.autocomplete')(et)[0]

    def test_init_with_registry(self):
        registry = autocomplete_light.AutocompleteRegistry()
        registry.register(FkModel, name='TestAutocomplete')
        widget = self.widget_class('TestAutocomplete', registry=registry)
        self.assertEqual(widget.autocomplete.__name__, 'TestAutocomplete')

    def test_init_without_registry(self):
        # Falls back to the default module-level registry.
        widget = self.widget_class('FkModelAutocomplete')
        self.assertEqual(widget.autocomplete.model, FkModel)

    def test_widget_attrs(self):
        # Merged body: custom data attribute passes through, and the
        # user-supplied class is merged with the widget's own class.
        widget = self.widget_class(
            'FkModelAutocomplete',
            widget_attrs={'data-widget-foo': 'bar', 'class': 'foobar'})
        html = widget.render('somewidget', None)
        et = etree.fromstring(html)
        self.assertEqual(et.attrib['data-widget-foo'], 'bar')
        self.assertIn('foobar', et.attrib['class'])
        self.assertIn('autocomplete-light-widget', et.attrib['class'])

    def test_widget_js_attributes_deprecation(self):
        with self.assertRaises(PendingDeprecationWarning):
            self.widget_class(widget_js_attributes={'foo': 'bar'})

    def test_autocomplete_js_attributes_deprecation(self):
        with self.assertRaises(PendingDeprecationWarning):
            self.widget_class(autocomplete_js_attributes={'foo': 'bar'})

    @mock.patch('autocomplete_light.widgets.render_to_string')
    def test_widget_template(self, render_to_string):
        widget = self.widget_class('FkModelAutocomplete',
                                   widget_template='foo.html')
        widget.render('somewidget', None)
        render_to_string.assert_called_with('foo.html', mock.ANY)

    @mock.patch('autocomplete_light.widgets.render_to_string')
    def test_autocomplete_widget_template(self, render_to_string):
        # The autocomplete class itself may override the widget template.
        class Autocomplete(autocomplete_light.AutocompleteListBase):
            widget_template = 'bar.html'
            choices = ['a', 'b']

        widget = self.widget_class(Autocomplete)
        widget.render('somewidget', [])
        render_to_string.assert_called_with('bar.html', mock.ANY)

    @mock.patch('autocomplete_light.widgets.render_to_string')
    def test_base_context(self, render_to_string):
        widget = self.widget_class('FkModelAutocomplete')
        widget.render('somewidget', None)
        render_to_string.assert_called_with(
            'autocomplete_light/widget.html', {
                'widget': widget,
                'choices': mock.ANY,
                'autocomplete': mock.ANY,
                'attrs': mock.ANY,
                'widget_attrs': mock.ANY,
                'name': 'somewidget',
                'values': [],
            })

    @mock.patch('autocomplete_light.widgets.render_to_string')
    def test_extra_context(self, render_to_string):
        widget = self.widget_class('FkModelAutocomplete',
                                   extra_context={'foo': 'bar'})
        widget.render('somewidget', None)
        render_to_string.assert_called_with(
            'autocomplete_light/widget.html', {
                'widget': widget,
                'choices': mock.ANY,
                'autocomplete': mock.ANY,
                'attrs': mock.ANY,
                'widget_attrs': mock.ANY,
                'name': 'somewidget',
                'values': [],
                'foo': 'bar',
            })

    def test_input_placeholder_attr(self):
        widget = self.widget_class('FkModelAutocomplete',
                                   attrs={'placeholder': 'foo'})
        html = widget.render('somewidget', None)
        et = etree.XML(html)
        self.assertEqual(self.autocomplete_input(et).attrib['placeholder'],
                         'foo')

    def test_lazy_autocomplete_init(self):
        registry = autocomplete_light.AutocompleteRegistry()
        try:
            self.widget_class('LazyAutocomplete', registry=registry)
        except autocomplete_light.AutocompleteNotRegistered:
            self.fail('WidgetBase initialization should not trigger registry '
                      'access')

    def test_lazy_autcomplete_access(self):
        registry = autocomplete_light.AutocompleteRegistry()
        widget = self.widget_class('LazyAutocomplete', registry=registry)
        try:
            widget.autocomplete
            self.fail('Should raise AutocompleteNotRegistered on unregistered '
                      'LazyAutocomplete')
        except autocomplete_light.AutocompleteNotRegistered:
            pass

        registry.register(LazyAutocomplete)
        self.assertIn('LazyAutocomplete', registry.keys())
        try:
            widget.autocomplete
        except autocomplete_light.AutocompleteNotRegistered:
            self.fail('widget.autocomplete access should not raise '
                      'AutocompleteNotRegistered')

    def test_value_out_of_queryset(self):
        # Values outside the autocomplete's queryset must not be rendered.
        widget = self.widget_class('ItemAutocomplete')
        html = widget.render('somewidget', [1, 2])
        span = etree.fromstring(html)
        choices = CSSSelector('[data-value]')(span)
        self.assertEqual(len(choices), 1)
        self.assertEqual(int(choices[0].attrib['data-value']), 1)
class ChoiceWidgetTestCase(WidgetBaseTestCase):
    """Re-run the WidgetBase suite against ChoiceWidget."""
    widget_class = autocomplete_light.ChoiceWidget
class MultipleChoiceWidgetTestCase(WidgetBaseTestCase):
    """Re-run the WidgetBase suite against MultipleChoiceWidget."""
    widget_class = autocomplete_light.MultipleChoiceWidget
class TextWidgetTestCase(WidgetBaseTestCase):
    """Re-run the WidgetBase suite against TextWidget.

    TextWidget renders a bare <input> with no surrounding template, so the
    template/context/attr tests from the base class do not apply and are
    disabled below.
    """
    widget_class = autocomplete_light.TextWidget

    def autocomplete_input(self, et):
        # The rendered markup *is* the input element itself.
        return et

    def test_extra_context(self):
        pass  # no template for TextWidget

    def test_widget_template(self):
        pass  # no template for TextWidget

    def test_base_context(self):
        pass  # no template for TextWidget

    def test_autocomplete_widget_template(self):
        pass  # no template for TextWidget

    def test_widget_attrs(self):
        pass  # no widget_attrs for TextWidget

    def test_value_out_of_queryset(self):
        pass  # no queryset for text widget
| 35.111675 | 81 | 0.656354 | from lxml.html import etree
from lxml.cssselect import CSSSelector
try:
from unittest import mock
except ImportError:
import mock
from django.test import TestCase
import autocomplete_light
from ..example_apps.basic.models import FkModel
from ..example_apps.security_test.models import Item
class LazyAutocomplete(autocomplete_light.AutocompleteModelBase):
pass
class WidgetBaseTestCase(TestCase):
widget_class = autocomplete_light.WidgetBase
fixtures = ['security_test.json']
def autocomplete_input(self, et):
return CSSSelector('input.autocomplete')(et)[0]
def test_init_with_registry(self):
registry = autocomplete_light.AutocompleteRegistry()
registry.register(FkModel, name='TestAutocomplete')
widget = self.widget_class('TestAutocomplete', registry=registry)
self.assertEqual(widget.autocomplete.__name__, 'TestAutocomplete')
def test_init_without_registry(self):
widget = self.widget_class('FkModelAutocomplete')
self.assertEqual(widget.autocomplete.model, FkModel)
def test_widget_attrs(self):
widget = self.widget_class('FkModelAutocomplete',
widget_attrs={'data-widget-foo': 'bar', 'class':'foobar'})
html = widget.render('somewidget', None)
et = etree.fromstring(html)
self.assertEquals(et.attrib['data-widget-foo'], 'bar')
self.assertIn('foobar', et.attrib['class'])
self.assertIn('autocomplete-light-widget', et.attrib['class'])
def test_widget_js_attributes_deprecation(self):
with self.assertRaises(PendingDeprecationWarning) as context:
widget = self.widget_class(widget_js_attributes={'foo': 'bar'})
def test_autocomplete_js_attributes_deprecation(self):
with self.assertRaises(PendingDeprecationWarning) as context:
widget = self.widget_class(autocomplete_js_attributes={'foo': 'bar'})
@mock.patch('autocomplete_light.widgets.render_to_string')
def test_widget_template(self, render_to_string):
widget = self.widget_class('FkModelAutocomplete',
widget_template='foo.html')
widget.render('somewidget', None)
render_to_string.assert_called_with('foo.html', mock.ANY)
@mock.patch('autocomplete_light.widgets.render_to_string')
def test_autocomplete_widget_template(self, render_to_string):
class Autocomplete(autocomplete_light.AutocompleteListBase):
widget_template='bar.html'
choices = ['a', 'b']
widget = self.widget_class(Autocomplete)
widget.render('somewidget', [])
render_to_string.assert_called_with('bar.html', mock.ANY)
@mock.patch('autocomplete_light.widgets.render_to_string')
def test_base_context(self, render_to_string):
widget = self.widget_class('FkModelAutocomplete')
widget.render('somewidget', None)
render_to_string.assert_called_with(
'autocomplete_light/widget.html', {
'widget': widget,
'choices': mock.ANY,
'autocomplete': mock.ANY,
'attrs': mock.ANY,
'widget_attrs': mock.ANY,
'name': 'somewidget',
'values': [],
})
@mock.patch('autocomplete_light.widgets.render_to_string')
def test_extra_context(self, render_to_string):
widget = self.widget_class('FkModelAutocomplete',
extra_context={'foo': 'bar'})
widget.render('somewidget', None)
render_to_string.assert_called_with(
'autocomplete_light/widget.html', {
'widget': widget,
'choices': mock.ANY,
'autocomplete': mock.ANY,
'attrs': mock.ANY,
'widget_attrs': mock.ANY,
'name': 'somewidget',
'values': [],
'foo': 'bar',
})
def test_input_placeholder_attr(self):
widget = self.widget_class('FkModelAutocomplete',
attrs={'placeholder': 'foo'})
html = widget.render('somewidget', None)
et = etree.XML(html)
self.assertEqual(self.autocomplete_input(et).attrib['placeholder'],
'foo')
def test_widget_attrs(self):
widget = self.widget_class('FkModelAutocomplete',
widget_attrs={'class': 'foo'})
html = widget.render('somewidget', None)
et = etree.XML(html)
self.assertIn('foo', et.attrib['class'])
def test_lazy_autocomplete_init(self):
registry = autocomplete_light.AutocompleteRegistry()
try:
self.widget_class('LazyAutocomplete', registry=registry)
except autocomplete_light.AutocompleteNotRegistered:
self.fail('WidgetBase initialization should not trigger registry '
'access')
def test_lazy_autcomplete_access(self):
registry = autocomplete_light.AutocompleteRegistry()
widget = self.widget_class('LazyAutocomplete', registry=registry)
try:
widget.autocomplete
self.fail('Should raise AutocompleteNotRegistered on unregistered '
'LazyAutocomplete')
except autocomplete_light.AutocompleteNotRegistered:
pass
registry.register(LazyAutocomplete)
self.assertIn('LazyAutocomplete', registry.keys())
try:
widget.autocomplete
except autocomplete_light.AutocompleteNotRegistered:
self.fail('widget.autocomplete access should not raise '
'AutocompleteNotRegistered')
def test_value_out_of_queryset(self):
widget = self.widget_class('ItemAutocomplete')
html = widget.render('somewidget', [1, 2])
span = etree.fromstring(html)
choices = CSSSelector('[data-value]')(span)
self.assertEqual(len(choices), 1)
self.assertEqual(int(choices[0].attrib['data-value']), 1)
class ChoiceWidgetTestCase(WidgetBaseTestCase):
widget_class = autocomplete_light.ChoiceWidget
class MultipleChoiceWidgetTestCase(WidgetBaseTestCase):
widget_class = autocomplete_light.MultipleChoiceWidget
class TextWidgetTestCase(WidgetBaseTestCase):
widget_class = autocomplete_light.TextWidget
def autocomplete_input(self, et):
return et
def test_extra_context(self):
pass
def test_widget_template(self):
pass
def test_base_context(self):
pass
def test_autocomplete_widget_template(self):
pass
def test_widget_attrs(self):
pass
def test_value_out_of_queryset(self):
pass
| true | true |
1c34391518004496e38d5b8a753fef4334f1ebab | 3,187 | py | Python | plato/processors/registry.py | NingxinSu/plato | 94c1c0d7d8b1a1b0ff7f6d9efcf1883f314d9668 | [
"Apache-2.0"
] | null | null | null | plato/processors/registry.py | NingxinSu/plato | 94c1c0d7d8b1a1b0ff7f6d9efcf1883f314d9668 | [
"Apache-2.0"
] | null | null | null | plato/processors/registry.py | NingxinSu/plato | 94c1c0d7d8b1a1b0ff7f6d9efcf1883f314d9668 | [
"Apache-2.0"
] | null | null | null | """
This registry for Processors contains framework-specific implementations of
Processors for data payloads.
Having a registry of all available classes is convenient for retrieving an instance
based on a configuration at run-time.
"""
import logging
from collections import OrderedDict
from typing import Tuple
from plato.config import Config
from plato.processors import pipeline
# The processor implementations below are framework-specific; they are only
# imported — and the registry only populated — when neither the TensorFlow
# nor the MindSpore trainer is configured.
if not (hasattr(Config().trainer, 'use_tensorflow')
        or hasattr(Config().trainer, 'use_mindspore')):
    from plato.processors import (
        base,
        feature_randomized_response,
        feature_gaussian,
        feature_laplace,
        feature_quantize,
        feature_dequantize,
        feature_unbatch,
        inbound_feature_tensors,
        outbound_feature_ndarrays,
        model_deepcopy,
        model_quantize,
        model_dequantize,
        model_randomized_response,
    )

    # Maps the names used in configuration files to Processor classes;
    # get() below instantiates processors by looking names up here.
    registered_processors = OrderedDict([
        ('base', base.Processor),
        ('feature_randomized_response', feature_randomized_response.Processor),
        ('feature_gaussian', feature_gaussian.Processor),
        ('feature_laplace', feature_laplace.Processor),
        ('feature_quantize', feature_quantize.Processor),
        ('feature_dequantize', feature_dequantize.Processor),
        ('feature_unbatch', feature_unbatch.Processor),
        ('inbound_feature_tensors', inbound_feature_tensors.Processor),
        ('outbound_feature_ndarrays', outbound_feature_ndarrays.Processor),
        ('model_deepcopy', model_deepcopy.Processor),
        ('model_quantize', model_quantize.Processor),
        ('model_dequantize', model_dequantize.Processor),
        ('model_randomized_response', model_randomized_response.Processor),
    ])
def get(user: str,
        processor_kwargs=None,
        **kwargs) -> Tuple[pipeline.Processor, pipeline.Processor]:
    """Build the (outbound, inbound) processor pipelines for ``user``.

    :param user: either ``"Server"`` or ``"Client"``; selects which section
        of the configuration the processor name lists are read from.
    :param processor_kwargs: optional mapping of processor name to a dict of
        keyword arguments merged over ``kwargs`` for that processor.
        ``None`` means no per-processor overrides (the previous ``={}``
        default was a mutable default argument).
    :param kwargs: keyword arguments forwarded to every processor.
    :return: a tuple (outbound pipeline, inbound pipeline).
    """
    if processor_kwargs is None:
        processor_kwargs = {}

    outbound_processors = []
    inbound_processors = []

    assert user in ("Server", "Client")

    config = Config().server if user == "Server" else Config().clients

    if hasattr(config, 'outbound_processors') and isinstance(
            config.outbound_processors, list):
        outbound_processors = config.outbound_processors

    if hasattr(config, 'inbound_processors') and isinstance(
            config.inbound_processors, list):
        inbound_processors = config.inbound_processors

    for processor in outbound_processors:
        logging.info("%s: Using Processor for sending payload: %s", user,
                     processor)
    for processor in inbound_processors:
        logging.info("%s: Using Processor for receiving payload: %s", user,
                     processor)

    def build(name):
        # Per-processor kwargs override the shared kwargs for that processor.
        this_kwargs = {**kwargs, **processor_kwargs.get(name, {})}
        return registered_processors[name](**this_kwargs)

    outbound = [build(name) for name in outbound_processors]
    inbound = [build(name) for name in inbound_processors]

    return pipeline.Processor(outbound), pipeline.Processor(inbound)
| 34.641304 | 83 | 0.688422 | import logging
from collections import OrderedDict
from typing import Tuple
from plato.config import Config
from plato.processors import pipeline
if not (hasattr(Config().trainer, 'use_tensorflow')
or hasattr(Config().trainer, 'use_mindspore')):
from plato.processors import (
base,
feature_randomized_response,
feature_gaussian,
feature_laplace,
feature_quantize,
feature_dequantize,
feature_unbatch,
inbound_feature_tensors,
outbound_feature_ndarrays,
model_deepcopy,
model_quantize,
model_dequantize,
model_randomized_response,
)
registered_processors = OrderedDict([
('base', base.Processor),
('feature_randomized_response', feature_randomized_response.Processor),
('feature_gaussian', feature_gaussian.Processor),
('feature_laplace', feature_laplace.Processor),
('feature_quantize', feature_quantize.Processor),
('feature_dequantize', feature_dequantize.Processor),
('feature_unbatch', feature_unbatch.Processor),
('inbound_feature_tensors', inbound_feature_tensors.Processor),
('outbound_feature_ndarrays', outbound_feature_ndarrays.Processor),
('model_deepcopy', model_deepcopy.Processor),
('model_quantize', model_quantize.Processor),
('model_dequantize', model_dequantize.Processor),
('model_randomized_response', model_randomized_response.Processor),
])
def get(user: str,
processor_kwargs={},
**kwargs) -> Tuple[pipeline.Processor, pipeline.Processor]:
outbound_processors = []
inbound_processors = []
assert user in ("Server", "Client")
if user == "Server":
config = Config().server
else:
config = Config().clients
if hasattr(config, 'outbound_processors') and isinstance(
config.outbound_processors, list):
outbound_processors = config.outbound_processors
if hasattr(config, 'inbound_processors') and isinstance(
config.inbound_processors, list):
inbound_processors = config.inbound_processors
for processor in outbound_processors:
logging.info("%s: Using Processor for sending payload: %s", user,
processor)
for processor in inbound_processors:
logging.info("%s: Using Processor for receiving payload: %s", user,
processor)
def map_f(name):
if name in processor_kwargs:
this_kwargs = {**kwargs, **(processor_kwargs[name])}
else:
this_kwargs = kwargs
return registered_processors[name](**this_kwargs)
outbound_processors = list(map(map_f, outbound_processors))
inbound_processors = list(map(map_f, inbound_processors))
return pipeline.Processor(outbound_processors), pipeline.Processor(
inbound_processors)
| true | true |
1c343b29fd70682caa31e6d22ef7e8dbd0c921c6 | 9,125 | py | Python | mcse/crystals/packing_factor.py | manny405/mcse | 419e4b8c144563ae0bf48982fc7ea26ce941a3eb | [
"Apache-2.0"
] | 5 | 2021-07-22T17:24:58.000Z | 2021-11-30T07:50:29.000Z | mcse/crystals/packing_factor.py | manny405/mcse | 419e4b8c144563ae0bf48982fc7ea26ce941a3eb | [
"Apache-2.0"
] | null | null | null | mcse/crystals/packing_factor.py | manny405/mcse | 419e4b8c144563ae0bf48982fc7ea26ce941a3eb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from scipy.spatial.distance import cdist
from scipy.spatial import cKDTree
from ase.data import vdw_radii,atomic_numbers,covalent_radii
from ase.data.vdw_alvarez import vdw_radii as vdw_radii_alvarez
from mcse import Structure
from mcse import BaseDriver_
from mcse.crystals.supercell import SupercellSphere
# Effective per-element radii: start from ASE's vdW table and patch it.
all_radii = []
for idx,value in enumerate(vdw_radii):
    if np.isnan(value):
        ## Approximate: no vdW radius tabulated for this element, so fall
        ## back to the covalent radius plus a 0.8 A cushion.
        value = covalent_radii[idx]+0.8
    else:
        if not np.isnan(vdw_radii_alvarez[idx]):
            ## When both tables have a value, keep the smaller (tighter) one.
            alvarez_value = vdw_radii_alvarez[idx]
            value = np.min([value, alvarez_value])
    all_radii.append(value)
all_radii = np.array(all_radii)

## 1.1 is more appropriate for molecular crystal structures, particularly
## when hydrogen bonds are present.
all_radii[1] = 1.1

# Element symbol -> radius lookup for elements covered by the table above.
all_radii_dict = {}
for key,value in atomic_numbers.items():
    if value >= len(all_radii):
        # Atomic number beyond the tabulated range; skip it.
        continue
    all_radii_dict[key] = all_radii[value]
class PackingFactor(BaseDriver_):
    """
    Calculates the geometric packing factor using the vdW radii of the
    structure and a user specified grid spacing. The algorithm is as follows:
        1. Generate a supercell of user specified size. This is to ensure that
           all of the necessary atoms are within the unit cell.
        2. Keep only atoms that are within the unit cell plus a correction
           equal to the largest van der Waals radius in the system.
        3. Generate a grid using the specified grid spacing. This is done by
           computing the number of grid points from the lattice vector norm
           in every direction, building the grid in fractional space, and
           converting it to real space.
        4. For each grid point, compute its distance to every atom.
        5. Divide each distance by the vdW radius of the respective atom.
        6. Grid points whose smallest scaled distance is below 1 are
           occupied; all others are empty.
        7. Packing factor = occupied points / total points.

    Arguments
    ---------
    spacing: float
        Grid spacing, given in Angstroms.
    vdw: iterable
        Iterable that can be indexed where the index is equal to the atomic
        number and the value at the index is the radius to use.
    supercell_mult: int
        Size of supercell to build in every lattice direction.
    low_memory: bool
        If True, an implementation that requires a smaller, fixed amount of
        system memory is used at the expense of additional compute time. This
        should be set to True if the user would like to use grid spacings
        below 0.25 Angstrom.

    """
    def __init__(self,
                 spacing=0.25,
                 vdw=all_radii,
                 supercell_mult=1,
                 low_memory=False):
        self.spacing = spacing
        self.vdw = vdw
        self.supercell_mult = supercell_mult
        self.supercell = SupercellSphere(mult=self.supercell_mult)
        self.low_memory = low_memory

        ### Batch size not directly exposed to user as argument to keep the
        ### API cleaner. Change if you're confident.
        self.batch_size = 25000


    def calc_struct(self, struct):
        """
        Compute the packing factor of ``struct``, store it in
        ``struct.properties["PackingFactor"]`` and return it.

        """
        self.struct = struct
        self.supercell_struct = self.supercell.calc_struct(struct)

        # Row-vector convention: cartesian = fractional @ lat.
        self.lat = np.vstack(self.struct.get_lattice_vectors())
        self.norm = np.linalg.norm(self.lat, axis=-1)
        self.linv = np.linalg.inv(self.lat)

        self.unit_cell_struct = self.keep_inside(self.supercell_struct)

        # Number of grid points per lattice direction for the target spacing.
        self.num = self.norm / self.spacing
        self.grid_frac = self.generate_grid(self.num)
        self.grid_cart = np.dot(self.grid_frac, self.lat)

        # Per-atom vdW radii, shaped (1, n_atoms) for broadcasting against
        # the (n_points, n_atoms) distance matrix.
        ele = self.unit_cell_struct.elements
        self.vdw_array = np.array(
            [self.vdw[atomic_numbers[x]] for x in ele])[None, :]

        if not self.low_memory:
            ### Compute all pairwise distances at once (fast, memory heavy).
            dist = cdist(self.grid_cart, self.unit_cell_struct.get_geo_array())
            ### Scale each distance by the vdW radius of the atom involved.
            dist = dist / self.vdw_array
            self.min_dist = np.min(dist, axis=-1)
            self.occupied_idx = np.where(self.min_dist < 1)[0]
            ## Exact definition of packing factor
            packing_factor = self.occupied_idx.shape[0] / self.min_dist.shape[0]
        else:
            packing_factor = self._batched_packing_factor()

        self.struct.properties["PackingFactor"] = packing_factor
        return packing_factor


    def _batched_packing_factor(self):
        """
        Low-memory path: process the grid in fixed-size batches so that only
        a (batch_size, n_atoms) distance matrix is held at any time.

        #### This type of operation would also be excellent for a GPU
        #### implementation.

        """
        geo = self.unit_cell_struct.get_geo_array()
        n_points = len(self.grid_cart)
        n_batches = int(np.ceil(n_points / self.batch_size))
        total_occupied = 0
        for batch_idx in range(n_batches):
            # Progress output: "total_batches: current_batch".
            print("{}: {}".format(n_batches, batch_idx))
            start = batch_idx * self.batch_size
            end = min(start + self.batch_size, n_points)
            temp_dist = cdist(self.grid_cart[start:end], geo) / self.vdw_array
            temp_min_dist = np.min(temp_dist, axis=-1)
            total_occupied += np.where(temp_min_dist < 1)[0].shape[0]
        return total_occupied / n_points


    def keep_inside(self, struct):
        """
        Keeps only the atoms that are inside the unit cell plus a margin
        equal to twice the largest vdW radius (in fractional units).

        """
        geo = struct.get_geo_array()
        ele = struct.elements

        vdw_list = [self.vdw[atomic_numbers[x]] for x in ele]
        correction = np.max(vdw_list)
        # Convert the cartesian margin to a fractional one using the
        # shortest lattice vector, then double it to be safe.
        max_frac_correction = correction / np.min(self.norm)
        max_frac_correction = max_frac_correction*2

        frac = np.dot(geo,self.linv)
        keep_idx = np.where(
                np.logical_and(
                    (frac>=(0-max_frac_correction)).all(axis=-1),
                    (frac<=(1+max_frac_correction)).all(axis=-1)
                        ))[0]

        geo = geo[keep_idx]
        ele = ele[keep_idx]

        unit_cell_struct = Structure.from_geo(geo, ele)
        unit_cell_struct.set_lattice_vectors(self.lat)

        return unit_cell_struct


    def move_atoms_in(self):
        """
        Wrap all atoms of ``self.struct`` back into the unit cell and return
        the wrapped structure. Requires ``calc_struct`` to have been called
        first (uses ``self.lat`` / ``self.linv``).

        """
        geo = self.struct.get_geo_array()
        ele = self.struct.elements

        ### Bug fix: use the same cart->frac convention as keep_inside
        ### (frac = cart @ lat^-1). The previous np.dot(self.linv, geo.T).T
        ### effectively used the transposed inverse, which is wrong for the
        ### row-vector lattice convention used everywhere else in this class.
        frac = np.dot(geo, self.linv)

        ### Wrap every fractional coordinate into [0, 1).
        frac -= np.floor(frac)

        cart = np.dot(frac, self.lat)
        struct = Structure.from_geo(cart, ele)
        struct.set_lattice_vectors(self.lat)

        return struct


    def generate_grid(self, num):
        """
        Generates a fractional-coordinate grid for the system.

        Arguments
        ---------
        num: Iterable
            Iterable of length 3 that describes how many values should be
            generated in each direction for the grid.

        """
        ## This should be adjusted so that it's exactly at the edge of the
        ## unit cell.
        range_x = np.arange(0,num[0],1) / num[0]
        range_y = np.arange(0,num[1],1) / num[1]
        range_z = np.arange(0,num[2],1) / num[2]

        grid_x,grid_y,grid_z = np.meshgrid(range_x,range_y,range_z,
                                           indexing="ij")

        grid = np.c_[grid_x.ravel(),
                     grid_y.ravel(),
                     grid_z.ravel()]

        return grid
## No command-line behavior; this module is intended to be used as a library.
if __name__ == "__main__":
    pass
    ##
| 36.067194 | 80 | 0.575342 |
import numpy as np
from scipy.spatial.distance import cdist
from scipy.spatial import cKDTree
from ase.data import vdw_radii,atomic_numbers,covalent_radii
from ase.data.vdw_alvarez import vdw_radii as vdw_radii_alvarez
from mcse import Structure
from mcse import BaseDriver_
from mcse.crystals.supercell import SupercellSphere
all_radii = []
for idx,value in enumerate(vdw_radii):
if np.isnan(value):
e = covalent_radii[idx]+0.8
else:
if not np.isnan(vdw_radii_alvarez[idx]):
alvarez_value = vdw_radii_alvarez[idx]
value = np.min([value, alvarez_value])
all_radii.append(value)
all_radii = np.array(all_radii)
adii):
continue
all_radii_dict[key] = all_radii[value]
class PackingFactor(BaseDriver_):
def __init__(self,
spacing=0.25,
vdw=all_radii,
supercell_mult=1,
low_memory=False):
self.spacing = spacing
self.vdw = vdw
self.supercell_mult = supercell_mult
self.supercell = SupercellSphere(mult=self.supercell_mult)
self.low_memory = low_memory
self.lat = np.vstack(self.struct.get_lattice_vectors())
self.norm = np.linalg.norm(self.lat,axis=-1)
self.linv = np.linalg.inv(self.lat)
self.unit_cell_struct = self.keep_inside(self.supercell_struct)
self.num = self.norm / self.spacing
self.grid_frac = self.generate_grid(self.num)
self.grid_cart = np.dot(self.grid_frac, self.lat)
if self.low_memory == False:
### Compute pairwise distances with unit cell modified structure
dist = cdist(self.grid_cart, self.unit_cell_struct.get_geo_array())
### Divide dist by vdW radius
ele = self.unit_cell_struct.elements
self.vdw_array = [self.vdw[atomic_numbers[x]] for x in ele]
self.vdw_array = np.array(self.vdw_array)[None,:]
dist = dist / self.vdw_array
self.min_dist = np.min(dist, axis=-1)
self.occupied_idx = np.where(self.min_dist < 1)[0]
## Exact definition of packing factor
packing_factor = np.where(self.min_dist < 1)[0].shape[0] / \
self.min_dist.shape[0]
else:
#### Certainly a batch size of 25000 should be possible for
#### reasonably size molecular cryestal structures on a modern
#### server.
#### In addition, this type of operation would be excellent for
#### GPU implementation.
ele = self.unit_cell_struct.elements
self.vdw_array = [self.vdw[atomic_numbers[x]] for x in ele]
self.vdw_array = np.array(self.vdw_array)[None,:]
total_occupied = 0
total_points = 0
total = len(self.grid_cart[::self.batch_size])
for idx,value in enumerate(self.grid_cart[::self.batch_size]):
print("{}: {}".format(total, idx))
start_idx = idx*self.batch_size
end_idx = (idx+1)*self.batch_size
if end_idx > len(self.grid_cart):
end_idx = len(self.grid_cart)
idx_values = np.arange(start_idx,end_idx,1)
temp_cart = self.grid_cart[idx_values]
temp_dist = cdist(temp_cart,
self.unit_cell_struct.get_geo_array())
### Divide dist by vdW radius
temp_dist = temp_dist / self.vdw_array
temp_min_dist = np.min(temp_dist, axis=-1)
temp_occupied_idx = np.where(temp_min_dist < 1)[0]
total_occupied += temp_occupied_idx.shape[0]
total_points += temp_cart.shape[0]
packing_factor = total_occupied / total_points
self.struct.properties["PackingFactor"] = packing_factor
return packing_factor
def keep_inside(self, struct):
geo = struct.get_geo_array()
ele = struct.elements
vdw_list = [self.vdw[atomic_numbers[x]] for x in ele]
correction = np.max(vdw_list)
max_frac_correction = correction / np.min(self.norm)
max_frac_correction = max_frac_correction*2
frac = np.dot(geo,self.linv)
keep_idx = np.where(
np.logical_and(
(frac>=(0-max_frac_correction)).all(axis=-1),
(frac<=(1+max_frac_correction)).all(axis=-1)
))[0]
geo = geo[keep_idx]
ele = ele[keep_idx]
unit_cell_struct = Structure.from_geo(geo, ele)
unit_cell_struct.set_lattice_vectors(self.lat)
return unit_cell_struct
def move_atoms_in(self):
geo = self.struct.get_geo_array()
ele = self.struct.elements
frac = np.dot(self.linv,geo.T).T
## Fix values greater than 1
greater_idx = np.where(frac > 1)
subtract = frac[greater_idx].astype(int)
frac[greater_idx] -= subtract
## Fix values less than zero
less_idx = np.where(frac < 0)
add = -frac[less_idx].astype(int)+1
frac[less_idx] += add
cart = np.dot(frac, self.lat)
struct = Structure.from_geo(cart, ele)
struct.set_lattice_vectors(self.lat)
return struct
def generate_grid(self, num):
## This should be adjusted so that it's exactly at the edge of the
ge_x = np.arange(0,num[0],1) / num[0]
range_y = np.arange(0,num[1],1) / num[1]
range_z = np.arange(0,num[2],1) / num[2]
grid_x,grid_y,grid_z = np.meshgrid(range_x,range_y,range_z,
indexing="ij")
grid = np.c_[grid_x.ravel(),
grid_y.ravel(),
grid_z.ravel()]
return grid
if __name__ == "__main__":
pass
| true | true |
1c343b4f04b36da4721ba9a17023c8fdf11baf55 | 306 | py | Python | sndg/users/apps.py | ezequieljsosa/sndg-web | 7763c8fbc83dc92abb9c53326e2fe227bcabf607 | [
"MIT"
] | null | null | null | sndg/users/apps.py | ezequieljsosa/sndg-web | 7763c8fbc83dc92abb9c53326e2fe227bcabf607 | [
"MIT"
] | null | null | null | sndg/users/apps.py | ezequieljsosa/sndg-web | 7763c8fbc83dc92abb9c53326e2fe227bcabf607 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
    """Django application configuration for the ``sndg.users`` app."""
    name = "sndg.users"
    verbose_name = _("Users")
    def ready(self):
        # Import signal handlers for their registration side effects once the
        # app registry is ready; tolerate the module being absent.
        try:
            import sndg.users.signals  # noqa F401
        except ImportError:
            pass
| 21.857143 | 54 | 0.647059 | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
name = "sndg.users"
verbose_name = _("Users")
def ready(self):
try:
import sndg.users.signals
except ImportError:
pass
| true | true |
1c343ba8a0535159ec3ae4576051ac4bf144647f | 263 | py | Python | geminidr/niri/recipes/ql/recipes_FLAT_IMAGE.py | DBerke/DRAGONS | cecf9a03970af95126bd17a227bd5214a5d6c64b | [
"BSD-3-Clause"
] | 19 | 2017-10-23T14:52:51.000Z | 2022-03-28T04:49:00.000Z | geminidr/niri/recipes/ql/recipes_FLAT_IMAGE.py | DBerke/DRAGONS | cecf9a03970af95126bd17a227bd5214a5d6c64b | [
"BSD-3-Clause"
] | 194 | 2017-11-01T17:32:45.000Z | 2022-03-31T21:32:59.000Z | geminidr/niri/recipes/ql/recipes_FLAT_IMAGE.py | DBerke/DRAGONS | cecf9a03970af95126bd17a227bd5214a5d6c64b | [
"BSD-3-Clause"
] | 16 | 2017-11-01T05:18:04.000Z | 2021-12-14T23:08:57.000Z | """
Recipes available to data with tags ['NIRI', 'CAL', 'IMAGE', 'FLAT'].
Default is "makeProcessedFlat".
"""
# Tag set this recipe module applies to (see module docstring).
recipe_tags = {'NIRI', 'CAL', 'IMAGE', 'FLAT'}
# Quick-look (ql) reuses the science-quality (sq) flat recipe unchanged.
from geminidr.niri.recipes.sq.recipes_FLAT_IMAGE import makeProcessedFlat
# Recipe used when the caller does not request one explicitly.
_default = makeProcessedFlat
| 23.909091 | 73 | 0.722433 | recipe_tags = {'NIRI', 'CAL', 'IMAGE', 'FLAT'}
from geminidr.niri.recipes.sq.recipes_FLAT_IMAGE import makeProcessedFlat
_default = makeProcessedFlat
| true | true |
1c343bc3d230830ecaf1ae3297bee7e19a1e30f8 | 3,258 | py | Python | examples/demo3.py | Freakwill/skinner | bcb036fc753addcd09655b7a775dbcdb1f99f1f6 | [
"MIT"
] | null | null | null | examples/demo3.py | Freakwill/skinner | bcb036fc753addcd09655b7a775dbcdb1f99f1f6 | [
"MIT"
] | null | null | null | examples/demo3.py | Freakwill/skinner | bcb036fc753addcd09655b7a775dbcdb1f99f1f6 | [
"MIT"
] | 2 | 2020-09-28T06:09:45.000Z | 2021-01-17T04:16:40.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import gym
gym.register(
id='GridWorld-v1',
entry_point='simple_grid:MyGridWorld3',
max_episode_steps=200,
reward_threshold=1000
)
from objects import Robot, NeuralRobot
class MyRobot(Robot):
    """Grid-world robot for the button/trap demo.

    State layout: ``(x, y, flag1, flag2, flag3, power)`` -- the position,
    one flag per button pressed so far (buttons 1/2/3 set indices 2/3/4),
    and the remaining power.
    """

    init_power = 25

    @property
    def flag1(self):
        # Set after visiting BUTTON1; disarms DEATHTRAP1.
        return self.state[2]

    @property
    def flag2(self):
        # Set after visiting BUTTON2; disarms DEATHTRAP2.
        return self.state[3]

    @property
    def flag3(self):
        # Set after visiting BUTTON3; disarms DEATHTRAP3.
        return self.state[4]

    @property
    def power(self):
        return self.state[5]

    def _reset(self):
        # Start in the bottom-left corner with no buttons pressed and
        # full initial power.
        self.state = 1, self.env.n_rows, 0, 0, 0, self.init_power

    def _next_state(self, state, action):
        """Transition function

        Arguments:
            state -- state before action
            action -- the action selected by the agent ('e'/'w'/'s'/'n')

        Returns:
            new state

        Raises:
            Exception -- invalid action
        """
        moves = {'e': (1, 0), 'w': (-1, 0), 's': (0, -1), 'n': (0, 1)}
        if action not in moves:
            raise Exception('invalid action!')
        dx, dy = moves[action]
        next_state = (state[0] + dx, state[1] + dy, *state[2:])

        # A move into an obstacle leaves the robot where it was.
        if self.env.collide(next_state[:2]):
            next_state = state

        pos = next_state[:2]
        if pos == self.env.BUTTON1:
            next_state = (*next_state[:2], 1, *next_state[3:])
        elif pos == self.env.BUTTON2:
            next_state = (*next_state[:3], 1, *next_state[4:])
        elif pos == self.env.BUTTON3:
            next_state = (*next_state[:4], 1, next_state[5])
        elif pos == self.env.CHARGER:
            # Charger tops the power up to 50.
            next_state = (*next_state[:5], 50)
        return next_state

    def _get_reward(self, state0, action, state1):
        """Reward function

        called in step method

        Arguments:
            state0 -- state before action
            action -- the action
            state1 -- state after action

        Returns:
            number -- reward
        """
        pos = state1[:2]
        if pos in self.env.TRAPS:
            r = -20
        elif pos == self.env.DEATHTRAP1:
            # Disarmed once button 1 (flag1) has been pressed.
            r = -1 if state1[2] else -30
        elif pos == self.env.DEATHTRAP2:
            # Disarmed once button 2 (flag2) has been pressed.
            r = -1 if state1[3] else -30
        elif pos == self.env.DEATHTRAP3:
            # Bug fix: DEATHTRAP3 is disarmed by button 3 (flag3, index 4);
            # the original checked flag2 (index 3) again, a copy-paste slip.
            r = -1 if state1[4] else -30
        elif pos == self.env.DEATHTRAP4:
            # No disarming button exists for this trap.
            r = -30
        elif pos == self.env.GOLD:
            r = 20
        elif state0[:2] == pos:
            # Did not move (bumped a wall/obstacle): extra penalty.
            r = -2
        else:
            # Ordinary step cost.
            r = -1
        return r
if __name__ == '__main__':
    # Build the configured grid world, attach one Q-learning robot and
    # run a 200-epoch demonstration.
    env = gym.make('GridWorld-v1')
    env.config('config3.yaml')
    agent = MyRobot(alpha=0.7, gamma=0.9, epsilon=0.1)
    env.add_agent(agent)
    env.seed()
    env.demo(n_epochs=200)
| 25.653543 | 103 | 0.503376 |
import gym
gym.register(
id='GridWorld-v1',
entry_point='simple_grid:MyGridWorld3',
max_episode_steps=200,
reward_threshold=1000
)
from objects import Robot, NeuralRobot
class MyRobot(Robot):
init_power = 25
@property
def flag1(self):
return self.state[2]
@property
def flag2(self):
return self.state[3]
@property
def flag3(self):
return self.state[4]
@property
def power(self):
return self.state[5]
def _reset(self):
self.state = 1, self.env.n_rows, 0, 0, 0, self.init_power
def _next_state(self, state, action):
if action == 'e':
next_state = (state[0]+1, state[1], *state[2:])
elif action == 'w':
next_state = (state[0]-1, state[1], *state[2:])
elif action == 's':
next_state = (state[0], state[1]-1, *state[2:])
elif action == 'n':
next_state = (state[0], state[1]+1, *state[2:])
else:
raise Exception('invalid action!')
if self.env.collide(next_state[:2]):
next_state = state
if next_state[:2] == self.env.BUTTON1:
next_state = (next_state[0], next_state[1], 1, next_state[3], next_state[4], next_state[5])
elif next_state[:2] == self.env.BUTTON2:
next_state = (next_state[0], next_state[1], next_state[2], 1, next_state[4], next_state[5])
elif next_state[:2] == self.env.BUTTON3:
next_state = (*next_state[:4], 1, next_state[5])
elif next_state[:2] == self.env.CHARGER:
next_state = (*next_state[:5], 50)
return next_state
def _get_reward(self, state0, action, state1):
if state1[:2] in self.env.TRAPS:
r = -20
elif state1[:2] == self.env.DEATHTRAP1:
if state1[2]:
r = -1
else:
r = -30
elif state1[:2] == self.env.DEATHTRAP2:
if state1[3]:
r = -1
else:
r = -30
elif state1[:2] == self.env.DEATHTRAP3:
if state1[3]:
r = -1
else:
r = -30
elif state1[:2] == self.env.DEATHTRAP4:
r = -30
elif state1[:2] == self.env.GOLD:
r = 20
elif state0[:2] == state1[:2]:
r = -2
else:
r = -1
return r
if __name__ == '__main__':
env = gym.make('GridWorld-v1')
env.config('config3.yaml')
agent = MyRobot(alpha=0.7, gamma=0.9, epsilon=0.1)
env.add_agent(agent)
env.seed()
env.demo(n_epochs=200)
| true | true |
1c343cb4a2aa73ac773b7d30298cff3da21bcd2e | 1,392 | py | Python | resources/errors.py | weis999/btfwest01 | faba7c53c1ff6e0eb3ffd36f33df39ab490ec2f6 | [
"MIT"
] | null | null | null | resources/errors.py | weis999/btfwest01 | faba7c53c1ff6e0eb3ffd36f33df39ab490ec2f6 | [
"MIT"
] | null | null | null | resources/errors.py | weis999/btfwest01 | faba7c53c1ff6e0eb3ffd36f33df39ab490ec2f6 | [
"MIT"
] | null | null | null | class InternalServerError(Exception):
pass
class SchemaValidationError(Exception):
    """Raised when a request payload is missing required fields (HTTP 400)."""
class FieldAlreadyExistsError(Exception):
    """Raised when a field with the given name already exists (HTTP 400)."""
class UpdatingFieldError(Exception):
    """Raised when updating a field added by another user (HTTP 403)."""
class DeletingFieldError(Exception):
    """Raised when deleting a field added by another user (HTTP 403)."""
class FieldNotExistsError(Exception):
    """Raised when no field exists for the given uuid (HTTP 400)."""
class EmailAlreadyExistsError(Exception):
    """Raised when a user with the given email already exists (HTTP 400)."""
class UnauthorizedError(Exception):
    """Raised on an invalid username or password (HTTP 401)."""
# Maps exception class names to the JSON error payload (Dutch, user-facing
# message plus HTTP status) returned by the API's error handler.
errors = {
    "InternalServerError": {
        "message": "Er is iets misgegaan.",
        "status": 500
    },
    "SchemaValidationError": {
        "message": "Request ontbreekt verplichte velden",
        "status": 400
    },
    "FieldAlreadyExistsError": {
        "message": "Field met naam bestaat al",
        "status": 400
    },
    "UpdatingFieldError": {
        "message": "Updating field welke toegevoegd is door andere is verboden",
        "status": 403
    },
    "DeletingFieldError": {
        "message": "Deleting field welke toegevoegd is door andere is verboden",
        "status": 403
    },
    "FieldNotExistsError": {
        "message": "Field met gegeven uuid bestaat niet",
        "status": 400
    },
    "EmailAlreadyExistsError": {
        "message": "User met het opgegeven email bestaat al",
        "status": 400
    },
    "UnauthorizedError": {
        "message": "Ongeldige gebruikersnaam of wachtwoord",
        "status": 401
    }
} | 24 | 81 | 0.619253 | class InternalServerError(Exception):
pass
class SchemaValidationError(Exception):
pass
class FieldAlreadyExistsError(Exception):
pass
class UpdatingFieldError(Exception):
pass
class DeletingFieldError(Exception):
pass
class FieldNotExistsError(Exception):
pass
class EmailAlreadyExistsError(Exception):
pass
class UnauthorizedError(Exception):
pass
errors = {
"InternalServerError": {
"message": "Er is iets misgegaan.",
"status": 500
},
"SchemaValidationError": {
"message": "Request ontbreekt verplichte velden",
"status": 400
},
"FieldAlreadyExistsError": {
"message": "Field met naam bestaat al",
"status": 400
},
"UpdatingFieldError": {
"message": "Updating field welke toegevoegd is door andere is verboden",
"status": 403
},
"DeletingFieldError": {
"message": "Deleting field welke toegevoegd is door andere is verboden",
"status": 403
},
"FieldNotExistsError": {
"message": "Field met gegeven uuid bestaat niet",
"status": 400
},
"EmailAlreadyExistsError": {
"message": "User met het opgegeven email bestaat al",
"status": 400
},
"UnauthorizedError": {
"message": "Ongeldige gebruikersnaam of wachtwoord",
"status": 401
}
} | true | true |
1c343ec8d7406166f1473bdd9c9d642b8f2b9fdb | 4,027 | py | Python | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/timeperiod_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 10 | 2019-05-11T18:07:14.000Z | 2021-08-20T03:02:47.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/timeperiod_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 1 | 2020-07-10T08:25:36.000Z | 2020-07-10T08:25:36.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/timeperiod_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 18 | 2019-08-19T12:11:00.000Z | 2021-10-12T09:36:27.000Z | from typing import Pattern, Dict
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor
from recognizers_number.number.french.extractors import FrenchIntegerExtractor
from ...resources.french_date_time import FrenchDateTime
from ..extractors import DateTimeExtractor
from ..parsers import DateTimeParser
from ..base_configs import BaseDateParserConfiguration, DateTimeUtilityConfiguration
from ..base_timeperiod import TimePeriodParserConfiguration, MatchedTimeRegex
from ..constants import Constants
from ..utilities import TimexUtil
class FrenchTimePeriodParserConfiguration(TimePeriodParserConfiguration):
    """French-language configuration for the time-period parser.

    Bundles the extractors, sub-parsers, vocabulary and compiled regexes the
    shared time-period parser needs for French ranges (e.g. "entre 8h et 10h").
    """

    @property
    def time_extractor(self) -> DateTimeExtractor:
        return self._time_extractor

    @property
    def time_parser(self) -> DateTimeParser:
        return self._time_parser

    @property
    def integer_extractor(self) -> Extractor:
        return self._integer_extractor

    @property
    def pure_number_from_to_regex(self) -> Pattern:
        # Bare numeric ranges of the "from X to Y" form.
        return self._pure_number_from_to_regex

    @property
    def pure_number_between_and_regex(self) -> Pattern:
        # Bare numeric ranges of the "between X and Y" form.
        return self._pure_number_between_and_regex

    @property
    def time_of_day_regex(self) -> Pattern:
        return self._time_of_day_regex

    @property
    def till_regex(self) -> Pattern:
        return self._till_regex

    @property
    def numbers(self) -> Dict[str, int]:
        return self._numbers

    @property
    def utility_configuration(self) -> DateTimeUtilityConfiguration:
        return self._utility_configuration

    def __init__(self, config: BaseDateParserConfiguration):
        self._time_extractor = config.time_extractor
        self._time_parser = config.time_parser
        self._integer_extractor = config.integer_extractor
        self._numbers = config.numbers
        self._utility_configuration = config.utility_configuration
        self._pure_number_from_to_regex = RegExpUtility.get_safe_reg_exp(
            FrenchDateTime.PureNumFromTo)
        self._pure_number_between_and_regex = RegExpUtility.get_safe_reg_exp(
            FrenchDateTime.PureNumBetweenAnd)
        self._time_of_day_regex = RegExpUtility.get_safe_reg_exp(
            FrenchDateTime.TimeOfDayRegex)
        self._till_regex = RegExpUtility.get_safe_reg_exp(
            FrenchDateTime.TillRegex)

    def get_matched_timex_range(self, source: str) -> MatchedTimeRegex:
        """Resolve a French part-of-day phrase ("matin", "soir", ...) to its
        timex and begin/end hours.

        Returns a MatchedTimeRegex with matched=False when the text is not a
        recognized part of day. (Dead initializations of timex and the hour
        fields -- always overwritten before use -- have been removed.)
        """
        trimmed_text = source.strip().lower()
        # Normalize simple plurals, e.g. "matins" -> "matin".
        if trimmed_text.endswith('s'):
            trimmed_text = trimmed_text[:-1]

        if any(trimmed_text.endswith(o) for o in FrenchDateTime.MorningTermList):
            time_of_day = Constants.Morning
        elif any(trimmed_text.endswith(o) for o in FrenchDateTime.AfternoonTermList):
            time_of_day = Constants.Afternoon
        elif any(trimmed_text.endswith(o) for o in FrenchDateTime.EveningTermList):
            time_of_day = Constants.Evening
        elif source == FrenchDateTime.DaytimeTermList[0] or source.endswith(FrenchDateTime.DaytimeTermList[1]) \
                or source.endswith(FrenchDateTime.DaytimeTermList[2]):
            # NOTE(review): this branch tests the raw ``source`` rather than
            # ``trimmed_text`` -- preserved as-is; confirm it is intentional.
            time_of_day = Constants.Daytime
        elif any(trimmed_text.endswith(o) for o in FrenchDateTime.NightTermList):
            time_of_day = Constants.Night
        else:
            return MatchedTimeRegex(
                matched=False,
                timex='',
                begin_hour=0,
                end_hour=0,
                end_min=0
            )

        parse_result = TimexUtil.parse_time_of_day(time_of_day)
        return MatchedTimeRegex(
            matched=True,
            timex=parse_result.timex,
            begin_hour=parse_result.begin_hour,
            end_hour=parse_result.end_hour,
            end_min=parse_result.end_min
        )
| 36.279279 | 112 | 0.6963 | from typing import Pattern, Dict
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor
from recognizers_number.number.french.extractors import FrenchIntegerExtractor
from ...resources.french_date_time import FrenchDateTime
from ..extractors import DateTimeExtractor
from ..parsers import DateTimeParser
from ..base_configs import BaseDateParserConfiguration, DateTimeUtilityConfiguration
from ..base_timeperiod import TimePeriodParserConfiguration, MatchedTimeRegex
from ..constants import Constants
from ..utilities import TimexUtil
class FrenchTimePeriodParserConfiguration(TimePeriodParserConfiguration):
@property
def time_extractor(self) -> DateTimeExtractor:
return self._time_extractor
@property
def time_parser(self) -> DateTimeParser:
return self._time_parser
@property
def integer_extractor(self) -> Extractor:
return self._integer_extractor
@property
def pure_number_from_to_regex(self) -> Pattern:
return self._pure_number_from_to_regex
@property
def pure_number_between_and_regex(self) -> Pattern:
return self._pure_number_between_and_regex
@property
def time_of_day_regex(self) -> Pattern:
return self._time_of_day_regex
@property
def till_regex(self) -> Pattern:
return self._till_regex
@property
def numbers(self) -> Dict[str, int]:
return self._numbers
@property
def utility_configuration(self) -> DateTimeUtilityConfiguration:
return self._utility_configuration
def __init__(self, config: BaseDateParserConfiguration):
self._time_extractor = config.time_extractor
self._time_parser = config.time_parser
self._integer_extractor = config.integer_extractor
self._numbers = config.numbers
self._utility_configuration = config.utility_configuration
self._pure_number_from_to_regex = RegExpUtility.get_safe_reg_exp(
FrenchDateTime.PureNumFromTo)
self._pure_number_between_and_regex = RegExpUtility.get_safe_reg_exp(
FrenchDateTime.PureNumBetweenAnd)
self._time_of_day_regex = RegExpUtility.get_safe_reg_exp(
FrenchDateTime.TimeOfDayRegex)
self._till_regex = RegExpUtility.get_safe_reg_exp(
FrenchDateTime.TillRegex)
def get_matched_timex_range(self, source: str) -> MatchedTimeRegex:
trimmed_text = source.strip().lower()
if trimmed_text.endswith('s'):
trimmed_text = trimmed_text[:-1]
timex = ''
begin_hour = 0
end_hour = 0
end_min = 0
time_of_day = ""
if any(trimmed_text.endswith(o) for o in FrenchDateTime.MorningTermList):
time_of_day = Constants.Morning
elif any(trimmed_text.endswith(o) for o in FrenchDateTime.AfternoonTermList):
time_of_day = Constants.Afternoon
elif any(trimmed_text.endswith(o) for o in FrenchDateTime.EveningTermList):
time_of_day = Constants.Evening
elif source == FrenchDateTime.DaytimeTermList[0] or source.endswith(FrenchDateTime.DaytimeTermList[1]) \
or source.endswith(FrenchDateTime.DaytimeTermList[2]):
time_of_day = Constants.Daytime
elif any(trimmed_text.endswith(o) for o in FrenchDateTime.NightTermList):
time_of_day = Constants.Night
else:
return MatchedTimeRegex(
matched=False,
timex='',
begin_hour=0,
end_hour=0,
end_min=0
)
parse_result = TimexUtil.parse_time_of_day(time_of_day)
timex = parse_result.timex
begin_hour = parse_result.begin_hour
end_hour = parse_result.end_hour
end_min = parse_result.end_min
return MatchedTimeRegex(
matched=True,
timex=timex,
begin_hour=begin_hour,
end_hour=end_hour,
end_min=end_min
)
| true | true |
1c343f0558d28063ad73e27bd3bc87fab9203ce6 | 1,016 | py | Python | main.py | angie1148/eciespy-demo | 021a20c827e354d858a6d3df376e657695b37f44 | [
"MIT"
] | 1 | 2020-06-28T11:42:24.000Z | 2020-06-28T11:42:24.000Z | main.py | angie1148/eciespy-demo | 021a20c827e354d858a6d3df376e657695b37f44 | [
"MIT"
] | null | null | null | main.py | angie1148/eciespy-demo | 021a20c827e354d858a6d3df376e657695b37f44 | [
"MIT"
] | 2 | 2020-06-28T11:54:29.000Z | 2021-09-13T04:38:50.000Z | from typing import Optional
from ecies import decrypt
from ecies import encrypt
from fastapi import FastAPI
from fastapi import Form
from fastapi import HTTPException
from fastapi.responses import Response
app = FastAPI()
def resp_string(msg):
    # Wrap a str/bytes payload as a plain-text HTTP response.
    return Response(content=msg, media_type="plain/text")
@app.post("/")
async def encrypt_decrypt(
prv: Optional[str] = Form(None),
pub: Optional[str] = Form(None),
data: str = Form(...),
):
if prv and data:
try:
decrypted = decrypt(prv, bytes.fromhex(data))
return resp_string(decrypted)
except ValueError:
raise HTTPException(status_code=400, detail="Invalid private key")
elif pub and data:
try:
encrypted = encrypt(pub, data.encode())
return resp_string(encrypted.hex())
except ValueError:
raise HTTPException(status_code=400, detail="Invalid public key")
else:
raise HTTPException(status_code=400, detail="Invalid request")
| 27.459459 | 78 | 0.669291 | from typing import Optional
from ecies import decrypt
from ecies import encrypt
from fastapi import FastAPI
from fastapi import Form
from fastapi import HTTPException
from fastapi.responses import Response
app = FastAPI()
def resp_string(msg):
return Response(content=msg, media_type="plain/text")
@app.post("/")
async def encrypt_decrypt(
prv: Optional[str] = Form(None),
pub: Optional[str] = Form(None),
data: str = Form(...),
):
if prv and data:
try:
decrypted = decrypt(prv, bytes.fromhex(data))
return resp_string(decrypted)
except ValueError:
raise HTTPException(status_code=400, detail="Invalid private key")
elif pub and data:
try:
encrypted = encrypt(pub, data.encode())
return resp_string(encrypted.hex())
except ValueError:
raise HTTPException(status_code=400, detail="Invalid public key")
else:
raise HTTPException(status_code=400, detail="Invalid request")
| true | true |
1c343f3fa0f6f5b05faa870617a0bb83679f92a3 | 2,207 | py | Python | src/openpersonen/contrib/stufbg/migrations/0004_auto_20210423_1630.py | maykinmedia/open-personen | ddcf083ccd4eb864c5305bcd8bc75c6c64108272 | [
"RSA-MD"
] | 2 | 2020-08-26T11:24:43.000Z | 2021-07-28T09:46:40.000Z | src/openpersonen/contrib/stufbg/migrations/0004_auto_20210423_1630.py | maykinmedia/open-personen | ddcf083ccd4eb864c5305bcd8bc75c6c64108272 | [
"RSA-MD"
] | 153 | 2020-08-26T10:45:35.000Z | 2021-12-10T17:33:16.000Z | src/openpersonen/contrib/stufbg/migrations/0004_auto_20210423_1630.py | maykinmedia/open-personen | ddcf083ccd4eb864c5305bcd8bc75c6c64108272 | [
"RSA-MD"
] | null | null | null | # Generated by Django 2.2.15 on 2021-04-23 14:30
from django.db import migrations, models
import privates.fields
import privates.storages
class Migration(migrations.Migration):
    """Auto-generated migration: make the StUF-BG client's certificate files
    optional (blank/null) and allow url/user/password to be blank."""

    dependencies = [
        ("stufbg", "0003_auto_20201117_1144"),
    ]

    operations = [
        # Certificate and key become optional; leaving them empty disables
        # mutual TLS (see help_text).
        migrations.AlterField(
            model_name="stufbgclient",
            name="certificate",
            field=privates.fields.PrivateMediaFileField(
                blank=True,
                help_text="The SSL certificate file used for client identification. If left empty, mutual TLS is disabled.",
                null=True,
                storage=privates.storages.PrivateMediaFileSystemStorage(),
                upload_to="certificate/",
            ),
        ),
        migrations.AlterField(
            model_name="stufbgclient",
            name="certificate_key",
            field=privates.fields.PrivateMediaFileField(
                blank=True,
                help_text="The SSL certificate key file used for client identification. If left empty, mutual TLS is disabled.",
                null=True,
                storage=privates.storages.PrivateMediaFileSystemStorage(),
                upload_to="certificate/",
            ),
        ),
        # Connection settings may now be left blank until configured.
        migrations.AlterField(
            model_name="stufbgclient",
            name="password",
            field=models.CharField(
                blank=True,
                help_text="Password to use in the XML security context.",
                max_length=200,
                verbose_name="password",
            ),
        ),
        migrations.AlterField(
            model_name="stufbgclient",
            name="url",
            field=models.URLField(
                blank=True,
                help_text="URL of the StUF-BG service to connect to.",
                verbose_name="url",
            ),
        ),
        migrations.AlterField(
            model_name="stufbgclient",
            name="user",
            field=models.CharField(
                blank=True,
                help_text="Username to use in the XML security context.",
                max_length=200,
                verbose_name="user",
            ),
        ),
    ]
| 32.940299 | 128 | 0.541459 |
from django.db import migrations, models
import privates.fields
import privates.storages
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes the StUF-BG client fields
    # (certificate, certificate_key, password, url, user) to be optional
    # (blank/null), so — per the help_texts — mutual TLS and credentials
    # can be left unset.
    dependencies = [
        ("stufbg", "0003_auto_20201117_1144"),
    ]
    operations = [
        migrations.AlterField(
            model_name="stufbgclient",
            name="certificate",
            field=privates.fields.PrivateMediaFileField(
                blank=True,
                help_text="The SSL certificate file used for client identification. If left empty, mutual TLS is disabled.",
                null=True,
                storage=privates.storages.PrivateMediaFileSystemStorage(),
                upload_to="certificate/",
            ),
        ),
        migrations.AlterField(
            model_name="stufbgclient",
            name="certificate_key",
            field=privates.fields.PrivateMediaFileField(
                blank=True,
                help_text="The SSL certificate key file used for client identification. If left empty, mutual TLS is disabled.",
                null=True,
                storage=privates.storages.PrivateMediaFileSystemStorage(),
                upload_to="certificate/",
            ),
        ),
        migrations.AlterField(
            model_name="stufbgclient",
            name="password",
            field=models.CharField(
                blank=True,
                help_text="Password to use in the XML security context.",
                max_length=200,
                verbose_name="password",
            ),
        ),
        migrations.AlterField(
            model_name="stufbgclient",
            name="url",
            field=models.URLField(
                blank=True,
                help_text="URL of the StUF-BG service to connect to.",
                verbose_name="url",
            ),
        ),
        migrations.AlterField(
            model_name="stufbgclient",
            name="user",
            field=models.CharField(
                blank=True,
                help_text="Username to use in the XML security context.",
                max_length=200,
                verbose_name="user",
            ),
        ),
    ]
| true | true |
1c343fc9ce308e57dd6611549a4cce4eadd6b1a8 | 569 | py | Python | examples/1_simple_script/school_demand.py | culebron/erde | 9bbaaa1df46629a182c355413a120aa33dc6b377 | [
"BSD-3-Clause"
] | 16 | 2021-08-24T05:59:04.000Z | 2021-11-16T12:30:34.000Z | examples/1_simple_script/school_demand.py | culebron/erde | 9bbaaa1df46629a182c355413a120aa33dc6b377 | [
"BSD-3-Clause"
] | null | null | null | examples/1_simple_script/school_demand.py | culebron/erde | 9bbaaa1df46629a182c355413a120aa33dc6b377 | [
"BSD-3-Clause"
] | 2 | 2021-08-30T10:27:13.000Z | 2021-08-31T09:46:49.000Z | from erde import read_df, write_df
from erde.op import sjoin, buffer
houses_df = read_df('houses.csv')
schools_df = read_df('schools.csv')
schools_df['school'] = schools_df.index.tolist()
school_buf = buffer.main(schools_df.geometry, 1000, default_crs=4326)
demand = sjoin.sagg(houses_df, schools_df, {'school': 'count'}, right_on=school_buf)
demand['apts_demand'] = demand.apartments / demand.school
write_df(demand, '/tmp/demand.csv')
result = sjoin.sagg(schools_df, demand, {'apts_demand': 'sum'}, left_on=school_buf)
write_df(result, '/tmp/school_demand.csv')
| 33.470588 | 84 | 0.757469 | from erde import read_df, write_df
from erde.op import sjoin, buffer
houses_df = read_df('houses.csv')
schools_df = read_df('schools.csv')
schools_df['school'] = schools_df.index.tolist()
school_buf = buffer.main(schools_df.geometry, 1000, default_crs=4326)
demand = sjoin.sagg(houses_df, schools_df, {'school': 'count'}, right_on=school_buf)
demand['apts_demand'] = demand.apartments / demand.school
write_df(demand, '/tmp/demand.csv')
result = sjoin.sagg(schools_df, demand, {'apts_demand': 'sum'}, left_on=school_buf)
write_df(result, '/tmp/school_demand.csv')
| true | true |
1c3441dc3d6dbd427623f915beacb0d55c67e152 | 6,491 | py | Python | wmb.py | cimini-tech/wmb | 94000cd2a0971e25d437879429759fc57704df8f | [
"MIT"
] | null | null | null | wmb.py | cimini-tech/wmb | 94000cd2a0971e25d437879429759fc57704df8f | [
"MIT"
] | null | null | null | wmb.py | cimini-tech/wmb | 94000cd2a0971e25d437879429759fc57704df8f | [
"MIT"
] | null | null | null | source_folder = "publish/"
# Site layout configuration: input/output folders and template part names.
destination_folder = "html/"
template_folder = "parts/"
aside_folder = "aside/"
page_header = "header.html"
page_footer = "footer.html"
index_header_file = "index_header.html"
html_ext = ".html"
import os
import re
from datetime import datetime
from shutil import move, copy2
from html.parser import HTMLParser
from pathlib import Path
from distutils import dir_util
def test_is_file(self):
try:
assert self.is_file()
except:
return
def _copy(self, target):
test_is_file(self)
copy2(str(self), str(target)) # str() only there for Python < (3, 6)
def _move(self, target):
test_is_file(self)
move(str(self), str(target))
Path.copy = _copy
Path.move = _move
def sanitize_filename(filename, article_title):
    """Derive a URL-friendly file name from *article_title*.

    Characters outside [a-zA-Z0-9 ] are dropped, whitespace runs collapse to
    a single dash, and the result is lower-cased with the HTML extension
    appended.  When no usable title exists (falsy, or identical to the
    current *filename*), the original *filename* is returned unchanged.
    """
    if not article_title or article_title == filename:
        return filename
    slug = re.sub(r'[^a-zA-Z0-9 ]', '', article_title)
    slug = re.sub(r'\s+', '-', slug)
    return slug.lower() + html_ext
def fix_file_name(post):
    """Rename *post*'s source file to its sanitized name when they differ.

    Returns a freshly parsed Post for the renamed file so downstream stages
    see consistent metadata; otherwise returns *post* unchanged.
    """
    if post.filename != post.filename_sanitized:
        post.source.move(post.source.parents[0] / post.filename_sanitized)
        return Post(post.source.parents[0] / post.filename_sanitized)
    return post
def get_date_stamps(*times):
    """For every datetime given, emit two strings — a full timestamp
    (``MM/DD/YYYY HH:MM:SS``) followed by a date-only stamp (``MM/DD/YYYY``)
    — flattened into one list."""
    stamps = []
    for moment in times:
        stamps.append(moment.strftime("%m/%d/%Y %H:%M:%S"))
        stamps.append(moment.strftime("%m/%d/%Y"))
    return stamps
def insert_publish_time(post, now=None):
    """Stamp *post* with a "Published on <time ...>" paragraph if it has none.

    The paragraph is inserted right after the article title line, the file is
    rewritten on disk, and a re-parsed Post is returned.  Already-published
    posts are returned untouched.

    Bug fix: the old default ``now=datetime.now()`` was evaluated once at
    import time, so every post published by a long-running process received
    the same frozen timestamp.  Compute the default per call instead.
    """
    if now is None:
        now = datetime.now()
    if post.metadata.published_time:
        return post
    post.content.insert(
        # Fall back to inserting at the top (-1 + 1 == 0) when no title line.
        (post.metadata.article_title_index or -1) + 1,
        "<p class=\"published-time\">Published on <time datetime=\"{0}\">{1}</time>".format(
            *get_date_stamps(now)
        )
    )
    post.source.write_text(str(post))
    return Post(post.source)
def insert_modified_time(post):
    """Append an "and last modified <time ...>" fragment to the post's
    published-time line when the file on disk is newer (by date) than the
    recorded publish date.  Mutates *post* in place and returns it."""
    if post.metadata.published_time and post.last_modified.date() > post.metadata.published_time.date():
        post.content[post.metadata.published_time_index] += " and last modified <time datetime=\"{0}\">{1}</time>".format(
            *get_date_stamps(post.last_modified)
        )
    return post
class ArticleParser(HTMLParser):
    """Single-pass scanner extracting post metadata from HTML lines.

    Records the first occurrence of each of: the <h1> text (article title)
    plus its line index, the first <time datetime="..."> value parseable as
    "%m/%d/%Y %H:%M:%S" plus its line index, and the value of a
    <category type="..."> tag.
    """
    # Class-level defaults; set once per parser instance during parse().
    article_title_index = None
    article_title = None
    published_time_index = None
    published_time = None
    category = None
    open_tag = None
    def parse_published_time(self, tag, attrs):
        # Only the first <time> whose first attribute is a well-formed
        # datetime wins; malformed values are silently ignored.
        if (self.published_time is None
            and tag == 'time'
            and len(attrs) > 0
            and len(attrs[0]) > 1
            and attrs[0][0] == 'datetime'):
            try:
                self.published_time = datetime.strptime(attrs[0][1], "%m/%d/%Y %H:%M:%S")
            except ValueError:
                pass
    def parse_category(self, tag, attrs):
        # First <category type="..."> wins.
        if (self.category is None
            and tag == "category"
            and len(attrs) > 0
            and len(attrs[0]) > 1
            and attrs[0][0] == "type"):
            self.category = attrs[0][1]
    def handle_starttag(self, tag, attrs):
        self.parse_published_time(tag, attrs)
        self.parse_category(tag, attrs)
        self.open_tag = tag
    def handle_endtag(self, tag):
        self.open_tag = None
    def handle_data(self, data):
        # Text directly inside the first <h1> becomes the article title.
        if self.article_title is None and self.open_tag == "h1":
            self.article_title = data
    def is_not_done(self):
        """True while any of title index / publish index / category is missing."""
        return self.article_title_index is None or self.published_time_index is None or self.category is None
    def parse(self, article, filename):
        """Feed *article* (a list of HTML lines) line by line, recording the
        line index at which title and publish time first appeared.  Falls
        back to *filename* as the title when no <h1> is found."""
        i = 0
        while self.is_not_done() and i < len(article):
            self.feed(article[i])
            if self.article_title_index is None and self.article_title:
                self.article_title_index = i
            if self.published_time_index is None and self.published_time:
                self.published_time_index = i
            i = i + 1
        if not self.article_title:
            self.article_title = filename
    def __str__(self):
        # Debug dump of all discovered metadata fields.
        return "\n".join("%s: %s" % item for item in vars(self).items())
class Post:
    """A single blog post: source file contents plus parsed metadata."""
    def __init__(self, filepath):
        self.filename = filepath.name
        self.source = filepath
        # Where the rendered post will be written.
        self.destination = Path() / destination_folder / self.filename
        self.content = self.source.read_text().splitlines()
        self.metadata = ArticleParser()
        self.metadata.parse(self.content,self.filename)
        self.filename_sanitized = sanitize_filename(self.filename,self.metadata.article_title)
        # Filesystem mtime, used to detect post-publication edits.
        self.last_modified = datetime.fromtimestamp(self.source.stat().st_mtime)
    def __str__(self):
        return "\n".join(self.content)
def get_icon_path(post):
    """Return an ``<img>`` tag for the post's category icon, or '' when the
    post has no category.

    Bug fix: the attribute value was emitted unquoted
    (``<img src=cat.svg/>``), which is malformed HTML; quote it.
    """
    if post.metadata.category:
        return '<img src="{0}.svg"/>'.format(post.metadata.category)
    return ""
def get_html_index_list_item(post):
    """Render one ``<li>`` entry for the index page: category icon, link to
    the post, and a short " – Mon D" date suffix."""
    published = post.metadata.published_time
    # Build the day number manually: strftime("%-d") is a glibc extension
    # and raises/misbehaves on non-glibc platforms (e.g. Windows).
    date_part = " – {0} {1}".format(published.strftime("%b"), published.day)
    return ("<li>{3}<a href=\"{0}\">{1}</a>{2}".format(
        post.filename,
        post.metadata.article_title,
        date_part,
        get_icon_path(post)))
def generate_html_index(posts):
    """Return the ``<ul class="posts">`` HTML listing all *posts* in the
    order given (the caller sorts newest-first)."""
    # Build the item list with a comprehension instead of the old
    # side-effecting ``[list.append(...) for ...]`` anti-pattern.
    items = [get_html_index_list_item(post) for post in posts]
    return "\n".join(["<ul class=\"posts\">"] + items + ["</ul>"])
def compile(posts):
    """Render the whole site: wipe the destination, write each post wrapped
    in header/footer, copy aside pages, templates and attachments, and write
    the index page.

    NOTE(review): this shadows the builtin ``compile``; renaming would break
    existing callers, so it is only flagged here.
    """
    template_path = Path(template_folder)
    destination_path = Path(destination_folder)
    aside_path = Path(aside_folder)
    header = (template_path / page_header).read_text()
    footer = (template_path / page_footer).read_text()
    index_header = (template_path / index_header_file).read_text()
    index_html = header + index_header + generate_html_index(posts) + footer
    # Clear previously rendered files (directories are kept).
    [os.remove(file) for file in destination_path.glob("*") if file.is_file()]
    [post.destination.write_text(header + str(post) + footer) for post in posts]
    [(destination_path / file.name).write_text(header + (file.read_text()) + footer) for file in aside_path.glob("*")]
    # ``file.copy`` is the Path monkeypatch defined above.
    [file.copy(destination_path / file.name) for file in template_path.glob("*")]
    dir_util.copy_tree((source_folder + "attachments/"),(destination_folder + "attachments/"),preserve_mode=0)
    (destination_path / "index.html").write_text(index_html)
def get_posts():
    """Load every file in the publish folder as a Post, normalize file names
    and publish/modified stamps, and return the posts newest-first."""
    posts = [
        Post(file_path)
        for file_path in Path(source_folder).glob("*")
        if file_path.is_file()
    ]
    posts = [insert_modified_time(insert_publish_time(fix_file_name(post))) for post in posts]
    posts.sort(key=lambda x: x.metadata.published_time, reverse=True)
    return posts
# Build the whole site whenever this module is executed/imported.
compile(get_posts())
| 34.343915 | 123 | 0.665075 | source_folder = "publish/"
# Site layout configuration: input/output folders and template part names.
destination_folder = "html/"
template_folder = "parts/"
aside_folder = "aside/"
page_header = "header.html"
page_footer = "footer.html"
index_header_file = "index_header.html"
html_ext = ".html"
import os
import re
from datetime import datetime
from shutil import move, copy2
from html.parser import HTMLParser
from pathlib import Path
from distutils import dir_util
def test_is_file(self):
try:
assert self.is_file()
except:
return
def _copy(self, target):
test_is_file(self)
copy2(str(self), str(target))
def _move(self, target):
test_is_file(self)
move(str(self), str(target))
Path.copy = _copy
Path.move = _move
def sanitize_filename(filename, article_title):
    """Derive a URL-friendly file name from *article_title*: drop characters
    outside [a-zA-Z0-9 ], collapse whitespace to dashes, lower-case, append
    the HTML extension.  Falls back to *filename* when the title is unusable."""
    if article_title and article_title != filename :
        new_article_title=re.sub(r'[^a-zA-Z0-9 ]','',article_title)
        new_article_title=re.sub(r'\s+','-',new_article_title)
        return (new_article_title).lower() + html_ext
    return filename
def fix_file_name(post):
    """Rename *post*'s source file to its sanitized name, returning a
    freshly parsed Post for the renamed file; otherwise return it unchanged."""
    if post.filename != post.filename_sanitized:
        post.source.move(post.source.parents[0] / post.filename_sanitized)
        return Post(post.source.parents[0] / post.filename_sanitized)
    return post
def get_date_stamps(*times):
    """For each datetime, yield a full "MM/DD/YYYY HH:MM:SS" stamp followed
    by a date-only "MM/DD/YYYY" stamp, flattened into one list."""
    return [timestamps for time in times for timestamps in (time.strftime("%m/%d/%Y %H:%M:%S"), time.strftime("%m/%d/%Y"))]
# NOTE(review): ``now = datetime.now()`` below is evaluated once at import
# time, so the default timestamp is frozen for the life of the process.
def insert_publish_time(post, now = datetime.now()):
    """Insert a "Published on <time ...>" paragraph after the title of a
    not-yet-published post, rewrite the file, and return a re-parsed Post;
    already-published posts are returned untouched."""
    if not post.metadata.published_time:
        post.content.insert(
            (post.metadata.article_title_index or -1) + 1,
            "<p class=\"published-time\">Published on <time datetime=\"{0}\">{1}</time>".format(
                *get_date_stamps(now)
            )
        )
        post.source.write_text(str(post))
        return Post(post.source)
    return post
def insert_modified_time(post):
    """Append an "and last modified <time ...>" fragment to the published
    line when the file's mtime date is newer than the publish date."""
    if post.metadata.published_time and post.last_modified.date() > post.metadata.published_time.date():
        post.content[post.metadata.published_time_index] += " and last modified <time datetime=\"{0}\">{1}</time>".format(
            *get_date_stamps(post.last_modified)
        )
    return post
class ArticleParser(HTMLParser):
    """Single-pass scanner extracting post metadata from HTML lines: first
    <h1> text (title) + line index, first parseable <time datetime="...">
    value + line index, and the first <category type="..."> value."""
    article_title_index = None
    article_title = None
    published_time_index = None
    published_time = None
    category = None
    open_tag = None
    def parse_published_time(self, tag, attrs):
        # First well-formed <time datetime="..."> wins; bad values ignored.
        if (self.published_time is None
            and tag == 'time'
            and len(attrs) > 0
            and len(attrs[0]) > 1
            and attrs[0][0] == 'datetime'):
            try:
                self.published_time = datetime.strptime(attrs[0][1], "%m/%d/%Y %H:%M:%S")
            except ValueError:
                pass
    def parse_category(self, tag, attrs):
        # First <category type="..."> wins.
        if (self.category is None
            and tag == "category"
            and len(attrs) > 0
            and len(attrs[0]) > 1
            and attrs[0][0] == "type"):
            self.category = attrs[0][1]
    def handle_starttag(self, tag, attrs):
        self.parse_published_time(tag, attrs)
        self.parse_category(tag, attrs)
        self.open_tag = tag
    def handle_endtag(self, tag):
        self.open_tag = None
    def handle_data(self, data):
        # Text directly inside the first <h1> becomes the article title.
        if self.article_title is None and self.open_tag == "h1":
            self.article_title = data
    def is_not_done(self):
        """True while any of title index / publish index / category is missing."""
        return self.article_title_index is None or self.published_time_index is None or self.category is None
    def parse(self, article, filename):
        """Feed *article* (list of HTML lines) until all metadata is found or
        lines run out; fall back to *filename* when no <h1> appears."""
        i = 0
        while self.is_not_done() and i < len(article):
            self.feed(article[i])
            if self.article_title_index is None and self.article_title:
                self.article_title_index = i
            if self.published_time_index is None and self.published_time:
                self.published_time_index = i
            i = i + 1
        if not self.article_title:
            self.article_title = filename
    def __str__(self):
        # Debug dump of all discovered metadata fields.
        return "\n".join("%s: %s" % item for item in vars(self).items())
class Post:
    """A single blog post: source file contents plus parsed metadata."""
    def __init__(self, filepath):
        self.filename = filepath.name
        self.source = filepath
        # Where the rendered post will be written.
        self.destination = Path() / destination_folder / self.filename
        self.content = self.source.read_text().splitlines()
        self.metadata = ArticleParser()
        self.metadata.parse(self.content,self.filename)
        self.filename_sanitized = sanitize_filename(self.filename,self.metadata.article_title)
        # Filesystem mtime, used to detect post-publication edits.
        self.last_modified = datetime.fromtimestamp(self.source.stat().st_mtime)
    def __str__(self):
        return "\n".join(self.content)
def get_icon_path(post):
    """Return an ``<img>`` tag for the post's category icon, or '' when the
    post has no category.

    Bug fix: the attribute value was emitted unquoted
    (``<img src=cat.svg/>``), which is malformed HTML; quote it.
    """
    if post.metadata.category:
        return '<img src="{0}.svg"/>'.format(post.metadata.category)
    return ""
def get_html_index_list_item(post):
    """Render one ``<li>`` index entry: icon, link, and " – Mon D" date.

    NOTE(review): strftime("%-d") is a glibc-only extension; this line fails
    on non-glibc platforms such as Windows — confirm target platforms.
    """
    return ("<li>{3}<a href=\"{0}\">{1}</a>{2}".format(
        post.filename,
        post.metadata.article_title,
        post.metadata.published_time.strftime(" – %b %-d"),
        get_icon_path(post)))
def generate_html_index(posts):
    """Return the ``<ul class="posts">`` HTML listing all *posts*."""
    html_index = ["<ul class=\"posts\">"]
    [html_index.append(get_html_index_list_item(post)) for post in posts]
    html_index.append("</ul>")
    return "\n".join(html_index)
def compile(posts):
    """Render the whole site: wipe the destination, write each post wrapped
    in header/footer, copy asides/templates/attachments, write the index.

    NOTE(review): shadows the builtin ``compile``.
    """
    template_path = Path(template_folder)
    destination_path = Path(destination_folder)
    aside_path = Path(aside_folder)
    header = (template_path / page_header).read_text()
    footer = (template_path / page_footer).read_text()
    index_header = (template_path / index_header_file).read_text()
    index_html = header + index_header + generate_html_index(posts) + footer
    # Clear previously rendered files (directories are kept).
    [os.remove(file) for file in destination_path.glob("*") if file.is_file()]
    [post.destination.write_text(header + str(post) + footer) for post in posts]
    [(destination_path / file.name).write_text(header + (file.read_text()) + footer) for file in aside_path.glob("*")]
    # ``file.copy`` is the Path monkeypatch defined above.
    [file.copy(destination_path / file.name) for file in template_path.glob("*")]
    dir_util.copy_tree((source_folder + "attachments/"),(destination_folder + "attachments/"),preserve_mode=0)
    (destination_path / "index.html").write_text(index_html)
def get_posts():
    """Load each publish-folder file as a Post, normalize names and stamps,
    and return the posts newest-first."""
    posts = [
        Post(file_path)
        for file_path in Path(source_folder).glob("*")
        if file_path.is_file()
    ]
    posts = [insert_modified_time(insert_publish_time(fix_file_name(post))) for post in posts]
    posts.sort(key=lambda x: x.metadata.published_time, reverse=True)
    return posts
# Build the whole site whenever this module is executed/imported.
compile(get_posts())
| true | true |
1c344437850de447a32479ca19b9ab57140a6b25 | 4,303 | py | Python | src/scout_apm/core/context.py | tony/scout_apm_python | f477b09b1ef6e644980130d4d44954f27570ada2 | [
"MIT"
] | 60 | 2018-04-15T04:09:39.000Z | 2022-03-29T12:10:40.000Z | src/scout_apm/core/context.py | tony/scout_apm_python | f477b09b1ef6e644980130d4d44954f27570ada2 | [
"MIT"
] | 326 | 2018-03-28T16:09:13.000Z | 2022-03-03T13:50:23.000Z | src/scout_apm/core/context.py | tony/scout_apm_python | f477b09b1ef6e644980130d4d44954f27570ada2 | [
"MIT"
] | 25 | 2018-05-30T17:59:46.000Z | 2022-02-24T19:40:02.000Z | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import threading
import time
from threading import local as ThreadLocal
from scout_apm.core.tracked_request import TrackedRequest
try:
from asgiref.local import Local as AsgiRefLocal
except ImportError:
# Old versions of Python or asgiref < 3.1
AsgiRefLocal = None
try:
import asyncio
except ImportError:
asyncio = None
try:
from contextvars import ContextVar
scout_context_var = ContextVar("__scout_trackedrequest")
except ImportError:
scout_context_var = None
SCOUT_REQUEST_ATTR = "__scout_trackedrequest"
def get_current_asyncio_task():
    """
    Cross-version implementation of asyncio.current_task()

    Returns None when asyncio is unavailable, no event loop is running, or
    no task is current.
    """
    if not asyncio:
        return None
    # Python 3.7+ exposes asyncio.current_task(); 3.6 only has the
    # classmethod spelling.
    getter = getattr(asyncio, "current_task", None)
    if getter is None:
        getter = asyncio.Task.current_task
    try:
        return getter()
    except RuntimeError:
        # Raised when called with no running event loop.
        return None
class SimplifiedAsgirefLocal:
    """
    A copy of asgiref 3.1+'s Local class without the sync_to_async /
    async_to_sync compatibility.

    Attribute access is keyed by the current asyncio task when one is
    running, otherwise by the current thread, so each context sees its own
    attribute namespace.
    """
    CLEANUP_INTERVAL = 60  # seconds
    def __init__(self):
        # {context_id: {attr_name: value}}
        self._storage = {}
        self._last_cleanup = time.time()
        self._clean_lock = threading.Lock()
    def _get_context_id(self):
        """
        Get the ID we should use for looking up variables
        """
        # First, pull the current task if we can
        context_id = get_current_asyncio_task()
        # OK, let's try for a thread ID
        if context_id is None:
            context_id = threading.current_thread()
        return context_id
    def _cleanup(self):
        """
        Cleans up any references to dead threads or tasks
        """
        # Iterate over a snapshot of the keys since we delete during the loop.
        for key in list(self._storage.keys()):
            if isinstance(key, threading.Thread):
                if not key.is_alive():
                    del self._storage[key]
            elif isinstance(key, asyncio.Task):
                if key.done():
                    del self._storage[key]
        self._last_cleanup = time.time()
    def _maybe_cleanup(self):
        """
        Cleans up if enough time has passed
        """
        if time.time() - self._last_cleanup > self.CLEANUP_INTERVAL:
            with self._clean_lock:
                self._cleanup()
    def __getattr__(self, key):
        context_id = self._get_context_id()
        if key in self._storage.get(context_id, {}):
            return self._storage[context_id][key]
        else:
            raise AttributeError("%r object has no attribute %r" % (self, key))
    def __setattr__(self, key, value):
        # Internal bookkeeping attributes bypass the per-context storage.
        # NOTE(review): "_thread_critical" is listed for parity with
        # asgiref's Local but appears unused here — confirm.
        if key in ("_storage", "_last_cleanup", "_clean_lock", "_thread_critical"):
            return super().__setattr__(key, value)
        self._maybe_cleanup()
        self._storage.setdefault(self._get_context_id(), {})[key] = value
    def __delattr__(self, key):
        context_id = self._get_context_id()
        if key in self._storage.get(context_id, {}):
            del self._storage[context_id][key]
        else:
            raise AttributeError("%r object has no attribute %r" % (self, key))
class LocalContext(object):
    """Holds the per-context TrackedRequest, using the best isolation
    mechanism available: contextvars when present, else asgiref's Local, a
    simplified local copy of it, or a plain thread-local."""
    def __init__(self):
        if AsgiRefLocal is not None:
            self._local = AsgiRefLocal()
        elif asyncio is not None:
            self._local = SimplifiedAsgirefLocal()
        else:
            self._local = ThreadLocal()
        self.use_context_var = scout_context_var is not None
    def get_tracked_request(self):
        """Return the current context's TrackedRequest, creating it lazily."""
        # contextvars path takes precedence when available.
        if scout_context_var:
            if not scout_context_var.get(None):
                scout_context_var.set(TrackedRequest())
            return scout_context_var.get()
        if not hasattr(self._local, "tracked_request"):
            self._local.tracked_request = TrackedRequest()
        return self._local.tracked_request
    def clear_tracked_request(self, instance):
        """Detach *instance* from this context, but only if it is the one
        currently stored (identity comparison)."""
        if getattr(self._local, "tracked_request", None) is instance:
            del self._local.tracked_request
        if scout_context_var and scout_context_var.get(None) is instance:
            scout_context_var.set(None)
# Process-wide singleton used to fetch/clear the active TrackedRequest.
context = LocalContext()
| 30.302817 | 83 | 0.62654 |
from __future__ import absolute_import, division, print_function, unicode_literals
import threading
import time
from threading import local as ThreadLocal
from scout_apm.core.tracked_request import TrackedRequest
try:
from asgiref.local import Local as AsgiRefLocal
except ImportError:
AsgiRefLocal = None
try:
import asyncio
except ImportError:
asyncio = None
try:
from contextvars import ContextVar
scout_context_var = ContextVar("__scout_trackedrequest")
except ImportError:
scout_context_var = None
SCOUT_REQUEST_ATTR = "__scout_trackedrequest"
def get_current_asyncio_task():
    """Cross-version asyncio.current_task(); returns None when asyncio is
    unavailable, no loop is running, or no task is current."""
    if asyncio:
        try:
            if hasattr(asyncio, "current_task"):
                # Python 3.7 and up
                return asyncio.current_task()
            else:
                # Python 3.6
                return asyncio.Task.current_task()
        except RuntimeError:
            return None
class SimplifiedAsgirefLocal:
    """Copy of asgiref 3.1+'s Local without sync_to_async/async_to_sync
    compatibility; attributes are keyed by current task or thread."""
    CLEANUP_INTERVAL = 60  # seconds
    def __init__(self):
        # {context_id: {attr_name: value}}
        self._storage = {}
        self._last_cleanup = time.time()
        self._clean_lock = threading.Lock()
    def _get_context_id(self):
        # Prefer the running asyncio task; fall back to the current thread.
        context_id = get_current_asyncio_task()
        if context_id is None:
            context_id = threading.current_thread()
        return context_id
    def _cleanup(self):
        """Drop storage for dead threads and finished tasks."""
        for key in list(self._storage.keys()):
            if isinstance(key, threading.Thread):
                if not key.is_alive():
                    del self._storage[key]
            elif isinstance(key, asyncio.Task):
                if key.done():
                    del self._storage[key]
        self._last_cleanup = time.time()
    def _maybe_cleanup(self):
        """Run _cleanup() at most once per CLEANUP_INTERVAL seconds."""
        if time.time() - self._last_cleanup > self.CLEANUP_INTERVAL:
            with self._clean_lock:
                self._cleanup()
    def __getattr__(self, key):
        context_id = self._get_context_id()
        if key in self._storage.get(context_id, {}):
            return self._storage[context_id][key]
        else:
            raise AttributeError("%r object has no attribute %r" % (self, key))
    def __setattr__(self, key, value):
        # Internal bookkeeping attributes bypass the per-context storage.
        if key in ("_storage", "_last_cleanup", "_clean_lock", "_thread_critical"):
            return super().__setattr__(key, value)
        self._maybe_cleanup()
        self._storage.setdefault(self._get_context_id(), {})[key] = value
    def __delattr__(self, key):
        context_id = self._get_context_id()
        if key in self._storage.get(context_id, {}):
            del self._storage[context_id][key]
        else:
            raise AttributeError("%r object has no attribute %r" % (self, key))
class LocalContext(object):
    """Holds the per-context TrackedRequest using the best available
    isolation mechanism (contextvars > asgiref Local > local copy >
    thread-local)."""
    def __init__(self):
        if AsgiRefLocal is not None:
            self._local = AsgiRefLocal()
        elif asyncio is not None:
            self._local = SimplifiedAsgirefLocal()
        else:
            self._local = ThreadLocal()
        self.use_context_var = scout_context_var is not None
    def get_tracked_request(self):
        """Return the current context's TrackedRequest, creating it lazily."""
        if scout_context_var:
            if not scout_context_var.get(None):
                scout_context_var.set(TrackedRequest())
            return scout_context_var.get()
        if not hasattr(self._local, "tracked_request"):
            self._local.tracked_request = TrackedRequest()
        return self._local.tracked_request
    def clear_tracked_request(self, instance):
        """Detach *instance* only if it is the currently stored request."""
        if getattr(self._local, "tracked_request", None) is instance:
            del self._local.tracked_request
        if scout_context_var and scout_context_var.get(None) is instance:
            scout_context_var.set(None)
# Process-wide singleton used to fetch/clear the active TrackedRequest.
context = LocalContext()
1c3444983478f7ee55cc4aec280838b76375b3dc | 363 | py | Python | cv-api/app/cam/ICamera.py | TheLongRunSmoke/birdfeeder | 6f238c9b8c8abdc866aaf042f79b674714fdaa8c | [
"MIT"
] | null | null | null | cv-api/app/cam/ICamera.py | TheLongRunSmoke/birdfeeder | 6f238c9b8c8abdc866aaf042f79b674714fdaa8c | [
"MIT"
] | 1 | 2017-11-28T04:26:45.000Z | 2017-11-28T04:57:47.000Z | cv-api/app/cam/ICamera.py | TheLongRunSmoke/birdfeeder | 6f238c9b8c8abdc866aaf042f79b674714fdaa8c | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
class ICamera(metaclass=ABCMeta):
    """Abstract camera interface.

    Bug fix: ``__metaclass__ = ABCMeta`` is Python 2 syntax with no effect on
    Python 3, so ``@abstractmethod`` was not enforced and incomplete
    subclasses (or ICamera itself) could be instantiated.  Declare the
    metaclass with the Python 3 class-keyword syntax instead.
    """

    @abstractmethod
    def retrieve(self):
        """Return next frame from camera."""

    @abstractmethod
    def auto_exposure(self, frame):
        """Calculate and set exposure."""

    @abstractmethod
    def get_exposure(self):
        """Return current exposure."""
| 20.166667 | 44 | 0.650138 | from abc import ABCMeta, abstractmethod
class ICamera(metaclass=ABCMeta):
    """Abstract camera interface.

    Bug fix: ``__metaclass__ = ABCMeta`` is Python 2 syntax with no effect on
    Python 3, so ``@abstractmethod`` was not enforced; additionally the
    stripped method definitions had no bodies, which is a syntax error.
    """

    @abstractmethod
    def retrieve(self):
        """Return next frame from camera."""

    @abstractmethod
    def auto_exposure(self, frame):
        """Calculate and set exposure."""

    @abstractmethod
    def get_exposure(self):
        """Return current exposure."""
| true | true |
1c3444a9a1f2938fa8cc9e71aba3bb3f78c33aba | 1,353 | py | Python | medium/31_next_permutation.py | czla/leetcode-solution | bee4bc1588b270ca580199d23ab83c939b7e17b8 | [
"MIT"
] | 3 | 2019-05-01T08:23:37.000Z | 2019-08-03T01:35:28.000Z | medium/31_next_permutation.py | czla/leetcode-solution | bee4bc1588b270ca580199d23ab83c939b7e17b8 | [
"MIT"
] | null | null | null | medium/31_next_permutation.py | czla/leetcode-solution | bee4bc1588b270ca580199d23ab83c939b7e17b8 | [
"MIT"
] | null | null | null | # Description: 实现获取下一个排列的函数,算法需要将给定数字序列重新排列成字典序中下一个更大的排列。
#
# 如果不存在下一个更大的排列,则将数字重新排列成最小的排列(即升序排列)。
#
# 必须原地修改,只允许使用额外常数空间。
#
#
#
# Examples: 输入: [1,2,3] 输出:[1,3,2]
# 输入: [3,2,1] 输出:[1,2,3]
# 输入: [1,1,5] 输出:[1,5,1]
#
# Difficulty: Medium
# Author: zlchen
# Date: 8/5/2019
# Performance: 72 ms, faster than 28.78% of Python3 submissions
class Solution:
    def nextPermutation(self, nums) -> None:
        """Rearrange *nums* in place into the lexicographically next greater
        permutation; if *nums* is already the last permutation, rearrange it
        into the smallest (ascending) one.  Uses O(1) extra space.

        Bug fix: the original negative-index scan evaluated
        ``nums[index - 1]`` before checking ``index == -len(nums)``, so for
        inputs whose prefix is entirely non-increasing with duplicates
        (e.g. ``[1, 1]`` or ``[2, 2, 2]``) it stepped past the left edge and
        raised IndexError.  Rewritten with the standard bounded scan.
        """
        n = len(nums)
        if n < 2:
            return
        # 1. Find the rightmost ascent: largest i with nums[i] < nums[i+1].
        i = n - 2
        while i >= 0 and nums[i] >= nums[i + 1]:
            i -= 1
        if i >= 0:
            # 2. Swap nums[i] with the rightmost element greater than it.
            j = n - 1
            while nums[j] <= nums[i]:
                j -= 1
            nums[i], nums[j] = nums[j], nums[i]
        # 3. The suffix after i is non-increasing; reverse it to minimize.
        #    When no ascent exists (i == -1) this reverses the whole list.
        nums[i + 1:] = reversed(nums[i + 1:])
| 29.413043 | 93 | 0.524021 |
class Solution:
    def nextPermutation(self, nums) -> None:
        """Rearrange *nums* in place into the lexicographically next greater
        permutation; if already the last permutation, sort ascending.

        Bug fix: the original negative-index scan read ``nums[index - 1]``
        before the ``index == -len(nums)`` guard, raising IndexError on
        inputs such as ``[1, 1]`` or ``[2, 2, 2]``.
        """
        n = len(nums)
        if n < 2:
            return
        # 1. Find the rightmost ascent: largest i with nums[i] < nums[i+1].
        i = n - 2
        while i >= 0 and nums[i] >= nums[i + 1]:
            i -= 1
        if i >= 0:
            # 2. Swap nums[i] with the rightmost element greater than it.
            j = n - 1
            while nums[j] <= nums[i]:
                j -= 1
            nums[i], nums[j] = nums[j], nums[i]
        # 3. Reverse the (non-increasing) suffix to minimize it.
        nums[i + 1:] = reversed(nums[i + 1:])
| true | true |
1c3444df9ee7708900b28d2f1be7d4b3cbd23113 | 604 | py | Python | 01_Python by Example Learning to Program in 150 Challenges by Nichola Lacey/07_chapter seven Random/problem54.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
] | 1 | 2021-11-16T14:14:38.000Z | 2021-11-16T14:14:38.000Z | 01_Python by Example Learning to Program in 150 Challenges by Nichola Lacey/07_chapter seven Random/problem54.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
] | null | null | null | 01_Python by Example Learning to Program in 150 Challenges by Nichola Lacey/07_chapter seven Random/problem54.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
] | null | null | null | """
054
Randomly choose either heads or tails (“h” or “t”). Ask
the user to make their choice. If their choice is the same
as the randomly selected value, display the message
“You win”, otherwise display “Bad luck”. At the end, tell
the user if the computer selected heads or tails.
"""
import random
heads_or_tails = ["h","t"]
while 1 :
computer_choice = random.choice(heads_or_tails)
user_choice = input("Choose either head or tail : ")
if computer_choice == user_choice:
print("You win ")
else :
print("Bad luck ")
print("the computer chose ",computer_choice+"\n") | 31.789474 | 58 | 0.690397 | import random
heads_or_tails = ["h","t"]
while 1 :
computer_choice = random.choice(heads_or_tails)
user_choice = input("Choose either head or tail : ")
if computer_choice == user_choice:
print("You win ")
else :
print("Bad luck ")
print("the computer chose ",computer_choice+"\n") | true | true |
1c34455d3c27bc9b9d5d46870aab6fd77a15f7e4 | 13,744 | py | Python | reframe/core/runtime.py | Lumi-supercomputer/reframe | f1e46807663db0b4f7e6b1252c4fcda6fbcc3270 | [
"BSD-3-Clause"
] | null | null | null | reframe/core/runtime.py | Lumi-supercomputer/reframe | f1e46807663db0b4f7e6b1252c4fcda6fbcc3270 | [
"BSD-3-Clause"
] | 3 | 2022-03-11T09:51:33.000Z | 2022-03-31T08:20:19.000Z | reframe/core/runtime.py | Lumi-supercomputer/reframe | f1e46807663db0b4f7e6b1252c4fcda6fbcc3270 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Handling of the current host context
#
import os
import functools
from datetime import datetime
import reframe.core.config as config
import reframe.utility.osext as osext
from reframe.core.environments import (Environment, snapshot)
from reframe.core.exceptions import ReframeFatalError
from reframe.core.logging import getlogger
from reframe.core.systems import System
class RuntimeContext:
    '''The runtime context of the framework.
    There is a single instance of this class globally in the framework.
    .. versionadded:: 2.13
    '''
    def __init__(self, site_config):
        self._site_config = site_config
        self._system = System.create(site_config)
        self._current_run = 0
        # Captured once; all stage/output paths of this run share this stamp.
        self._timestamp = datetime.now()
    def _makedir(self, *dirs, wipeout=False):
        # Join *dirs*, optionally wipe any existing tree, and create it.
        ret = os.path.join(*dirs)
        if wipeout:
            osext.rmtree(ret, ignore_errors=True)
        os.makedirs(ret, exist_ok=True)
        return ret
    def _format_dirs(self, *dirs):
        # Append a "_retry<N>" suffix to the last path component on retries,
        # but only when stage directories are cleaned between runs.
        if not self.get_option('general/0/clean_stagedir'):
            # If stagedir is to be reused, no new stage directories will be
            # used for retries
            return dirs
        try:
            last = dirs[-1]
        except IndexError:
            return dirs
        current_run = runtime().current_run
        if current_run == 0:
            return dirs
        last += '_retry%s' % current_run
        return (*dirs[:-1], last)
    def next_run(self):
        self._current_run += 1
    @property
    def current_run(self):
        # Zero-based index of the current (re)try.
        return self._current_run
    @property
    def site_config(self):
        return self._site_config
    @property
    def system(self):
        '''The current host system.
        :type: :class:`reframe.core.systems.System`
        '''
        return self._system
    @property
    def prefix(self):
        return osext.expandvars(
            self.site_config.get('systems/0/prefix')
        )
    @property
    def stagedir(self):
        return osext.expandvars(
            self.site_config.get('systems/0/stagedir')
        )
    @property
    def outputdir(self):
        return osext.expandvars(
            self.site_config.get('systems/0/outputdir')
        )
    @property
    def perflogdir(self):
        # Find the first filelog handler
        # NOTE(review): if no handler has type 'filelog', ``i`` keeps the
        # last loop value (or is unbound for an empty list) — confirm the
        # configuration schema guarantees at least one filelog handler.
        handlers = self.site_config.get('logging/0/handlers_perflog')
        for i, h in enumerate(handlers):
            if h['type'] == 'filelog':
                break
        return osext.expandvars(
            self.site_config.get(f'logging/0/handlers_perflog/{i}/basedir')
        )
    @property
    def timestamp(self):
        # The init-time timestamp rendered with the configured format.
        timefmt = self.site_config.get('general/0/timestamp_dirs')
        return self._timestamp.strftime(timefmt)
    @property
    def output_prefix(self):
        '''The output directory prefix.
        :type: :class:`str`
        '''
        if self.outputdir:
            ret = os.path.join(self.outputdir, self.timestamp)
        else:
            ret = os.path.join(self.prefix, 'output', self.timestamp)
        return os.path.abspath(ret)
    @property
    def stage_prefix(self):
        '''The stage directory prefix.
        :type: :class:`str`
        '''
        if self.stagedir:
            ret = os.path.join(self.stagedir, self.timestamp)
        else:
            ret = os.path.join(self.prefix, 'stage', self.timestamp)
        return os.path.abspath(ret)
    def make_stagedir(self, *dirs):
        # Create (and optionally clean) a stage directory under stage_prefix.
        wipeout = self.get_option('general/0/clean_stagedir')
        ret = self._makedir(self.stage_prefix,
                            *self._format_dirs(*dirs), wipeout=wipeout)
        getlogger().debug(
            f'Created stage directory {ret!r} [clean_stagedir: {wipeout}]'
        )
        return ret
    def make_outputdir(self, *dirs):
        # Output directories are always recreated from scratch.
        ret = self._makedir(self.output_prefix,
                            *self._format_dirs(*dirs), wipeout=True)
        getlogger().debug(f'Created output directory {ret!r}')
        return ret
    @property
    def modules_system(self):
        '''The environment modules system used in the current host.
        :type: :class:`reframe.core.modules.ModulesSystem`.
        '''
        return self._system.modules_system
    def get_option(self, option, default=None):
        '''Get a configuration option.
        :arg option: The option to be retrieved.
        :arg default: The value to return if ``option`` cannot be retrieved.
        :returns: The value of the option.
        .. versionchanged:: 3.11.0
           Add ``default`` named argument.
        '''
        return self._site_config.get(option, default=default)
# Global resources for the current host
_runtime_context = None
def init_runtime(site_config):
    """Initialize the global runtime context from *site_config*.

    Idempotent: subsequent calls are no-ops once a context exists.
    """
    global _runtime_context
    if _runtime_context is None:
        _runtime_context = RuntimeContext(site_config)
def runtime():
    '''Get the runtime context of the framework.
    .. versionadded:: 2.13
    :returns: A :class:`reframe.core.runtime.RuntimeContext` object.
    '''
    # init_runtime() must have been called first.
    if _runtime_context is None:
        raise ReframeFatalError('no runtime context is configured')
    return _runtime_context
def loadenv(*environs):
    '''Load environments in the current Python context.
    :arg environs: A list of environments to load.
    :type environs: List[Environment]
    :returns: A tuple containing snapshot of the current environment upon
        entry to this function and a list of shell commands required to load
        the environments.
    :rtype: Tuple[_EnvironmentSnapshot, List[str]]
    '''
    def _load_cmds_tracked(**module):
        # Load the module resolving conflicts, and emit unload commands for
        # every module it displaced before the load command itself.
        commands = []
        load_seq = modules_system.load_module(**module, force=True)
        for m, conflicted in load_seq:
            for c in conflicted:
                commands += modules_system.emit_unload_commands(c)
            commands += modules_system.emit_load_commands(
                m, module.get('collection', False), module.get('path', None)
            )
        return commands
    modules_system = runtime().modules_system
    # Snapshot taken *before* any modification so callers can restore.
    env_snapshot = snapshot()
    commands = []
    for env in environs:
        for mod in env.modules_detailed:
            if runtime().get_option('general/0/resolve_module_conflicts'):
                commands += _load_cmds_tracked(**mod)
            else:
                commands += modules_system.emit_load_commands(**mod)
        for k, v in env.variables.items():
            # Apply the variable to this process and record the equivalent
            # shell command.
            os.environ[k] = osext.expandvars(v)
            commands.append(f'export {k}={v}')
    return env_snapshot, commands
def emit_loadenv_commands(*environs):
    '''Return the shell commands required to load ``environs``, leaving the
    current process environment untouched.'''
    environ_save = snapshot()
    try:
        return loadenv(*environs)[1]
    finally:
        # Always roll back any changes that `loadenv()` made to this process.
        environ_save.restore()
def is_env_loaded(environ):
    '''Check if environment is loaded.

    :arg environ: Environment to check for.
    :type environ: Environment
    :returns: :class:`True` if this environment is loaded, :class:`False`
        otherwise.
    '''
    loaded = runtime().modules_system.is_module_loaded
    modules_ok = all(loaded(m) for m in environ.modules)
    variables_ok = all(
        os.environ.get(name) == osext.expandvars(value)
        for name, value in environ.variables.items()
    )
    return modules_ok and variables_ok
def _is_valid_part(part, valid_systems):
for spec in valid_systems:
if spec[0] not in ('+', '-', '%'):
# This is the standard case
sysname, partname = part.fullname.split(':')
valid_matches = ['*', '*:*', sysname, f'{sysname}:*',
f'*:{partname}', f'{part.fullname}']
if spec in valid_matches:
return True
else:
plus_feats = []
minus_feats = []
props = {}
for subspec in spec.split(' '):
if subspec.startswith('+'):
plus_feats.append(subspec[1:])
elif subspec.startswith('-'):
minus_feats.append(subspec[1:])
elif subspec.startswith('%'):
key, val = subspec[1:].split('=')
props[key] = val
have_plus_feats = all(
ft in part.features or ft in part.resources
for ft in plus_feats
)
have_minus_feats = any(
ft in part.features or ft in part.resources
for ft in minus_feats
)
try:
have_props = True
for k, v in props.items():
extra_value = part.extras[k]
extra_type = type(extra_value)
if extra_value != extra_type(v):
have_props = False
break
except (KeyError, ValueError):
have_props = False
if have_plus_feats and not have_minus_feats and have_props:
return True
return False
def _is_valid_env(env, valid_prog_environs):
if '*' in valid_prog_environs:
return True
for spec in valid_prog_environs:
if spec[0] not in ('+', '-', '%'):
# This is the standard case
if env.name == spec:
return True
else:
plus_feats = []
minus_feats = []
props = {}
for subspec in spec.split(' '):
if subspec.startswith('+'):
plus_feats.append(subspec[1:])
elif subspec.startswith('-'):
minus_feats.append(subspec[1:])
elif subspec.startswith('%'):
key, val = subspec[1:].split('=')
props[key] = val
have_plus_feats = all(ft in env.features for ft in plus_feats)
have_minus_feats = any(ft in env.features
for ft in minus_feats)
try:
have_props = True
for k, v in props.items():
extra_value = env.extras[k]
extra_type = type(extra_value)
if extra_value != extra_type(v):
have_props = False
break
except (KeyError, ValueError):
have_props = False
if have_plus_feats and not have_minus_feats and have_props:
return True
return False
def valid_sysenv_comb(valid_systems, valid_prog_environs,
                      check_systems=True, check_environs=True):
    '''Map each eligible partition of the current system to the list of its
    programming environments that satisfy ``valid_prog_environs``.

    Either check may be disabled through the corresponding keyword argument.
    '''
    mapping = {}
    for part in runtime().system.partitions:
        if check_systems and not _is_valid_part(part, valid_systems):
            continue

        mapping[part] = [
            env for env in part.environs
            if not check_environs or _is_valid_env(env, valid_prog_environs)
        ]

    return mapping
class temp_environment:
    '''Context manager to temporarily change the environment.

    :arg modules: modules to load while inside the managed block.
    :arg variables: environment variables to set, as ``(name, value)`` pairs.
    '''

    def __init__(self, modules=None, variables=None):
        # Use `None` sentinels instead of the previous mutable `[]` defaults:
        # a mutable default argument is evaluated once and shared across all
        # calls, so it can leak state between instances.
        self._modules = [] if modules is None else modules
        self._variables = [] if variables is None else variables

    def __enter__(self):
        # Build a throwaway environment and load it, saving a snapshot of the
        # current environment for `__exit__` to restore.
        new_env = Environment('_rfm_temp_env', self._modules, self._variables)
        self._environ_save, _ = loadenv(new_env)
        return new_env

    def __exit__(self, exc_type, exc_value, traceback):
        self._environ_save.restore()
class temp_config:
    '''Context manager to temporarily switch to a specific configuration.'''

    def __init__(self, system):
        self.__target = system
        # Remember the currently selected system so we can switch back.
        self.__original = runtime().system.name

    def __enter__(self):
        runtime().site_config.select_subconfig(self.__target)

    def __exit__(self, exc_type, exc_value, traceback):
        runtime().site_config.select_subconfig(self.__original)
# The following utilities are useful only for the unit tests
class temp_runtime:
    '''Context manager to temporarily switch to another runtime.

    :meta private:
    '''

    def __init__(self, config_file, sysname=None, options=None):
        # NOTE: the global runtime context is swapped here, at construction
        # time, not in `__enter__`; `__exit__` restores the saved one.
        global _runtime_context
        options = options or {}
        self._runtime_save = _runtime_context
        if config_file is None:
            # `None` explicitly clears the global runtime context.
            _runtime_context = None
        else:
            site_config = config.load_config(config_file)
            site_config.select_subconfig(sysname, ignore_resolve_errors=True)
            for opt, value in options.items():
                site_config.add_sticky_option(opt, value)

            _runtime_context = RuntimeContext(site_config)

    def __enter__(self):
        return _runtime_context

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the runtime context that was active before construction.
        global _runtime_context
        _runtime_context = self._runtime_save
def switch_runtime(config_file, sysname=None, options=None):
    '''Function decorator for temporarily changing the runtime for a
    function.

    :meta private:
    '''
    def _decorator(fn):
        @functools.wraps(fn)
        def _wrapper(*args, **kwargs):
            # Run the wrapped function under a temporary runtime context.
            with temp_runtime(config_file, sysname, options):
                return fn(*args, **kwargs)

        return _wrapper

    return _decorator
class module_use:
    '''Context manager for temporarily modifying the module path.'''

    def __init__(self, *paths):
        self._paths = paths

    def __enter__(self):
        runtime().modules_system.searchpath_add(*self._paths)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Undo exactly the search path additions made on entry.
        runtime().modules_system.searchpath_remove(*self._paths)
| 29.304904 | 78 | 0.596333 |
import os
import functools
from datetime import datetime
import reframe.core.config as config
import reframe.utility.osext as osext
from reframe.core.environments import (Environment, snapshot)
from reframe.core.exceptions import ReframeFatalError
from reframe.core.logging import getlogger
from reframe.core.systems import System
class RuntimeContext:
    '''The runtime context of the framework.

    Holds the site configuration, the current system and bookkeeping for
    retries and the timestamped stage/output directory prefixes.
    '''

    def __init__(self, site_config):
        self._site_config = site_config
        self._system = System.create(site_config)
        self._current_run = 0
        # Session timestamp; used to name stage/output directories.
        self._timestamp = datetime.now()

    def _makedir(self, *dirs, wipeout=False):
        # Create `os.path.join(*dirs)`, optionally wiping it out first.
        path = os.path.join(*dirs)
        if wipeout:
            osext.rmtree(path, ignore_errors=True)

        os.makedirs(path, exist_ok=True)
        return path

    def _format_dirs(self, *dirs):
        # Append a `_retry<N>` suffix to the last path component on retried
        # runs, but only when stage directories are cleaned between runs.
        if not self.get_option('general/0/clean_stagedir'):
            return dirs

        try:
            last = dirs[-1]
        except IndexError:
            return dirs

        current_run = runtime().current_run
        if current_run == 0:
            return dirs

        last += '_retry%s' % current_run
        return (*dirs[:-1], last)

    def next_run(self):
        self._current_run += 1

    @property
    def current_run(self):
        return self._current_run

    @property
    def site_config(self):
        return self._site_config

    @property
    def system(self):
        return self._system

    @property
    def prefix(self):
        return osext.expandvars(self.site_config.get('systems/0/prefix'))

    @property
    def stagedir(self):
        return osext.expandvars(self.site_config.get('systems/0/stagedir'))

    @property
    def outputdir(self):
        return osext.expandvars(self.site_config.get('systems/0/outputdir'))

    @property
    def perflogdir(self):
        handlers = self.site_config.get('logging/0/handlers_perflog')
        # NOTE(review): assumes a 'filelog' handler exists; if none does, the
        # last handler's index is silently used — confirm this is intended.
        for i, h in enumerate(handlers):
            if h['type'] == 'filelog':
                break

        return osext.expandvars(
            self.site_config.get(f'logging/0/handlers_perflog/{i}/basedir')
        )

    @property
    def timestamp(self):
        timefmt = self.site_config.get('general/0/timestamp_dirs')
        return self._timestamp.strftime(timefmt)

    @property
    def output_prefix(self):
        '''The absolute, timestamped output directory prefix.'''
        base = self.outputdir or os.path.join(self.prefix, 'output')
        return os.path.abspath(os.path.join(base, self.timestamp))

    @property
    def stage_prefix(self):
        '''The absolute, timestamped stage directory prefix.'''
        base = self.stagedir or os.path.join(self.prefix, 'stage')
        return os.path.abspath(os.path.join(base, self.timestamp))

    def make_stagedir(self, *dirs):
        '''Create a stage directory and return its path.'''
        clean = self.get_option('general/0/clean_stagedir')
        path = self._makedir(self.stage_prefix,
                             *self._format_dirs(*dirs), wipeout=clean)
        getlogger().debug(
            f'Created stage directory {path!r} [clean_stagedir: {clean}]'
        )
        return path

    def make_outputdir(self, *dirs):
        '''Create an output directory and return its path.'''
        path = self._makedir(self.output_prefix,
                             *self._format_dirs(*dirs), wipeout=True)
        getlogger().debug(f'Created output directory {path!r}')
        return path

    @property
    def modules_system(self):
        '''The environment modules system used in the current host.'''
        return self._system.modules_system

    def get_option(self, option, default=None):
        '''Get a configuration option, or ``default`` if it cannot be
        retrieved.'''
        return self._site_config.get(option, default=default)
# Global singleton holding the framework's runtime context; it is created
# once by `init_runtime()` and accessed through `runtime()`.
_runtime_context = None

def init_runtime(site_config):
    # Create the global runtime context; no-op if one already exists.
    global _runtime_context
    if _runtime_context is None:
        _runtime_context = RuntimeContext(site_config)

def runtime():
    # Return the global runtime context, failing hard if it is not set up.
    if _runtime_context is None:
        raise ReframeFatalError('no runtime context is configured')
    return _runtime_context
def loadenv(*environs):
    '''Load `environs` in the current process.

    Returns a snapshot of the environment taken on entry and the list of
    shell commands required to reproduce the loads in a shell.
    '''
    def _load_cmds_tracked(**module):
        # Load `module` resolving conflicts: emit unload commands for every
        # conflicting module before the corresponding load command.
        commands = []
        load_seq = modules_system.load_module(**module, force=True)
        for m, conflicted in load_seq:
            for c in conflicted:
                commands += modules_system.emit_unload_commands(c)
            commands += modules_system.emit_load_commands(
                m, module.get('collection', False), module.get('path', None)
            )
        return commands

    modules_system = runtime().modules_system
    env_snapshot = snapshot()
    commands = []
    for env in environs:
        for mod in env.modules_detailed:
            if runtime().get_option('general/0/resolve_module_conflicts'):
                commands += _load_cmds_tracked(**mod)
            else:
                commands += modules_system.emit_load_commands(**mod)
        # Export each variable in this process (expanded) and record the
        # equivalent shell command (value kept unexpanded).
        for k, v in env.variables.items():
            os.environ[k] = osext.expandvars(v)
            commands.append(f'export {k}={v}')
    return env_snapshot, commands
def emit_loadenv_commands(*environs):
    # Compute the shell commands for loading `environs` without permanently
    # modifying the current environment: load, record, then always restore.
    env_snapshot = snapshot()
    try:
        _, commands = loadenv(*environs)
    finally:
        env_snapshot.restore()
    return commands

def is_env_loaded(environ):
    # An environment counts as loaded when all of its modules are loaded and
    # every one of its variables has its (expanded) value set in os.environ.
    is_module_loaded = runtime().modules_system.is_module_loaded
    return (all(map(is_module_loaded, environ.modules)) and
            all(os.environ.get(k, None) == osext.expandvars(v)
                for k, v in environ.variables.items()))
def _is_valid_part(part, valid_systems):
for spec in valid_systems:
if spec[0] not in ('+', '-', '%'):
sysname, partname = part.fullname.split(':')
valid_matches = ['*', '*:*', sysname, f'{sysname}:*',
f'*:{partname}', f'{part.fullname}']
if spec in valid_matches:
return True
else:
plus_feats = []
minus_feats = []
props = {}
for subspec in spec.split(' '):
if subspec.startswith('+'):
plus_feats.append(subspec[1:])
elif subspec.startswith('-'):
minus_feats.append(subspec[1:])
elif subspec.startswith('%'):
key, val = subspec[1:].split('=')
props[key] = val
have_plus_feats = all(
ft in part.features or ft in part.resources
for ft in plus_feats
)
have_minus_feats = any(
ft in part.features or ft in part.resources
for ft in minus_feats
)
try:
have_props = True
for k, v in props.items():
extra_value = part.extras[k]
extra_type = type(extra_value)
if extra_value != extra_type(v):
have_props = False
break
except (KeyError, ValueError):
have_props = False
if have_plus_feats and not have_minus_feats and have_props:
return True
return False
def _is_valid_env(env, valid_prog_environs):
if '*' in valid_prog_environs:
return True
for spec in valid_prog_environs:
if spec[0] not in ('+', '-', '%'):
if env.name == spec:
return True
else:
plus_feats = []
minus_feats = []
props = {}
for subspec in spec.split(' '):
if subspec.startswith('+'):
plus_feats.append(subspec[1:])
elif subspec.startswith('-'):
minus_feats.append(subspec[1:])
elif subspec.startswith('%'):
key, val = subspec[1:].split('=')
props[key] = val
have_plus_feats = all(ft in env.features for ft in plus_feats)
have_minus_feats = any(ft in env.features
for ft in minus_feats)
try:
have_props = True
for k, v in props.items():
extra_value = env.extras[k]
extra_type = type(extra_value)
if extra_value != extra_type(v):
have_props = False
break
except (KeyError, ValueError):
have_props = False
if have_plus_feats and not have_minus_feats and have_props:
return True
return False
def valid_sysenv_comb(valid_systems, valid_prog_environs,
                      check_systems=True, check_environs=True):
    # Map each partition of the current system that passes the
    # `valid_systems` check to the list of its environments that pass the
    # `valid_prog_environs` check; either check may be disabled.
    ret = {}
    curr_sys = runtime().system
    for part in curr_sys.partitions:
        if check_systems and not _is_valid_part(part, valid_systems):
            continue
        ret[part] = []
        for env in part.environs:
            if check_environs and not _is_valid_env(env, valid_prog_environs):
                continue
            ret[part].append(env)
    return ret
class temp_environment:
    '''Context manager to temporarily change the environment.

    :arg modules: modules to load while inside the managed block.
    :arg variables: environment variables to set, as ``(name, value)`` pairs.
    '''

    def __init__(self, modules=None, variables=None):
        # Use `None` sentinels instead of the previous mutable `[]` defaults:
        # a mutable default argument is evaluated once and shared across all
        # calls, so it can leak state between instances.
        self._modules = [] if modules is None else modules
        self._variables = [] if variables is None else variables

    def __enter__(self):
        # Build a throwaway environment and load it, saving a snapshot of the
        # current environment for `__exit__` to restore.
        new_env = Environment('_rfm_temp_env', self._modules, self._variables)
        self._environ_save, _ = loadenv(new_env)
        return new_env

    def __exit__(self, exc_type, exc_value, traceback):
        self._environ_save.restore()
class temp_config:
    '''Context manager to temporarily switch to a specific configuration.'''

    def __init__(self, system):
        self.__to = system
        # Remember the currently selected system so we can switch back.
        self.__from = runtime().system.name

    def __enter__(self):
        runtime().site_config.select_subconfig(self.__to)

    def __exit__(self, exc_type, exc_value, traceback):
        runtime().site_config.select_subconfig(self.__from)
class temp_runtime:
    '''Context manager to temporarily switch to another runtime.'''

    def __init__(self, config_file, sysname=None, options=None):
        # NOTE: the global runtime context is swapped here, at construction
        # time, not in `__enter__`; `__exit__` restores the saved one.
        global _runtime_context
        options = options or {}
        self._runtime_save = _runtime_context
        if config_file is None:
            # `None` explicitly clears the global runtime context.
            _runtime_context = None
        else:
            site_config = config.load_config(config_file)
            site_config.select_subconfig(sysname, ignore_resolve_errors=True)
            for opt, value in options.items():
                site_config.add_sticky_option(opt, value)
            _runtime_context = RuntimeContext(site_config)

    def __enter__(self):
        return _runtime_context

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the runtime context that was active before construction.
        global _runtime_context
        _runtime_context = self._runtime_save
def switch_runtime(config_file, sysname=None, options=None):
    '''Decorator that runs the wrapped function under a temporary runtime
    created from `config_file`, `sysname` and `options`.'''
    def _runtime_deco(fn):
        @functools.wraps(fn)
        def _fn(*args, **kwargs):
            with temp_runtime(config_file, sysname, options):
                ret = fn(*args, **kwargs)
            return ret
        return _fn
    return _runtime_deco
class module_use:
    '''Context manager for temporarily extending the module search path.'''

    def __init__(self, *paths):
        self._paths = paths

    def __enter__(self):
        runtime().modules_system.searchpath_add(*self._paths)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Remove exactly the paths that were added on entry.
        runtime().modules_system.searchpath_remove(*self._paths)
| true | true |
1c3445a2e6767a3749a48932c05f9b500fc7368f | 256 | py | Python | tomolab/ScannerGeometries/__init__.py | TomographyLab/TomoLab | 86b9a5894ef1660d7f4de39f560f1f92024b40f8 | [
"Apache-2.0"
] | 5 | 2019-06-01T13:16:00.000Z | 2022-03-02T10:21:59.000Z | tomolab/ScannerGeometries/__init__.py | TomographyLab/TomoLab | 86b9a5894ef1660d7f4de39f560f1f92024b40f8 | [
"Apache-2.0"
] | null | null | null | tomolab/ScannerGeometries/__init__.py | TomographyLab/TomoLab | 86b9a5894ef1660d7f4de39f560f1f92024b40f8 | [
"Apache-2.0"
] | 1 | 2019-06-01T13:19:18.000Z | 2019-06-01T13:19:18.000Z | # -*- coding: utf-8 -*-
# tomolab
# Michele Scipioni
# Harvard University, Martinos Center for Biomedical Imaging
# University of Pisa
# Publicly exported scanner-geometry submodules of this package.
__all__ = ['Siemens_Biograph_mMR', 'GE_Discovery_RX']
from . import Siemens_Biograph_mMR
from . import GE_Discovery_RX
| 23.272727 | 60 | 0.765625 |
__all__ = ['Siemens_Biograph_mMR', 'GE_Discovery_RX']
from . import Siemens_Biograph_mMR
from . import GE_Discovery_RX
| true | true |
1c34466bdecf6638f5d8bace2d07c85114ab8cec | 459 | py | Python | articleapp/models.py | wkd-woo/RecommendMovie | ae5507f6466c417e48ff4769a7968082c014da11 | [
"MIT"
] | 2 | 2021-05-05T08:36:37.000Z | 2021-05-09T13:01:32.000Z | articleapp/models.py | wkd-woo/RecommendMovie | ae5507f6466c417e48ff4769a7968082c014da11 | [
"MIT"
] | null | null | null | articleapp/models.py | wkd-woo/RecommendMovie | ae5507f6466c417e48ff4769a7968082c014da11 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
# A user-authored article with a mandatory image and free-form text content.
class Article(models.Model):
    # Author; nullable with SET_NULL so articles survive account deletion.
    writer = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='article', null=True)
    title = models.CharField(max_length=200, null=True)
    # Image upload is required (null=False); stored under MEDIA_ROOT/article/.
    image = models.ImageField(upload_to='article/', null=False)
    content = models.TextField(null=True)
    # NOTE(review): `auto_created=True` is unusual here — it marks the field
    # as auto-created rather than auto-filling the date; `auto_now_add=True`
    # may have been intended. Confirm before relying on this field.
    created_at = models.DateField(auto_created=True, null=True)
| 30.6 | 98 | 0.753813 | from django.contrib.auth.models import User
from django.db import models
class Article(models.Model):
writer = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='article', null=True)
title = models.CharField(max_length=200, null=True)
image = models.ImageField(upload_to='article/', null=False)
content = models.TextField(null=True)
created_at = models.DateField(auto_created=True, null=True)
| true | true |
1c344692d6a25c29742187b7cf37f00b9ad1881f | 5,594 | py | Python | awx/main/models/activity_stream.py | withshubh/awx | 38f3176221fe6981f38931d050705b736ea89fdc | [
"Apache-2.0"
] | 2 | 2021-03-18T11:08:15.000Z | 2021-03-19T09:20:27.000Z | awx/main/models/activity_stream.py | withshubh/awx | 38f3176221fe6981f38931d050705b736ea89fdc | [
"Apache-2.0"
] | 24 | 2021-04-01T08:33:08.000Z | 2022-03-01T21:13:06.000Z | awx/main/models/activity_stream.py | it-baschtler/awx | 8ba9eef97b4f9ab707f31538874b37e8d1a5b525 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Tower
from awx.api.versioning import reverse
from awx.main.fields import JSONField
from awx.main.models.base import accepts_json
# Django
from django.db import models
from django.conf import settings
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
__all__ = ['ActivityStream']
class ActivityStream(models.Model):
    '''
    Model used to describe activity stream (audit) events
    '''

    class Meta:
        app_label = 'main'
        ordering = ('pk',)

    OPERATION_CHOICES = [
        ('create', _('Entity Created')),
        ('update', _("Entity Updated")),
        ('delete', _("Entity Deleted")),
        ('associate', _("Entity Associated with another Entity")),
        ('disassociate', _("Entity was Disassociated with another Entity"))
    ]

    # Who performed the operation; nullable so events survive user deletion,
    # with a denormalized copy kept in `deleted_actor` (see `save`).
    actor = models.ForeignKey('auth.User', null=True, on_delete=models.SET_NULL, related_name='activity_stream')
    operation = models.CharField(max_length=13, choices=OPERATION_CHOICES)
    timestamp = models.DateTimeField(auto_now_add=True)
    changes = accepts_json(models.TextField(blank=True))
    deleted_actor = JSONField(null=True)
    action_node = models.CharField(
        blank=True,
        default='',
        editable=False,
        max_length=512,
        help_text=_("The cluster node the activity took place on."),
    )
    object_relationship_type = models.TextField(blank=True)
    object1 = models.TextField()
    object2 = models.TextField()

    # Many-to-many links to every entity type that may appear in an event.
    user = models.ManyToManyField("auth.User", blank=True)
    organization = models.ManyToManyField("Organization", blank=True)
    inventory = models.ManyToManyField("Inventory", blank=True)
    host = models.ManyToManyField("Host", blank=True)
    group = models.ManyToManyField("Group", blank=True)
    inventory_source = models.ManyToManyField("InventorySource", blank=True)
    inventory_update = models.ManyToManyField("InventoryUpdate", blank=True)
    credential = models.ManyToManyField("Credential", blank=True)
    credential_type = models.ManyToManyField("CredentialType", blank=True)
    team = models.ManyToManyField("Team", blank=True)
    project = models.ManyToManyField("Project", blank=True)
    project_update = models.ManyToManyField("ProjectUpdate", blank=True)
    job_template = models.ManyToManyField("JobTemplate", blank=True)
    job = models.ManyToManyField("Job", blank=True)
    workflow_job_template_node = models.ManyToManyField("WorkflowJobTemplateNode", blank=True)
    workflow_job_node = models.ManyToManyField("WorkflowJobNode", blank=True)
    workflow_job_template = models.ManyToManyField("WorkflowJobTemplate", blank=True)
    workflow_job = models.ManyToManyField("WorkflowJob", blank=True)
    workflow_approval_template = models.ManyToManyField("WorkflowApprovalTemplate", blank=True)
    workflow_approval = models.ManyToManyField("WorkflowApproval", blank=True)
    unified_job_template = models.ManyToManyField("UnifiedJobTemplate", blank=True, related_name='activity_stream_as_unified_job_template+')
    unified_job = models.ManyToManyField("UnifiedJob", blank=True, related_name='activity_stream_as_unified_job+')
    ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True)
    schedule = models.ManyToManyField("Schedule", blank=True)
    custom_inventory_script = models.ManyToManyField("CustomInventoryScript", blank=True)
    # This field used to be declared twice in the class body (once above
    # `job_template` as well); the earlier duplicate was removed and this
    # later declaration kept, so the effective field definition is unchanged.
    execution_environment = models.ManyToManyField("ExecutionEnvironment", blank=True)
    notification_template = models.ManyToManyField("NotificationTemplate", blank=True)
    notification = models.ManyToManyField("Notification", blank=True)
    label = models.ManyToManyField("Label", blank=True)
    role = models.ManyToManyField("Role", blank=True)
    instance = models.ManyToManyField("Instance", blank=True)
    instance_group = models.ManyToManyField("InstanceGroup", blank=True)
    o_auth2_application = models.ManyToManyField("OAuth2Application", blank=True)
    o_auth2_access_token = models.ManyToManyField("OAuth2AccessToken", blank=True)
    setting = JSONField(blank=True)

    def __str__(self):
        # Attributes may not be materialized in `__dict__` yet (deferred
        # loads); fall back to '_delayed_' in that case.
        operation = self.operation if 'operation' in self.__dict__ else '_delayed_'
        if 'timestamp' in self.__dict__:
            if self.timestamp:
                timestamp = self.timestamp.isoformat()
            else:
                timestamp = self.timestamp
        else:
            timestamp = '_delayed_'
        return u'%s-%s-pk=%s' % (operation, timestamp, self.pk)

    def get_absolute_url(self, request=None):
        return reverse('api:activity_stream_detail', kwargs={'pk': self.pk}, request=request)

    def save(self, *args, **kwargs):
        # Store denormalized actor metadata so that we retain it for accounting
        # purposes when the User row is deleted.
        if self.actor:
            self.deleted_actor = {
                'id': self.actor_id,
                'username': smart_str(self.actor.username),
                'first_name': smart_str(self.actor.first_name),
                'last_name': smart_str(self.actor.last_name),
            }
            if 'update_fields' in kwargs and 'deleted_actor' not in kwargs['update_fields']:
                kwargs['update_fields'].append('deleted_actor')

        # Record the cluster node, truncated to the column's max_length.
        hostname_char_limit = self._meta.get_field('action_node').max_length
        self.action_node = settings.CLUSTER_HOST_ID[:hostname_char_limit]
        super(ActivityStream, self).save(*args, **kwargs)
| 45.479675 | 140 | 0.712907 |
from awx.api.versioning import reverse
from awx.main.fields import JSONField
from awx.main.models.base import accepts_json
from django.db import models
from django.conf import settings
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
__all__ = ['ActivityStream']
class ActivityStream(models.Model):
    '''
    Model used to describe activity stream (audit) events
    '''

    class Meta:
        app_label = 'main'
        ordering = ('pk',)

    OPERATION_CHOICES = [
        ('create', _('Entity Created')),
        ('update', _("Entity Updated")),
        ('delete', _("Entity Deleted")),
        ('associate', _("Entity Associated with another Entity")),
        ('disassociate', _("Entity was Disassociated with another Entity"))
    ]

    # Who performed the operation; nullable so events survive user deletion,
    # with a denormalized copy kept in `deleted_actor` (see `save`).
    actor = models.ForeignKey('auth.User', null=True, on_delete=models.SET_NULL, related_name='activity_stream')
    operation = models.CharField(max_length=13, choices=OPERATION_CHOICES)
    timestamp = models.DateTimeField(auto_now_add=True)
    changes = accepts_json(models.TextField(blank=True))
    deleted_actor = JSONField(null=True)
    action_node = models.CharField(
        blank=True,
        default='',
        editable=False,
        max_length=512,
        help_text=_("The cluster node the activity took place on."),
    )
    object_relationship_type = models.TextField(blank=True)
    object1 = models.TextField()
    object2 = models.TextField()

    # Many-to-many links to every entity type that may appear in an event.
    user = models.ManyToManyField("auth.User", blank=True)
    organization = models.ManyToManyField("Organization", blank=True)
    inventory = models.ManyToManyField("Inventory", blank=True)
    host = models.ManyToManyField("Host", blank=True)
    group = models.ManyToManyField("Group", blank=True)
    inventory_source = models.ManyToManyField("InventorySource", blank=True)
    inventory_update = models.ManyToManyField("InventoryUpdate", blank=True)
    credential = models.ManyToManyField("Credential", blank=True)
    credential_type = models.ManyToManyField("CredentialType", blank=True)
    team = models.ManyToManyField("Team", blank=True)
    project = models.ManyToManyField("Project", blank=True)
    project_update = models.ManyToManyField("ProjectUpdate", blank=True)
    job_template = models.ManyToManyField("JobTemplate", blank=True)
    job = models.ManyToManyField("Job", blank=True)
    workflow_job_template_node = models.ManyToManyField("WorkflowJobTemplateNode", blank=True)
    workflow_job_node = models.ManyToManyField("WorkflowJobNode", blank=True)
    workflow_job_template = models.ManyToManyField("WorkflowJobTemplate", blank=True)
    workflow_job = models.ManyToManyField("WorkflowJob", blank=True)
    workflow_approval_template = models.ManyToManyField("WorkflowApprovalTemplate", blank=True)
    workflow_approval = models.ManyToManyField("WorkflowApproval", blank=True)
    unified_job_template = models.ManyToManyField("UnifiedJobTemplate", blank=True, related_name='activity_stream_as_unified_job_template+')
    unified_job = models.ManyToManyField("UnifiedJob", blank=True, related_name='activity_stream_as_unified_job+')
    ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True)
    schedule = models.ManyToManyField("Schedule", blank=True)
    custom_inventory_script = models.ManyToManyField("CustomInventoryScript", blank=True)
    # This field used to be declared twice in the class body (once above
    # `job_template` as well); the earlier duplicate was removed and this
    # later declaration kept, so the effective field definition is unchanged.
    execution_environment = models.ManyToManyField("ExecutionEnvironment", blank=True)
    notification_template = models.ManyToManyField("NotificationTemplate", blank=True)
    notification = models.ManyToManyField("Notification", blank=True)
    label = models.ManyToManyField("Label", blank=True)
    role = models.ManyToManyField("Role", blank=True)
    instance = models.ManyToManyField("Instance", blank=True)
    instance_group = models.ManyToManyField("InstanceGroup", blank=True)
    o_auth2_application = models.ManyToManyField("OAuth2Application", blank=True)
    o_auth2_access_token = models.ManyToManyField("OAuth2AccessToken", blank=True)
    setting = JSONField(blank=True)

    def __str__(self):
        # Attributes may not be materialized in `__dict__` yet (deferred
        # loads); fall back to '_delayed_' in that case.
        operation = self.operation if 'operation' in self.__dict__ else '_delayed_'
        if 'timestamp' in self.__dict__:
            if self.timestamp:
                timestamp = self.timestamp.isoformat()
            else:
                timestamp = self.timestamp
        else:
            timestamp = '_delayed_'
        return u'%s-%s-pk=%s' % (operation, timestamp, self.pk)

    def get_absolute_url(self, request=None):
        return reverse('api:activity_stream_detail', kwargs={'pk': self.pk}, request=request)

    def save(self, *args, **kwargs):
        # Store denormalized actor metadata so that we retain it for
        # accounting purposes when the User row is deleted.
        if self.actor:
            self.deleted_actor = {
                'id': self.actor_id,
                'username': smart_str(self.actor.username),
                'first_name': smart_str(self.actor.first_name),
                'last_name': smart_str(self.actor.last_name),
            }
            if 'update_fields' in kwargs and 'deleted_actor' not in kwargs['update_fields']:
                kwargs['update_fields'].append('deleted_actor')

        # Record the cluster node, truncated to the column's max_length.
        hostname_char_limit = self._meta.get_field('action_node').max_length
        self.action_node = settings.CLUSTER_HOST_ID[:hostname_char_limit]
        super(ActivityStream, self).save(*args, **kwargs)
| true | true |
1c3446bbf83437a2b59dbe0b68646890b07f5470 | 7,790 | py | Python | docs/conf.py | charlos1204/sabrina_test | b3d840b9fd2d42c4bd9c0eae4a1c294555171e3a | [
"RSA-MD"
] | null | null | null | docs/conf.py | charlos1204/sabrina_test | b3d840b9fd2d42c4bd9c0eae4a1c294555171e3a | [
"RSA-MD"
] | null | null | null | docs/conf.py | charlos1204/sabrina_test | b3d840b9fd2d42c4bd9c0eae4a1c294555171e3a | [
"RSA-MD"
] | null | null | null | # -*- coding: utf-8 -*-
#
# sabrina_test documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sabrina_test'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sabrina_testdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'sabrina_test.tex',
u'sabrina_test Documentation',
u"sabrina", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sabrina_test', u'sabrina_test Documentation',
[u"sabrina"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sabrina_test', u'sabrina_test Documentation',
u"sabrina", 'sabrina_test',
'A short description of the project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.795918 | 80 | 0.707702 |
import os
import sys
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'sabrina_test'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sabrina_testdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'sabrina_test.tex',
u'sabrina_test Documentation',
u"sabrina", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sabrina_test', u'sabrina_test Documentation',
[u"sabrina"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sabrina_test', u'sabrina_test Documentation',
u"sabrina", 'sabrina_test',
'A short description of the project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true | true |
1c3447258842c979c163cd20706019dc302914bf | 7,581 | py | Python | tdc3/models/py/data/FunctionNameConsistencyDataset.py | TDC3Tool/TDC3 | c746d8e89c0eb35cc3d1b73f469a9f89f5e7bbfd | [
"MIT"
] | null | null | null | tdc3/models/py/data/FunctionNameConsistencyDataset.py | TDC3Tool/TDC3 | c746d8e89c0eb35cc3d1b73f469a9f89f5e7bbfd | [
"MIT"
] | null | null | null | tdc3/models/py/data/FunctionNameConsistencyDataset.py | TDC3Tool/TDC3 | c746d8e89c0eb35cc3d1b73f469a9f89f5e7bbfd | [
"MIT"
] | null | null | null | import random
import torch as t
from py.data.XYDataset import XYDataset
from py.util.Config import dtype, device, en_stops
from nltk.tokenize import word_tokenize
import numpy as np
def genericDescriptionClassifier(description, embedding):
some_generic_descriptions = ['work', 'success', 'test', 'filter',
'failure', 'explodes', 'case', 'scenario', 'screen']
for generic_description in some_generic_descriptions:
if embedding.similarity(generic_description, description) > 0.5:
return True
return False
# TODO: move into more reusable class
class Embedding():
def __init__(self, embedding):
self.embedding = embedding
self.cache = {}
def get(self, token):
if token in self.cache:
return self.cache[token]
else:
vec = self.embedding[token]
self.cache[token] = vec
return vec
def similarity(self, token, comparison_token):
return self.embedding.similarity(token, comparison_token)
class FunctionNameConsistencyDataset(XYDataset):
def __init__(self, description_embedding, test_embedding, embedding_size, max_body_length, max_description_length):
self.description_embedding = description_embedding
self.test_embedding = test_embedding
self.embedding_size = embedding_size
self.max_body_length = max_body_length
self.max_description_length = max_description_length
def prepare_data(self, json_dataset):
print("Preparing dataset")
self.body_tensors = []
self.description_tensors = []
self.is_consistent_tensors = [] # consistent (1.0) inconsistent (0.0)
self.ids = [] # unique id for each item, useful for debugging
de = Embedding(self.description_embedding)
te = Embedding(self.test_embedding)
# read all data once to get test descriptions embeddings for creating negative examples
print(f"Reading all data once to get all test descriptions")
description_vectors = []
for token_seq in json_dataset:
test_description = token_seq["metadata"]["description"]
description_vec = []
for token in word_tokenize(test_description)[:self.max_description_length]:
if token not in en_stops:
description_vec.append(de.get(token))
while len(description_vec) < self.max_description_length:
description_vec.append([0] * self.embedding_size)
description_vectors.append(description_vec)
# read all data again to create positive and negative examples
print(f"Creating positive and negative examples")
next_neg_example_id = -1 # negative ids for negative examples
for idx in range(len(json_dataset)):
token_seq = json_dataset[idx]
description_vec = description_vectors[idx]
body_vec = []
for token in token_seq["data"][:self.max_body_length]:
if token not in en_stops:
body_vec.append(te.get(token))
while len(body_vec) < self.max_body_length:
body_vec.append([0] * self.embedding_size)
# positive example
self.body_tensors.append(body_vec)
self.description_tensors.append(description_vec)
self.is_consistent_tensors.append([1.0])
self.ids.append(token_seq["id"])
# negative example (randomly combine a description with a test body)
some_other_description_vec = random.choice(description_vectors)
self.body_tensors.append(body_vec)
self.description_tensors.append(some_other_description_vec)
self.is_consistent_tensors.append([0.0])
self.ids.append(next_neg_example_id)
next_neg_example_id -= 1
if len(self.body_tensors) % 1000 == 0:
print(
f"Have created {len(self.body_tensors)}/{2*len(description_vectors)} data points")
if len(self.body_tensors) % 1000 == 0:
print(
f"Have created {len(self.body_tensors)}/{2*len(description_vectors)} data points")
self.body_tensors = t.as_tensor(
self.body_tensors, dtype=dtype, device="cpu")
self.description_tensors = t.as_tensor(
self.description_tensors, dtype=dtype, device="cpu")
self.is_consistent_tensors = t.as_tensor(
self.is_consistent_tensors, dtype=dtype, device="cpu")
self.ids = t.as_tensor(
self.ids, device="cpu")
print(
f"Done with data preparation: {len(self.body_tensors)} datapoints")
def save_to_disk(self, filename):
t.save({"body_tensors": self.body_tensors,
"description_tensors": self.description_tensors,
"is_consistent_tensors": self.is_consistent_tensors,
"ids": self.ids},
filename)
def load_from_disk(self, filename):
tensors = t.load(filename)
self.body_tensors = tensors["body_tensors"]
self.description_tensors = tensors["description_tensors"]
self.is_consistent_tensors = tensors["is_consistent_tensors"]
self.ids = tensors["ids"]
def move_to_target_device(self):
print("Moving dataset to target device (e.g. GPU)")
self.body_tensors = t.as_tensor(
self.body_tensors, dtype=dtype, device=device)
self.description_tensors = t.as_tensor(
self.description_tensors, dtype=dtype, device=device)
self.is_consistent_tensors = t.as_tensor(
self.is_consistent_tensors, dtype=dtype, device=device)
self.ids = t.as_tensor(
self.ids, dtype=dtype, device=device)
def __len__(self):
return len(self.body_tensors)
def __getitem__(self, index):
return [self.body_tensors[index], self.description_tensors[index]], self.is_consistent_tensors[index], self.ids[index]
class ToyDataset(XYDataset):
def __init__(self, nb_datapoints, embedding_size, seq_length):
self.embedding_size = embedding_size
self.body_tensors = t.empty(
nb_datapoints, seq_length, embedding_size, dtype=dtype, device=device)
self.description_tensors = t.empty(
nb_datapoints, embedding_size, dtype=dtype, device=device)
self.is_consistent_tensors = t.empty(
nb_datapoints, 1, dtype=dtype, device=device)
for datapoint_idx in range(nb_datapoints):
token_vec1 = t.rand(embedding_size, dtype=dtype, device=device)
token_vec2 = t.rand(embedding_size, dtype=dtype, device=device)
token_vec3 = t.rand(embedding_size, dtype=dtype, device=device)
if datapoint_idx % 2 == 0:
for seq_idx in range(seq_length):
self.body_tensors[datapoint_idx][seq_idx] = token_vec1
self.description_tensors[datapoint_idx] = token_vec1
self.is_consistent_tensors[datapoint_idx] = 1.0
else:
for seq_idx in range(seq_length):
self.body_tensors[datapoint_idx][seq_idx] = token_vec2
self.description_tensors[datapoint_idx] = token_vec3
self.is_consistent_tensors[datapoint_idx] = 0.0
def __len__(self):
return len(self.body_tensors)
def __getitem__(self, i):
return [self.body_tensors[i], self.description_tensors[i]], self.is_consistent_tensors[i]
| 43.568966 | 126 | 0.64965 | import random
import torch as t
from py.data.XYDataset import XYDataset
from py.util.Config import dtype, device, en_stops
from nltk.tokenize import word_tokenize
import numpy as np
def genericDescriptionClassifier(description, embedding):
some_generic_descriptions = ['work', 'success', 'test', 'filter',
'failure', 'explodes', 'case', 'scenario', 'screen']
for generic_description in some_generic_descriptions:
if embedding.similarity(generic_description, description) > 0.5:
return True
return False
class Embedding():
def __init__(self, embedding):
self.embedding = embedding
self.cache = {}
def get(self, token):
if token in self.cache:
return self.cache[token]
else:
vec = self.embedding[token]
self.cache[token] = vec
return vec
def similarity(self, token, comparison_token):
return self.embedding.similarity(token, comparison_token)
class FunctionNameConsistencyDataset(XYDataset):
def __init__(self, description_embedding, test_embedding, embedding_size, max_body_length, max_description_length):
self.description_embedding = description_embedding
self.test_embedding = test_embedding
self.embedding_size = embedding_size
self.max_body_length = max_body_length
self.max_description_length = max_description_length
def prepare_data(self, json_dataset):
print("Preparing dataset")
self.body_tensors = []
self.description_tensors = []
self.is_consistent_tensors = []
self.ids = []
de = Embedding(self.description_embedding)
te = Embedding(self.test_embedding)
print(f"Reading all data once to get all test descriptions")
description_vectors = []
for token_seq in json_dataset:
test_description = token_seq["metadata"]["description"]
description_vec = []
for token in word_tokenize(test_description)[:self.max_description_length]:
if token not in en_stops:
description_vec.append(de.get(token))
while len(description_vec) < self.max_description_length:
description_vec.append([0] * self.embedding_size)
description_vectors.append(description_vec)
print(f"Creating positive and negative examples")
next_neg_example_id = -1
for idx in range(len(json_dataset)):
token_seq = json_dataset[idx]
description_vec = description_vectors[idx]
body_vec = []
for token in token_seq["data"][:self.max_body_length]:
if token not in en_stops:
body_vec.append(te.get(token))
while len(body_vec) < self.max_body_length:
body_vec.append([0] * self.embedding_size)
self.body_tensors.append(body_vec)
self.description_tensors.append(description_vec)
self.is_consistent_tensors.append([1.0])
self.ids.append(token_seq["id"])
some_other_description_vec = random.choice(description_vectors)
self.body_tensors.append(body_vec)
self.description_tensors.append(some_other_description_vec)
self.is_consistent_tensors.append([0.0])
self.ids.append(next_neg_example_id)
next_neg_example_id -= 1
if len(self.body_tensors) % 1000 == 0:
print(
f"Have created {len(self.body_tensors)}/{2*len(description_vectors)} data points")
if len(self.body_tensors) % 1000 == 0:
print(
f"Have created {len(self.body_tensors)}/{2*len(description_vectors)} data points")
self.body_tensors = t.as_tensor(
self.body_tensors, dtype=dtype, device="cpu")
self.description_tensors = t.as_tensor(
self.description_tensors, dtype=dtype, device="cpu")
self.is_consistent_tensors = t.as_tensor(
self.is_consistent_tensors, dtype=dtype, device="cpu")
self.ids = t.as_tensor(
self.ids, device="cpu")
print(
f"Done with data preparation: {len(self.body_tensors)} datapoints")
def save_to_disk(self, filename):
t.save({"body_tensors": self.body_tensors,
"description_tensors": self.description_tensors,
"is_consistent_tensors": self.is_consistent_tensors,
"ids": self.ids},
filename)
def load_from_disk(self, filename):
tensors = t.load(filename)
self.body_tensors = tensors["body_tensors"]
self.description_tensors = tensors["description_tensors"]
self.is_consistent_tensors = tensors["is_consistent_tensors"]
self.ids = tensors["ids"]
def move_to_target_device(self):
print("Moving dataset to target device (e.g. GPU)")
self.body_tensors = t.as_tensor(
self.body_tensors, dtype=dtype, device=device)
self.description_tensors = t.as_tensor(
self.description_tensors, dtype=dtype, device=device)
self.is_consistent_tensors = t.as_tensor(
self.is_consistent_tensors, dtype=dtype, device=device)
self.ids = t.as_tensor(
self.ids, dtype=dtype, device=device)
def __len__(self):
return len(self.body_tensors)
def __getitem__(self, index):
return [self.body_tensors[index], self.description_tensors[index]], self.is_consistent_tensors[index], self.ids[index]
class ToyDataset(XYDataset):
def __init__(self, nb_datapoints, embedding_size, seq_length):
self.embedding_size = embedding_size
self.body_tensors = t.empty(
nb_datapoints, seq_length, embedding_size, dtype=dtype, device=device)
self.description_tensors = t.empty(
nb_datapoints, embedding_size, dtype=dtype, device=device)
self.is_consistent_tensors = t.empty(
nb_datapoints, 1, dtype=dtype, device=device)
for datapoint_idx in range(nb_datapoints):
token_vec1 = t.rand(embedding_size, dtype=dtype, device=device)
token_vec2 = t.rand(embedding_size, dtype=dtype, device=device)
token_vec3 = t.rand(embedding_size, dtype=dtype, device=device)
if datapoint_idx % 2 == 0:
for seq_idx in range(seq_length):
self.body_tensors[datapoint_idx][seq_idx] = token_vec1
self.description_tensors[datapoint_idx] = token_vec1
self.is_consistent_tensors[datapoint_idx] = 1.0
else:
for seq_idx in range(seq_length):
self.body_tensors[datapoint_idx][seq_idx] = token_vec2
self.description_tensors[datapoint_idx] = token_vec3
self.is_consistent_tensors[datapoint_idx] = 0.0
def __len__(self):
return len(self.body_tensors)
def __getitem__(self, i):
return [self.body_tensors[i], self.description_tensors[i]], self.is_consistent_tensors[i]
| true | true |
1c344732e5c723a7e896560c0d04766385a5d092 | 201 | py | Python | Week 5/Lecture 9 - Efficiency and Orders of Growth/In-Video Problems/Lec9.2Slide2.py | roshanM99/edX--mitX--introduction-to-computer-science-and-programming-with-python | 81a7247e8442feddd624b5dbcd70cde1b58d2965 | [
"MIT"
] | null | null | null | Week 5/Lecture 9 - Efficiency and Orders of Growth/In-Video Problems/Lec9.2Slide2.py | roshanM99/edX--mitX--introduction-to-computer-science-and-programming-with-python | 81a7247e8442feddd624b5dbcd70cde1b58d2965 | [
"MIT"
] | null | null | null | Week 5/Lecture 9 - Efficiency and Orders of Growth/In-Video Problems/Lec9.2Slide2.py | roshanM99/edX--mitX--introduction-to-computer-science-and-programming-with-python | 81a7247e8442feddd624b5dbcd70cde1b58d2965 | [
"MIT"
] | null | null | null | # Lecture 9.2, slide 2
def f(x):
for i in range(1000):
ans = i
for i in range(x):
ans += 1
for i in range(x):
for j in range(x):
ans += 1
return ans | 18.272727 | 26 | 0.457711 |
def f(x):
for i in range(1000):
ans = i
for i in range(x):
ans += 1
for i in range(x):
for j in range(x):
ans += 1
return ans | true | true |
1c34479a78d8bad008e561dc974b9c0fdb23c85a | 2,639 | py | Python | encodings/hp_roman8.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | 1 | 2020-03-12T16:44:10.000Z | 2020-03-12T16:44:10.000Z | encodings/hp_roman8.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | encodings/hp_roman8.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: encodings.hp_roman8
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_map)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_map)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='hp-roman8', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader)
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({161: 192,
162: 194,
163: 200,
164: 202,
165: 203,
166: 206,
167: 207,
168: 180,
169: 715,
170: 710,
171: 168,
172: 732,
173: 217,
174: 219,
175: 8356,
176: 175,
177: 221,
178: 253,
179: 176,
180: 199,
181: 231,
182: 209,
183: 241,
184: 161,
185: 191,
186: 164,
187: 163,
188: 165,
189: 167,
190: 402,
191: 162,
192: 226,
193: 234,
194: 244,
195: 251,
196: 225,
197: 233,
198: 243,
199: 250,
200: 224,
201: 232,
202: 242,
203: 249,
204: 228,
205: 235,
206: 246,
207: 252,
208: 197,
209: 238,
210: 216,
211: 198,
212: 229,
213: 237,
214: 248,
215: 230,
216: 196,
217: 236,
218: 214,
219: 220,
220: 201,
221: 239,
222: 223,
223: 212,
224: 193,
225: 195,
226: 227,
227: 208,
228: 240,
229: 205,
230: 204,
231: 211,
232: 210,
233: 213,
234: 245,
235: 352,
236: 353,
237: 218,
238: 376,
239: 255,
240: 222,
241: 254,
242: 183,
243: 181,
244: 182,
245: 190,
246: 8212,
247: 188,
248: 189,
249: 170,
250: 186,
251: 171,
252: 9632,
253: 187,
254: 177,
255: None})
encoding_map = codecs.make_encoding_map(decoding_map) | 19.404412 | 223 | 0.594164 |
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_map)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_map)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='hp-roman8', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader)
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({161: 192,
162: 194,
163: 200,
164: 202,
165: 203,
166: 206,
167: 207,
168: 180,
169: 715,
170: 710,
171: 168,
172: 732,
173: 217,
174: 219,
175: 8356,
176: 175,
177: 221,
178: 253,
179: 176,
180: 199,
181: 231,
182: 209,
183: 241,
184: 161,
185: 191,
186: 164,
187: 163,
188: 165,
189: 167,
190: 402,
191: 162,
192: 226,
193: 234,
194: 244,
195: 251,
196: 225,
197: 233,
198: 243,
199: 250,
200: 224,
201: 232,
202: 242,
203: 249,
204: 228,
205: 235,
206: 246,
207: 252,
208: 197,
209: 238,
210: 216,
211: 198,
212: 229,
213: 237,
214: 248,
215: 230,
216: 196,
217: 236,
218: 214,
219: 220,
220: 201,
221: 239,
222: 223,
223: 212,
224: 193,
225: 195,
226: 227,
227: 208,
228: 240,
229: 205,
230: 204,
231: 211,
232: 210,
233: 213,
234: 245,
235: 352,
236: 353,
237: 218,
238: 376,
239: 255,
240: 222,
241: 254,
242: 183,
243: 181,
244: 182,
245: 190,
246: 8212,
247: 188,
248: 189,
249: 170,
250: 186,
251: 171,
252: 9632,
253: 187,
254: 177,
255: None})
encoding_map = codecs.make_encoding_map(decoding_map) | true | true |
1c34479adff6a45dc6ea14c7d57795f293fd8a8e | 399 | py | Python | onmt/encoders/__init__.py | philhchen/OpenNMT-evidential-softmax | 87709ce1cf7bda783aed4a64c096fa23282e7aa9 | [
"MIT"
] | null | null | null | onmt/encoders/__init__.py | philhchen/OpenNMT-evidential-softmax | 87709ce1cf7bda783aed4a64c096fa23282e7aa9 | [
"MIT"
] | null | null | null | onmt/encoders/__init__.py | philhchen/OpenNMT-evidential-softmax | 87709ce1cf7bda783aed4a64c096fa23282e7aa9 | [
"MIT"
] | null | null | null | """Module defining encoders."""
from onmt.encoders.encoder import EncoderBase
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
__all__ = [
"EncoderBase",
"TransformerEncoder",
"RNNEncoder",
"CNNEncoder",
"MeanEncoder",
]
| 26.6 | 56 | 0.779449 | from onmt.encoders.encoder import EncoderBase
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
__all__ = [
"EncoderBase",
"TransformerEncoder",
"RNNEncoder",
"CNNEncoder",
"MeanEncoder",
]
| true | true |
1c3447ef6cb0d182f4ceaf86c91f75002f05e23d | 137,113 | py | Python | goodies/ospexporter/import_fbx.py | Ghimli/new-ospgl | 31bd84e52d954683671211ff16ce8702bdb87312 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | goodies/ospexporter/import_fbx.py | Ghimli/new-ospgl | 31bd84e52d954683671211ff16ce8702bdb87312 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | goodies/ospexporter/import_fbx.py | Ghimli/new-ospgl | 31bd84e52d954683671211ff16ce8702bdb87312 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Script copyright (C) Blender Foundation
# FBX 7.1.0 -> 7.4.0 loader for Blender
# Not totally pep8 compliant.
# pep8 import_fbx.py --ignore=E501,E123,E702,E125
# Support Blender's "Reload Scripts": if this module was already imported once,
# 'bpy' is present in locals(), so force-reload our submodules to pick up edits.
if "bpy" in locals():
    import importlib
    if "parse_fbx" in locals():
        importlib.reload(parse_fbx)
    if "fbx_utils" in locals():
        importlib.reload(fbx_utils)
import bpy
from mathutils import Matrix, Euler, Vector
# -----
# Utils
from . import parse_fbx, fbx_utils
from .parse_fbx import (
data_types,
FBXElem,
)
from .fbx_utils import (
PerfMon,
units_blender_to_fbx_factor,
units_convertor_iter,
array_to_matrix4,
similar_values,
similar_values_iter,
FBXImportSettings,
)
# Global singleton standing in for a "nil" FBX element; per the original note it
# is assigned on execution (presumably by the main load path — outside this view).
fbx_elem_nil = None

# Units convertor: FBX stores rotations in degrees, Blender uses radians.
convert_deg_to_rad_iter = units_convertor_iter("degree", "radian")

# Inverses of the export-side axis-correction matrices from fbx_utils, i.e. the
# FBX -> Blender direction for bones, lights and cameras.
MAT_CONVERT_BONE = fbx_utils.MAT_CONVERT_BONE.inverted()
MAT_CONVERT_LIGHT = fbx_utils.MAT_CONVERT_LIGHT.inverted()
MAT_CONVERT_CAMERA = fbx_utils.MAT_CONVERT_CAMERA.inverted()
def validate_blend_names(name):
    """Return a Blender-safe UTF-8 name for the given FBX bytes name.

    Blender ID names are limited to 63 bytes; longer names get truncated and
    suffixed with a short SHA1-based digest to keep them distinguishable.
    Decoding always uses 'replace', since FBX files do not reliably contain
    valid UTF-8 despite what the 'specs' say (see T53841).
    """
    assert(type(name) == bytes)
    if len(name) <= 63:
        # Short enough, decode as-is.
        return name.decode('utf-8', 'replace')
    import hashlib
    # Keep a 7-char digest suffix, then shrink the prefix until the encoded
    # result fits into Blender's 63-byte limit.
    digest = hashlib.sha1(name).hexdigest()[:7]
    cut = 55
    candidate = name[:cut].decode('utf-8', 'replace') + "_" + digest
    while len(candidate.encode()) > 63:
        cut -= 1
        candidate = name[:cut].decode('utf-8', 'replace') + "_" + digest
    return candidate
def elem_find_first(elem, id_search, default=None):
    """Return the first direct child of *elem* whose id matches, else *default*."""
    matches = (child for child in elem.elems if child.id == id_search)
    return next(matches, default)
def elem_find_iter(elem, id_search):
    """Yield every direct child of *elem* whose id equals *id_search*."""
    for child in elem.elems:
        if child.id != id_search:
            continue
        yield child
def elem_find_first_string(elem, id_search):
    """Find the first child with matching id and return its single string prop, decoded.

    Returns None when no such child exists, or when its properties are completely
    empty (some files ship fully empty properties, see T45291).
    """
    item = elem_find_first(elem, id_search)
    if item is None or not item.props:
        return None
    assert(len(item.props) == 1)
    assert(item.props_type[0] == data_types.STRING)
    return item.props[0].decode('utf-8', 'replace')
def elem_find_first_string_as_bytes(elem, id_search):
    """Like elem_find_first_string, but return the raw bytes without decoding."""
    item = elem_find_first(elem, id_search)
    if item is None or not item.props:
        # Tolerate completely empty properties (see T45291).
        return None
    assert(len(item.props) == 1)
    assert(item.props_type[0] == data_types.STRING)
    return item.props[0]
def elem_find_first_bytes(elem, id_search, decode=True):
    """Find the first child with matching id and return its single BYTES property.

    NOTE(review): the *decode* parameter is currently unused; it is kept only
    for backward compatibility with existing callers.
    """
    item = elem_find_first(elem, id_search)
    if item is None or not item.props:
        # Tolerate completely empty properties (see T45291).
        return None
    assert(len(item.props) == 1)
    assert(item.props_type[0] == data_types.BYTES)
    return item.props[0]
def elem_repr(elem):
    """Build a short debug string for an FBX element (id, props, child ids)."""
    props_repr = ", ".join(repr(prop) for prop in elem.props)
    child_ids = b", ".join(child.id for child in elem.elems)
    return "%s: props[%d=%r], elems=(%r)" % (
        elem.id,
        len(elem.props),
        props_repr,
        child_ids,
    )
def elem_split_name_class(elem):
    """Split the penultimate 'Name\\x00\\x01Class' string prop into (name, class) bytes."""
    assert(elem.props_type[-2] == data_types.STRING)
    name, clss = elem.props[-2].split(b'\x00\x01')
    return name, clss
def elem_name_ensure_class(elem, clss=...):
    """Return the element's validated name, asserting its class equals *clss* (if given)."""
    name, real_clss = elem_split_name_class(elem)
    if clss is not ...:
        assert(real_clss == clss)
    return validate_blend_names(name)
def elem_name_ensure_classes(elem, clss=...):
    """Return the element's validated name, asserting its class is one of *clss* (if given)."""
    name, real_clss = elem_split_name_class(elem)
    if clss is not ...:
        assert(real_clss in clss)
    return validate_blend_names(name)
def elem_split_name_class_nodeattr(elem):
    """Split a NodeAttribute element into (name, attribute-type class).

    The penultimate string prop must carry 'Name\\x00\\x01NodeAttribute';
    the concrete attribute class is stored in the last string prop.
    """
    assert(elem.props_type[-2] == data_types.STRING)
    name, node_clss = elem.props[-2].split(b'\x00\x01')
    assert(node_clss == b'NodeAttribute')
    assert(elem.props_type[-1] == data_types.STRING)
    return name, elem.props[-1]
def elem_uuid(elem):
    """Return the element's UUID (its first property, which must be an int64)."""
    assert(elem.props_type[0] == data_types.INT64)
    return elem.props[0]
def elem_prop_first(elem, default=None):
    """Return the first property of *elem*, or *default* if it is None or prop-less."""
    if elem is None or not elem.props:
        return default
    return elem.props[0]
# ----
# Support for
# Properties70: { ... P:
def elem_props_find_first(elem, elem_prop_id):
    """Return the 'P' sub-element whose first prop equals *elem_prop_id*, or None.

    *elem* may be a single Properties70 FBXElem, or a tuple of them (template +
    local properties); tuples are searched in order, first match wins.
    """
    if elem is None:
        # Missing Properties70 block; should never happen, but happens - as usual.
        return None
    if type(elem) is not FBXElem:
        # Template support: a non-empty tuple of property elements.
        assert(type(elem) is tuple)
        assert(len(elem) > 0)
        for candidate in elem:
            found = elem_props_find_first(candidate, elem_prop_id)
            if found is not None:
                return found
        return None
    for prop_elem in elem.elems:
        assert(prop_elem.id == b'P')
        if prop_elem.props[0] == elem_prop_id:
            return prop_elem
    return None
def elem_props_get_color_rgb(elem, elem_prop_id, default=None):
    """Read an RGB color property as a 3-tuple of floats, or *default* if absent.

    Handles both encodings seen in the wild: the 'Color' type pair (FBX 7300)
    and the older 'ColorRGB'/'Color' one.
    """
    elem_prop = elem_props_find_first(elem, elem_prop_id)
    if elem_prop is not None:
        assert(elem_prop.props[0] == elem_prop_id)
        if elem_prop.props[1] == b'Color':
            # FBX version 7300
            assert(elem_prop.props[1] == b'Color')
            assert(elem_prop.props[2] == b'')
        else:
            assert(elem_prop.props[1] == b'ColorRGB')
            assert(elem_prop.props[2] == b'Color')
        # The three components are stored as consecutive float64 props.
        assert(elem_prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3)
        return elem_prop.props[4:7]
    return default
def elem_props_get_vector_3d(elem, elem_prop_id, default=None):
    """Read a 3D vector property as a 3-tuple of floats, or *default* if absent."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    # Three consecutive float64 components.
    assert(prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3)
    return prop.props[4:7]
def elem_props_get_number(elem, elem_prop_id, default=None):
    """Read a floating point number property, or *default* if absent.

    Accepts both the 'double'/'Number' and the 'Number'/'' type encodings.
    """
    elem_prop = elem_props_find_first(elem, elem_prop_id)
    if elem_prop is not None:
        assert(elem_prop.props[0] == elem_prop_id)
        if elem_prop.props[1] == b'double':
            assert(elem_prop.props[1] == b'double')
            assert(elem_prop.props[2] == b'Number')
        else:
            assert(elem_prop.props[1] == b'Number')
            assert(elem_prop.props[2] == b'')
        # we could allow other number types
        assert(elem_prop.props_type[4] == data_types.FLOAT64)
        return elem_prop.props[4]
    return default
def elem_props_get_integer(elem, elem_prop_id, default=None):
    """Read an integer property (int32 or int64 storage), or *default* if absent.

    Accepts both the 'int'/'Integer' and 'ULongLong'/'' type encodings; any
    other type tag falls through to the storage-type assertion below.
    """
    elem_prop = elem_props_find_first(elem, elem_prop_id)
    if elem_prop is not None:
        assert(elem_prop.props[0] == elem_prop_id)
        if elem_prop.props[1] == b'int':
            assert(elem_prop.props[1] == b'int')
            assert(elem_prop.props[2] == b'Integer')
        elif elem_prop.props[1] == b'ULongLong':
            assert(elem_prop.props[1] == b'ULongLong')
            assert(elem_prop.props[2] == b'')
        # we could allow other number types
        assert(elem_prop.props_type[4] in {data_types.INT32, data_types.INT64})
        return elem_prop.props[4]
    return default
def elem_props_get_bool(elem, elem_prop_id, default=None):
    """Read a boolean property, or *default* if absent."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    # b'Bool' with a capital seems to be used for animated property... go figure...
    assert(prop.props[1] in {b'bool', b'Bool'})
    assert(prop.props[2] == b'')
    # Stored as an int32 that must be exactly 0 or 1.
    assert(prop.props_type[4] == data_types.INT32)
    assert(prop.props[4] in {0, 1})
    return bool(prop.props[4])
def elem_props_get_enum(elem, elem_prop_id, default=None):
    """Read an enum property as its raw int32 value, or *default* if absent."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    assert(prop.props[1] == b'enum')
    assert(prop.props[2] == b'')
    assert(prop.props[3] == b'')
    # Enum values are stored as int32.
    assert(prop.props_type[4] == data_types.INT32)
    return prop.props[4]
def elem_props_get_visibility(elem, elem_prop_id, default=None):
    """Read a 'Visibility' property as a float, or *default* if absent."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    assert(prop.props[1] == b'Visibility')
    assert(prop.props[2] == b'')
    # Visibility is stored as a float64.
    assert(prop.props_type[4] == data_types.FLOAT64)
    return prop.props[4]
# ----------------------------------------------------------------------------
# Blender
# ------
# Object
from collections import namedtuple
# All the data needed to rebuild a node's full FBX local transform matrix;
# see blen_read_object_transform_do() for how the fields combine.
FBXTransformData = namedtuple("FBXTransformData",
    "loc geom_loc "
    "rot rot_ofs rot_piv pre_rot pst_rot rot_ord rot_alt_mat geom_rot "
    "sca sca_ofs sca_piv geom_sca")
def blen_read_custom_properties(fbx_obj, blen_obj, settings):
    """Copy FBX user ('U'-flagged) properties from *fbx_obj* onto *blen_obj* as ID props.

    Each supported FBX property type is mapped to a suitable Python value;
    unsupported types only print a warning. 3DS Max packs all its user
    properties into a single 'UDP3DSMAX' string, handled as a special case.
    """
    # There doesn't seem to be a way to put user properties into templates, so this only get the object properties:
    fbx_obj_props = elem_find_first(fbx_obj, b'Properties70')
    if fbx_obj_props:
        for fbx_prop in fbx_obj_props.elems:
            assert(fbx_prop.id == b'P')

            # Only properties whose flags contain 'U' are user properties.
            if b'U' in fbx_prop.props[3]:
                if fbx_prop.props[0] == b'UDP3DSMAX':
                    # Special case for 3DS Max user properties:
                    # one big KString of 'name=value' lines separated by CRLF.
                    assert(fbx_prop.props[1] == b'KString')
                    assert(fbx_prop.props_type[4] == data_types.STRING)
                    items = fbx_prop.props[4].decode('utf-8', 'replace')
                    for item in items.split('\r\n'):
                        if item:
                            prop_name, prop_value = item.split('=', 1)
                            prop_name = validate_blend_names(prop_name.strip().encode('utf-8'))
                            blen_obj[prop_name] = prop_value.strip()
                else:
                    # Generic case: dispatch on the FBX type tag.
                    prop_name = validate_blend_names(fbx_prop.props[0])
                    prop_type = fbx_prop.props[1]
                    if prop_type in {b'Vector', b'Vector3D', b'Color', b'ColorRGB'}:
                        assert(fbx_prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3)
                        blen_obj[prop_name] = fbx_prop.props[4:7]
                    elif prop_type in {b'Vector4', b'ColorRGBA'}:
                        assert(fbx_prop.props_type[4:8] == bytes((data_types.FLOAT64,)) * 4)
                        blen_obj[prop_name] = fbx_prop.props[4:8]
                    elif prop_type == b'Vector2D':
                        assert(fbx_prop.props_type[4:6] == bytes((data_types.FLOAT64,)) * 2)
                        blen_obj[prop_name] = fbx_prop.props[4:6]
                    elif prop_type in {b'Integer', b'int'}:
                        assert(fbx_prop.props_type[4] == data_types.INT32)
                        blen_obj[prop_name] = fbx_prop.props[4]
                    elif prop_type == b'KString':
                        assert(fbx_prop.props_type[4] == data_types.STRING)
                        blen_obj[prop_name] = fbx_prop.props[4].decode('utf-8', 'replace')
                    elif prop_type in {b'Number', b'double', b'Double'}:
                        assert(fbx_prop.props_type[4] == data_types.FLOAT64)
                        blen_obj[prop_name] = fbx_prop.props[4]
                    elif prop_type in {b'Float', b'float'}:
                        assert(fbx_prop.props_type[4] == data_types.FLOAT32)
                        blen_obj[prop_name] = fbx_prop.props[4]
                    elif prop_type in {b'Bool', b'bool'}:
                        assert(fbx_prop.props_type[4] == data_types.INT32)
                        blen_obj[prop_name] = fbx_prop.props[4] != 0
                    elif prop_type in {b'Enum', b'enum'}:
                        assert(fbx_prop.props_type[4:6] == bytes((data_types.INT32, data_types.STRING)))
                        val = fbx_prop.props[4]
                        # Optionally store the enum as its '~'-separated item string
                        # rather than the raw index, depending on import settings.
                        if settings.use_custom_props_enum_as_string and fbx_prop.props[5]:
                            enum_items = fbx_prop.props[5].decode('utf-8', 'replace').split('~')
                            assert(val >= 0 and val < len(enum_items))
                            blen_obj[prop_name] = enum_items[val]
                        else:
                            blen_obj[prop_name] = val
                    else:
                        print ("WARNING: User property type '%s' is not supported" % prop_type.decode('utf-8', 'replace'))
def blen_read_object_transform_do(transform_data):
    """Compose the full FBX local transform from an FBXTransformData.

    Returns a 3-tuple of 4x4 matrices:
    (base @ geometric, base only, geometric only). The base matrix (without
    the geometric transform) is what children must inherit, since FBX
    geometric transforms are not inherited.

    The multiplication order below is critical and must not be rearranged.
    """
    # This is a nightmare. FBX SDK uses Maya way to compute the transformation matrix of a node - utterly simple:
    #
    #     WorldTransform = ParentWorldTransform @ T @ Roff @ Rp @ Rpre @ R @ Rpost @ Rp-1 @ Soff @ Sp @ S @ Sp-1
    #
    # Where all those terms are 4 x 4 matrices that contain:
    #     WorldTransform: Transformation matrix of the node in global space.
    #     ParentWorldTransform: Transformation matrix of the parent node in global space.
    #     T: Translation
    #     Roff: Rotation offset
    #     Rp: Rotation pivot
    #     Rpre: Pre-rotation
    #     R: Rotation
    #     Rpost: Post-rotation
    #     Rp-1: Inverse of the rotation pivot
    #     Soff: Scaling offset
    #     Sp: Scaling pivot
    #     S: Scaling
    #     Sp-1: Inverse of the scaling pivot
    #
    # But it was still too simple, and FBX notion of compatibility is... quite specific. So we also have to
    # support 3DSMax way:
    #
    #     WorldTransform = ParentWorldTransform @ T @ R @ S @ OT @ OR @ OS
    #
    # Where all those terms are 4 x 4 matrices that contain:
    #     WorldTransform: Transformation matrix of the node in global space
    #     ParentWorldTransform: Transformation matrix of the parent node in global space
    #     T: Translation
    #     R: Rotation
    #     S: Scaling
    #     OT: Geometric transform translation
    #     OR: Geometric transform rotation
    #     OS: Geometric transform translation
    #
    # Notes:
    #     Geometric transformations ***are not inherited***: ParentWorldTransform does not contain the OT, OR, OS
    #     of WorldTransform's parent node.
    #
    # Taken from http://download.autodesk.com/us/fbx/20112/FBX_SDK_HELP/
    #            index.html?url=WS1a9193826455f5ff1f92379812724681e696651.htm,topicNumber=d0e7429

    # translation
    lcl_translation = Matrix.Translation(transform_data.loc)
    geom_loc = Matrix.Translation(transform_data.geom_loc)

    # rotation (FBX stores degrees; converted to radians before building Eulers)
    to_rot = lambda rot, rot_ord: Euler(convert_deg_to_rad_iter(rot), rot_ord).to_matrix().to_4x4()
    lcl_rot = to_rot(transform_data.rot, transform_data.rot_ord) @ transform_data.rot_alt_mat
    pre_rot = to_rot(transform_data.pre_rot, transform_data.rot_ord)
    pst_rot = to_rot(transform_data.pst_rot, transform_data.rot_ord)
    geom_rot = to_rot(transform_data.geom_rot, transform_data.rot_ord)

    rot_ofs = Matrix.Translation(transform_data.rot_ofs)
    rot_piv = Matrix.Translation(transform_data.rot_piv)
    sca_ofs = Matrix.Translation(transform_data.sca_ofs)
    sca_piv = Matrix.Translation(transform_data.sca_piv)

    # scale (written directly into the diagonal of an identity matrix)
    lcl_scale = Matrix()
    lcl_scale[0][0], lcl_scale[1][1], lcl_scale[2][2] = transform_data.sca
    geom_scale = Matrix();
    geom_scale[0][0], geom_scale[1][1], geom_scale[2][2] = transform_data.geom_sca

    # Maya/FBX-SDK composition order, as documented above.
    base_mat = (
        lcl_translation @
        rot_ofs @
        rot_piv @
        pre_rot @
        lcl_rot @
        pst_rot @
        rot_piv.inverted_safe() @
        sca_ofs @
        sca_piv @
        lcl_scale @
        sca_piv.inverted_safe()
    )
    geom_mat = geom_loc @ geom_rot @ geom_scale
    # We return mat without 'geometric transforms' too, because it is to be used for children, sigh...
    return (base_mat @ geom_mat, base_mat, geom_mat)
# XXX This might be weak, now that we can add vgroups from both bones and shapes, name collisions become
# more likely, will have to make this more robust!!!
def add_vgroup_to_objects(vg_indices, vg_weights, vg_name, objects):
    """Assign (index, weight) pairs to vertex group *vg_name* on every object.

    An existing group of that name is reused and its weights replaced,
    otherwise a new group is created. Does nothing when there are no indices.
    """
    assert(len(vg_indices) == len(vg_weights))
    if not vg_indices:
        return
    for obj in objects:
        # We replace/override any existing group of the same name.
        group = obj.vertex_groups.get(vg_name)
        if group is None:
            group = obj.vertex_groups.new(name=vg_name)
        for vidx, weight in zip(vg_indices, vg_weights):
            group.add((vidx,), weight, 'REPLACE')
def blen_read_object_transform_preprocess(fbx_props, fbx_obj, rot_alt_mat, use_prepost_rot):
    """Gather all transform-related FBX properties into an FBXTransformData.

    This is quite involved; 'fbxRNode.cpp' from openscenegraph was used as
    a reference for defaults and semantics.
    """
    vec_zero = 0.0, 0.0, 0.0
    vec_one = 1.0, 1.0, 1.0

    # loc/rot/sca are mutable lists because animation baking writes into them.
    loc = list(elem_props_get_vector_3d(fbx_props, b'Lcl Translation', vec_zero))
    rot = list(elem_props_get_vector_3d(fbx_props, b'Lcl Rotation', vec_zero))
    sca = list(elem_props_get_vector_3d(fbx_props, b'Lcl Scaling', vec_one))

    geom_loc = list(elem_props_get_vector_3d(fbx_props, b'GeometricTranslation', vec_zero))
    geom_rot = list(elem_props_get_vector_3d(fbx_props, b'GeometricRotation', vec_zero))
    geom_sca = list(elem_props_get_vector_3d(fbx_props, b'GeometricScaling', vec_one))

    rot_ofs = elem_props_get_vector_3d(fbx_props, b'RotationOffset', vec_zero)
    rot_piv = elem_props_get_vector_3d(fbx_props, b'RotationPivot', vec_zero)
    sca_ofs = elem_props_get_vector_3d(fbx_props, b'ScalingOffset', vec_zero)
    sca_piv = elem_props_get_vector_3d(fbx_props, b'ScalingPivot', vec_zero)

    # Pre/post rotation and rotation order only apply when rotation is active.
    if elem_props_get_bool(fbx_props, b'RotationActive', False):
        pre_rot = pst_rot = vec_zero
        if use_prepost_rot:
            pre_rot = elem_props_get_vector_3d(fbx_props, b'PreRotation', vec_zero)
            pst_rot = elem_props_get_vector_3d(fbx_props, b'PostRotation', vec_zero)
        rot_ord = {
            0: 'XYZ',
            1: 'XZY',
            2: 'YZX',
            3: 'YXZ',
            4: 'ZXY',
            5: 'ZYX',
            6: 'XYZ',  # XXX eSphericXYZ, not really supported...
        }.get(elem_props_get_enum(fbx_props, b'RotationOrder', 0))
    else:
        pre_rot = pst_rot = vec_zero
        rot_ord = 'XYZ'

    return FBXTransformData(loc, geom_loc,
                            rot, rot_ofs, rot_piv, pre_rot, pst_rot, rot_ord, rot_alt_mat, geom_rot,
                            sca, sca_ofs, sca_piv, geom_sca)
# ---------
# Animation
def blen_read_animations_curves_iter(fbx_curves, blen_start_offset, fbx_start_offset, fps):
    """
    Get raw FBX AnimCurve list, and yield values for all curves at each singular curves' keyframes,
    together with (blender) timing, in frames.
    blen_start_offset is expected in frames, while fbx_start_offset is expected in FBX ktime.

    Yields (blender_frame, [(value, fbx_curve), ...]) pairs, one per unique
    key time across all curves, with linearly interpolated values for curves
    that have no key at that exact time.
    """
    # As a first step, assume linear interpolation between key frames, we'll (try to!) handle more
    # of FBX curves later.
    from .fbx_utils import FBX_KTIME
    timefac = fps / FBX_KTIME

    # One mutable cursor per curve: [current_key_index, key_times, key_values, fbx_curve].
    curves = tuple([0,
                    elem_prop_first(elem_find_first(c[2], b'KeyTime')),
                    elem_prop_first(elem_find_first(c[2], b'KeyValueFloat')),
                    c]
                   for c in fbx_curves)

    # Merged, deduplicated, sorted set of every key time of every curve.
    allkeys = sorted({item for sublist in curves for item in sublist[1]})
    for curr_fbxktime in allkeys:
        curr_values = []
        for item in curves:
            idx, times, values, fbx_curve = item

            # Advance this curve's cursor past the current time if needed;
            # idx == -1 is a sentinel meaning "stay on the last key forever".
            if times[idx] < curr_fbxktime:
                if idx >= 0:
                    idx += 1
                    if idx >= len(times):
                        # We have reached our last element for this curve, stay on it from now on...
                        idx = -1
                    item[0] = idx

            if times[idx] >= curr_fbxktime:
                if idx == 0:
                    # Before (or at) the first key: use it directly.
                    curr_values.append((values[idx], fbx_curve))
                else:
                    # Interpolate between this key and the previous one.
                    ifac = (curr_fbxktime - times[idx - 1]) / (times[idx] - times[idx - 1])
                    curr_values.append(((values[idx] - values[idx - 1]) * ifac + values[idx - 1], fbx_curve))
        curr_blenkframe = (curr_fbxktime - fbx_start_offset) * timefac + blen_start_offset
        yield (curr_blenkframe, curr_values)
def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset):
    """
    'Bake' loc/rot/scale into the action,
    taking any pre_ and post_ matrix into account to transform from fbx into blender space.

    *item* may be a Material, ShapeKey, Camera, or an import helper node
    wrapping an Object/PoseBone; each kind gets its own set of F-Curves.
    *cnodes* maps curve nodes to their FBX AnimCurves, *anim_offset* is the
    Blender frame offset applied to all keys.
    """
    from bpy.types import Object, PoseBone, ShapeKey, Material, Camera
    from itertools import chain

    # Flatten cnodes into (fbx_property, channel, anim_curve_data) triplets.
    fbx_curves = []
    for curves, fbxprop in cnodes.values():
        for (fbx_acdata, _blen_data), channel in curves.values():
            fbx_curves.append((fbxprop, channel, fbx_acdata))

    # Leave if no curves are attached (if a blender curve is attached to scale but without keys it defaults to 0).
    if len(fbx_curves) == 0:
        return

    blen_curves = []
    props = []

    # Decide which RNA paths (and how many channels each) this item animates.
    if isinstance(item, Material):
        grpname = item.name
        props = [("diffuse_color", 3, grpname or "Diffuse Color")]
    elif isinstance(item, ShapeKey):
        props = [(item.path_from_id("value"), 1, "Key")]
    elif isinstance(item, Camera):
        props = [(item.path_from_id("lens"), 1, "Camera")]
    else:  # Object or PoseBone:
        if item.is_bone:
            bl_obj = item.bl_obj.pose.bones[item.bl_bone]
        else:
            bl_obj = item.bl_obj

        # We want to create actions for objects, but for bones we 'reuse' armatures' actions!
        grpname = item.bl_obj.name

        # Since we might get other channels animated in the end, due to all FBX transform magic,
        # we need to add curves for whole loc/rot/scale in any case.
        props = [(bl_obj.path_from_id("location"), 3, grpname or "Location"),
                 None,
                 (bl_obj.path_from_id("scale"), 3, grpname or "Scale")]
        rot_mode = bl_obj.rotation_mode
        if rot_mode == 'QUATERNION':
            props[1] = (bl_obj.path_from_id("rotation_quaternion"), 4, grpname or "Quaternion Rotation")
        elif rot_mode == 'AXIS_ANGLE':
            props[1] = (bl_obj.path_from_id("rotation_axis_angle"), 4, grpname or "Axis Angle Rotation")
        else:  # Euler
            props[1] = (bl_obj.path_from_id("rotation_euler"), 3, grpname or "Euler Rotation")

    blen_curves = [action.fcurves.new(prop, index=channel, action_group=grpname)
                   for prop, nbr_channels, grpname in props for channel in range(nbr_channels)]

    if isinstance(item, Material):
        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            value = [0,0,0]
            for v, (fbxprop, channel, _fbx_acdata) in values:
                assert(fbxprop == b'DiffuseColor')
                assert(channel in {0, 1, 2})
                value[channel] = v

            for fc, v in zip(blen_curves, value):
                fc.keyframe_points.insert(frame, v, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'

    elif isinstance(item, ShapeKey):
        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            value = 0.0
            for v, (fbxprop, channel, _fbx_acdata) in values:
                assert(fbxprop == b'DeformPercent')
                assert(channel == 0)
                # FBX deform percent (0..100) -> Blender shapekey value (0..1).
                value = v / 100.0

            for fc, v in zip(blen_curves, (value,)):
                fc.keyframe_points.insert(frame, v, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'

    elif isinstance(item, Camera):
        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            value = 0.0
            for v, (fbxprop, channel, _fbx_acdata) in values:
                assert(fbxprop == b'FocalLength')
                assert(channel == 0)
                value = v

            for fc, v in zip(blen_curves, (value,)):
                fc.keyframe_points.insert(frame, v, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'

    else:  # Object or PoseBone:
        if item.is_bone:
            bl_obj = item.bl_obj.pose.bones[item.bl_bone]
        else:
            bl_obj = item.bl_obj

        transform_data = item.fbx_transform_data
        rot_eul_prev = bl_obj.rotation_euler.copy()
        rot_quat_prev = bl_obj.rotation_quaternion.copy()

        # Pre-compute inverted local rest matrix of the bone, if relevant.
        restmat_inv = item.get_bind_matrix().inverted_safe() if item.is_bone else None

        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            # Patch the curve values into transform_data, then rebuild the matrix.
            for v, (fbxprop, channel, _fbx_acdata) in values:
                if fbxprop == b'Lcl Translation':
                    transform_data.loc[channel] = v
                elif fbxprop == b'Lcl Rotation':
                    transform_data.rot[channel] = v
                elif fbxprop == b'Lcl Scaling':
                    transform_data.sca[channel] = v
            mat, _, _ = blen_read_object_transform_do(transform_data)

            # compensate for changes in the local matrix during processing
            if item.anim_compensation_matrix:
                mat = mat @ item.anim_compensation_matrix

            # apply pre- and post matrix
            # post-matrix will contain any correction for lights, camera and bone orientation
            # pre-matrix will contain any correction for a parent's correction matrix or the global matrix
            if item.pre_matrix:
                mat = item.pre_matrix @ mat
            if item.post_matrix:
                mat = mat @ item.post_matrix

            # And now, remove that rest pose matrix from current mat (also in parent space).
            if restmat_inv:
                mat = restmat_inv @ mat

            # Now we have a virtual matrix of transform from AnimCurves, we can insert keyframes!
            loc, rot, sca = mat.decompose()
            if rot_mode == 'QUATERNION':
                # Keep quaternion continuity (avoid sign flips between frames).
                if rot_quat_prev.dot(rot) < 0.0:
                    rot = -rot
                rot_quat_prev = rot
            elif rot_mode == 'AXIS_ANGLE':
                vec, ang = rot.to_axis_angle()
                rot = ang, vec.x, vec.y, vec.z
            else:  # Euler
                rot = rot.to_euler(rot_mode, rot_eul_prev)
                rot_eul_prev = rot
            for fc, value in zip(blen_curves, chain(loc, rot, sca)):
                fc.keyframe_points.insert(frame, value, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'

    # Since we inserted our keyframes in 'FAST' mode, we have to update the fcurves now.
    for fc in blen_curves:
        fc.update()
def blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, anim_offset):
    """
    Recreate an action per stack/layer/object combinations.
    Only the first found action is linked to objects, more complex setups are not handled,
    it's up to user to reproduce them!
    """
    from bpy.types import ShapeKey, Material, Camera

    actions = {}
    for as_uuid, ((fbx_asdata, _blen_data), alayers) in stacks.items():
        stack_name = elem_name_ensure_class(fbx_asdata, b'AnimStack')
        for al_uuid, ((fbx_aldata, _blen_data), items) in alayers.items():
            layer_name = elem_name_ensure_class(fbx_aldata, b'AnimLayer')
            for item, cnodes in items.items():
                # Resolve the Blender ID datablock the action will live on.
                if isinstance(item, Material):
                    id_data = item
                elif isinstance(item, ShapeKey):
                    id_data = item.id_data
                elif isinstance(item, Camera):
                    id_data = item
                else:
                    id_data = item.bl_obj
                    # XXX Ignore rigged mesh animations - those are a nightmare to handle, see note about it in
                    #     FbxImportHelperNode class definition.
                    if id_data and id_data.type == 'MESH' and id_data.parent and id_data.parent.type == 'ARMATURE':
                        continue
                if id_data is None:
                    continue

                # Create new action if needed (should always be needed, except for keyblocks from shapekeys cases).
                key = (as_uuid, al_uuid, id_data)
                action = actions.get(key)
                if action is None:
                    action_name = "|".join((id_data.name, stack_name, layer_name))
                    actions[key] = action = bpy.data.actions.new(action_name)
                    action.use_fake_user = True
                # If none yet assigned, assign this action to id_data.
                if not id_data.animation_data:
                    id_data.animation_data_create()
                if not id_data.animation_data.action:
                    id_data.animation_data.action = action
                # And actually populate the action!
                blen_read_animations_action_item(action, item, cnodes, scene.render.fps, anim_offset)
# ----
# Mesh
def blen_read_geom_layerinfo(fbx_layer):
    """Return (validated name, mapping type, reference type) for a layer element."""
    layer_name = elem_find_first_string_as_bytes(fbx_layer, b'Name')
    mapping = elem_find_first_string_as_bytes(fbx_layer, b'MappingInformationType')
    reference = elem_find_first_string_as_bytes(fbx_layer, b'ReferenceInformationType')
    return (validate_blend_names(layer_name), mapping, reference)
def blen_read_geom_array_setattr(generator, blen_data, blen_attr, fbx_data, stride, item_size, descr, xform):
    """Generic fbx_layer to blen_data setter, generator is expected to yield tuples (ble_idx, fbx_idx).

    Selects one of eight specialized writers depending on whether a *xform*
    callback is given, whether *blen_data* is a plain list (item assignment)
    or a bpy collection (setattr on each element), and whether items are
    scalars (item_size == 1) or slices of *item_size* components.
    Negative fbx indices mean 'skip'; out-of-range blender indices are
    skipped too, with a single error printed.
    """
    max_idx = len(blen_data) - 1
    print_error = True

    def check_skip(blen_idx, fbx_idx):
        # Returns True when this (blen_idx, fbx_idx) pair must be ignored.
        nonlocal print_error
        if fbx_idx < 0:  # Negative values mean 'skip'.
            return True
        if blen_idx > max_idx:
            if print_error:
                print("ERROR: too much data in this layer, compared to elements in mesh, skipping!")
                print_error = False
            return True
        return False

    # NOTE(review): the first parameter of each _process variant is spelled
    # 'blend_data' (typo), so the bodies actually close over the outer
    # 'blen_data' — harmless since the same object is passed in, but confusing.
    if xform is not None:
        if isinstance(blen_data, list):
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = xform(fbx_data[fbx_idx])
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = xform(fbx_data[fbx_idx:fbx_idx + item_size])
        else:
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, xform(fbx_data[fbx_idx]))
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, xform(fbx_data[fbx_idx:fbx_idx + item_size]))
    else:
        if isinstance(blen_data, list):
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = fbx_data[fbx_idx]
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = fbx_data[fbx_idx:fbx_idx + item_size]
        else:
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, fbx_data[fbx_idx])
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, fbx_data[fbx_idx:fbx_idx + item_size])

    for blen_idx, fbx_idx in generator:
        if check_skip(blen_idx, fbx_idx):
            continue
        _process(blen_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx)
# generic generators.
def blen_read_geom_array_gen_allsame(data_len):
    """Generate (blender_idx, 0) pairs: every element maps to the single fbx value."""
    return ((blen_idx, 0) for blen_idx in range(data_len))
def blen_read_geom_array_gen_direct(fbx_data, stride):
    """Generate (blender_idx, fbx_idx) pairs walking fbx_data in steps of *stride*."""
    total = len(fbx_data)
    return zip(range(total // stride), range(0, total, stride))
def blen_read_geom_array_gen_indextodirect(fbx_layer_index, stride):
    """Generate (blender_idx, fbx_idx) pairs resolving an index layer to direct offsets."""
    for blen_idx, fbx_idx in enumerate(fbx_layer_index):
        yield blen_idx, fbx_idx * stride
def blen_read_geom_array_gen_direct_looptovert(mesh, fbx_data, stride):
    """Generate (loop_idx, fbx_idx) pairs mapping per-vertex fbx data onto mesh loops.

    Vertex indices beyond the available fbx data are silently skipped.
    """
    num_items = len(fbx_data) // stride
    loops = mesh.loops
    for poly in mesh.polygons:
        for loop_idx in poly.loop_indices:
            vert_idx = loops[loop_idx].vertex_index
            if vert_idx < num_items:
                yield loop_idx, vert_idx * stride
# generic error printers.
def blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet=False):
    """Print a warning about an unsupported layer mapping type (unless *quiet*)."""
    if quiet:
        return
    print("warning layer %r mapping type unsupported: %r" % (descr, fbx_layer_mapping))
def blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet=False):
    """Print a warning about an unsupported layer reference type (unless *quiet*)."""
    if quiet:
        return
    print("warning layer %r ref type unsupported: %r" % (descr, fbx_layer_ref))
def blen_read_geom_array_mapped_vert(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Fill per-vertex blender data from an FBX layer; return True on success.

    Supported combinations: 'ByVertice'/'Direct' and 'AllSame'/'IndexToDirect';
    anything else prints a warning (unless *quiet*) and returns False.
    """
    if fbx_layer_mapping == b'ByVertice':
        if fbx_layer_ref == b'Direct':
            assert(fbx_layer_index is None)
            generator = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            blen_read_geom_array_setattr(generator, blen_data, blen_attr,
                                         fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    elif fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            assert(fbx_layer_index is None)
            generator = blen_read_geom_array_gen_allsame(len(blen_data))
            blen_read_geom_array_setattr(generator, blen_data, blen_attr,
                                         fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    else:
        blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)

    return False
def blen_read_geom_array_mapped_edge(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Fill per-edge blender data from an FBX layer; return True on success.

    Supported combinations: 'ByEdge'/'Direct' and 'AllSame'/'IndexToDirect';
    anything else prints a warning (unless *quiet*) and returns False.
    """
    if fbx_layer_mapping == b'ByEdge':
        if fbx_layer_ref == b'Direct':
            generator = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            blen_read_geom_array_setattr(generator, blen_data, blen_attr,
                                         fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    elif fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            assert(fbx_layer_index is None)
            generator = blen_read_geom_array_gen_allsame(len(blen_data))
            blen_read_geom_array_setattr(generator, blen_data, blen_attr,
                                         fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    else:
        blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)

    return False
def blen_read_geom_array_mapped_polygon(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Fill per-polygon blender data from an FBX layer; return True on success.

    Supports 'ByPolygon' with 'IndexToDirect' (falling back to direct mapping
    when the index layer is missing) or 'Direct', and 'AllSame'/'IndexToDirect'.
    Anything else prints a warning (unless *quiet*) and returns False.
    """
    if fbx_layer_mapping == b'ByPolygon':
        if fbx_layer_ref == b'IndexToDirect':
            # XXX Looks like we often get no fbx_layer_index in this case, shall not happen but happens...
            #     We fallback to 'Direct' mapping in this case.
            #~ assert(fbx_layer_index is not None)
            if fbx_layer_index is None:
                blen_read_geom_array_setattr(blen_read_geom_array_gen_direct(fbx_layer_data, stride),
                                             blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            else:
                blen_read_geom_array_setattr(blen_read_geom_array_gen_indextodirect(fbx_layer_index, stride),
                                             blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            return True
        elif fbx_layer_ref == b'Direct':
            blen_read_geom_array_setattr(blen_read_geom_array_gen_direct(fbx_layer_data, stride),
                                         blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    elif fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            assert(fbx_layer_index is None)
            blen_read_geom_array_setattr(blen_read_geom_array_gen_allsame(len(blen_data)),
                                         blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    else:
        blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)

    return False
def blen_read_geom_array_mapped_polyloop(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Write an FBX layer array into attribute `blen_attr` of the per-loop `blen_data`.

    Supported mappings: b'ByPolygonVertex' (refs b'IndexToDirect'/b'Direct'),
    b'ByVertice' (ref b'Direct', per-vertex values copied to each loop of that
    vertex) and b'AllSame'. Returns True if data was applied, False otherwise
    (unsupported mapping/reference kinds are reported unless `quiet`).
    """
    if fbx_layer_mapping == b'ByPolygonVertex':
        if fbx_layer_ref == b'IndexToDirect':
            # XXX Looks like we often get no fbx_layer_index in this case, shall not happen but happens...
            #     We fallback to 'Direct' mapping in this case.
            #~ assert(fbx_layer_index is not None)
            if fbx_layer_index is None:
                blen_read_geom_array_setattr(blen_read_geom_array_gen_direct(fbx_layer_data, stride),
                                             blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            else:
                blen_read_geom_array_setattr(blen_read_geom_array_gen_indextodirect(fbx_layer_index, stride),
                                             blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            return True
        elif fbx_layer_ref == b'Direct':
            blen_read_geom_array_setattr(blen_read_geom_array_gen_direct(fbx_layer_data, stride),
                                         blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    elif fbx_layer_mapping == b'ByVertice':
        if fbx_layer_ref == b'Direct':
            # Per-vertex data; the generator maps each loop to its vertex's value.
            assert(fbx_layer_index is None)
            blen_read_geom_array_setattr(blen_read_geom_array_gen_direct_looptovert(mesh, fbx_layer_data, stride),
                                         blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    elif fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            # A single value replicated over all loops; no index array expected.
            assert(fbx_layer_index is None)
            blen_read_geom_array_setattr(blen_read_geom_array_gen_allsame(len(blen_data)),
                                         blen_data, blen_attr, fbx_layer_data, stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
    else:
        blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)
    return False
def blen_read_geom_layer_material(fbx_obj, mesh):
    """Apply the FBX 'LayerElementMaterial' layer (if present) to the polygons' material_index."""
    layer_elem = elem_find_first(fbx_obj, b'LayerElementMaterial')
    if layer_elem is None:
        return

    _layer_name, layer_mapping, layer_ref = blen_read_geom_layerinfo(layer_elem)

    layer_id = b'Materials'
    layer_data = elem_prop_first(elem_find_first(layer_elem, layer_id))

    blen_read_geom_array_mapped_polygon(
        mesh, mesh.polygons, "material_index",
        layer_data, None,
        layer_mapping, layer_ref,
        1, 1, layer_id,
    )
def blen_read_geom_layer_uv(fbx_obj, mesh):
    """Create a mesh UV layer for each FBX 'LayerElementUV' element found on `fbx_obj`."""
    for layer_id in (b'LayerElementUV',):
        for fbx_layer in elem_find_iter(fbx_obj, layer_id):
            # all should be valid
            (fbx_layer_name,
             fbx_layer_mapping,
             fbx_layer_ref,
             ) = blen_read_geom_layerinfo(fbx_layer)
            fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, b'UV'))
            fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'UVIndex'))
            # Always init our new layers with (0, 0) UVs.
            uv_lay = mesh.uv_layers.new(name=fbx_layer_name, do_init=False)
            if uv_lay is None:
                # Blender has a hard limit on the number of UV layers per mesh.
                print("Failed to add {%r %r} UVLayer to %r (probably too many of them?)"
                      "" % (layer_id, fbx_layer_name, mesh.name))
                continue
            blen_data = uv_lay.data
            # some valid files omit this data
            if fbx_layer_data is None:
                print("%r %r missing data" % (layer_id, fbx_layer_name))
                continue
            blen_read_geom_array_mapped_polyloop(
                mesh, blen_data, "uv",
                fbx_layer_data, fbx_layer_index,
                fbx_layer_mapping, fbx_layer_ref,
                2, 2, layer_id,
            )
def blen_read_geom_layer_color(fbx_obj, mesh):
    """Create a mesh vertex-color layer for each FBX 'LayerElementColor' element on `fbx_obj`."""
    # almost same as UV's
    for layer_id in (b'LayerElementColor',):
        for fbx_layer in elem_find_iter(fbx_obj, layer_id):
            # all should be valid
            (fbx_layer_name,
             fbx_layer_mapping,
             fbx_layer_ref,
             ) = blen_read_geom_layerinfo(fbx_layer)
            fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, b'Colors'))
            fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'ColorIndex'))
            # Always init our new layers with full white opaque color.
            color_lay = mesh.vertex_colors.new(name=fbx_layer_name, do_init=False)
            blen_data = color_lay.data
            # some valid files omit this data
            if fbx_layer_data is None:
                print("%r %r missing data" % (layer_id, fbx_layer_name))
                continue
            # Colors are RGBA (stride 4, item size 4).
            blen_read_geom_array_mapped_polyloop(
                mesh, blen_data, "color",
                fbx_layer_data, fbx_layer_index,
                fbx_layer_mapping, fbx_layer_ref,
                4, 4, layer_id,
            )
def blen_read_geom_layer_smooth(fbx_obj, mesh):
    """Read the FBX 'LayerElementSmoothing' layer into edge sharpness or face smoothing.

    Returns True only when per-polygon 'use_smooth' data was applied; the
    per-edge path sets sharp edges (plus auto-smooth) but still returns False,
    since face smoothing itself was not set.
    """
    fbx_layer = elem_find_first(fbx_obj, b'LayerElementSmoothing')
    if fbx_layer is None:
        return False
    # all should be valid
    (fbx_layer_name,
     fbx_layer_mapping,
     fbx_layer_ref,
     ) = blen_read_geom_layerinfo(fbx_layer)
    layer_id = b'Smoothing'
    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
    # udk has 'Direct' mapped, with no Smoothing, not sure why, but ignore these
    if fbx_layer_data is None:
        return False
    if fbx_layer_mapping == b'ByEdge':
        # some models have bad edge data, we cant use this info...
        if not mesh.edges:
            print("warning skipping sharp edges data, no valid edges...")
            return False
        blen_data = mesh.edges
        blen_read_geom_array_mapped_edge(
            mesh, blen_data, "use_edge_sharp",
            fbx_layer_data, None,
            fbx_layer_mapping, fbx_layer_ref,
            1, 1, layer_id,
            xform=lambda s: not s,  # FBX stores "smooth", Blender stores "sharp"
        )
        # We only set sharp edges here, not face smoothing itself...
        mesh.use_auto_smooth = True
        return False
    elif fbx_layer_mapping == b'ByPolygon':
        blen_data = mesh.polygons
        return blen_read_geom_array_mapped_polygon(
            mesh, blen_data, "use_smooth",
            fbx_layer_data, None,
            fbx_layer_mapping, fbx_layer_ref,
            1, 1, layer_id,
            xform=lambda s: (s != 0),  # smoothgroup bitflags, treat as booleans for now
        )
    else:
        print("warning layer %r mapping type unsupported: %r" % (fbx_layer.id, fbx_layer_mapping))
        return False
def blen_read_geom_layer_edge_crease(fbx_obj, mesh):
    """Read the FBX 'LayerElementEdgeCrease' layer into the mesh edges' crease values.

    Only the b'ByEdge' mapping is supported; anything else bails out early.
    Returns True if crease data was applied, False otherwise.

    NOTE: the previous version re-tested `fbx_layer_mapping == b'ByEdge'` (always
    true after the early return above) and re-checked `mesh.edges`, leaving an
    unreachable "unsupported mapping" warning branch; that dead code is removed
    here with no behavior change.
    """
    from math import sqrt
    fbx_layer = elem_find_first(fbx_obj, b'LayerElementEdgeCrease')
    if fbx_layer is None:
        return False
    # all should be valid
    (fbx_layer_name,
     fbx_layer_mapping,
     fbx_layer_ref,
     ) = blen_read_geom_layerinfo(fbx_layer)
    if fbx_layer_mapping != b'ByEdge':
        return False
    layer_id = b'EdgeCrease'
    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
    # some models have bad edge data, we cant use this info...
    if not mesh.edges:
        print("warning skipping edge crease data, no valid edges...")
        return False
    blen_data = mesh.edges
    return blen_read_geom_array_mapped_edge(
        mesh, blen_data, "crease",
        fbx_layer_data, None,
        fbx_layer_mapping, fbx_layer_ref,
        1, 1, layer_id,
        # Blender squares those values before sending them to OpenSubdiv, when other softwares don't,
        # so we need to compensate that to get similar results through FBX...
        xform=sqrt,
    )
def blen_read_geom_layer_normal(fbx_obj, mesh, xform=None):
    """Read the FBX 'LayerElementNormal' layer into the mesh loops' normals.

    Tries loop, polygon and vertex mappings in turn; polygon/vertex data is read
    into a temporary list and then spread onto the loops. `xform` is an optional
    per-normal transform. Returns True if normals were applied, False otherwise.
    """
    fbx_layer = elem_find_first(fbx_obj, b'LayerElementNormal')
    if fbx_layer is None:
        return False
    (fbx_layer_name,
     fbx_layer_mapping,
     fbx_layer_ref,
     ) = blen_read_geom_layerinfo(fbx_layer)
    layer_id = b'Normals'
    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
    fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'NormalsIndex'))
    # try loops, then vertices.
    # 'is_fake' entries read into a plain list first, then copy to loop normals below.
    tries = ((mesh.loops, "Loops", False, blen_read_geom_array_mapped_polyloop),
             (mesh.polygons, "Polygons", True, blen_read_geom_array_mapped_polygon),
             (mesh.vertices, "Vertices", True, blen_read_geom_array_mapped_vert))
    for blen_data, blen_data_type, is_fake, func in tries:
        bdata = [None] * len(blen_data) if is_fake else blen_data
        if func(mesh, bdata, "normal",
                fbx_layer_data, fbx_layer_index, fbx_layer_mapping, fbx_layer_ref, 3, 3, layer_id, xform, True):
            if blen_data_type == "Polygons":
                # Spread each polygon's normal over all of its loops.
                for pidx, p in enumerate(mesh.polygons):
                    for lidx in range(p.loop_start, p.loop_start + p.loop_total):
                        mesh.loops[lidx].normal[:] = bdata[pidx]
            elif blen_data_type == "Vertices":
                # We have to copy vnors to lnors! Far from elegant, but simple.
                for l in mesh.loops:
                    l.normal[:] = bdata[l.vertex_index]
            return True
    blen_read_geom_array_error_mapping("normal", fbx_layer_mapping)
    blen_read_geom_array_error_ref("normal", fbx_layer_ref)
    return False
def blen_read_geom(fbx_tmpl, fbx_obj, settings):
    """Build and return a new Blender Mesh from an FBX 'Geometry' element.

    Reads vertices, polygons and edges, then all supported layers (materials,
    UVs, vertex colors, smoothing, edge creases, custom normals) and finally
    validates the mesh. `fbx_tmpl` is currently unused (see TODO below).
    """
    from itertools import chain
    import array
    # Vertices are in object space, but we are post-multiplying all transforms with the inverse of the
    # global matrix, so we need to apply the global matrix to the vertices to get the correct result.
    geom_mat_co = settings.global_matrix if settings.bake_space_transform else None
    # We need to apply the inverse transpose of the global matrix when transforming normals.
    geom_mat_no = Matrix(settings.global_matrix_inv_transposed) if settings.bake_space_transform else None
    if geom_mat_no is not None:
        # Remove translation & scaling!
        geom_mat_no.translation = Vector()
        geom_mat_no.normalize()
    # TODO, use 'fbx_tmpl'
    elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'Geometry')
    fbx_verts = elem_prop_first(elem_find_first(fbx_obj, b'Vertices'))
    fbx_polys = elem_prop_first(elem_find_first(fbx_obj, b'PolygonVertexIndex'))
    fbx_edges = elem_prop_first(elem_find_first(fbx_obj, b'Edges'))
    if geom_mat_co is not None:
        def _vcos_transformed_gen(raw_cos, m=None):
            # Note: we could most likely get much better performances with numpy, but will leave this as TODO for now.
            return chain(*(m @ Vector(v) for v in zip(*(iter(raw_cos),) * 3)))
        fbx_verts = array.array(fbx_verts.typecode, _vcos_transformed_gen(fbx_verts, geom_mat_co))
    if fbx_verts is None:
        fbx_verts = ()
    if fbx_polys is None:
        fbx_polys = ()
    mesh = bpy.data.meshes.new(name=elem_name_utf8)
    mesh.vertices.add(len(fbx_verts) // 3)
    mesh.vertices.foreach_set("co", fbx_verts)
    if fbx_polys:
        mesh.loops.add(len(fbx_polys))
        poly_loop_starts = []
        poly_loop_totals = []
        poly_loop_prev = 0
        for i, l in enumerate(mesh.loops):
            index = fbx_polys[i]
            if index < 0:
                # A negative index marks the last loop of a polygon; the real
                # vertex index is recovered with `index ^= -1` (bitwise not).
                poly_loop_starts.append(poly_loop_prev)
                poly_loop_totals.append((i - poly_loop_prev) + 1)
                poly_loop_prev = i + 1
                index ^= -1
            l.vertex_index = index
        mesh.polygons.add(len(poly_loop_starts))
        mesh.polygons.foreach_set("loop_start", poly_loop_starts)
        mesh.polygons.foreach_set("loop_total", poly_loop_totals)
        blen_read_geom_layer_material(fbx_obj, mesh)
        blen_read_geom_layer_uv(fbx_obj, mesh)
        blen_read_geom_layer_color(fbx_obj, mesh)
    if fbx_edges:
        # edges in fact index the polygons (NOT the vertices)
        import array
        tot_edges = len(fbx_edges)
        edges_conv = array.array('i', [0]) * (tot_edges * 2)
        edge_index = 0
        for i in fbx_edges:
            e_a = fbx_polys[i]
            if e_a >= 0:
                e_b = fbx_polys[i + 1]
                if e_b < 0:
                    e_b ^= -1
            else:
                # Last index of polygon, wrap back to the start.
                # ideally we wouldn't have to search back,
                # but it should only be 2-3 iterations.
                j = i - 1
                while j >= 0 and fbx_polys[j] >= 0:
                    j -= 1
                e_a ^= -1
                e_b = fbx_polys[j + 1]
            edges_conv[edge_index] = e_a
            edges_conv[edge_index + 1] = e_b
            edge_index += 2
        mesh.edges.add(tot_edges)
        mesh.edges.foreach_set("vertices", edges_conv)
    # must be after edge, face loading.
    ok_smooth = blen_read_geom_layer_smooth(fbx_obj, mesh)
    ok_crease = blen_read_geom_layer_edge_crease(fbx_obj, mesh)
    ok_normals = False
    if settings.use_custom_normals:
        # Note: we store 'temp' normals in loops, since validate() may alter final mesh,
        #       we can only set custom lnors *after* calling it.
        mesh.create_normals_split()
        if geom_mat_no is None:
            ok_normals = blen_read_geom_layer_normal(fbx_obj, mesh)
        else:
            def nortrans(v):
                return geom_mat_no @ Vector(v)
            ok_normals = blen_read_geom_layer_normal(fbx_obj, mesh, nortrans)
    mesh.validate(clean_customdata=False)  # *Very* important to not remove lnors here!
    if ok_normals:
        clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
        mesh.loops.foreach_get("normal", clnors)
        if not ok_smooth:
            mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))
            ok_smooth = True
        mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
        mesh.use_auto_smooth = True
    else:
        mesh.calc_normals()
    if settings.use_custom_normals:
        mesh.free_normals_split()
    if not ok_smooth:
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))
    if ok_crease:
        mesh.use_customdata_edge_crease = True
    if settings.use_custom_props:
        blen_read_custom_properties(fbx_obj, mesh, settings)
    return mesh
def blen_read_shape(fbx_tmpl, fbx_sdata, fbx_bcdata, meshes, scene):
    """Create shape keys on all `meshes` from an FBX Shape geometry (`fbx_sdata`)
    and its BlendShapeChannel (`fbx_bcdata`).

    `meshes` is an iterable of (mesh, helper-nodes) pairs. Returns the list of
    created key blocks (one per mesh).
    """
    elem_name_utf8 = elem_name_ensure_class(fbx_sdata, b'Geometry')
    indices = elem_prop_first(elem_find_first(fbx_sdata, b'Indexes'), default=())
    # Delta vertex coordinates, grouped in triplets.
    dvcos = tuple(co for co in zip(*[iter(elem_prop_first(elem_find_first(fbx_sdata, b'Vertices'), default=()))] * 3))
    # We completely ignore normals here!
    weight = elem_prop_first(elem_find_first(fbx_bcdata, b'DeformPercent'), default=100.0) / 100.0
    vgweights = tuple(vgw / 100.0 for vgw in elem_prop_first(elem_find_first(fbx_bcdata, b'FullWeights'), default=()))
    # Special case, in case all weights are the same, FullWeight can have only one element - *sigh!*
    nbr_indices = len(indices)
    if len(vgweights) == 1 and nbr_indices > 1:
        vgweights = (vgweights[0],) * nbr_indices
    assert(len(vgweights) == nbr_indices == len(dvcos))
    # Only create a vertex group when not all weights are 1.0 (100%).
    create_vg = bool(set(vgweights) - {1.0})
    keyblocks = []
    for me, objects in meshes:
        vcos = tuple((idx, me.vertices[idx].co + Vector(dvco)) for idx, dvco in zip(indices, dvcos))
        objects = list({node.bl_obj for node in objects})
        assert(objects)
        if me.shape_keys is None:
            # First shape key on this mesh: add the Basis key first.
            objects[0].shape_key_add(name="Basis", from_mix=False)
        kb = objects[0].shape_key_add(name=elem_name_utf8, from_mix=False)
        me.shape_keys.use_relative = True  # Should already be set as such.
        for idx, co in vcos:
            kb.data[idx].co[:] = co
        kb.value = weight
        # Add vgroup if necessary.
        if create_vg:
            vgoups = add_vgroup_to_objects(indices, vgweights, kb.name, objects)
            kb.vertex_group = kb.name
        keyblocks.append(kb)
    return keyblocks
# --------
# Material
def blen_read_material(fbx_tmpl, fbx_obj, settings):
    """Create and return a new Blender material from an FBX 'Material' node.

    Maps the FBX Phong-ish properties onto a Principled BSDF node tree through
    `node_shader_utils.PrincipledBSDFWrapper`; the wrapper is cached in
    `settings.nodal_material_wrap_map` for later texture hookup.
    """
    from bpy_extras import node_shader_utils
    from math import sqrt
    elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'Material')
    nodal_material_wrap_map = settings.nodal_material_wrap_map
    ma = bpy.data.materials.new(name=elem_name_utf8)
    const_color_white = 1.0, 1.0, 1.0
    const_color_black = 0.0, 0.0, 0.0
    fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
                 elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
    fbx_props_no_template = (fbx_props[0], fbx_elem_nil)
    ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=False, use_nodes=True)
    ma_wrap.base_color = elem_props_get_color_rgb(fbx_props, b'DiffuseColor', const_color_white)
    # No specular color in Principled BSDF shader, assumed to be either white or take some tint from diffuse one...
    # TODO: add way to handle tint option (guesstimate from spec color + intensity...)?
    ma_wrap.specular = elem_props_get_number(fbx_props, b'SpecularFactor', 0.25) * 2.0
    # XXX Totally empirical conversion, trying to adapt it
    #     (from 1.0 - 0.0 Principled BSDF range to 0.0 - 100.0 FBX shininess range)...
    fbx_shininess = elem_props_get_number(fbx_props, b'Shininess', 20.0)
    ma_wrap.roughness = 1.0 - (sqrt(fbx_shininess) / 10.0)
    # Sweetness... Looks like we are not the only ones to not know exactly how FBX is supposed to work (see T59850).
    # According to one of its developers, Unity uses that formula to extract alpha value:
    #
    #   alpha = 1 - TransparencyFactor
    #   if (alpha == 1 or alpha == 0):
    #       alpha = 1 - TransparentColor.r
    #
    # Until further info, let's assume this is correct way to do, hence the following code for TransparentColor.
    # However, there are some cases (from 3DSMax, see T65065), where we do have TransparencyFactor only defined
    # in the template to 0.0, and then materials defining TransparentColor to pure white (1.0, 1.0, 1.0),
    # and setting alpha value in Opacity... try to cope with that too. :((((
    alpha = 1.0 - elem_props_get_number(fbx_props, b'TransparencyFactor', 0.0)
    if (alpha == 1.0 or alpha == 0.0):
        alpha = elem_props_get_number(fbx_props_no_template, b'Opacity', None)
        if alpha is None:
            alpha = 1.0 - elem_props_get_color_rgb(fbx_props, b'TransparentColor', const_color_black)[0]
    ma_wrap.alpha = alpha
    ma_wrap.metallic = elem_props_get_number(fbx_props, b'ReflectionFactor', 0.0)
    # We have no metallic (a.k.a. reflection) color...
    # elem_props_get_color_rgb(fbx_props, b'ReflectionColor', const_color_white)
    ma_wrap.normalmap_strength = elem_props_get_number(fbx_props, b'BumpFactor', 1.0)
    # For emission color we can take into account the factor, but only for default values, not in case of texture.
    emission_factor = elem_props_get_number(fbx_props, b'EmissiveFactor', 1.0)
    ma_wrap.emission_color = [c * emission_factor
                              for c in elem_props_get_color_rgb(fbx_props, b'EmissiveColor', const_color_black)]
    nodal_material_wrap_map[ma] = ma_wrap
    if settings.use_custom_props:
        blen_read_custom_properties(fbx_obj, ma, settings)
    return ma
# -------
# Image & Texture
def blen_read_texture_image(fbx_tmpl, fbx_obj, basedir, settings):
    """Load (or fetch from cache) the Blender image referenced by an FBX Texture/Video node.

    Resolves the file path from the node's RelativeFilename/FileName/Filename
    properties, loads the image (placeholder on failure), embeds any 'Content'
    data, and caches it in `settings.image_cache` keyed by resolved path.
    """
    import os
    from bpy_extras import image_utils

    def pack_data_from_content(image, fbx_obj):
        # Embed raw image bytes from the FBX 'Content' element, if any.
        data = elem_find_first_bytes(fbx_obj, b'Content')
        if (data):
            data_len = len(data)
            if (data_len):
                image.pack(data=data, data_len=data_len)

    elem_name_utf8 = elem_name_ensure_classes(fbx_obj, {b'Texture', b'Video'})
    image_cache = settings.image_cache
    # Yet another beautiful logic demonstration by Master FBX:
    # * RelativeFilename in both Video and Texture nodes.
    # * FileName in texture nodes.
    # * Filename in video nodes.
    # Aaaaaaaarrrrrrrrgggggggggggg!!!!!!!!!!!!!!
    filepath = elem_find_first_string(fbx_obj, b'RelativeFilename')
    if filepath:
        # Make sure we do handle a relative path, and not an absolute one (see D5143).
        filepath = filepath.lstrip(os.path.sep).lstrip(os.path.altsep)
        filepath = os.path.join(basedir, filepath)
    else:
        filepath = elem_find_first_string(fbx_obj, b'FileName')
        if not filepath:
            filepath = elem_find_first_string(fbx_obj, b'Filename')
        if not filepath:
            print("Error, could not find any file path in ", fbx_obj)
            print(" Falling back to: ", elem_name_utf8)
            filepath = elem_name_utf8
        else :
            # Normalize path separators to the host platform's convention.
            filepath = filepath.replace('\\', '/') if (os.sep == '/') else filepath.replace('/', '\\')
    image = image_cache.get(filepath)
    if image is not None:
        # Data is only embedded once, we may have already created the image but still be missing its data!
        if not image.has_data:
            pack_data_from_content(image, fbx_obj)
        return image
    image = image_utils.load_image(
        filepath,
        dirname=basedir,
        place_holder=True,
        recursive=settings.use_image_search,
    )
    # Try to use embedded data, if available!
    pack_data_from_content(image, fbx_obj)
    image_cache[filepath] = image
    # name can be ../a/b/c
    image.name = os.path.basename(elem_name_utf8)
    if settings.use_custom_props:
        blen_read_custom_properties(fbx_obj, image, settings)
    return image
def blen_read_camera(fbx_tmpl, fbx_obj, global_scale):
    """Create and return a new Blender camera data-block from an FBX camera node.

    `global_scale` scales the clipping distances; FBX film sizes are given in
    inches, hence the meters-to-inches factor below.
    """
    # meters to inches
    M2I = 0.0393700787

    cam_name = elem_name_ensure_class(fbx_obj, b'NodeAttribute')
    props = (elem_find_first(fbx_obj, b'Properties70'),
             elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))

    camera = bpy.data.cameras.new(name=cam_name)

    if elem_props_get_enum(props, b'CameraProjectionType', 0) == 1:
        camera.type = 'ORTHO'
    else:
        camera.type = 'PERSP'
    camera.lens = elem_props_get_number(props, b'FocalLength', 35.0)
    camera.sensor_width = elem_props_get_number(props, b'FilmWidth', 32.0 * M2I) / M2I
    camera.sensor_height = elem_props_get_number(props, b'FilmHeight', 32.0 * M2I) / M2I
    camera.ortho_scale = elem_props_get_number(props, b'OrthoZoom', 1.0)

    # film offset, normalized by sensor size (in inches)
    filmaspect = camera.sensor_width / camera.sensor_height
    camera.shift_x = elem_props_get_number(props, b'FilmOffsetX', 0.0) / (M2I * camera.sensor_width)
    camera.shift_y = elem_props_get_number(props, b'FilmOffsetY', 0.0) / (M2I * camera.sensor_height * filmaspect)

    camera.clip_start = elem_props_get_number(props, b'NearPlane', 0.01) * global_scale
    camera.clip_end = elem_props_get_number(props, b'FarPlane', 100.0) * global_scale

    return camera
def blen_read_light(fbx_tmpl, fbx_obj, global_scale):
    """Create and return a new Blender light data-block from an FBX light node.

    Maps FBX LightType to POINT/SUN/SPOT (POINT by default); `global_scale`
    scales the DecayStart distance.
    """
    import math
    elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'NodeAttribute')
    fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
                 elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
    light_type = {
        0: 'POINT',
        1: 'SUN',
        2: 'SPOT'}.get(elem_props_get_enum(fbx_props, b'LightType', 0), 'POINT')
    lamp = bpy.data.lights.new(name=elem_name_utf8, type=light_type)
    if light_type == 'SPOT':
        spot_size = elem_props_get_number(fbx_props, b'OuterAngle', None)
        if spot_size is None:
            # Deprecated.
            spot_size = elem_props_get_number(fbx_props, b'Cone angle', 45.0)
        lamp.spot_size = math.radians(spot_size)
        spot_blend = elem_props_get_number(fbx_props, b'InnerAngle', None)
        if spot_blend is None:
            # Deprecated.
            spot_blend = elem_props_get_number(fbx_props, b'HotSpot', 45.0)
        # Guard against a degenerate zero-sized cone, which would previously
        # raise ZeroDivisionError here.
        lamp.spot_blend = 1.0 - (spot_blend / spot_size) if spot_size else 0.0
    # TODO, cycles nodes???
    lamp.color = elem_props_get_color_rgb(fbx_props, b'Color', (1.0, 1.0, 1.0))
    lamp.energy = elem_props_get_number(fbx_props, b'Intensity', 100.0) / 100.0
    lamp.distance = elem_props_get_number(fbx_props, b'DecayStart', 25.0) * global_scale
    lamp.use_shadow = elem_props_get_bool(fbx_props, b'CastShadow', True)
    if hasattr(lamp, "cycles"):
        lamp.cycles.cast_shadow = lamp.use_shadow
    # Keeping this for now, but this is not used nor exposed anymore afaik...
    lamp.shadow_color = elem_props_get_color_rgb(fbx_props, b'ShadowColor', (0.0, 0.0, 0.0))
    return lamp
# ### Import Utility class
class FbxImportHelperNode:
    """
    Temporary helper node to store a hierarchy of fbxNode objects before building Objects, Armatures and Bones.
    It tries to keep the correction data in one place so it can be applied consistently to the imported data.
    """
    # __slots__: all per-node state; see __init__ for what each attribute holds.
    __slots__ = (
        '_parent', 'anim_compensation_matrix', 'is_global_animation', 'armature_setup', 'armature', 'bind_matrix',
        'bl_bone', 'bl_data', 'bl_obj', 'bone_child_matrix', 'children', 'clusters',
        'fbx_elem', 'fbx_name', 'fbx_transform_data', 'fbx_type',
        'is_armature', 'has_bone_children', 'is_bone', 'is_root', 'is_leaf',
        'matrix', 'matrix_as_parent', 'matrix_geom', 'meshes', 'post_matrix', 'pre_matrix')
    def __init__(self, fbx_elem, bl_data, fbx_transform_data, is_bone):
        """Initialize a helper node from an FBX Model element (may be None for synthetic nodes).

        `bl_data` is the pre-created Blender data-block (mesh/camera/...), and
        `fbx_transform_data` the parsed FBX transform used to compute the local matrices.
        """
        self.fbx_name = elem_name_ensure_class(fbx_elem, b'Model') if fbx_elem else 'Unknown'
        self.fbx_type = fbx_elem.props[2] if fbx_elem else None
        self.fbx_elem = fbx_elem
        self.bl_obj = None
        self.bl_data = bl_data
        self.bl_bone = None  # Name of bone if this is a bone (this may be different to fbx_name if there was a name conflict in Blender!)
        self.fbx_transform_data = fbx_transform_data
        self.is_root = False
        self.is_bone = is_bone
        self.is_armature = False
        self.armature = None  # For bones only, relevant armature node.
        self.has_bone_children = False  # True if the hierarchy below this node contains bones, important to support mixed hierarchies.
        self.is_leaf = False  # True for leaf-bones added to the end of some bone chains to set the lengths.
        self.pre_matrix = None  # correction matrix that needs to be applied before the FBX transform
        self.bind_matrix = None  # for bones this is the matrix used to bind to the skin
        if fbx_transform_data:
            self.matrix, self.matrix_as_parent, self.matrix_geom = blen_read_object_transform_do(fbx_transform_data)
        else:
            self.matrix, self.matrix_as_parent, self.matrix_geom = (None, None, None)
        self.post_matrix = None  # correction matrix that needs to be applied after the FBX transform
        self.bone_child_matrix = None  # Objects attached to a bone end not the beginning, this matrix corrects for that
        # XXX Those two are to handle the fact that rigged meshes are not linked to their armature in FBX, which implies
        #     that their animation is in global space (afaik...).
        #     This is actually not really solvable currently, since anim_compensation_matrix is not valid if armature
        #     itself is animated (we'd have to recompute global-to-local anim_compensation_matrix for each frame,
        #     and for each armature action... beyond being an insane work).
        #     Solution for now: do not read rigged meshes animations at all! sic...
        self.anim_compensation_matrix = None  # a mesh moved in the hierarchy may have a different local matrix. This compensates animations for this.
        self.is_global_animation = False
        self.meshes = None  # List of meshes influenced by this bone.
        self.clusters = []  # Deformer Cluster nodes
        self.armature_setup = {}  # mesh and armature matrix when the mesh was bound
        self._parent = None
        self.children = []
    @property
    def parent(self):
        """Parent helper node, or None for a root node."""
        return self._parent

    @parent.setter
    def parent(self, value):
        # Re-parenting keeps both children lists consistent: unlink from the
        # old parent (if any), then link into the new one.
        if self._parent is not None:
            self._parent.children.remove(self)
        self._parent = value
        if self._parent is not None:
            self._parent.children.append(self)
    @property
    def ignore(self):
        """Whether this node should be skipped when building Blender data."""
        # Separating leaf status from ignore status itself.
        # Currently they are equivalent, but this may change in future.
        return self.is_leaf
def __repr__(self):
if self.fbx_elem:
return self.fbx_elem.props[1].decode()
else:
return "None"
def print_info(self, indent=0):
print(" " * indent + (self.fbx_name if self.fbx_name else "(Null)")
+ ("[root]" if self.is_root else "")
+ ("[leaf]" if self.is_leaf else "")
+ ("[ignore]" if self.ignore else "")
+ ("[armature]" if self.is_armature else "")
+ ("[bone]" if self.is_bone else "")
+ ("[HBC]" if self.has_bone_children else "")
)
for c in self.children:
c.print_info(indent + 1)
def mark_leaf_bones(self):
if self.is_bone and len(self.children) == 1:
child = self.children[0]
if child.is_bone and len(child.children) == 0:
child.is_leaf = True
for child in self.children:
child.mark_leaf_bones()
def do_bake_transform(self, settings):
return (settings.bake_space_transform and self.fbx_type in (b'Mesh', b'Null') and
not self.is_armature and not self.is_bone)
    def find_correction_matrix(self, settings, parent_correction_inv=None):
        """Compute this node's pre/post correction matrices and recurse into children.

        Bones get an orientation correction (automatic, aligning +Y towards the
        children, or the fixed `settings.bone_correction_matrix`); cameras and
        lights get their hard-wired conversion matrices. `parent_correction_inv`
        is the inverse of the parent's correction, pre-applied so corrections
        don't accumulate down the chain.
        """
        from bpy_extras.io_utils import axis_conversion
        if self.parent and (self.parent.is_root or self.parent.do_bake_transform(settings)):
            self.pre_matrix = settings.global_matrix
        if parent_correction_inv:
            self.pre_matrix = parent_correction_inv @ (self.pre_matrix if self.pre_matrix else Matrix())
        correction_matrix = None
        if self.is_bone:
            if settings.automatic_bone_orientation:
                # find best orientation to align bone with
                bone_children = tuple(child for child in self.children if child.is_bone)
                if len(bone_children) == 0:
                    # no children, inherit the correction from parent (if possible)
                    if self.parent and self.parent.is_bone:
                        correction_matrix = parent_correction_inv.inverted() if parent_correction_inv else None
                else:
                    # else find how best to rotate the bone to align the Y axis with the children
                    best_axis = (1, 0, 0)
                    if len(bone_children) == 1:
                        # Single child: pick the dominant axis of its bind translation.
                        vec = bone_children[0].get_bind_matrix().to_translation()
                        best_axis = Vector((0, 0, 1 if vec[2] >= 0 else -1))
                        if abs(vec[0]) > abs(vec[1]):
                            if abs(vec[0]) > abs(vec[2]):
                                best_axis = Vector((1 if vec[0] >= 0 else -1, 0, 0))
                        elif abs(vec[1]) > abs(vec[2]):
                            best_axis = Vector((0, 1 if vec[1] >= 0 else -1, 0))
                    else:
                        # get the child directions once because they may be checked several times
                        child_locs = (child.get_bind_matrix().to_translation() for child in bone_children)
                        child_locs = tuple(loc.normalized() for loc in child_locs if loc.magnitude > 0.0)
                        # I'm not sure which one I like better...
                        if False:
                            # Disabled alternative: test all six signed axes against all children.
                            best_angle = -1.0
                            for i in range(6):
                                a = i // 2
                                s = -1 if i % 2 == 1 else 1
                                test_axis = Vector((s if a == 0 else 0, s if a == 1 else 0, s if a == 2 else 0))
                                # find max angle to children
                                max_angle = 1.0
                                for loc in child_locs:
                                    max_angle = min(max_angle, test_axis.dot(loc))
                                # is it better than the last one?
                                if best_angle < max_angle:
                                    best_angle = max_angle
                                    best_axis = test_axis
                        else:
                            # Only test the dominant axis of each child direction.
                            best_angle = -1.0
                            for vec in child_locs:
                                test_axis = Vector((0, 0, 1 if vec[2] >= 0 else -1))
                                if abs(vec[0]) > abs(vec[1]):
                                    if abs(vec[0]) > abs(vec[2]):
                                        test_axis = Vector((1 if vec[0] >= 0 else -1, 0, 0))
                                elif abs(vec[1]) > abs(vec[2]):
                                    test_axis = Vector((0, 1 if vec[1] >= 0 else -1, 0))
                                # find max angle to children
                                max_angle = 1.0
                                for loc in child_locs:
                                    max_angle = min(max_angle, test_axis.dot(loc))
                                # is it better than the last one?
                                if best_angle < max_angle:
                                    best_angle = max_angle
                                    best_axis = test_axis
                    # convert best_axis to axis string
                    to_up = 'Z' if best_axis[2] >= 0 else '-Z'
                    if abs(best_axis[0]) > abs(best_axis[1]):
                        if abs(best_axis[0]) > abs(best_axis[2]):
                            to_up = 'X' if best_axis[0] >= 0 else '-X'
                    elif abs(best_axis[1]) > abs(best_axis[2]):
                        to_up = 'Y' if best_axis[1] >= 0 else '-Y'
                    to_forward = 'X' if to_up not in {'X', '-X'} else 'Y'
                    # Build correction matrix
                    if (to_up, to_forward) != ('Y', 'X'):
                        correction_matrix = axis_conversion(from_forward='X',
                                                            from_up='Y',
                                                            to_forward=to_forward,
                                                            to_up=to_up,
                                                            ).to_4x4()
            else:
                correction_matrix = settings.bone_correction_matrix
        else:
            # camera and light can be hard wired
            if self.fbx_type == b'Camera':
                correction_matrix = MAT_CONVERT_CAMERA
            elif self.fbx_type == b'Light':
                correction_matrix = MAT_CONVERT_LIGHT
        self.post_matrix = correction_matrix
        if self.do_bake_transform(settings):
            self.post_matrix = settings.global_matrix_inv @ (self.post_matrix if self.post_matrix else Matrix())
        # process children
        correction_matrix_inv = correction_matrix.inverted_safe() if correction_matrix else None
        for child in self.children:
            child.find_correction_matrix(settings, correction_matrix_inv)
def find_armature_bones(self, armature):
for child in self.children:
if child.is_bone:
child.armature = armature
child.find_armature_bones(armature)
    def find_armatures(self):
        """Find nodes that need an armature: any node with direct bone children.

        A Null/Root node with bone children becomes the armature itself;
        otherwise a new synthetic "Armature" helper node is inserted between
        this node and its bone children. Recurses into non-bone children.
        """
        needs_armature = False
        for child in self.children:
            if child.is_bone:
                needs_armature = True
                break
        if needs_armature:
            if self.fbx_type in {b'Null', b'Root'}:
                # if empty then convert into armature
                self.is_armature = True
                armature = self
            else:
                # otherwise insert a new node
                # XXX Maybe in case self is virtual FBX root node, we should instead add one armature per bone child?
                armature = FbxImportHelperNode(None, None, None, False)
                armature.fbx_name = "Armature"
                armature.is_armature = True
                # Re-parent all bone children under the new armature node
                # (iterate a copy, since re-parenting mutates self.children).
                for child in tuple(self.children):
                    if child.is_bone:
                        child.parent = armature
                armature.parent = self
            armature.find_armature_bones(armature)
        for child in self.children:
            if child.is_armature or child.is_bone:
                continue
            child.find_armatures()
def find_bone_children(self):
has_bone_children = False
for child in self.children:
has_bone_children |= child.find_bone_children()
self.has_bone_children = has_bone_children
return self.is_bone or has_bone_children
    def find_fake_bones(self, in_armature=False):
        """Inside an armature, convert non-bone nodes that have bone descendants into bones.

        If such a node carries data (not Null/Root), its FBX element and data are
        moved to a new intermediate child node so the node itself can act as a bone.
        """
        if in_armature and not self.is_bone and self.has_bone_children:
            self.is_bone = True
            # if we are not a null node we need an intermediate node for the data
            if self.fbx_type not in {b'Null', b'Root'}:
                node = FbxImportHelperNode(self.fbx_elem, self.bl_data, None, False)
                self.fbx_elem = None
                self.bl_data = None
                # transfer children
                for child in self.children:
                    if child.is_bone or child.has_bone_children:
                        continue
                    child.parent = node
                # attach to parent
                node.parent = self
        if self.is_armature:
            in_armature = True
        for child in self.children:
            child.find_fake_bones(in_armature)
def get_world_matrix_as_parent(self):
matrix = self.parent.get_world_matrix_as_parent() if self.parent else Matrix()
if self.matrix_as_parent:
matrix = matrix @ self.matrix_as_parent
return matrix
def get_world_matrix(self):
matrix = self.parent.get_world_matrix_as_parent() if self.parent else Matrix()
if self.matrix:
matrix = matrix @ self.matrix
return matrix
def get_matrix(self):
matrix = self.matrix if self.matrix else Matrix()
if self.pre_matrix:
matrix = self.pre_matrix @ matrix
if self.post_matrix:
matrix = matrix @ self.post_matrix
return matrix
def get_bind_matrix(self):
matrix = self.bind_matrix if self.bind_matrix else Matrix()
if self.pre_matrix:
matrix = self.pre_matrix @ matrix
if self.post_matrix:
matrix = matrix @ self.post_matrix
return matrix
    def make_bind_pose_local(self, parent_matrix=None):
        """Convert world-space bind matrices to parent-relative ones, recursively.

        Nodes without a bind matrix fall back to a copy of their local matrix
        (or None). `parent_matrix` is the accumulated world-space bind transform
        of the ancestors.
        """
        if parent_matrix is None:
            parent_matrix = Matrix()
        if self.bind_matrix:
            bind_matrix = parent_matrix.inverted_safe() @ self.bind_matrix
        else:
            bind_matrix = self.matrix.copy() if self.matrix else None
        self.bind_matrix = bind_matrix
        if bind_matrix:
            parent_matrix = parent_matrix @ bind_matrix
        for child in self.children:
            child.make_bind_pose_local(parent_matrix)
def collect_skeleton_meshes(self, meshes):
    """Accumulate into the *meshes* set every mesh node driven by this subtree's clusters."""
    for _cluster, cluster_meshes in self.clusters:
        meshes.update(cluster_meshes)
    for child in self.children:
        child.collect_skeleton_meshes(meshes)
def collect_armature_meshes(self):
    """For each armature node, gather the meshes its bones deform and rebase them under it.

    Collected meshes get their matrix re-expressed in armature space, plus an
    ``anim_compensation_matrix`` so animation curves authored against the old
    local matrix still evaluate correctly.
    """
    if self.is_armature:
        armature_matrix_inv = self.get_world_matrix().inverted_safe()

        meshes = set()
        for child in self.children:
            # Children meshes may be linked to children armatures, in which case we do not want to link them
            # to a parent one. See T70244.
            child.collect_armature_meshes()
            if not child.meshes:
                child.collect_skeleton_meshes(meshes)
        for m in meshes:
            old_matrix = m.matrix
            # Re-express the mesh transform relative to this armature.
            m.matrix = armature_matrix_inv @ m.get_world_matrix()
            m.anim_compensation_matrix = old_matrix.inverted_safe() @ m.matrix
            m.is_global_animation = True
            m.parent = self
        self.meshes = meshes
    else:
        for child in self.children:
            child.collect_armature_meshes()
def build_skeleton(self, arm, parent_matrix, parent_bone_size=1, force_connect_children=False):
    """Create this node's EditBone (and, recursively, its bone children) in armature *arm*.

    *parent_matrix* is the armature-space matrix of the parent bone;
    *parent_bone_size* is the fallback length for bones without bone children.
    Returns the newly created EditBone.
    """
    def child_connect(par_bone, child_bone, child_head, connect_ctx):
        # child_bone or child_head may be None.
        force_connect_children, connected = connect_ctx
        if child_bone is not None:
            child_bone.parent = par_bone
            child_head = child_bone.head

        if similar_values_iter(par_bone.tail, child_head):
            if child_bone is not None:
                child_bone.use_connect = True
            # Disallow any force-connection at this level from now on, since that child was 'really'
            # connected, we do not want to move current bone's tail anymore!
            connected = None
        elif force_connect_children and connected is not None:
            # We only store position where tail of par_bone should be in the end.
            # Actual tail moving and force connection of compatible child bones will happen
            # once all have been checked.
            if connected is ...:
                # '...' is the initial sentinel: first candidate seen.
                connected = ([child_head.copy(), 1], [child_bone] if child_bone is not None else [])
            else:
                # Accumulate head positions to average them in the finalize step.
                connected[0][0] += child_head
                connected[0][1] += 1
                if child_bone is not None:
                    connected[1].append(child_bone)
        connect_ctx[1] = connected

    def child_connect_finalize(par_bone, connect_ctx):
        force_connect_children, connected = connect_ctx
        # Do nothing if force connection is not enabled!
        if force_connect_children and connected is not None and connected is not ...:
            # Here again we have to be wary about zero-length bones!!!
            par_tail = connected[0][0] / connected[0][1]
            if (par_tail - par_bone.head).magnitude < 1e-2:
                par_bone_vec = (par_bone.tail - par_bone.head).normalized()
                par_tail = par_bone.head + par_bone_vec * 0.01
            par_bone.tail = par_tail
            for child_bone in connected[1]:
                if similar_values_iter(par_tail, child_bone.head):
                    child_bone.use_connect = True

    # Create the (edit)bone.
    bone = arm.bl_data.edit_bones.new(name=self.fbx_name)
    bone.select = True
    self.bl_obj = arm.bl_obj
    self.bl_data = arm.bl_data
    self.bl_bone = bone.name  # Could be different from the FBX name!

    # get average distance to children (used as this bone's length)
    bone_size = 0.0
    bone_count = 0
    for child in self.children:
        if child.is_bone:
            bone_size += child.get_bind_matrix().to_translation().magnitude
            bone_count += 1
    if bone_count > 0:
        bone_size /= bone_count
    else:
        bone_size = parent_bone_size

    # So that our bone gets its final length, but still Y-aligned in armature space.
    # 0-length bones are automatically collapsed into their parent when you leave edit mode,
    # so this enforces a minimum length.
    bone_tail = Vector((0.0, 1.0, 0.0)) * max(0.01, bone_size)
    bone.tail = bone_tail

    # And rotate/move it to its final "rest pose".
    bone_matrix = parent_matrix @ self.get_bind_matrix().normalized()
    bone.matrix = bone_matrix

    # Correction for children attached to a bone. FBX expects to attach to the head of a bone,
    # while Blender attaches to the tail.
    self.bone_child_matrix = Matrix.Translation(-bone_tail)

    # Mutable [flag, state] context shared with the two helpers above.
    connect_ctx = [force_connect_children, ...]
    for child in self.children:
        if child.is_leaf and force_connect_children:
            # Arggggggggggggggggg! We do not want to create this bone, but we need its 'virtual head' location
            # to orient current one!!!
            child_head = (bone_matrix @ child.get_bind_matrix().normalized()).translation
            child_connect(bone, None, child_head, connect_ctx)
        elif child.is_bone and not child.ignore:
            child_bone = child.build_skeleton(arm, bone_matrix, bone_size,
                                              force_connect_children=force_connect_children)
            # Connection to parent.
            child_connect(bone, child_bone, None, connect_ctx)

    child_connect_finalize(bone, connect_ctx)
    return bone
def build_node_obj(self, fbx_tmpl, settings):
    """Create (once) and return the Blender Object wrapping this node's data.

    Returns the cached object on repeat calls, and None for bones or nodes
    without a backing FBX element.
    """
    if self.bl_obj:
        return self.bl_obj

    if self.is_bone or not self.fbx_elem:
        return None

    # create when linking since we need object data
    elem_name_utf8 = self.fbx_name

    # Object data must be created already
    self.bl_obj = obj = bpy.data.objects.new(name=elem_name_utf8, object_data=self.bl_data)

    fbx_props = (elem_find_first(self.fbx_elem, b'Properties70'),
                 elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))

    # ----
    # Misc Attributes

    obj.color[0:3] = elem_props_get_color_rgb(fbx_props, b'Color', (0.8, 0.8, 0.8))
    obj.hide_viewport = not bool(elem_props_get_visibility(fbx_props, b'Visibility', 1.0))

    obj.matrix_basis = self.get_matrix()

    if settings.use_custom_props:
        blen_read_custom_properties(self.fbx_elem, obj, settings)

    return obj
def build_skeleton_children(self, fbx_tmpl, settings, scene, view_layer):
    """Build and link into the view layer the (non-bone) objects below this node.

    Bone nodes only recurse (their data already lives in the armature);
    other nodes get a Blender object created, linked and selected.
    Returns the created object, or None for bone/unbuildable nodes.
    """
    if self.is_bone:
        for child in self.children:
            if child.ignore:
                continue
            child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer)
        return None
    else:
        # child is not a bone
        obj = self.build_node_obj(fbx_tmpl, settings)

        if obj is None:
            return None

        for child in self.children:
            if child.ignore:
                continue
            child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer)

        # instance in scene
        view_layer.active_layer_collection.collection.objects.link(obj)
        obj.select_set(True)

        return obj
def link_skeleton_children(self, fbx_tmpl, settings, scene):
    """Parent previously-built child objects; objects under a bone are bone-parented.

    Returns this node's object so the caller can parent it, or None for bones.
    """
    if self.is_bone:
        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.bl_obj
            if child_obj and child_obj != self.bl_obj:
                child_obj.parent = self.bl_obj  # get the armature the bone belongs to
                child_obj.parent_bone = self.bl_bone
                child_obj.parent_type = 'BONE'
                child_obj.matrix_parent_inverse = Matrix()

                # Blender attaches to the end of a bone, while FBX attaches to the start.
                # bone_child_matrix corrects for that.
                if child.pre_matrix:
                    child.pre_matrix = self.bone_child_matrix @ child.pre_matrix
                else:
                    child.pre_matrix = self.bone_child_matrix

                child_obj.matrix_basis = child.get_matrix()
            child.link_skeleton_children(fbx_tmpl, settings, scene)
        return None
    else:
        obj = self.bl_obj

        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.link_skeleton_children(fbx_tmpl, settings, scene)
            if child_obj:
                child_obj.parent = obj

        return obj
def set_pose_matrix(self, arm):
    """Pose this bone (and its bone descendants) as the offset from bind pose to node transform."""
    pose_bone = arm.bl_obj.pose.bones[self.bl_bone]
    pose_bone.matrix_basis = self.get_bind_matrix().inverted_safe() @ self.get_matrix()

    for child in self.children:
        if child.is_bone and not child.ignore:
            child.set_pose_matrix(arm)
def merge_weights(self, combined_weights, fbx_cluster):
    """Accumulate *fbx_cluster*'s vertex weights into the ``{index: [weights]}`` dict."""
    indices = elem_prop_first(elem_find_first(fbx_cluster, b'Indexes', default=None), default=())
    weights = elem_prop_first(elem_find_first(fbx_cluster, b'Weights', default=None), default=())

    for vert_index, vert_weight in zip(indices, weights):
        combined_weights.setdefault(vert_index, []).append(vert_weight)
def set_bone_weights(self):
    """Create vertex groups on the deformed meshes from this bone's clusters.

    Weights of ignored child bones (e.g. filtered leaf bones) are folded into
    this bone so the deformation is not lost; duplicate weights for the same
    vertex are averaged.
    """
    ignored_children = tuple(child for child in self.children
                             if child.is_bone and child.ignore and len(child.clusters) > 0)

    if len(ignored_children) > 0:
        # If we have an ignored child bone we need to merge their weights into the current bone weights.
        # This can happen both intentionally and accidentally when skinning a model. Either way, they
        # need to be moved into a parent bone or they cause animation glitches.
        for fbx_cluster, meshes in self.clusters:
            combined_weights = {}
            self.merge_weights(combined_weights, fbx_cluster)

            for child in ignored_children:
                for child_cluster, child_meshes in child.clusters:
                    if not meshes.isdisjoint(child_meshes):
                        self.merge_weights(combined_weights, child_cluster)

            # combine child weights (average when a vertex got several entries)
            indices = []
            weights = []
            for i, w in combined_weights.items():
                indices.append(i)
                if len(w) > 1:
                    weights.append(sum(w) / len(w))
                else:
                    weights.append(w[0])

            add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in meshes])

        # clusters that drive meshes not included in a parent don't need to be merged
        all_meshes = set().union(*[meshes for _, meshes in self.clusters])
        for child in ignored_children:
            for child_cluster, child_meshes in child.clusters:
                if all_meshes.isdisjoint(child_meshes):
                    indices = elem_prop_first(elem_find_first(child_cluster, b'Indexes', default=None), default=())
                    weights = elem_prop_first(elem_find_first(child_cluster, b'Weights', default=None), default=())
                    add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in child_meshes])
    else:
        # set the vertex weights on meshes
        for fbx_cluster, meshes in self.clusters:
            indices = elem_prop_first(elem_find_first(fbx_cluster, b'Indexes', default=None), default=())
            weights = elem_prop_first(elem_find_first(fbx_cluster, b'Weights', default=None), default=())
            add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in meshes])

    for child in self.children:
        if child.is_bone and not child.ignore:
            child.set_bone_weights()
def build_hierarchy(self, fbx_tmpl, settings, scene, view_layer):
    """Create Blender data for this subtree and link it into the view layer.

    Armature nodes create an armature object and build its bones in Edit mode;
    data nodes create a regular object; pure grouping nodes just recurse.
    Returns the created object, or None for grouping nodes.
    """
    if self.is_armature:
        # create when linking since we need object data
        elem_name_utf8 = self.fbx_name

        self.bl_data = arm_data = bpy.data.armatures.new(name=elem_name_utf8)

        # Object data must be created already
        self.bl_obj = arm = bpy.data.objects.new(name=elem_name_utf8, object_data=arm_data)

        arm.matrix_basis = self.get_matrix()

        if self.fbx_elem:
            # NOTE(review): fbx_props is built but only custom props are read here —
            # presumably kept for symmetry with build_node_obj; confirm before removing.
            fbx_props = (elem_find_first(self.fbx_elem, b'Properties70'),
                         elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))

            if settings.use_custom_props:
                blen_read_custom_properties(self.fbx_elem, arm, settings)

        # instance in scene
        view_layer.active_layer_collection.collection.objects.link(arm)
        arm.select_set(True)

        # Add bones:

        # Switch to Edit mode.
        view_layer.objects.active = arm
        is_hidden = arm.hide_viewport
        arm.hide_viewport = False  # Can't switch to Edit mode hidden objects...
        bpy.ops.object.mode_set(mode='EDIT')

        for child in self.children:
            if child.ignore:
                continue
            if child.is_bone:
                child.build_skeleton(self, Matrix(), force_connect_children=settings.force_connect_children)

        bpy.ops.object.mode_set(mode='OBJECT')

        arm.hide_viewport = is_hidden

        # Set pose matrix (must happen back in Object mode)
        for child in self.children:
            if child.ignore:
                continue
            if child.is_bone:
                child.set_pose_matrix(self)

        # Add bone children:
        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer)

        return arm
    elif self.fbx_elem and not self.is_bone:
        obj = self.build_node_obj(fbx_tmpl, settings)

        # walk through children
        for child in self.children:
            child.build_hierarchy(fbx_tmpl, settings, scene, view_layer)

        # instance in scene
        view_layer.active_layer_collection.collection.objects.link(obj)
        obj.select_set(True)

        return obj
    else:
        for child in self.children:
            child.build_hierarchy(fbx_tmpl, settings, scene, view_layer)

        return None
def link_hierarchy(self, fbx_tmpl, settings, scene):
    """Second pass: parent the built objects, bind meshes to armatures, assign bone weights.

    Returns this node's object (the armature object for armature nodes) so the
    caller can parent it, or None for grouping nodes.
    """
    if self.is_armature:
        arm = self.bl_obj

        # Link bone children:
        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.link_skeleton_children(fbx_tmpl, settings, scene)
            if child_obj:
                child_obj.parent = arm

        # Add armature modifiers to the meshes
        if self.meshes:
            for mesh in self.meshes:
                (mmat, amat) = mesh.armature_setup[self]
                me_obj = mesh.bl_obj

                # bring global armature & mesh matrices into *Blender* global space.
                # Note: Usage of matrix_geom (local 'diff' transform) here is quite brittle.
                # Among other things, why in hell isn't it taken into account by bindpose & co???
                # Probably because org app (max) handles it completely aside from any parenting stuff,
                # which we obviously cannot do in Blender. :/
                if amat is None:
                    amat = self.bind_matrix
                amat = settings.global_matrix @ (Matrix() if amat is None else amat)
                if self.matrix_geom:
                    amat = amat @ self.matrix_geom
                mmat = settings.global_matrix @ mmat
                if mesh.matrix_geom:
                    mmat = mmat @ mesh.matrix_geom

                # Now that we have armature and mesh in there (global) bind 'state' (matrix),
                # we can compute inverse parenting matrix of the mesh.
                me_obj.matrix_parent_inverse = amat.inverted_safe() @ mmat @ me_obj.matrix_basis.inverted_safe()

                mod = mesh.bl_obj.modifiers.new(arm.name, 'ARMATURE')
                mod.object = arm

        # Add bone weights to the deformers
        for child in self.children:
            if child.ignore:
                continue
            if child.is_bone:
                child.set_bone_weights()

        return arm
    elif self.bl_obj:
        obj = self.bl_obj

        # walk through children
        for child in self.children:
            child_obj = child.link_hierarchy(fbx_tmpl, settings, scene)
            if child_obj:
                child_obj.parent = obj

        return obj
    else:
        for child in self.children:
            child.link_hierarchy(fbx_tmpl, settings, scene)

        return None
def load(operator, context, filepath="",
use_manual_orientation=False,
axis_forward='-Z',
axis_up='Y',
global_scale=1.0,
bake_space_transform=False,
use_custom_normals=True,
use_image_search=False,
use_alpha_decals=False,
decal_offset=0.0,
use_anim=True,
anim_offset=1.0,
use_subsurf=False,
use_custom_props=True,
use_custom_props_enum_as_string=True,
ignore_leaf_bones=False,
force_connect_children=False,
automatic_bone_orientation=False,
primary_bone_axis='Y',
secondary_bone_axis='X',
use_prepost_rot=True):
global fbx_elem_nil
fbx_elem_nil = FBXElem('', (), (), ())
import os
import time
from bpy_extras.io_utils import axis_conversion
from . import parse_fbx
from .fbx_utils import RIGHT_HAND_AXES, FBX_FRAMERATES
start_time_proc = time.process_time()
start_time_sys = time.time()
perfmon = PerfMon()
perfmon.level_up()
perfmon.step("FBX Import: start importing %s" % filepath)
perfmon.level_up()
# Detect ASCII files.
# Typically it's bad practice to fail silently on any error,
# however the file may fail to read for many reasons,
# and this situation is handled later in the code,
# right now we only want to know if the file successfully reads as ascii.
try:
with open(filepath, 'r', encoding="utf-8") as fh:
fh.read(24)
is_ascii = True
except Exception:
is_ascii = False
if is_ascii:
operator.report({'ERROR'}, "ASCII FBX files are not supported %r" % filepath)
return {'CANCELLED'}
del is_ascii
# End ascii detection.
try:
elem_root, version = parse_fbx.parse(filepath)
except Exception as e:
import traceback
traceback.print_exc()
operator.report({'ERROR'}, "Couldn't open file %r (%s)" % (filepath, e))
return {'CANCELLED'}
if version < 7100:
operator.report({'ERROR'}, "Version %r unsupported, must be %r or later" % (version, 7100))
return {'CANCELLED'}
print("FBX version: %r" % version)
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
# deselect all
if bpy.ops.object.select_all.poll():
bpy.ops.object.select_all(action='DESELECT')
basedir = os.path.dirname(filepath)
nodal_material_wrap_map = {}
image_cache = {}
# Tables: (FBX_byte_id -> [FBX_data, None or Blender_datablock])
fbx_table_nodes = {}
if use_alpha_decals:
material_decals = set()
else:
material_decals = None
scene = context.scene
view_layer = context.view_layer
# #### Get some info from GlobalSettings.
perfmon.step("FBX import: Prepare...")
fbx_settings = elem_find_first(elem_root, b'GlobalSettings')
fbx_settings_props = elem_find_first(fbx_settings, b'Properties70')
if fbx_settings is None or fbx_settings_props is None:
operator.report({'ERROR'}, "No 'GlobalSettings' found in file %r" % filepath)
return {'CANCELLED'}
# FBX default base unit seems to be the centimeter, while raw Blender Unit is equivalent to the meter...
unit_scale = elem_props_get_number(fbx_settings_props, b'UnitScaleFactor', 1.0)
unit_scale_org = elem_props_get_number(fbx_settings_props, b'OriginalUnitScaleFactor', 1.0)
global_scale *= (unit_scale / units_blender_to_fbx_factor(context.scene))
# Compute global matrix and scale.
if not use_manual_orientation:
axis_forward = (elem_props_get_integer(fbx_settings_props, b'FrontAxis', 1),
elem_props_get_integer(fbx_settings_props, b'FrontAxisSign', 1))
axis_up = (elem_props_get_integer(fbx_settings_props, b'UpAxis', 2),
elem_props_get_integer(fbx_settings_props, b'UpAxisSign', 1))
axis_coord = (elem_props_get_integer(fbx_settings_props, b'CoordAxis', 0),
elem_props_get_integer(fbx_settings_props, b'CoordAxisSign', 1))
axis_key = (axis_up, axis_forward, axis_coord)
axis_up, axis_forward = {v: k for k, v in RIGHT_HAND_AXES.items()}.get(axis_key, ('Z', 'Y'))
global_matrix = (Matrix.Scale(global_scale, 4) @
axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4())
# To cancel out unwanted rotation/scale on nodes.
global_matrix_inv = global_matrix.inverted()
# For transforming mesh normals.
global_matrix_inv_transposed = global_matrix_inv.transposed()
# Compute bone correction matrix
bone_correction_matrix = None # None means no correction/identity
if not automatic_bone_orientation:
if (primary_bone_axis, secondary_bone_axis) != ('Y', 'X'):
bone_correction_matrix = axis_conversion(from_forward='X',
from_up='Y',
to_forward=secondary_bone_axis,
to_up=primary_bone_axis,
).to_4x4()
# Compute framerate settings.
custom_fps = elem_props_get_number(fbx_settings_props, b'CustomFrameRate', 25.0)
time_mode = elem_props_get_enum(fbx_settings_props, b'TimeMode')
real_fps = {eid: val for val, eid in FBX_FRAMERATES[1:]}.get(time_mode, custom_fps)
if real_fps <= 0.0:
real_fps = 25.0
scene.render.fps = round(real_fps)
scene.render.fps_base = scene.render.fps / real_fps
# store global settings that need to be accessed during conversion
settings = FBXImportSettings(
operator.report, (axis_up, axis_forward), global_matrix, global_scale,
bake_space_transform, global_matrix_inv, global_matrix_inv_transposed,
use_custom_normals, use_image_search,
use_alpha_decals, decal_offset,
use_anim, anim_offset,
use_subsurf,
use_custom_props, use_custom_props_enum_as_string,
nodal_material_wrap_map, image_cache,
ignore_leaf_bones, force_connect_children, automatic_bone_orientation, bone_correction_matrix,
use_prepost_rot,
)
# #### And now, the "real" data.
perfmon.step("FBX import: Templates...")
fbx_defs = elem_find_first(elem_root, b'Definitions') # can be None
fbx_nodes = elem_find_first(elem_root, b'Objects')
fbx_connections = elem_find_first(elem_root, b'Connections')
if fbx_nodes is None:
operator.report({'ERROR'}, "No 'Objects' found in file %r" % filepath)
return {'CANCELLED'}
if fbx_connections is None:
operator.report({'ERROR'}, "No 'Connections' found in file %r" % filepath)
return {'CANCELLED'}
# ----
# First load property templates
# Load 'PropertyTemplate' values.
# Key is a tuple, (ObjectType, FBXNodeType)
# eg, (b'Texture', b'KFbxFileTexture')
# (b'Geometry', b'KFbxMesh')
fbx_templates = {}
def _():
if fbx_defs is not None:
for fbx_def in fbx_defs.elems:
if fbx_def.id == b'ObjectType':
for fbx_subdef in fbx_def.elems:
if fbx_subdef.id == b'PropertyTemplate':
assert(fbx_def.props_type == b'S')
assert(fbx_subdef.props_type == b'S')
# (b'Texture', b'KFbxFileTexture') - eg.
key = fbx_def.props[0], fbx_subdef.props[0]
fbx_templates[key] = fbx_subdef
_(); del _
def fbx_template_get(key):
ret = fbx_templates.get(key, fbx_elem_nil)
if ret is fbx_elem_nil:
# Newest FBX (7.4 and above) use no more 'K' in their type names...
key = (key[0], key[1][1:])
return fbx_templates.get(key, fbx_elem_nil)
return ret
perfmon.step("FBX import: Nodes...")
# ----
# Build FBX node-table
def _():
for fbx_obj in fbx_nodes.elems:
# TODO, investigate what other items after first 3 may be
assert(fbx_obj.props_type[:3] == b'LSS')
fbx_uuid = elem_uuid(fbx_obj)
fbx_table_nodes[fbx_uuid] = [fbx_obj, None]
_(); del _
# ----
# Load in the data
# http://download.autodesk.com/us/fbx/20112/FBX_SDK_HELP/index.html?url=
# WS73099cc142f487551fea285e1221e4f9ff8-7fda.htm,topicNumber=d0e6388
perfmon.step("FBX import: Connections...")
fbx_connection_map = {}
fbx_connection_map_reverse = {}
def _():
for fbx_link in fbx_connections.elems:
c_type = fbx_link.props[0]
if fbx_link.props_type[1:3] == b'LL':
c_src, c_dst = fbx_link.props[1:3]
fbx_connection_map.setdefault(c_src, []).append((c_dst, fbx_link))
fbx_connection_map_reverse.setdefault(c_dst, []).append((c_src, fbx_link))
_(); del _
perfmon.step("FBX import: Meshes...")
# ----
# Load mesh data
def _():
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxMesh'))
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Geometry':
continue
if fbx_obj.props[-1] == b'Mesh':
assert(blen_data is None)
fbx_item[1] = blen_read_geom(fbx_tmpl, fbx_obj, settings)
_(); del _
perfmon.step("FBX import: Materials & Textures...")
# ----
# Load material data
def _():
fbx_tmpl = fbx_template_get((b'Material', b'KFbxSurfacePhong'))
# b'KFbxSurfaceLambert'
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Material':
continue
assert(blen_data is None)
fbx_item[1] = blen_read_material(fbx_tmpl, fbx_obj, settings)
_(); del _
# ----
# Load image & textures data
def _():
fbx_tmpl_tex = fbx_template_get((b'Texture', b'KFbxFileTexture'))
fbx_tmpl_img = fbx_template_get((b'Video', b'KFbxVideo'))
# Important to run all 'Video' ones first, embedded images are stored in those nodes.
# XXX Note we simplify things here, assuming both matching Video and Texture will use same file path,
# this may be a bit weak, if issue arise we'll fallback to plain connection stuff...
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Video':
continue
fbx_item[1] = blen_read_texture_image(fbx_tmpl_img, fbx_obj, basedir, settings)
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Texture':
continue
fbx_item[1] = blen_read_texture_image(fbx_tmpl_tex, fbx_obj, basedir, settings)
_(); del _
perfmon.step("FBX import: Cameras & Lamps...")
# ----
# Load camera data
def _():
fbx_tmpl = fbx_template_get((b'NodeAttribute', b'KFbxCamera'))
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'NodeAttribute':
continue
if fbx_obj.props[-1] == b'Camera':
assert(blen_data is None)
fbx_item[1] = blen_read_camera(fbx_tmpl, fbx_obj, global_scale)
_(); del _
# ----
# Load lamp data
def _():
fbx_tmpl = fbx_template_get((b'NodeAttribute', b'KFbxLight'))
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'NodeAttribute':
continue
if fbx_obj.props[-1] == b'Light':
assert(blen_data is None)
fbx_item[1] = blen_read_light(fbx_tmpl, fbx_obj, global_scale)
_(); del _
# ----
# Connections
def connection_filter_ex(fbx_uuid, fbx_id, dct):
return [(c_found[0], c_found[1], c_type)
for (c_uuid, c_type) in dct.get(fbx_uuid, ())
# 0 is used for the root node, which isnt in fbx_table_nodes
for c_found in (() if c_uuid == 0 else (fbx_table_nodes.get(c_uuid, (None, None)),))
if (fbx_id is None) or (c_found[0] and c_found[0].id == fbx_id)]
def connection_filter_forward(fbx_uuid, fbx_id):
return connection_filter_ex(fbx_uuid, fbx_id, fbx_connection_map)
def connection_filter_reverse(fbx_uuid, fbx_id):
return connection_filter_ex(fbx_uuid, fbx_id, fbx_connection_map_reverse)
perfmon.step("FBX import: Objects & Armatures...")
# -- temporary helper hierarchy to build armatures and objects from
# lookup from uuid to helper node. Used to build parent-child relations and later to look up animated nodes.
fbx_helper_nodes = {}
def _():
# We build an intermediate hierarchy used to:
# - Calculate and store bone orientation correction matrices. The same matrices will be reused for animation.
# - Find/insert armature nodes.
# - Filter leaf bones.
# create scene root
fbx_helper_nodes[0] = root_helper = FbxImportHelperNode(None, None, None, False)
root_helper.is_root = True
# add fbx nodes
fbx_tmpl = fbx_template_get((b'Model', b'KFbxNode'))
for a_uuid, a_item in fbx_table_nodes.items():
fbx_obj, bl_data = a_item
if fbx_obj is None or fbx_obj.id != b'Model':
continue
fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
transform_data = blen_read_object_transform_preprocess(fbx_props, fbx_obj, Matrix(), use_prepost_rot)
# Note: 'Root' "bones" are handled as (armature) objects.
# Note: See T46912 for first FBX file I ever saw with 'Limb' bones - thought those were totally deprecated.
is_bone = fbx_obj.props[2] in {b'LimbNode', b'Limb'}
fbx_helper_nodes[a_uuid] = FbxImportHelperNode(fbx_obj, bl_data, transform_data, is_bone)
# add parent-child relations and add blender data to the node
for fbx_link in fbx_connections.elems:
if fbx_link.props[0] != b'OO':
continue
if fbx_link.props_type[1:3] == b'LL':
c_src, c_dst = fbx_link.props[1:3]
parent = fbx_helper_nodes.get(c_dst)
if parent is None:
continue
child = fbx_helper_nodes.get(c_src)
if child is None:
# add blender data (meshes, lights, cameras, etc.) to a helper node
fbx_sdata, bl_data = p_item = fbx_table_nodes.get(c_src, (None, None))
if fbx_sdata is None:
continue
if fbx_sdata.id not in {b'Geometry', b'NodeAttribute'}:
continue
parent.bl_data = bl_data
else:
# set parent
child.parent = parent
# find armatures (either an empty below a bone or a new node inserted at the bone
root_helper.find_armatures()
# mark nodes that have bone children
root_helper.find_bone_children()
# mark nodes that need a bone to attach child-bones to
root_helper.find_fake_bones()
# mark leaf nodes that are only required to mark the end of their parent bone
if settings.ignore_leaf_bones:
root_helper.mark_leaf_bones()
# What a mess! Some bones have several BindPoses, some have none, clusters contain a bind pose as well,
# and you can have several clusters per bone!
# Maybe some conversion can be applied to put them all into the same frame of reference?
# get the bind pose from pose elements
for a_uuid, a_item in fbx_table_nodes.items():
fbx_obj, bl_data = a_item
if fbx_obj is None:
continue
if fbx_obj.id != b'Pose':
continue
if fbx_obj.props[2] != b'BindPose':
continue
for fbx_pose_node in fbx_obj.elems:
if fbx_pose_node.id != b'PoseNode':
continue
node_elem = elem_find_first(fbx_pose_node, b'Node')
node = elem_uuid(node_elem)
matrix_elem = elem_find_first(fbx_pose_node, b'Matrix')
matrix = array_to_matrix4(matrix_elem.props[0]) if matrix_elem else None
bone = fbx_helper_nodes.get(node)
if bone and matrix:
# Store the matrix in the helper node.
# There may be several bind pose matrices for the same node, but in tests they seem to be identical.
bone.bind_matrix = matrix # global space
# get clusters and bind pose
for helper_uuid, helper_node in fbx_helper_nodes.items():
if not helper_node.is_bone:
continue
for cluster_uuid, cluster_link in fbx_connection_map.get(helper_uuid, ()):
if cluster_link.props[0] != b'OO':
continue
fbx_cluster, _ = fbx_table_nodes.get(cluster_uuid, (None, None))
if fbx_cluster is None or fbx_cluster.id != b'Deformer' or fbx_cluster.props[2] != b'Cluster':
continue
# Get the bind pose from the cluster:
tx_mesh_elem = elem_find_first(fbx_cluster, b'Transform', default=None)
tx_mesh = array_to_matrix4(tx_mesh_elem.props[0]) if tx_mesh_elem else Matrix()
tx_bone_elem = elem_find_first(fbx_cluster, b'TransformLink', default=None)
tx_bone = array_to_matrix4(tx_bone_elem.props[0]) if tx_bone_elem else None
tx_arm_elem = elem_find_first(fbx_cluster, b'TransformAssociateModel', default=None)
tx_arm = array_to_matrix4(tx_arm_elem.props[0]) if tx_arm_elem else None
mesh_matrix = tx_mesh
armature_matrix = tx_arm
if tx_bone:
mesh_matrix = tx_bone @ mesh_matrix
helper_node.bind_matrix = tx_bone # overwrite the bind matrix
# Get the meshes driven by this cluster: (Shouldn't that be only one?)
meshes = set()
for skin_uuid, skin_link in fbx_connection_map.get(cluster_uuid):
if skin_link.props[0] != b'OO':
continue
fbx_skin, _ = fbx_table_nodes.get(skin_uuid, (None, None))
if fbx_skin is None or fbx_skin.id != b'Deformer' or fbx_skin.props[2] != b'Skin':
continue
for mesh_uuid, mesh_link in fbx_connection_map.get(skin_uuid):
if mesh_link.props[0] != b'OO':
continue
fbx_mesh, _ = fbx_table_nodes.get(mesh_uuid, (None, None))
if fbx_mesh is None or fbx_mesh.id != b'Geometry' or fbx_mesh.props[2] != b'Mesh':
continue
for object_uuid, object_link in fbx_connection_map.get(mesh_uuid):
if object_link.props[0] != b'OO':
continue
mesh_node = fbx_helper_nodes[object_uuid]
if mesh_node:
# ----
# If we get a valid mesh matrix (in bone space), store armature and
# mesh global matrices, we need them to compute mesh's matrix_parent_inverse
# when actually binding them via the modifier.
# Note we assume all bones were bound with the same mesh/armature (global) matrix,
# we do not support otherwise in Blender anyway!
mesh_node.armature_setup[helper_node.armature] = (mesh_matrix, armature_matrix)
meshes.add(mesh_node)
helper_node.clusters.append((fbx_cluster, meshes))
# convert bind poses from global space into local space
root_helper.make_bind_pose_local()
# collect armature meshes
root_helper.collect_armature_meshes()
# find the correction matrices to align FBX objects with their Blender equivalent
root_helper.find_correction_matrix(settings)
# build the Object/Armature/Bone hierarchy
root_helper.build_hierarchy(fbx_tmpl, settings, scene, view_layer)
# Link the Object/Armature/Bone hierarchy
root_helper.link_hierarchy(fbx_tmpl, settings, scene)
# root_helper.print_info(0)
_(); del _
perfmon.step("FBX import: ShapeKeys...")
# We can handle shapes.
blend_shape_channels = {} # We do not need Shapes themselves, but keyblocks, for anim.
def _():
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape'))
for s_uuid, s_item in fbx_table_nodes.items():
fbx_sdata, bl_sdata = s_item = fbx_table_nodes.get(s_uuid, (None, None))
if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape':
continue
# shape -> blendshapechannel -> blendshape -> mesh.
for bc_uuid, bc_ctype in fbx_connection_map.get(s_uuid, ()):
if bc_ctype.props[0] != b'OO':
continue
fbx_bcdata, _bl_bcdata = fbx_table_nodes.get(bc_uuid, (None, None))
if fbx_bcdata is None or fbx_bcdata.id != b'Deformer' or fbx_bcdata.props[2] != b'BlendShapeChannel':
continue
meshes = []
objects = []
for bs_uuid, bs_ctype in fbx_connection_map.get(bc_uuid, ()):
if bs_ctype.props[0] != b'OO':
continue
fbx_bsdata, _bl_bsdata = fbx_table_nodes.get(bs_uuid, (None, None))
if fbx_bsdata is None or fbx_bsdata.id != b'Deformer' or fbx_bsdata.props[2] != b'BlendShape':
continue
for m_uuid, m_ctype in fbx_connection_map.get(bs_uuid, ()):
if m_ctype.props[0] != b'OO':
continue
fbx_mdata, bl_mdata = fbx_table_nodes.get(m_uuid, (None, None))
if fbx_mdata is None or fbx_mdata.id != b'Geometry' or fbx_mdata.props[2] != b'Mesh':
continue
# Blenmeshes are assumed already created at that time!
assert(isinstance(bl_mdata, bpy.types.Mesh))
# And we have to find all objects using this mesh!
objects = []
for o_uuid, o_ctype in fbx_connection_map.get(m_uuid, ()):
if o_ctype.props[0] != b'OO':
continue
node = fbx_helper_nodes[o_uuid]
if node:
objects.append(node)
meshes.append((bl_mdata, objects))
# BlendShape deformers are only here to connect BlendShapeChannels to meshes, nothing else to do.
# keyblocks is a list of tuples (mesh, keyblock) matching that shape/blendshapechannel, for animation.
keyblocks = blen_read_shape(fbx_tmpl, fbx_sdata, fbx_bcdata, meshes, scene)
blend_shape_channels[bc_uuid] = keyblocks
_(); del _
if settings.use_subsurf:
perfmon.step("FBX import: Subdivision surfaces")
# Look through connections for subsurf in meshes and add it to the parent object
def _():
for fbx_link in fbx_connections.elems:
if fbx_link.props[0] != b'OO':
continue
if fbx_link.props_type[1:3] == b'LL':
c_src, c_dst = fbx_link.props[1:3]
parent = fbx_helper_nodes.get(c_dst)
if parent is None:
continue
child = fbx_helper_nodes.get(c_src)
if child is None:
fbx_sdata, bl_data = fbx_table_nodes.get(c_src, (None, None))
if fbx_sdata.id != b'Geometry':
continue
preview_levels = elem_prop_first(elem_find_first(fbx_sdata, b'PreviewDivisionLevels'))
render_levels = elem_prop_first(elem_find_first(fbx_sdata, b'RenderDivisionLevels'))
if isinstance(preview_levels, int) and isinstance(render_levels, int):
mod = parent.bl_obj.modifiers.new('subsurf', 'SUBSURF')
mod.levels = preview_levels
mod.render_levels = render_levels
_(); del _
if use_anim:
perfmon.step("FBX import: Animations...")
# Animation!
        def _():
            """Collect AnimationStacks/Layers/CurveNodes/Curves and apply them.

            Builds the nested ``stacks`` structure:
                {as_uuid: (fbx_asitem, {al_uuid: (fbx_alitem, {item: {acn_uuid: (cnode, prop)}})})}
            then hands it to blen_read_animations().
            """
            fbx_tmpl_astack = fbx_template_get((b'AnimationStack', b'FbxAnimStack'))
            fbx_tmpl_alayer = fbx_template_get((b'AnimationLayer', b'FbxAnimLayer'))
            stacks = {}
            # AnimationStacks.
            for as_uuid, fbx_asitem in fbx_table_nodes.items():
                fbx_asdata, _blen_data = fbx_asitem
                if fbx_asdata.id != b'AnimationStack' or fbx_asdata.props[2] != b'':
                    continue
                stacks[as_uuid] = (fbx_asitem, {})
            # AnimationLayers
            # (mixing is completely ignored for now, each layer results in an independent set of actions).
            def get_astacks_from_alayer(al_uuid):
                # Yield the uuid of every known AnimationStack this layer is OO-connected to.
                for as_uuid, as_ctype in fbx_connection_map.get(al_uuid, ()):
                    if as_ctype.props[0] != b'OO':
                        continue
                    fbx_asdata, _bl_asdata = fbx_table_nodes.get(as_uuid, (None, None))
                    if (fbx_asdata is None or fbx_asdata.id != b'AnimationStack' or
                            fbx_asdata.props[2] != b'' or as_uuid not in stacks):
                        continue
                    yield as_uuid
            for al_uuid, fbx_alitem in fbx_table_nodes.items():
                fbx_aldata, _blen_data = fbx_alitem
                if fbx_aldata.id != b'AnimationLayer' or fbx_aldata.props[2] != b'':
                    continue
                for as_uuid in get_astacks_from_alayer(al_uuid):
                    _fbx_asitem, alayers = stacks[as_uuid]
                    alayers[al_uuid] = (fbx_alitem, {})
            # AnimationCurveNodes (also the ones linked to actual animated data!).
            curvenodes = {}
            for acn_uuid, fbx_acnitem in fbx_table_nodes.items():
                fbx_acndata, _blen_data = fbx_acnitem
                if fbx_acndata.id != b'AnimationCurveNode' or fbx_acndata.props[2] != b'':
                    continue
                cnode = curvenodes[acn_uuid] = {}
                items = []
                for n_uuid, n_ctype in fbx_connection_map.get(acn_uuid, ()):
                    if n_ctype.props[0] != b'OP':
                        continue
                    lnk_prop = n_ctype.props[3]
                    if lnk_prop in {b'Lcl Translation', b'Lcl Rotation', b'Lcl Scaling'}:
                        # n_uuid can (????) be linked to root '0' node, instead of a mere object node... See T41712.
                        ob = fbx_helper_nodes.get(n_uuid, None)
                        if ob is None or ob.is_root:
                            continue
                        items.append((ob, lnk_prop))
                    elif lnk_prop == b'DeformPercent':  # Shape keys.
                        keyblocks = blend_shape_channels.get(n_uuid, None)
                        if keyblocks is None:
                            continue
                        items += [(kb, lnk_prop) for kb in keyblocks]
                    elif lnk_prop == b'FocalLength':  # Camera lens.
                        from bpy.types import Camera
                        fbx_item = fbx_table_nodes.get(n_uuid, None)
                        if fbx_item is None or not isinstance(fbx_item[1], Camera):
                            continue
                        cam = fbx_item[1]
                        items.append((cam, lnk_prop))
                    elif lnk_prop == b'DiffuseColor':
                        from bpy.types import Material
                        fbx_item = fbx_table_nodes.get(n_uuid, None)
                        if fbx_item is None or not isinstance(fbx_item[1], Material):
                            continue
                        mat = fbx_item[1]
                        items.append((mat, lnk_prop))
                        print("WARNING! Importing material's animation is not supported for Nodal materials...")
                for al_uuid, al_ctype in fbx_connection_map.get(acn_uuid, ()):
                    if al_ctype.props[0] != b'OO':
                        continue
                    fbx_aldata, _blen_aldata = fbx_alitem = fbx_table_nodes.get(al_uuid, (None, None))
                    if fbx_aldata is None or fbx_aldata.id != b'AnimationLayer' or fbx_aldata.props[2] != b'':
                        continue
                    for as_uuid in get_astacks_from_alayer(al_uuid):
                        _fbx_alitem, anim_items = stacks[as_uuid][1][al_uuid]
                        assert(_fbx_alitem == fbx_alitem)
                        for item, item_prop in items:
                            # No need to keep curvenode FBX data here, contains nothing useful for us.
                            anim_items.setdefault(item, {})[acn_uuid] = (cnode, item_prop)
            # AnimationCurves (real animation data).
            for ac_uuid, fbx_acitem in fbx_table_nodes.items():
                fbx_acdata, _blen_data = fbx_acitem
                if fbx_acdata.id != b'AnimationCurve' or fbx_acdata.props[2] != b'':
                    continue
                for acn_uuid, acn_ctype in fbx_connection_map.get(ac_uuid, ()):
                    if acn_ctype.props[0] != b'OP':
                        continue
                    fbx_acndata, _bl_acndata = fbx_table_nodes.get(acn_uuid, (None, None))
                    if (fbx_acndata is None or fbx_acndata.id != b'AnimationCurveNode' or
                            fbx_acndata.props[2] != b'' or acn_uuid not in curvenodes):
                        continue
                    # Note this is an infamous simplification of the compound props stuff,
                    # seems to be standard naming but we'll probably have to be smarter to handle more exotic files?
                    channel = {
                        b'd|X': 0, b'd|Y': 1, b'd|Z': 2,
                        b'd|DeformPercent': 0,
                        b'd|FocalLength': 0
                    }.get(acn_ctype.props[3], None)
                    if channel is None:
                        continue
                    curvenodes[acn_uuid][ac_uuid] = (fbx_acitem, channel)
            # And now that we have sorted all this, apply animations!
            blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, settings.anim_offset)
_(); del _
perfmon.step("FBX import: Assign materials...")
    def _():
        """Append FBX materials to each imported mesh (deduplicated per mesh)."""
        # link Material's to Geometry (via Model's)
        for fbx_uuid, fbx_item in fbx_table_nodes.items():
            fbx_obj, blen_data = fbx_item
            if fbx_obj.id != b'Geometry':
                continue
            mesh = fbx_table_nodes.get(fbx_uuid, (None, None))[1]
            # can happen in rare cases
            if mesh is None:
                continue
            # In Blender, we link materials to data, typically (meshes), while in FBX they are linked to objects...
            # So we have to be careful not to re-add endlessly the same material to a mesh!
            # This can easily happen with 'baked' dupliobjects, see T44386.
            # TODO: add an option to link materials to objects in Blender instead?
            done_materials = set()
            for (fbx_lnk, fbx_lnk_item, fbx_lnk_type) in connection_filter_forward(fbx_uuid, b'Model'):
                # link materials
                fbx_lnk_uuid = elem_uuid(fbx_lnk)
                for (fbx_lnk_material, material, fbx_lnk_material_type) in connection_filter_reverse(fbx_lnk_uuid, b'Material'):
                    if material not in done_materials:
                        mesh.materials.append(material)
                        done_materials.add(material)
            # We have to validate mesh polygons' ma_idx, see T41015!
            # Some FBX seem to have an extra 'default' material which is not defined in FBX file.
            if mesh.validate_material_indices():
                print("WARNING: mesh '%s' had invalid material indices, those were reset to first material" % mesh.name)
_(); del _
perfmon.step("FBX import: Assign textures...")
    def _():
        """Plug FBX texture links into the node-wrapper channels of each material."""
        material_images = {}
        fbx_tmpl = fbx_template_get((b'Material', b'KFbxSurfacePhong'))
        # b'KFbxSurfaceLambert'
        def texture_mapping_set(fbx_obj, node_texture):
            # Transfer the FBX 2D texture transform (translation/rotation/scaling,
            # wrap modes) onto the wrapper's texture node, skipping identity transforms.
            assert(fbx_obj.id == b'Texture')
            fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
                         elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
            loc = elem_props_get_vector_3d(fbx_props, b'Translation', (0.0, 0.0, 0.0))
            rot = tuple(-r for r in elem_props_get_vector_3d(fbx_props, b'Rotation', (0.0, 0.0, 0.0)))
            scale = tuple(((1.0 / s) if s != 0.0 else 1.0)
                          for s in elem_props_get_vector_3d(fbx_props, b'Scaling', (1.0, 1.0, 1.0)))
            clamp = (bool(elem_props_get_enum(fbx_props, b'WrapModeU', 0)) or
                     bool(elem_props_get_enum(fbx_props, b'WrapModeV', 0)))
            if (loc == (0.0, 0.0, 0.0) and
                    rot == (0.0, 0.0, 0.0) and
                    scale == (1.0, 1.0, 1.0) and
                    clamp == False):
                return
            node_texture.translation = loc
            node_texture.rotation = rot
            node_texture.scale = scale
            if clamp:
                node_texture.extension = 'EXTEND'
        for fbx_uuid, fbx_item in fbx_table_nodes.items():
            fbx_obj, blen_data = fbx_item
            if fbx_obj.id != b'Material':
                continue
            material = fbx_table_nodes.get(fbx_uuid, (None, None))[1]
            for (fbx_lnk,
                 image,
                 fbx_lnk_type) in connection_filter_reverse(fbx_uuid, b'Texture'):
                if fbx_lnk_type.props[0] == b'OP':
                    lnk_type = fbx_lnk_type.props[3]
                    ma_wrap = nodal_material_wrap_map[material]
                    if lnk_type in {b'DiffuseColor', b'3dsMax|maps|texmap_diffuse'}:
                        ma_wrap.base_color_texture.image = image
                        texture_mapping_set(fbx_lnk, ma_wrap.base_color_texture)
                    elif lnk_type in {b'SpecularColor', b'SpecularFactor'}:
                        # Intensity actually, not color...
                        ma_wrap.specular_texture.image = image
                        texture_mapping_set(fbx_lnk, ma_wrap.specular_texture)
                    elif lnk_type in {b'ReflectionColor', b'ReflectionFactor', b'3dsMax|maps|texmap_reflection'}:
                        # Intensity actually, not color...
                        ma_wrap.metallic_texture.image = image
                        texture_mapping_set(fbx_lnk, ma_wrap.metallic_texture)
                    elif lnk_type in {b'TransparentColor', b'TransparentFactor'}:
                        ma_wrap.alpha_texture.image = image
                        texture_mapping_set(fbx_lnk, ma_wrap.alpha_texture)
                        if use_alpha_decals:
                            material_decals.add(material)
                    elif lnk_type == b'ShininessExponent':
                        # That is probably reversed compared to expected results? TODO...
                        ma_wrap.roughness_texture.image = image
                        texture_mapping_set(fbx_lnk, ma_wrap.roughness_texture)
                    # XXX, applications abuse bump!
                    elif lnk_type in {b'NormalMap', b'Bump', b'3dsMax|maps|texmap_bump'}:
                        ma_wrap.normalmap_texture.image = image
                        texture_mapping_set(fbx_lnk, ma_wrap.normalmap_texture)
                        """
                        elif lnk_type == b'Bump':
                            # TODO displacement...
                        """
                    elif lnk_type in {b'EmissiveColor'}:
                        ma_wrap.emission_color_texture.image = image
                        texture_mapping_set(fbx_lnk, ma_wrap.emission_color_texture)
                    else:
                        print("WARNING: material link %r ignored" % lnk_type)
                    material_images.setdefault(material, {})[lnk_type] = image
        # Check if the diffuse image has an alpha channel,
        # if so, use the alpha channel.
        # Note: this could be made optional since images may have alpha but be entirely opaque
        for fbx_uuid, fbx_item in fbx_table_nodes.items():
            fbx_obj, blen_data = fbx_item
            if fbx_obj.id != b'Material':
                continue
            material = fbx_table_nodes.get(fbx_uuid, (None, None))[1]
            image = material_images.get(material, {}).get(b'DiffuseColor', None)
            # do we have alpha?
            if image and image.depth == 32:
                if use_alpha_decals:
                    material_decals.add(material)
                ma_wrap = nodal_material_wrap_map[material]
                ma_wrap.alpha_texture.use_alpha = True
                ma_wrap.alpha_texture.copy_from(ma_wrap.base_color_texture)
        # Propagate mapping from diffuse to all other channels which have none defined.
        # XXX Commenting for now, I do not really understand the logic here, why should diffuse mapping
        # be applied to all others if not defined for them???
        # ~ ma_wrap = nodal_material_wrap_map[material]
        # ~ ma_wrap.mapping_set_from_diffuse()
_(); del _
perfmon.step("FBX import: Cycles z-offset workaround...")
    def _():
        """Offset decal-mesh vertices along their normals and disable shadow casting."""
        # Annoying workaround for cycles having no z-offset
        if material_decals and use_alpha_decals:
            for fbx_uuid, fbx_item in fbx_table_nodes.items():
                fbx_obj, blen_data = fbx_item
                if fbx_obj.id != b'Geometry':
                    continue
                if fbx_obj.props[-1] == b'Mesh':
                    mesh = fbx_item[1]
                    if decal_offset != 0.0:
                        # One decal material on the mesh is enough to offset all vertices.
                        for material in mesh.materials:
                            if material in material_decals:
                                for v in mesh.vertices:
                                    v.co += v.normal * decal_offset
                                break
                    for obj in (obj for obj in bpy.data.objects if obj.data == mesh):
                        obj.cycles_visibility.shadow = False
_(); del _
perfmon.level_down()
perfmon.level_down("Import finished.")
return {'FINISHED'}
| 43.090195 | 157 | 0.600672 | lem,
)
from .fbx_utils import (
PerfMon,
units_blender_to_fbx_factor,
units_convertor_iter,
array_to_matrix4,
similar_values,
similar_values_iter,
FBXImportSettings,
)
# Sentinel used as an 'always missing' element default in property lookups.
fbx_elem_nil = None
# Iterator-based degrees -> radians converter (from fbx_utils).
convert_deg_to_rad_iter = units_convertor_iter("degree", "radian")
# NOTE(review): these are the inverses of the fbx_utils conversion matrices
# (presumably because import applies the opposite axis conversion to export).
MAT_CONVERT_BONE = fbx_utils.MAT_CONVERT_BONE.inverted()
MAT_CONVERT_LIGHT = fbx_utils.MAT_CONVERT_LIGHT.inverted()
MAT_CONVERT_CAMERA = fbx_utils.MAT_CONVERT_CAMERA.inverted()
def validate_blend_names(name):
    """Decode *name* (bytes) into a Blender-safe utf-8 string of at most 63 bytes.

    Over-long names are truncated and suffixed with a short sha1 digest so
    that distinct long names stay distinct after truncation.
    """
    assert(type(name) == bytes)
    if len(name) <= 63:
        return name.decode('utf-8', 'replace')
    # Name too long: truncate and append "_" + 7 hex chars of its sha1.
    import hashlib
    suffix = "_" + hashlib.sha1(name).hexdigest()[:7]
    trunc = 55
    result = name[:trunc].decode('utf-8', 'replace') + suffix
    # Multi-byte utf-8 replacement chars can push us over 63 bytes; shrink until it fits.
    while len(result.encode()) > 63:
        trunc -= 1
        result = name[:trunc].decode('utf-8', 'replace') + suffix
    return result
def elem_find_first(elem, id_search, default=None):
    """Return the first child element of *elem* whose id is *id_search*, else *default*."""
    return next((child for child in elem.elems if child.id == id_search), default)
def elem_find_iter(elem, id_search):
    """Yield every child element of *elem* whose id is *id_search*, in order."""
    yield from (child for child in elem.elems if child.id == id_search)
def elem_find_first_string(elem, id_search):
    """Return the single string property of the first matching child, decoded, or None."""
    item = elem_find_first(elem, id_search)
    if item is None or not item.props:
        return None
    assert(len(item.props) == 1)
    assert(item.props_type[0] == data_types.STRING)
    return item.props[0].decode('utf-8', 'replace')
def elem_find_first_string_as_bytes(elem, id_search):
    """Return the single string property of the first matching child as raw bytes, or None."""
    item = elem_find_first(elem, id_search)
    if item is None or not item.props:
        return None
    assert(len(item.props) == 1)
    # FBX stores this as a string property, but we deliberately skip decoding here.
    assert(item.props_type[0] == data_types.STRING)
    return item.props[0]
def elem_find_first_bytes(elem, id_search, decode=True):
    """Return the single BYTES property of the first matching child, or None.

    NOTE: the *decode* parameter is currently unused; it is kept only for
    signature compatibility with existing callers.
    """
    item = elem_find_first(elem, id_search)
    if item is None or not item.props:
        return None
    assert(len(item.props) == 1)
    assert(item.props_type[0] == data_types.BYTES)
    return item.props[0]
def elem_repr(elem):
    """Debug representation of an FBX element: id, property reprs, and child ids."""
    prop_list = ", ".join(repr(prop) for prop in elem.props)
    child_ids = b", ".join(child.id for child in elem.elems)
    return "%s: props[%d=%r], elems=(%r)" % (elem.id, len(elem.props), prop_list, child_ids)
def elem_split_name_class(elem):
    """Split the element's combined name/class property (b'name\\x00\\x01class') into a pair."""
    assert(elem.props_type[-2] == data_types.STRING)
    name, clss = elem.props[-2].split(b'\x00\x01')
    return name, clss
def elem_name_ensure_class(elem, clss=...):
    """Return the element's validated Blender name, asserting its class equals *clss* (when given)."""
    name, found_class = elem_split_name_class(elem)
    assert(clss is ... or found_class == clss)
    return validate_blend_names(name)
def elem_name_ensure_classes(elem, clss=...):
    """Return the element's validated Blender name, asserting its class is one of *clss* (when given)."""
    name, found_class = elem_split_name_class(elem)
    assert(clss is ... or found_class in clss)
    return validate_blend_names(name)
def elem_split_name_class_nodeattr(elem):
    """Split name/class of a NodeAttribute element.

    The generic class slot must hold b'NodeAttribute'; the actual class
    is carried by the last property instead.
    """
    assert(elem.props_type[-2] == data_types.STRING)
    name, generic_class = elem.props[-2].split(b'\x00\x01')
    assert(generic_class == b'NodeAttribute')
    assert(elem.props_type[-1] == data_types.STRING)
    return name, elem.props[-1]
def elem_uuid(elem):
    """Return the element's UUID, stored as its first (int64) property."""
    assert(elem.props_type[0] == data_types.INT64)
    return elem.props[0]
def elem_prop_first(elem, default=None):
    """Return the first property of *elem*, or *default* when *elem* is None or has no props."""
    if elem is None or not elem.props:
        return default
    return elem.props[0]
def elem_props_find_first(elem, elem_prop_id):
    """Find the b'P' sub-element whose first property equals *elem_prop_id*.

    *elem* may be None, a single FBXElem, or a non-empty tuple of FBXElem
    (e.g. object properties plus template properties); tuples are searched
    in order and the first match wins. Returns None when nothing matches.
    """
    if elem is None:
        return None
    if type(elem) is not FBXElem:
        # A tuple of candidate elements: recurse into each in order.
        assert(type(elem) is tuple)
        for candidate in elem:
            found = elem_props_find_first(candidate, elem_prop_id)
            if found is not None:
                return found
        # An empty tuple here is always a bug in the caller.
        assert(len(elem) > 0)
        return None
    for subelem in elem.elems:
        assert(subelem.id == b'P')
        if subelem.props[0] == elem_prop_id:
            return subelem
    return None
def elem_props_get_color_rgb(elem, elem_prop_id, default=None):
    """Return an RGB color property (three float64 values), or *default* when missing."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    # Two known encodings: (b'Color', b'') and the older (b'ColorRGB', b'Color').
    if prop.props[1] == b'Color':
        assert(prop.props[2] == b'')
    else:
        assert(prop.props[1] == b'ColorRGB')
        assert(prop.props[2] == b'Color')
    assert(prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3)
    return prop.props[4:7]
def elem_props_get_vector_3d(elem, elem_prop_id, default=None):
    """Return a 3D vector property (three float64 values), or *default* when missing."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3)
    return prop.props[4:7]
def elem_props_get_number(elem, elem_prop_id, default=None):
    """Return a float64 number property, or *default* when missing."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    # Two known encodings: (b'double', b'Number') and (b'Number', b'').
    if prop.props[1] == b'double':
        assert(prop.props[2] == b'Number')
    else:
        assert(prop.props[1] == b'Number')
        assert(prop.props[2] == b'')
    assert(prop.props_type[4] == data_types.FLOAT64)
    return prop.props[4]
def elem_props_get_integer(elem, elem_prop_id, default=None):
    """Return an integer property (int32 or int64 storage), or *default* when missing."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    # Known encodings: (b'int', b'Integer') and (b'ULongLong', b'').
    if prop.props[1] == b'int':
        assert(prop.props[2] == b'Integer')
    elif prop.props[1] == b'ULongLong':
        assert(prop.props[2] == b'')
    assert(prop.props_type[4] in {data_types.INT32, data_types.INT64})
    return prop.props[4]
def elem_props_get_bool(elem, elem_prop_id, default=None):
    """Return a boolean property, or *default* when missing."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    assert(prop.props[1] in {b'bool', b'Bool'})
    assert(prop.props[2] == b'')
    # Stored as an int32 that must be exactly 0 or 1.
    assert(prop.props_type[4] == data_types.INT32)
    assert(prop.props[4] in {0, 1})
    return bool(prop.props[4])
def elem_props_get_enum(elem, elem_prop_id, default=None):
    """Return an enum property's raw int32 value, or *default* when missing."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    assert(prop.props[1] == b'enum')
    assert(prop.props[2] == b'')
    assert(prop.props[3] == b'')
    assert(prop.props_type[4] == data_types.INT32)
    return prop.props[4]
def elem_props_get_visibility(elem, elem_prop_id, default=None):
    """Return a 'Visibility' property's float64 value, or *default* when missing."""
    prop = elem_props_find_first(elem, elem_prop_id)
    if prop is None:
        return default
    assert(prop.props[0] == elem_prop_id)
    assert(prop.props[1] == b'Visibility')
    assert(prop.props[2] == b'')
    assert(prop.props_type[4] == data_types.FLOAT64)
    return prop.props[4]
from collections import namedtuple
# Raw FBX transform attributes of one model node, as read from the file;
# consumed by blen_read_object_transform_do() to build the final matrices.
FBXTransformData = namedtuple("FBXTransformData", (
    "loc", "geom_loc",
    "rot", "rot_ofs", "rot_piv", "pre_rot", "pst_rot", "rot_ord", "rot_alt_mat", "geom_rot",
    "sca", "sca_ofs", "sca_piv", "geom_sca",
))
def blen_read_custom_properties(fbx_obj, blen_obj, settings):
    """Copy user-defined FBX properties (flag b'U') onto *blen_obj* as ID custom properties.

    3DS Max's special 'UDP3DSMAX' string property is unpacked into one custom
    property per 'name=value' line; other properties are converted according to
    their declared FBX type. Unsupported types only emit a warning.
    """
    fbx_obj_props = elem_find_first(fbx_obj, b'Properties70')
    if fbx_obj_props:
        for fbx_prop in fbx_obj_props.elems:
            assert(fbx_prop.id == b'P')
            # Only 'user' properties (flag field contains b'U') are imported.
            if b'U' in fbx_prop.props[3]:
                if fbx_prop.props[0] == b'UDP3DSMAX':
                    # Special case for 3DS Max user properties:
                    assert(fbx_prop.props[1] == b'KString')
                    assert(fbx_prop.props_type[4] == data_types.STRING)
                    items = fbx_prop.props[4].decode('utf-8', 'replace')
                    for item in items.split('\r\n'):
                        if item:
                            prop_name, prop_value = item.split('=', 1)
                            prop_name = validate_blend_names(prop_name.strip().encode('utf-8'))
                            blen_obj[prop_name] = prop_value.strip()
                else:
                    prop_name = validate_blend_names(fbx_prop.props[0])
                    prop_type = fbx_prop.props[1]
                    if prop_type in {b'Vector', b'Vector3D', b'Color', b'ColorRGB'}:
                        assert(fbx_prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3)
                        blen_obj[prop_name] = fbx_prop.props[4:7]
                    elif prop_type in {b'Vector4', b'ColorRGBA'}:
                        assert(fbx_prop.props_type[4:8] == bytes((data_types.FLOAT64,)) * 4)
                        blen_obj[prop_name] = fbx_prop.props[4:8]
                    elif prop_type == b'Vector2D':
                        assert(fbx_prop.props_type[4:6] == bytes((data_types.FLOAT64,)) * 2)
                        blen_obj[prop_name] = fbx_prop.props[4:6]
                    elif prop_type in {b'Integer', b'int'}:
                        assert(fbx_prop.props_type[4] == data_types.INT32)
                        blen_obj[prop_name] = fbx_prop.props[4]
                    elif prop_type == b'KString':
                        assert(fbx_prop.props_type[4] == data_types.STRING)
                        blen_obj[prop_name] = fbx_prop.props[4].decode('utf-8', 'replace')
                    elif prop_type in {b'Number', b'double', b'Double'}:
                        assert(fbx_prop.props_type[4] == data_types.FLOAT64)
                        blen_obj[prop_name] = fbx_prop.props[4]
                    elif prop_type in {b'Float', b'float'}:
                        assert(fbx_prop.props_type[4] == data_types.FLOAT32)
                        blen_obj[prop_name] = fbx_prop.props[4]
                    elif prop_type in {b'Bool', b'bool'}:
                        assert(fbx_prop.props_type[4] == data_types.INT32)
                        blen_obj[prop_name] = fbx_prop.props[4] != 0
                    elif prop_type in {b'Enum', b'enum'}:
                        assert(fbx_prop.props_type[4:6] == bytes((data_types.INT32, data_types.STRING)))
                        val = fbx_prop.props[4]
                        # Optionally store the enum's string label instead of its raw index.
                        if settings.use_custom_props_enum_as_string and fbx_prop.props[5]:
                            enum_items = fbx_prop.props[5].decode('utf-8', 'replace').split('~')
                            assert(val >= 0 and val < len(enum_items))
                            blen_obj[prop_name] = enum_items[val]
                        else:
                            blen_obj[prop_name] = val
                    else:
                        print ("WARNING: User property type '%s' is not supported" % prop_type.decode('utf-8', 'replace'))
def blen_read_object_transform_do(transform_data):
    """Compose a node's local transform matrices from an FBXTransformData.

    Returns the triplet (base_mat @ geom_mat, base_mat, geom_mat) of 4x4
    matrices; geometric transforms are kept separate because they are not
    inherited by children (see the notes below).
    """
    # This is a nightmare. FBX SDK uses Maya way to compute the transformation matrix of a node - utterly simple:
    #
    #     WorldTransform = ParentWorldTransform @ T @ Roff @ Rp @ Rpre @ R @ Rpost @ Rp-1 @ Soff @ Sp @ S @ Sp-1
    #
    # Where all those terms are 4 x 4 matrices that contain:
    #     WorldTransform: Transformation matrix of the node in global space.
    #     ParentWorldTransform: Transformation matrix of the parent node in global space.
    #     T: Translation
    #     Roff: Rotation offset
    #     Rp: Rotation pivot
    #     Rpre: Pre-rotation
    #     R: Rotation
    #     Rpost: Post-rotation
    #     Rp-1: Inverse of the rotation pivot
    #     Soff: Scaling offset
    #     Sp: Scaling pivot
    #     S: Scaling
    #     Sp-1: Inverse of the scaling pivot
    #
    # But it was still too simple, and FBX notion of compatibility is... quite specific. So we also have to
    # support 3DSMax way:
    #
    #     WorldTransform = ParentWorldTransform @ T @ R @ S @ OT @ OR @ OS
    #
    # Where all those terms are 4 x 4 matrices that contain:
    #     WorldTransform: Transformation matrix of the node in global space
    #     ParentWorldTransform: Transformation matrix of the parent node in global space
    #     T: Translation
    #     R: Rotation
    #     S: Scaling
    #     OT: Geometric transform translation
    #     OR: Geometric transform rotation
    #     OS: Geometric transform translation
    #
    # Notes:
    #     Geometric transformations ***are not inherited***: ParentWorldTransform does not contain the OT, OR, OS
    #     of WorldTransform's parent node.
    lcl_translation = Matrix.Translation(transform_data.loc)
    geom_loc = Matrix.Translation(transform_data.geom_loc)
    # All FBX rotations are stored in degrees; convert and honour the rotation order.
    to_rot = lambda rot, rot_ord: Euler(convert_deg_to_rad_iter(rot), rot_ord).to_matrix().to_4x4()
    lcl_rot = to_rot(transform_data.rot, transform_data.rot_ord) @ transform_data.rot_alt_mat
    pre_rot = to_rot(transform_data.pre_rot, transform_data.rot_ord)
    pst_rot = to_rot(transform_data.pst_rot, transform_data.rot_ord)
    geom_rot = to_rot(transform_data.geom_rot, transform_data.rot_ord)
    rot_ofs = Matrix.Translation(transform_data.rot_ofs)
    rot_piv = Matrix.Translation(transform_data.rot_piv)
    sca_ofs = Matrix.Translation(transform_data.sca_ofs)
    sca_piv = Matrix.Translation(transform_data.sca_piv)
    lcl_scale = Matrix()
    lcl_scale[0][0], lcl_scale[1][1], lcl_scale[2][2] = transform_data.sca
    geom_scale = Matrix();
    geom_scale[0][0], geom_scale[1][1], geom_scale[2][2] = transform_data.geom_sca
    # Maya/FBX composition order, see the big comment above.
    base_mat = (
        lcl_translation @
        rot_ofs @
        rot_piv @
        pre_rot @
        lcl_rot @
        pst_rot @
        rot_piv.inverted_safe() @
        sca_ofs @
        sca_piv @
        lcl_scale @
        sca_piv.inverted_safe()
    )
    geom_mat = geom_loc @ geom_rot @ geom_scale
    return (base_mat @ geom_mat, base_mat, geom_mat)
def add_vgroup_to_objects(vg_indices, vg_weights, vg_name, objects):
    """Ensure each object has a vertex group *vg_name* and assign the given weights.

    *vg_indices* and *vg_weights* must be parallel sequences; existing weights
    at those vertex indices are replaced. No-op when there are no indices.
    """
    assert(len(vg_indices) == len(vg_weights))
    if not vg_indices:
        return
    for obj in objects:
        vgroup = obj.vertex_groups.get(vg_name)
        if vgroup is None:
            vgroup = obj.vertex_groups.new(name=vg_name)
        for idx, weight in zip(vg_indices, vg_weights):
            vgroup.add((idx,), weight, 'REPLACE')
def blen_read_object_transform_preprocess(fbx_props, fbx_obj, rot_alt_mat, use_prepost_rot):
    """Gather all FBX transform-related properties of a model into an FBXTransformData.

    Missing values fall back to FBX defaults (zero vectors, unit scale).
    Pre/post rotations and the rotation order are only honoured when the FBX
    'RotationActive' flag is set (pre/post additionally need *use_prepost_rot*).
    """
    zero3 = 0.0, 0.0, 0.0
    one3 = 1.0, 1.0, 1.0
    # Animated channels are stored as mutable lists so curves can poke components.
    loc = list(elem_props_get_vector_3d(fbx_props, b'Lcl Translation', zero3))
    rot = list(elem_props_get_vector_3d(fbx_props, b'Lcl Rotation', zero3))
    sca = list(elem_props_get_vector_3d(fbx_props, b'Lcl Scaling', one3))
    geom_loc = list(elem_props_get_vector_3d(fbx_props, b'GeometricTranslation', zero3))
    geom_rot = list(elem_props_get_vector_3d(fbx_props, b'GeometricRotation', zero3))
    geom_sca = list(elem_props_get_vector_3d(fbx_props, b'GeometricScaling', one3))
    rot_ofs = elem_props_get_vector_3d(fbx_props, b'RotationOffset', zero3)
    rot_piv = elem_props_get_vector_3d(fbx_props, b'RotationPivot', zero3)
    sca_ofs = elem_props_get_vector_3d(fbx_props, b'ScalingOffset', zero3)
    sca_piv = elem_props_get_vector_3d(fbx_props, b'ScalingPivot', zero3)
    pre_rot = pst_rot = zero3
    rot_ord = 'XYZ'
    if elem_props_get_bool(fbx_props, b'RotationActive', False):
        if use_prepost_rot:
            pre_rot = elem_props_get_vector_3d(fbx_props, b'PreRotation', zero3)
            pst_rot = elem_props_get_vector_3d(fbx_props, b'PostRotation', zero3)
        rot_ord = {
            0: 'XYZ',
            1: 'XZY',
            2: 'YZX',
            3: 'YXZ',
            4: 'ZXY',
            5: 'ZYX',
            6: 'XYZ',
        }.get(elem_props_get_enum(fbx_props, b'RotationOrder', 0))
    return FBXTransformData(loc, geom_loc,
                            rot, rot_ofs, rot_piv, pre_rot, pst_rot, rot_ord, rot_alt_mat, geom_rot,
                            sca, sca_ofs, sca_piv, geom_sca)
def blen_read_animations_curves_iter(fbx_curves, blen_start_offset, fbx_start_offset, fps):
    """Iterate over the merged key times of all *fbx_curves*, yielding
    (blender_frame, [(value, fbx_curve), ...]) tuples.

    Values between keys are linearly interpolated; once a curve runs out of
    keys it keeps yielding its last value. FBX ktimes are mapped to Blender
    frames using *fps* and the two start offsets.
    """
    from .fbx_utils import FBX_KTIME
    timefac = fps / FBX_KTIME
    # Per-curve state: [current key index (or -1 when exhausted), times, values, curve].
    curves = tuple([0,
                    elem_prop_first(elem_find_first(c[2], b'KeyTime')),
                    elem_prop_first(elem_find_first(c[2], b'KeyValueFloat')),
                    c]
                   for c in fbx_curves)
    # Union of all key times across all curves, in chronological order.
    allkeys = sorted({item for sublist in curves for item in sublist[1]})
    for curr_fbxktime in allkeys:
        curr_values = []
        for item in curves:
            idx, times, values, fbx_curve = item
            if times[idx] < curr_fbxktime:
                if idx >= 0:
                    idx += 1
                    if idx >= len(times):
                        # We have reached our last element for this curve, stay on it from now on...
                        idx = -1
                    item[0] = idx
            if times[idx] >= curr_fbxktime:
                if idx == 0:
                    curr_values.append((values[idx], fbx_curve))
                else:
                    # Interpolate between this key and the previous one.
                    ifac = (curr_fbxktime - times[idx - 1]) / (times[idx] - times[idx - 1])
                    curr_values.append(((values[idx] - values[idx - 1]) * ifac + values[idx - 1], fbx_curve))
        curr_blenkframe = (curr_fbxktime - fbx_start_offset) * timefac + blen_start_offset
        yield (curr_blenkframe, curr_values)
def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset):
    """Fill *action* with F-curves baked from the FBX curve nodes in *cnodes*.

    *item* may be a Material (diffuse color), ShapeKey (value), Camera (lens),
    or an import helper node for an Object/PoseBone (loc/rot/scale). One
    keyframe is inserted per merged FBX key time, with LINEAR interpolation.
    """
    from bpy.types import Object, PoseBone, ShapeKey, Material, Camera
    from itertools import chain
    fbx_curves = []
    for curves, fbxprop in cnodes.values():
        for (fbx_acdata, _blen_data), channel in curves.values():
            fbx_curves.append((fbxprop, channel, fbx_acdata))
    # Leave if no curves are attached (if a blender curve is attached to scale but without keys it defaults to 0).
    if len(fbx_curves) == 0:
        return
    # Build the (data_path, channel_count, group_name) specs per item kind.
    blen_curves = []
    props = []
    if isinstance(item, Material):
        grpname = item.name
        props = [("diffuse_color", 3, grpname or "Diffuse Color")]
    elif isinstance(item, ShapeKey):
        props = [(item.path_from_id("value"), 1, "Key")]
    elif isinstance(item, Camera):
        props = [(item.path_from_id("lens"), 1, "Camera")]
    else:  # Object or PoseBone:
        if item.is_bone:
            bl_obj = item.bl_obj.pose.bones[item.bl_bone]
        else:
            bl_obj = item.bl_obj
        # We want to create actions for objects, but for bones we 'reuse' armatures' actions!
        grpname = item.bl_obj.name
        props = [(bl_obj.path_from_id("location"), 3, grpname or "Location"),
                 None,
                 (bl_obj.path_from_id("scale"), 3, grpname or "Scale")]
        rot_mode = bl_obj.rotation_mode
        if rot_mode == 'QUATERNION':
            props[1] = (bl_obj.path_from_id("rotation_quaternion"), 4, grpname or "Quaternion Rotation")
        elif rot_mode == 'AXIS_ANGLE':
            props[1] = (bl_obj.path_from_id("rotation_axis_angle"), 4, grpname or "Axis Angle Rotation")
        else:
            props[1] = (bl_obj.path_from_id("rotation_euler"), 3, grpname or "Euler Rotation")
    blen_curves = [action.fcurves.new(prop, index=channel, action_group=grpname)
                   for prop, nbr_channels, grpname in props for channel in range(nbr_channels)]
    if isinstance(item, Material):
        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            value = [0,0,0]
            for v, (fbxprop, channel, _fbx_acdata) in values:
                assert(fbxprop == b'DiffuseColor')
                assert(channel in {0, 1, 2})
                value[channel] = v
            for fc, v in zip(blen_curves, value):
                fc.keyframe_points.insert(frame, v, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'
    elif isinstance(item, ShapeKey):
        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            value = 0.0
            for v, (fbxprop, channel, _fbx_acdata) in values:
                assert(fbxprop == b'DeformPercent')
                assert(channel == 0)
                # FBX stores shape key influence as a percentage.
                value = v / 100.0
            for fc, v in zip(blen_curves, (value,)):
                fc.keyframe_points.insert(frame, v, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'
    elif isinstance(item, Camera):
        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            value = 0.0
            for v, (fbxprop, channel, _fbx_acdata) in values:
                assert(fbxprop == b'FocalLength')
                assert(channel == 0)
                value = v
            for fc, v in zip(blen_curves, (value,)):
                fc.keyframe_points.insert(frame, v, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'
    else:  # Object or PoseBone:
        if item.is_bone:
            bl_obj = item.bl_obj.pose.bones[item.bl_bone]
        else:
            bl_obj = item.bl_obj
        transform_data = item.fbx_transform_data
        rot_eul_prev = bl_obj.rotation_euler.copy()
        rot_quat_prev = bl_obj.rotation_quaternion.copy()
        restmat_inv = item.get_bind_matrix().inverted_safe() if item.is_bone else None
        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
            # Poke the animated components into transform_data, then recompose the matrix.
            for v, (fbxprop, channel, _fbx_acdata) in values:
                if fbxprop == b'Lcl Translation':
                    transform_data.loc[channel] = v
                elif fbxprop == b'Lcl Rotation':
                    transform_data.rot[channel] = v
                elif fbxprop == b'Lcl Scaling':
                    transform_data.sca[channel] = v
            mat, _, _ = blen_read_object_transform_do(transform_data)
            if item.anim_compensation_matrix:
                mat = mat @ item.anim_compensation_matrix
            if item.pre_matrix:
                mat = item.pre_matrix @ mat
            if item.post_matrix:
                mat = mat @ item.post_matrix
            # And now, remove that rest pose matrix from current mat (also in parent space).
            if restmat_inv:
                mat = restmat_inv @ mat
            # Now we have a virtual matrix of transform from AnimCurves, we can insert keyframes!
            loc, rot, sca = mat.decompose()
            if rot_mode == 'QUATERNION':
                # Keep quaternion continuity between consecutive keys.
                if rot_quat_prev.dot(rot) < 0.0:
                    rot = -rot
                rot_quat_prev = rot
            elif rot_mode == 'AXIS_ANGLE':
                vec, ang = rot.to_axis_angle()
                rot = ang, vec.x, vec.y, vec.z
            else:  # Euler
                rot = rot.to_euler(rot_mode, rot_eul_prev)
                rot_eul_prev = rot
            for fc, value in zip(blen_curves, chain(loc, rot, sca)):
                fc.keyframe_points.insert(frame, value, options={'NEEDED', 'FAST'}).interpolation = 'LINEAR'
    # Since we inserted our keyframes in 'FAST' mode, we have to update the fcurves now.
    for fc in blen_curves:
        fc.update()
def blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, anim_offset):
    """Create one Blender action per (stack, layer, datablock) combination and bake it.

    Actions are marked with a fake user; the first action created for a
    datablock is also assigned as its active action.
    """
    from bpy.types import ShapeKey, Material, Camera
    actions = {}
    for as_uuid, ((fbx_asdata, _blen_data), alayers) in stacks.items():
        stack_name = elem_name_ensure_class(fbx_asdata, b'AnimStack')
        for al_uuid, ((fbx_aldata, _blen_data), items) in alayers.items():
            layer_name = elem_name_ensure_class(fbx_aldata, b'AnimLayer')
            for item, cnodes in items.items():
                # Resolve the ID datablock that will own the action.
                if isinstance(item, Material):
                    id_data = item
                elif isinstance(item, ShapeKey):
                    id_data = item.id_data
                elif isinstance(item, Camera):
                    id_data = item
                else:
                    id_data = item.bl_obj
                    # XXX Ignore rigged mesh animations - those are a nightmare to handle, see note about it in
                    #     FbxImportHelperNode class definition.
                    if id_data and id_data.type == 'MESH' and id_data.parent and id_data.parent.type == 'ARMATURE':
                        continue
                if id_data is None:
                    continue
                # Create new action if needed (should always be needed, except for keyblocks from shapekeys cases).
                key = (as_uuid, al_uuid, id_data)
                action = actions.get(key)
                if action is None:
                    action_name = "|".join((id_data.name, stack_name, layer_name))
                    actions[key] = action = bpy.data.actions.new(action_name)
                    action.use_fake_user = True
                # If none yet assigned, assign this action to id_data.
                if not id_data.animation_data:
                    id_data.animation_data_create()
                if not id_data.animation_data.action:
                    id_data.animation_data.action = action
                # And actually populate the action!
                blen_read_animations_action_item(action, item, cnodes, scene.render.fps, anim_offset)
# ----
# Mesh
def blen_read_geom_layerinfo(fbx_layer):
    """Return (name, mapping_type, reference_type) describing an FBX geometry layer."""
    layer_name = validate_blend_names(elem_find_first_string_as_bytes(fbx_layer, b'Name'))
    mapping_type = elem_find_first_string_as_bytes(fbx_layer, b'MappingInformationType')
    reference_type = elem_find_first_string_as_bytes(fbx_layer, b'ReferenceInformationType')
    return layer_name, mapping_type, reference_type
def blen_read_geom_array_setattr(generator, blen_data, blen_attr, fbx_data, stride, item_size, descr, xform):
    """Generic FBX-layer-to-Blender-data setter.

    *generator* yields (blen_idx, fbx_idx) pairs; for each pair, item_size
    values starting at fbx_idx are written either into blen_data[blen_idx]
    (list target) or onto its *blen_attr* attribute, optionally run through
    *xform* first. Negative fbx indices and out-of-range blen indices are
    skipped (the latter with a single error message).
    """
    max_idx = len(blen_data) - 1
    print_error = True
    def check_skip(blen_idx, fbx_idx):
        nonlocal print_error
        if fbx_idx < 0:  # Negative values mean 'skip'.
            return True
        if blen_idx > max_idx:
            if print_error:
                print("ERROR: too much data in this layer, compared to elements in mesh, skipping!")
                print_error = False
            return True
        return False
    # Pick one specialized writer up front instead of re-testing the flags per item.
    if xform is not None:
        if isinstance(blen_data, list):
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = xform(fbx_data[fbx_idx])
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = xform(fbx_data[fbx_idx:fbx_idx + item_size])
        else:
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, xform(fbx_data[fbx_idx]))
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, xform(fbx_data[fbx_idx:fbx_idx + item_size]))
    else:
        if isinstance(blen_data, list):
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = fbx_data[fbx_idx]
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    blen_data[blen_idx] = fbx_data[fbx_idx:fbx_idx + item_size]
        else:
            if item_size == 1:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, fbx_data[fbx_idx])
            else:
                def _process(blend_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx):
                    setattr(blen_data[blen_idx], blen_attr, fbx_data[fbx_idx:fbx_idx + item_size])
    for blen_idx, fbx_idx in generator:
        if check_skip(blen_idx, fbx_idx):
            continue
        _process(blen_data, blen_attr, fbx_data, xform, item_size, blen_idx, fbx_idx)
# generic generators.
def blen_read_geom_array_gen_allsame(data_len):
    """Yield (blen_idx, 0) for every element: all items share the first FBX value."""
    return ((blen_idx, 0) for blen_idx in range(data_len))
def blen_read_geom_array_gen_direct(fbx_data, stride):
    """Yield (blen_idx, fbx_offset) for directly-mapped data, one item per stride."""
    item_count = len(fbx_data) // stride
    return zip(range(item_count), range(0, item_count * stride, stride))
def blen_read_geom_array_gen_indextodirect(fbx_layer_index, stride):
    """Yield (blen_idx, fbx_offset) pairs resolving an index layer into data offsets."""
    for blen_idx, fbx_idx in enumerate(fbx_layer_index):
        yield blen_idx, fbx_idx * stride
def blen_read_geom_array_gen_direct_looptovert(mesh, fbx_data, stride):
    """Spread per-vertex FBX data onto mesh loops.

    Yields (loop_index, fbx_offset) for every loop whose vertex has data;
    out-of-range vertex indices are silently skipped.
    """
    max_vert = len(fbx_data) // stride
    mesh_loops = mesh.loops
    for poly in mesh.polygons:
        for loop_idx in poly.loop_indices:
            vert_idx = mesh_loops[loop_idx].vertex_index
            if vert_idx < max_vert:
                yield loop_idx, vert_idx * stride
# generic error printers.
def blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet=False):
    """Warn about an unsupported FBX mapping type, unless quiet."""
    if quiet:
        return
    print("warning layer %r mapping type unsupported: %r" % (descr, fbx_layer_mapping))
def blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet=False):
    """Warn about an unsupported FBX reference type, unless quiet."""
    if quiet:
        return
    print("warning layer %r ref type unsupported: %r" % (descr, fbx_layer_ref))
def blen_read_geom_array_mapped_vert(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Write per-vertex-mapped FBX layer data into blen_data; True on success."""
    if fbx_layer_mapping == b'ByVertice':
        if fbx_layer_ref == b'Direct':
            assert(fbx_layer_index is None)
            gen = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    if fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            assert(fbx_layer_index is None)
            gen = blen_read_geom_array_gen_allsame(len(blen_data))
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)
    return False
def blen_read_geom_array_mapped_edge(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Write per-edge-mapped FBX layer data into blen_data; True on success."""
    if fbx_layer_mapping == b'ByEdge':
        if fbx_layer_ref == b'Direct':
            gen = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    if fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            assert(fbx_layer_index is None)
            gen = blen_read_geom_array_gen_allsame(len(blen_data))
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)
    return False
def blen_read_geom_array_mapped_polygon(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Write per-polygon-mapped FBX layer data into blen_data; True on success."""
    if fbx_layer_mapping == b'ByPolygon':
        if fbx_layer_ref == b'IndexToDirect':
            # XXX Looks like we often get no fbx_layer_index in this case, shall not happen but happens...
            #     We fallback to 'Direct' mapping in this case.
            if fbx_layer_index is None:
                gen = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            else:
                gen = blen_read_geom_array_gen_indextodirect(fbx_layer_index, stride)
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        if fbx_layer_ref == b'Direct':
            gen = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    if fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            assert(fbx_layer_index is None)
            gen = blen_read_geom_array_gen_allsame(len(blen_data))
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)
    return False
def blen_read_geom_array_mapped_polyloop(
        mesh, blen_data, blen_attr,
        fbx_layer_data, fbx_layer_index,
        fbx_layer_mapping, fbx_layer_ref,
        stride, item_size, descr,
        xform=None, quiet=False,
        ):
    """Write per-loop-mapped FBX layer data into blen_data; True on success.

    Supports 'ByPolygonVertex' (indexed or direct), per-vertex data spread
    onto loops ('ByVertice'/'Direct'), and 'AllSame'.
    """
    if fbx_layer_mapping == b'ByPolygonVertex':
        if fbx_layer_ref == b'IndexToDirect':
            # XXX Looks like we often get no fbx_layer_index in this case, shall not happen but happens...
            #     We fallback to 'Direct' mapping in this case.
            if fbx_layer_index is None:
                gen = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            else:
                gen = blen_read_geom_array_gen_indextodirect(fbx_layer_index, stride)
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        if fbx_layer_ref == b'Direct':
            gen = blen_read_geom_array_gen_direct(fbx_layer_data, stride)
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    if fbx_layer_mapping == b'ByVertice':
        if fbx_layer_ref == b'Direct':
            assert(fbx_layer_index is None)
            gen = blen_read_geom_array_gen_direct_looptovert(mesh, fbx_layer_data, stride)
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    if fbx_layer_mapping == b'AllSame':
        if fbx_layer_ref == b'IndexToDirect':
            assert(fbx_layer_index is None)
            gen = blen_read_geom_array_gen_allsame(len(blen_data))
            blen_read_geom_array_setattr(gen, blen_data, blen_attr, fbx_layer_data,
                                         stride, item_size, descr, xform)
            return True
        blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet)
        return False
    blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet)
    return False
def blen_read_geom_layer_material(fbx_obj, mesh):
    """Read the LayerElementMaterial layer into per-polygon material indices."""
    fbx_layer = elem_find_first(fbx_obj, b'LayerElementMaterial')
    if fbx_layer is None:
        return
    (fbx_layer_name,
     fbx_layer_mapping,
     fbx_layer_ref,
     ) = blen_read_geom_layerinfo(fbx_layer)
    layer_id = b'Materials'
    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
    # One int per polygon (stride 1, item_size 1), no separate index layer.
    blen_data = mesh.polygons
    blen_read_geom_array_mapped_polygon(
        mesh, blen_data, "material_index",
        fbx_layer_data, None,
        fbx_layer_mapping, fbx_layer_ref,
        1, 1, layer_id,
    )
def blen_read_geom_layer_uv(fbx_obj, mesh):
    """Read all LayerElementUV layers into new mesh UV layers (2 floats per loop)."""
    for layer_id in (b'LayerElementUV',):
        for fbx_layer in elem_find_iter(fbx_obj, layer_id):
            # all should be valid
            (fbx_layer_name,
             fbx_layer_mapping,
             fbx_layer_ref,
             ) = blen_read_geom_layerinfo(fbx_layer)
            fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, b'UV'))
            fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'UVIndex'))
            # Always init our new layers with (0, 0) UVs.
            uv_lay = mesh.uv_layers.new(name=fbx_layer_name, do_init=False)
            if uv_lay is None:
                # Blender caps the number of UV layers; 'new()' returns None past the limit.
                print("Failed to add {%r %r} UVLayer to %r (probably too many of them?)"
                      "" % (layer_id, fbx_layer_name, mesh.name))
                continue
            blen_data = uv_lay.data
            # some valid files omit this data
            if fbx_layer_data is None:
                print("%r %r missing data" % (layer_id, fbx_layer_name))
                continue
            blen_read_geom_array_mapped_polyloop(
                mesh, blen_data, "uv",
                fbx_layer_data, fbx_layer_index,
                fbx_layer_mapping, fbx_layer_ref,
                2, 2, layer_id,
            )
def blen_read_geom_layer_color(fbx_obj, mesh):
    """Read all LayerElementColor layers into new vertex color layers (RGBA per loop)."""
    # almost same as UV's
    for layer_id in (b'LayerElementColor',):
        for fbx_layer in elem_find_iter(fbx_obj, layer_id):
            (fbx_layer_name,
             fbx_layer_mapping,
             fbx_layer_ref,
             ) = blen_read_geom_layerinfo(fbx_layer)
            fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, b'Colors'))
            fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'ColorIndex'))
            color_lay = mesh.vertex_colors.new(name=fbx_layer_name, do_init=False)
            # Fix: 'new()' returns None when the layer cannot be added (layer count
            # limit) — mirror the UV code instead of crashing on '.data' below.
            if color_lay is None:
                print("Failed to add {%r %r} vertex color layer to %r (probably too many of them?)"
                      "" % (layer_id, fbx_layer_name, mesh.name))
                continue
            blen_data = color_lay.data
            # some valid files omit this data
            if fbx_layer_data is None:
                print("%r %r missing data" % (layer_id, fbx_layer_name))
                continue
            blen_read_geom_array_mapped_polyloop(
                mesh, blen_data, "color",
                fbx_layer_data, fbx_layer_index,
                fbx_layer_mapping, fbx_layer_ref,
                4, 4, layer_id,
            )
def blen_read_geom_layer_smooth(fbx_obj, mesh):
    """Read the LayerElementSmoothing layer.

    Returns True only when per-polygon smooth flags were applied. The per-edge
    case stores smoothing as sharp edges + auto-smooth and deliberately returns
    False, so the caller still marks all polygons smooth.
    """
    fbx_layer = elem_find_first(fbx_obj, b'LayerElementSmoothing')
    if fbx_layer is None:
        return False
    (fbx_layer_name,
     fbx_layer_mapping,
     fbx_layer_ref,
     ) = blen_read_geom_layerinfo(fbx_layer)
    layer_id = b'Smoothing'
    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
    # Some files have the layer element without any actual data.
    if fbx_layer_data is None:
        return False
    if fbx_layer_mapping == b'ByEdge':
        # some models have bad edge data, we can't use this info...
        if not mesh.edges:
            print("warning skipping sharp edges data, no valid edges...")
            return False
        blen_data = mesh.edges
        blen_read_geom_array_mapped_edge(
            mesh, blen_data, "use_edge_sharp",
            fbx_layer_data, None,
            fbx_layer_mapping, fbx_layer_ref,
            1, 1, layer_id,
            xform=lambda s: not s,  # FBX stores "smooth", Blender stores "sharp".
        )
        # We only set sharp edges here, not face smoothing itself...
        mesh.use_auto_smooth = True
        return False
    elif fbx_layer_mapping == b'ByPolygon':
        blen_data = mesh.polygons
        return blen_read_geom_array_mapped_polygon(
            mesh, blen_data, "use_smooth",
            fbx_layer_data, None,
            fbx_layer_mapping, fbx_layer_ref,
            1, 1, layer_id,
            xform=lambda s: (s != 0),  # smoothgroup bitflags, treat as booleans.
        )
    else:
        print("warning layer %r mapping type unsupported: %r" % (fbx_layer.id, fbx_layer_mapping))
        return False
def blen_read_geom_layer_edge_crease(fbx_obj, mesh):
    """Read the LayerElementEdgeCrease layer into mesh edge crease values.

    Returns True when crease data was applied, False otherwise.
    """
    from math import sqrt
    fbx_layer = elem_find_first(fbx_obj, b'LayerElementEdgeCrease')
    if fbx_layer is None:
        return False
    (fbx_layer_name,
     fbx_layer_mapping,
     fbx_layer_ref,
     ) = blen_read_geom_layerinfo(fbx_layer)
    # Only per-edge mapping makes sense for creases; anything else is silently
    # ignored. (Fix: the original re-tested the mapping further down, leaving
    # an unreachable 'else' branch and a duplicated 'no edges' check.)
    if fbx_layer_mapping != b'ByEdge':
        return False
    layer_id = b'EdgeCrease'
    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
    # Some files have the layer element without any actual data.
    if fbx_layer_data is None:
        return False
    # some models have bad edge data, we can't use this info...
    if not mesh.edges:
        print("warning skipping edge crease data, no valid edges...")
        return False
    blen_data = mesh.edges
    return blen_read_geom_array_mapped_edge(
        mesh, blen_data, "crease",
        fbx_layer_data, None,
        fbx_layer_mapping, fbx_layer_ref,
        1, 1, layer_id,
        # Blender squares crease values before sending them to OpenSubdiv, when
        # other softwares don't, so we need to compensate that to get similar
        # results through FBX...
        xform=sqrt,
    )
def blen_read_geom_layer_normal(fbx_obj, mesh, xform=None):
    """Read the LayerElementNormal layer as (temporary) custom split normals.

    Tries loop, then polygon, then vertex mapping; polygon/vertex data is
    spread onto loops. Returns True if normals were written to mesh.loops.
    """
    fbx_layer = elem_find_first(fbx_obj, b'LayerElementNormal')
    if fbx_layer is None:
        return False
    (fbx_layer_name,
     fbx_layer_mapping,
     fbx_layer_ref,
     ) = blen_read_geom_layerinfo(fbx_layer)
    layer_id = b'Normals'
    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
    fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'NormalsIndex'))
    # try loops, then vertices.
    tries = ((mesh.loops, "Loops", False, blen_read_geom_array_mapped_polyloop),
             (mesh.polygons, "Polygons", True, blen_read_geom_array_mapped_polygon),
             (mesh.vertices, "Vertices", True, blen_read_geom_array_mapped_vert))
    for blen_data, blen_data_type, is_fake, func in tries:
        # 'Fake' targets (polygons/vertices) are read into a temp list, then
        # copied onto loops below, since only loop normals are actually stored.
        bdata = [None] * len(blen_data) if is_fake else blen_data
        if func(mesh, bdata, "normal",
                fbx_layer_data, fbx_layer_index, fbx_layer_mapping, fbx_layer_ref, 3, 3, layer_id, xform, True):
            if blen_data_type == "Polygons":
                for pidx, p in enumerate(mesh.polygons):
                    for lidx in range(p.loop_start, p.loop_start + p.loop_total):
                        mesh.loops[lidx].normal[:] = bdata[pidx]
            elif blen_data_type == "Vertices":
                # We have to copy vnors to lnors! Far from elegant, but simple.
                for l in mesh.loops:
                    l.normal[:] = bdata[l.vertex_index]
            return True
    blen_read_geom_array_error_mapping("normal", fbx_layer_mapping)
    blen_read_geom_array_error_ref("normal", fbx_layer_ref)
    return False
def blen_read_geom(fbx_tmpl, fbx_obj, settings):
    """Build a Blender Mesh datablock from an FBX Geometry element.

    Reads vertices, polygons and edges, then all supported layer elements
    (materials, UVs, vertex colors, smoothing, edge creases, normals),
    applying the global space transform when 'bake_space_transform' is set.
    """
    from itertools import chain
    import array
    # Vertices are in object space, but we are post-multiplying all transforms with the inverse of the
    # global matrix, so we need to apply the global matrix to the vertices to get the correct result.
    geom_mat_co = settings.global_matrix if settings.bake_space_transform else None
    # We need to apply the inverse transpose of the global matrix when transforming normals.
    geom_mat_no = Matrix(settings.global_matrix_inv_transposed) if settings.bake_space_transform else None
    if geom_mat_no is not None:
        # Remove translation & scaling!
        geom_mat_no.translation = Vector()
        geom_mat_no.normalize()
    # TODO, use 'fbx_tmpl'
    elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'Geometry')
    fbx_verts = elem_prop_first(elem_find_first(fbx_obj, b'Vertices'))
    fbx_polys = elem_prop_first(elem_find_first(fbx_obj, b'PolygonVertexIndex'))
    fbx_edges = elem_prop_first(elem_find_first(fbx_obj, b'Edges'))
    if geom_mat_co is not None:
        def _vcos_transformed_gen(raw_cos, m=None):
            # Note: we could most likely get much better performances with numpy, but will leave this as TODO for now.
            return chain(*(m @ Vector(v) for v in zip(*(iter(raw_cos),) * 3)))
        fbx_verts = array.array(fbx_verts.typecode, _vcos_transformed_gen(fbx_verts, geom_mat_co))
    if fbx_verts is None:
        fbx_verts = ()
    if fbx_polys is None:
        fbx_polys = ()
    mesh = bpy.data.meshes.new(name=elem_name_utf8)
    mesh.vertices.add(len(fbx_verts) // 3)
    mesh.vertices.foreach_set("co", fbx_verts)
    if fbx_polys:
        mesh.loops.add(len(fbx_polys))
        poly_loop_starts = []
        poly_loop_totals = []
        poly_loop_prev = 0
        for i, l in enumerate(mesh.loops):
            index = fbx_polys[i]
            if index < 0:
                # A negative index marks the last loop of a polygon; the real
                # vertex index is its bitwise complement.
                poly_loop_starts.append(poly_loop_prev)
                poly_loop_totals.append((i - poly_loop_prev) + 1)
                poly_loop_prev = i + 1
                index ^= -1
            l.vertex_index = index
        mesh.polygons.add(len(poly_loop_starts))
        mesh.polygons.foreach_set("loop_start", poly_loop_starts)
        mesh.polygons.foreach_set("loop_total", poly_loop_totals)
        blen_read_geom_layer_material(fbx_obj, mesh)
        blen_read_geom_layer_uv(fbx_obj, mesh)
        blen_read_geom_layer_color(fbx_obj, mesh)
        if fbx_edges:
            # edges in fact index the polygons (NOT the vertices)
            import array
            tot_edges = len(fbx_edges)
            edges_conv = array.array('i', [0]) * (tot_edges * 2)
            edge_index = 0
            for i in fbx_edges:
                e_a = fbx_polys[i]
                if e_a >= 0:
                    e_b = fbx_polys[i + 1]
                    if e_b < 0:
                        e_b ^= -1
                else:
                    # Last index of polygon, wrap back to the start.
                    # ideally we wouldn't have to search back,
                    # but it should only be 2-3 iterations.
                    j = i - 1
                    while j >= 0 and fbx_polys[j] >= 0:
                        j -= 1
                    e_a ^= -1
                    e_b = fbx_polys[j + 1]
                edges_conv[edge_index] = e_a
                edges_conv[edge_index + 1] = e_b
                edge_index += 2
            mesh.edges.add(tot_edges)
            mesh.edges.foreach_set("vertices", edges_conv)
    # Must be read after the edge/face data above.
    ok_smooth = blen_read_geom_layer_smooth(fbx_obj, mesh)
    ok_crease = blen_read_geom_layer_edge_crease(fbx_obj, mesh)
    ok_normals = False
    if settings.use_custom_normals:
        # Normals are first written into loop.normal as temporary storage;
        # validate() may alter the mesh, so custom lnors are only set after it.
        mesh.create_normals_split()
        if geom_mat_no is None:
            ok_normals = blen_read_geom_layer_normal(fbx_obj, mesh)
        else:
            def nortrans(v):
                return geom_mat_no @ Vector(v)
            ok_normals = blen_read_geom_layer_normal(fbx_obj, mesh, nortrans)
    mesh.validate(clean_customdata=False)  # Keep temp customdata (split normals) intact.
    if ok_normals:
        clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
        mesh.loops.foreach_get("normal", clnors)
        if not ok_smooth:
            mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))
            ok_smooth = True
        mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
        mesh.use_auto_smooth = True
    else:
        mesh.calc_normals()
    if settings.use_custom_normals:
        mesh.free_normals_split()
    if not ok_smooth:
        # No smoothing data at all: default everything to smooth.
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))
    if ok_crease:
        mesh.use_customdata_edge_crease = True
    if settings.use_custom_props:
        blen_read_custom_properties(fbx_obj, mesh, settings)
    return mesh
def blen_read_shape(fbx_tmpl, fbx_sdata, fbx_bcdata, meshes, scene):
    """Create shape keys on all given meshes from an FBX shape deformer channel.

    `meshes` is an iterable of (mesh, helper-nodes) pairs; returns the list of
    created key blocks (one per mesh).
    """
    elem_name_utf8 = elem_name_ensure_class(fbx_sdata, b'Geometry')
    indices = elem_prop_first(elem_find_first(fbx_sdata, b'Indexes'), default=())
    # Delta vertex coordinates, grouped by 3.
    dvcos = tuple(co for co in zip(*[iter(elem_prop_first(elem_find_first(fbx_sdata, b'Vertices'), default=()))] * 3))
    # FBX stores weights as percents; Blender uses the [0.0, 1.0] range.
    weight = elem_prop_first(elem_find_first(fbx_bcdata, b'DeformPercent'), default=100.0) / 100.0
    vgweights = tuple(vgw / 100.0 for vgw in elem_prop_first(elem_find_first(fbx_bcdata, b'FullWeights'), default=()))
    nbr_indices = len(indices)
    # Some invalid exporters write a single weight meant to apply to all vertices.
    if len(vgweights) == 1 and nbr_indices > 1:
        vgweights = (vgweights[0],) * nbr_indices
    assert(len(vgweights) == nbr_indices == len(dvcos))
    # Only create a vertex group when not all weights are 1.0.
    create_vg = bool(set(vgweights) - {1.0})
    keyblocks = []
    for me, objects in meshes:
        vcos = tuple((idx, me.vertices[idx].co + Vector(dvco)) for idx, dvco in zip(indices, dvcos))
        objects = list({node.bl_obj for node in objects})
        assert(objects)
        if me.shape_keys is None:
            # Ensure a Basis key exists before adding the actual shape key.
            objects[0].shape_key_add(name="Basis", from_mix=False)
        kb = objects[0].shape_key_add(name=elem_name_utf8, from_mix=False)
        me.shape_keys.use_relative = True  # Should already be set as such.
        for idx, co in vcos:
            kb.data[idx].co[:] = co
        kb.value = weight
        if create_vg:
            # Fix: the call result was bound to an unused (misspelled) local
            # 'vgoups'; it is made for its side effect only.
            add_vgroup_to_objects(indices, vgweights, kb.name, objects)
            kb.vertex_group = kb.name
        keyblocks.append(kb)
    return keyblocks
def blen_read_material(fbx_tmpl, fbx_obj, settings):
    """Create a Blender material from an FBX Material element.

    Maps the FBX (Phong-style) properties onto a Principled BSDF through
    bpy_extras.node_shader_utils, and registers the wrapper in
    settings.nodal_material_wrap_map for later texture hookup.
    """
    from bpy_extras import node_shader_utils
    from math import sqrt
    elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'Material')
    nodal_material_wrap_map = settings.nodal_material_wrap_map
    ma = bpy.data.materials.new(name=elem_name_utf8)
    const_color_white = 1.0, 1.0, 1.0
    const_color_black = 0.0, 0.0, 0.0
    fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
                 elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
    fbx_props_no_template = (fbx_props[0], fbx_elem_nil)
    ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=False, use_nodes=True)
    ma_wrap.base_color = elem_props_get_color_rgb(fbx_props, b'DiffuseColor', const_color_white)
    # FBX SpecularFactor defaults to 0.25 while Principled expects ~0.5, hence * 2.0.
    ma_wrap.specular = elem_props_get_number(fbx_props, b'SpecularFactor', 0.25) * 2.0
    # Rough ad-hoc mapping of Phong-like shininess to PBR roughness.
    fbx_shininess = elem_props_get_number(fbx_props, b'Shininess', 20.0)
    ma_wrap.roughness = 1.0 - (sqrt(fbx_shininess) / 10.0)
    # However, there are some cases (from 3DSMax, see T65065), where we do have TransparencyFactor only defined
    # in the template to 0.0, and then materials defining TransparentColor to pure white (1.0, 1.0, 1.0),
    # and setting alpha value in Opacity... try to cope with that too. :((((
    alpha = 1.0 - elem_props_get_number(fbx_props, b'TransparencyFactor', 0.0)
    if (alpha == 1.0 or alpha == 0.0):
        alpha = elem_props_get_number(fbx_props_no_template, b'Opacity', None)
        if alpha is None:
            alpha = 1.0 - elem_props_get_color_rgb(fbx_props, b'TransparentColor', const_color_black)[0]
    ma_wrap.alpha = alpha
    ma_wrap.metallic = elem_props_get_number(fbx_props, b'ReflectionFactor', 0.0)
    # We have no metallic (a.k.a. reflection) color...
    # elem_props_get_color_rgb(fbx_props, b'ReflectionColor', const_color_white)
    ma_wrap.normalmap_strength = elem_props_get_number(fbx_props, b'BumpFactor', 1.0)
    # For emission color we can take into account the factor, but only for default values, not in case of texture.
    emission_factor = elem_props_get_number(fbx_props, b'EmissiveFactor', 1.0)
    ma_wrap.emission_color = [c * emission_factor
                              for c in elem_props_get_color_rgb(fbx_props, b'EmissiveColor', const_color_black)]
    nodal_material_wrap_map[ma] = ma_wrap
    if settings.use_custom_props:
        blen_read_custom_properties(fbx_obj, ma, settings)
    return ma
# -------
# Image & Texture
def blen_read_texture_image(fbx_tmpl, fbx_obj, basedir, settings):
    """Load (or reuse from cache) the image referenced by an FBX Texture/Video element.

    Resolves relative/absolute file paths, falls back to a placeholder when the
    file is missing, and unpacks embedded ('Content') data when present.
    """
    import os
    from bpy_extras import image_utils

    def pack_data_from_content(image, fbx_obj):
        # Embed raw 'Content' bytes (if any) directly into the image datablock.
        data = elem_find_first_bytes(fbx_obj, b'Content')
        if (data):
            data_len = len(data)
            if (data_len):
                image.pack(data=data, data_len=data_len)

    elem_name_utf8 = elem_name_ensure_classes(fbx_obj, {b'Texture', b'Video'})
    image_cache = settings.image_cache
    # Yet another beautiful logic demonstration by Master FBX:
    # * RelativeFilename in both Video and Texture nodes.
    # * FileName in texture nodes.
    # * Filename in video nodes.
    # Aaaaaaaarrrrrrrrgggggggggggg!!!!!!!!!!!!!!
    filepath = elem_find_first_string(fbx_obj, b'RelativeFilename')
    if filepath:
        # Make sure we do handle a relative path, and not an absolute one (see D5143).
        # NOTE(review): os.path.altsep is None on posix, making the second lstrip
        # strip whitespace instead — looks unintended but harmless; confirm.
        filepath = filepath.lstrip(os.path.sep).lstrip(os.path.altsep)
        filepath = os.path.join(basedir, filepath)
    else:
        filepath = elem_find_first_string(fbx_obj, b'FileName')
    if not filepath:
        filepath = elem_find_first_string(fbx_obj, b'Filename')
    if not filepath:
        print("Error, could not find any file path in ", fbx_obj)
        print(" Falling back to: ", elem_name_utf8)
        filepath = elem_name_utf8
    else :
        # Normalize path separators to the current OS convention.
        filepath = filepath.replace('\\', '/') if (os.sep == '/') else filepath.replace('/', '\\')
    image = image_cache.get(filepath)
    if image is not None:
        # Data is only embedded once, we may have already created the image but still be missing its data!
        if not image.has_data:
            pack_data_from_content(image, fbx_obj)
        return image
    image = image_utils.load_image(
        filepath,
        dirname=basedir,
        place_holder=True,
        recursive=settings.use_image_search,
    )
    # Try to use embedded data, if available!
    pack_data_from_content(image, fbx_obj)
    image_cache[filepath] = image
    # name can be ../a/b/c
    image.name = os.path.basename(elem_name_utf8)
    if settings.use_custom_props:
        blen_read_custom_properties(fbx_obj, image, settings)
    return image
def blen_read_camera(fbx_tmpl, fbx_obj, global_scale):
    """Create a Blender Camera datablock from an FBX NodeAttribute element.

    `global_scale` scales the clip distances into scene units.
    """
    # meters to inches
    M2I = 0.0393700787
    elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'NodeAttribute')
    fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
                 elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
    camera = bpy.data.cameras.new(name=elem_name_utf8)
    camera.type = 'ORTHO' if elem_props_get_enum(fbx_props, b'CameraProjectionType', 0) == 1 else 'PERSP'
    camera.lens = elem_props_get_number(fbx_props, b'FocalLength', 35.0)
    # FBX film sizes are in inches; Blender sensor sizes are in millimeters.
    camera.sensor_width = elem_props_get_number(fbx_props, b'FilmWidth', 32.0 * M2I) / M2I
    camera.sensor_height = elem_props_get_number(fbx_props, b'FilmHeight', 32.0 * M2I) / M2I
    camera.ortho_scale = elem_props_get_number(fbx_props, b'OrthoZoom', 1.0)
    filmaspect = camera.sensor_width / camera.sensor_height
    # film offset
    camera.shift_x = elem_props_get_number(fbx_props, b'FilmOffsetX', 0.0) / (M2I * camera.sensor_width)
    camera.shift_y = elem_props_get_number(fbx_props, b'FilmOffsetY', 0.0) / (M2I * camera.sensor_height * filmaspect)
    camera.clip_start = elem_props_get_number(fbx_props, b'NearPlane', 0.01) * global_scale
    camera.clip_end = elem_props_get_number(fbx_props, b'FarPlane', 100.0) * global_scale
    return camera
def blen_read_light(fbx_tmpl, fbx_obj, global_scale):
    """Create a Blender Light datablock from an FBX NodeAttribute element.

    `global_scale` scales the (deprecated) distance/decay value into scene units.
    """
    import math
    elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'NodeAttribute')
    fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
                 elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
    light_type = {
        0: 'POINT',
        1: 'SUN',
        2: 'SPOT'}.get(elem_props_get_enum(fbx_props, b'LightType', 0), 'POINT')
    lamp = bpy.data.lights.new(name=elem_name_utf8, type=light_type)
    if light_type == 'SPOT':
        spot_size = elem_props_get_number(fbx_props, b'OuterAngle', None)
        if spot_size is None:
            # Deprecated.
            spot_size = elem_props_get_number(fbx_props, b'Cone angle', 45.0)
        lamp.spot_size = math.radians(spot_size)
        spot_blend = elem_props_get_number(fbx_props, b'InnerAngle', None)
        if spot_blend is None:
            # Deprecated.
            spot_blend = elem_props_get_number(fbx_props, b'HotSpot', 45.0)
        # Fix: guard against a zero outer angle (degenerate files would raise
        # ZeroDivisionError here).
        lamp.spot_blend = 1.0 - (spot_blend / spot_size) if spot_size else 0.0
    # TODO, cycles nodes???
    lamp.color = elem_props_get_color_rgb(fbx_props, b'Color', (1.0, 1.0, 1.0))
    # FBX Intensity is in percent.
    lamp.energy = elem_props_get_number(fbx_props, b'Intensity', 100.0) / 100.0
    lamp.distance = elem_props_get_number(fbx_props, b'DecayStart', 25.0) * global_scale
    lamp.use_shadow = elem_props_get_bool(fbx_props, b'CastShadow', True)
    if hasattr(lamp, "cycles"):
        lamp.cycles.cast_shadow = lamp.use_shadow
    # Keeping this for now, but this is not used nor exposed anymore afaik...
    lamp.shadow_color = elem_props_get_color_rgb(fbx_props, b'ShadowColor', (0.0, 0.0, 0.0))
    return lamp
# ### Import Utility class
class FbxImportHelperNode:
__slots__ = (
'_parent', 'anim_compensation_matrix', 'is_global_animation', 'armature_setup', 'armature', 'bind_matrix',
'bl_bone', 'bl_data', 'bl_obj', 'bone_child_matrix', 'children', 'clusters',
'fbx_elem', 'fbx_name', 'fbx_transform_data', 'fbx_type',
'is_armature', 'has_bone_children', 'is_bone', 'is_root', 'is_leaf',
'matrix', 'matrix_as_parent', 'matrix_geom', 'meshes', 'post_matrix', 'pre_matrix')
    def __init__(self, fbx_elem, bl_data, fbx_transform_data, is_bone):
        """Wrap one FBX Model element (or a synthetic node) for import processing."""
        self.fbx_name = elem_name_ensure_class(fbx_elem, b'Model') if fbx_elem else 'Unknown'
        self.fbx_type = fbx_elem.props[2] if fbx_elem else None
        self.fbx_elem = fbx_elem
        self.bl_obj = None
        self.bl_data = bl_data
        self.bl_bone = None  # Name of bone if this is a bone (this may be different to fbx_name if there was a name conflict in Blender!)
        self.fbx_transform_data = fbx_transform_data
        self.is_root = False
        self.is_bone = is_bone
        self.is_armature = False
        self.armature = None  # For bones only, relevant armature node.
        self.has_bone_children = False  # True if the hierarchy below this node contains bones, important to support mixed hierarchies.
        self.is_leaf = False  # True for leaf-bones added to the end of some bone chains to set the lengths.
        self.pre_matrix = None  # correction matrix that needs to be applied before the FBX transform
        self.bind_matrix = None  # for bones this is the matrix used to bind to the skin
        if fbx_transform_data:
            self.matrix, self.matrix_as_parent, self.matrix_geom = blen_read_object_transform_do(fbx_transform_data)
        else:
            self.matrix, self.matrix_as_parent, self.matrix_geom = (None, None, None)
        self.post_matrix = None  # correction matrix that needs to be applied after the FBX transform
        self.bone_child_matrix = None  # Objects attached to a bone end not the beginning, this matrix corrects for that
        # XXX Those two are to handle the fact that rigged meshes are not linked to their armature in FBX, which implies
        #     that their animation is in global space (afaik...).
        #     This is actually not really solvable currently, since anim_compensation_matrix is not valid if armature
        #     itself is animated (we'd have to recompute global-to-local anim_compensation_matrix for each frame,
        #     and for each armature action... beh!).
        self.anim_compensation_matrix = None
        self.is_global_animation = False
        self.meshes = None  # List of meshes influenced by this node (for armature/bone nodes).
        self.clusters = []  # Deformer clusters relevant to this bone.
        self.armature_setup = {}  # Mesh and armature matrices captured at bind time.
        self._parent = None
        self.children = []
    @property
    def parent(self):
        # Parent helper node; assigning keeps both old/new parents' children lists in sync.
        return self._parent

    @parent.setter
    def parent(self, value):
        if self._parent is not None:
            self._parent.children.remove(self)
        self._parent = value
        if self._parent is not None:
            self._parent.children.append(self)

    @property
    def ignore(self):
        # Leaf bones are helper-only (used to set bone lengths) and are not imported.
        return self.is_leaf
def __repr__(self):
if self.fbx_elem:
return self.fbx_elem.props[1].decode()
else:
return "None"
def print_info(self, indent=0):
print(" " * indent + (self.fbx_name if self.fbx_name else "(Null)")
+ ("[root]" if self.is_root else "")
+ ("[leaf]" if self.is_leaf else "")
+ ("[ignore]" if self.ignore else "")
+ ("[armature]" if self.is_armature else "")
+ ("[bone]" if self.is_bone else "")
+ ("[HBC]" if self.has_bone_children else "")
)
for c in self.children:
c.print_info(indent + 1)
def mark_leaf_bones(self):
if self.is_bone and len(self.children) == 1:
child = self.children[0]
if child.is_bone and len(child.children) == 0:
child.is_leaf = True
for child in self.children:
child.mark_leaf_bones()
def do_bake_transform(self, settings):
return (settings.bake_space_transform and self.fbx_type in (b'Mesh', b'Null') and
not self.is_armature and not self.is_bone)
def find_correction_matrix(self, settings, parent_correction_inv=None):
from bpy_extras.io_utils import axis_conversion
if self.parent and (self.parent.is_root or self.parent.do_bake_transform(settings)):
self.pre_matrix = settings.global_matrix
if parent_correction_inv:
self.pre_matrix = parent_correction_inv @ (self.pre_matrix if self.pre_matrix else Matrix())
correction_matrix = None
if self.is_bone:
if settings.automatic_bone_orientation:
bone_children = tuple(child for child in self.children if child.is_bone)
if len(bone_children) == 0:
if self.parent and self.parent.is_bone:
correction_matrix = parent_correction_inv.inverted() if parent_correction_inv else None
else:
best_axis = (1, 0, 0)
if len(bone_children) == 1:
vec = bone_children[0].get_bind_matrix().to_translation()
best_axis = Vector((0, 0, 1 if vec[2] >= 0 else -1))
if abs(vec[0]) > abs(vec[1]):
if abs(vec[0]) > abs(vec[2]):
best_axis = Vector((1 if vec[0] >= 0 else -1, 0, 0))
elif abs(vec[1]) > abs(vec[2]):
best_axis = Vector((0, 1 if vec[1] >= 0 else -1, 0))
else:
child_locs = (child.get_bind_matrix().to_translation() for child in bone_children)
child_locs = tuple(loc.normalized() for loc in child_locs if loc.magnitude > 0.0)
if False:
best_angle = -1.0
for i in range(6):
a = i // 2
s = -1 if i % 2 == 1 else 1
test_axis = Vector((s if a == 0 else 0, s if a == 1 else 0, s if a == 2 else 0))
# find max angle to children
max_angle = 1.0
for loc in child_locs:
max_angle = min(max_angle, test_axis.dot(loc))
# is it better than the last one?
if best_angle < max_angle:
best_angle = max_angle
best_axis = test_axis
else:
best_angle = -1.0
for vec in child_locs:
test_axis = Vector((0, 0, 1 if vec[2] >= 0 else -1))
if abs(vec[0]) > abs(vec[1]):
if abs(vec[0]) > abs(vec[2]):
test_axis = Vector((1 if vec[0] >= 0 else -1, 0, 0))
elif abs(vec[1]) > abs(vec[2]):
test_axis = Vector((0, 1 if vec[1] >= 0 else -1, 0))
# find max angle to children
max_angle = 1.0
for loc in child_locs:
max_angle = min(max_angle, test_axis.dot(loc))
# is it better than the last one?
if best_angle < max_angle:
best_angle = max_angle
best_axis = test_axis
# convert best_axis to axis string
to_up = 'Z' if best_axis[2] >= 0 else '-Z'
if abs(best_axis[0]) > abs(best_axis[1]):
if abs(best_axis[0]) > abs(best_axis[2]):
to_up = 'X' if best_axis[0] >= 0 else '-X'
elif abs(best_axis[1]) > abs(best_axis[2]):
to_up = 'Y' if best_axis[1] >= 0 else '-Y'
to_forward = 'X' if to_up not in {'X', '-X'} else 'Y'
# Build correction matrix
if (to_up, to_forward) != ('Y', 'X'):
correction_matrix = axis_conversion(from_forward='X',
from_up='Y',
to_forward=to_forward,
to_up=to_up,
).to_4x4()
else:
correction_matrix = settings.bone_correction_matrix
else:
# camera and light can be hard wired
if self.fbx_type == b'Camera':
correction_matrix = MAT_CONVERT_CAMERA
elif self.fbx_type == b'Light':
correction_matrix = MAT_CONVERT_LIGHT
self.post_matrix = correction_matrix
if self.do_bake_transform(settings):
self.post_matrix = settings.global_matrix_inv @ (self.post_matrix if self.post_matrix else Matrix())
# process children
correction_matrix_inv = correction_matrix.inverted_safe() if correction_matrix else None
for child in self.children:
child.find_correction_matrix(settings, correction_matrix_inv)
def find_armature_bones(self, armature):
for child in self.children:
if child.is_bone:
child.armature = armature
child.find_armature_bones(armature)
def find_armatures(self):
    """Ensure every group of bone children is owned by an armature node.

    If any direct child is a bone, this node either becomes the armature
    itself (when it is a plain Null/Root node) or a fresh armature helper
    node is inserted between this node and its bone children.
    """
    needs_armature = False
    for child in self.children:
        if child.is_bone:
            needs_armature = True
            break
    if needs_armature:
        if self.fbx_type in {b'Null', b'Root'}:
            # if empty then convert into armature
            self.is_armature = True
            armature = self
        else:
            # otherwise insert a new node
            # XXX Maybe in case self is virtual FBX root node, we should instead add one armature per bone child?
            armature = FbxImportHelperNode(None, None, None, False)
            armature.fbx_name = "Armature"
            armature.is_armature = True
            # Re-parent bone children under the new armature node. Iterate a
            # snapshot: assigning 'child.parent' mutates 'self.children'.
            for child in tuple(self.children):
                if child.is_bone:
                    child.parent = armature
            armature.parent = self
        # Tag every bone in the subtree with the armature that owns it.
        armature.find_armature_bones(armature)
    # Recurse, skipping subtrees already handled as armature/bone content.
    for child in self.children:
        if child.is_armature or child.is_bone:
            continue
        child.find_armatures()
def find_bone_children(self):
    """Record on each node whether its subtree contains bones.

    Returns True when this node is a bone or any descendant is.
    """
    # Visit every child (the call has side effects), then aggregate.
    child_results = [child.find_bone_children() for child in self.children]
    self.has_bone_children = any(child_results)
    return self.is_bone or self.has_bone_children
def find_fake_bones(self, in_armature=False):
    """Convert non-bone nodes that interrupt a bone chain into bones.

    Inside an armature, a non-bone node that has bone descendants would
    break the chain, so it is marked as a bone ("fake bone"). If the node
    carries data (it is not a plain Null/Root), the data is moved onto a
    new intermediate child node so the bone itself stays data-free.
    """
    if in_armature and not self.is_bone and self.has_bone_children:
        self.is_bone = True
        # if we are not a null node we need an intermediate node for the data
        if self.fbx_type not in {b'Null', b'Root'}:
            node = FbxImportHelperNode(self.fbx_elem, self.bl_data, None, False)
            self.fbx_elem = None
            self.bl_data = None
            # transfer children
            # Iterate a snapshot: assigning 'child.parent' mutates
            # 'self.children', which would otherwise skip every other
            # entry. Same guard as used in find_armatures().
            for child in tuple(self.children):
                if child.is_bone or child.has_bone_children:
                    continue
                child.parent = node
            # attach to parent
            node.parent = self
    if self.is_armature:
        in_armature = True
    for child in self.children:
        child.find_fake_bones(in_armature)
def get_world_matrix_as_parent(self):
    """Return the world-space transform this node passes down to its children."""
    if self.parent:
        world = self.parent.get_world_matrix_as_parent()
    else:
        world = Matrix()
    if self.matrix_as_parent:
        world = world @ self.matrix_as_parent
    return world
def get_world_matrix(self):
    """Return this node's own world-space transform (parent chain uses
    the as-parent matrices, this node contributes its local matrix)."""
    if self.parent:
        world = self.parent.get_world_matrix_as_parent()
    else:
        world = Matrix()
    if self.matrix:
        world = world @ self.matrix
    return world
def get_matrix(self):
    """Return the local matrix with pre/post correction matrices applied."""
    result = self.matrix or Matrix()
    pre, post = self.pre_matrix, self.post_matrix
    if pre:
        result = pre @ result
    if post:
        result = result @ post
    return result
def get_bind_matrix(self):
    """Return the bind-pose matrix with pre/post correction matrices applied."""
    result = self.bind_matrix or Matrix()
    pre, post = self.pre_matrix, self.post_matrix
    if pre:
        result = pre @ result
    if post:
        result = result @ post
    return result
def make_bind_pose_local(self, parent_matrix=None):
    """Rewrite bind matrices so each is relative to its parent's bind pose.

    Recurses through the whole subtree, accumulating the parent-space
    transform as it descends.
    """
    parent_matrix = Matrix() if parent_matrix is None else parent_matrix
    if self.bind_matrix:
        local_bind = parent_matrix.inverted_safe() @ self.bind_matrix
    else:
        # No bind pose recorded: fall back to a copy of the local transform.
        local_bind = self.matrix.copy() if self.matrix else None
    self.bind_matrix = local_bind
    child_parent = parent_matrix @ local_bind if local_bind else parent_matrix
    for child in self.children:
        child.make_bind_pose_local(child_parent)
def collect_skeleton_meshes(self, meshes):
    """Accumulate into *meshes* every mesh node deformed by clusters in this subtree."""
    for _cluster, cluster_meshes in self.clusters:
        meshes.update(cluster_meshes)
    for child in self.children:
        child.collect_skeleton_meshes(meshes)
def collect_armature_meshes(self):
    """Attach skinned meshes to their armature and rebase their matrices.

    For an armature node, gathers all meshes deformed by its bone clusters,
    re-parents them to the armature and rewrites their matrix to be relative
    to the armature's world transform.
    """
    if self.is_armature:
        armature_matrix_inv = self.get_world_matrix().inverted_safe()
        meshes = set()
        for child in self.children:
            # Children meshes may be linked to children armatures, in which case we do not want to link them
            # to a parent one. See T70244.
            child.collect_armature_meshes()
            if not child.meshes:
                child.collect_skeleton_meshes(meshes)
        for m in meshes:
            old_matrix = m.matrix
            # Rebase the mesh matrix into armature space.
            m.matrix = armature_matrix_inv @ m.get_world_matrix()
            # Keep the delta so animation curves (authored against the old
            # matrix) can be compensated later.
            m.anim_compensation_matrix = old_matrix.inverted_safe() @ m.matrix
            m.is_global_animation = True
            m.parent = self
        self.meshes = meshes
    else:
        for child in self.children:
            child.collect_armature_meshes()
def build_skeleton(self, arm, parent_matrix, parent_bone_size=1, force_connect_children=False):
    """Create the edit bone for this node (recursively for bone children).

    Must be called while *arm* is in Edit mode. Bone length is estimated
    from the average translation of bone children (falling back to the
    parent's estimate for leaf bones). When *force_connect_children* is
    set, children whose heads do not exactly touch this bone's tail may
    still be connected by moving the tail to their average head position.
    Returns the created EditBone.
    """
    def child_connect(par_bone, child_bone, child_head, connect_ctx):
        # child_bone or child_head may be None.
        force_connect_children, connected = connect_ctx
        if child_bone is not None:
            child_bone.parent = par_bone
            child_head = child_bone.head
        if similar_values_iter(par_bone.tail, child_head):
            if child_bone is not None:
                child_bone.use_connect = True
            # Disallow any force-connection at this level from now on, since that child was 'really'
            # connected, we do not want to move current bone's tail anymore!
            connected = None
        elif force_connect_children and connected is not None:
            # Accumulate candidate heads; actual tail move and connection
            # happen in child_connect_finalize once all children are seen.
            if connected is ...:
                # First candidate: ([sum_of_heads, count], [bones]).
                connected = ([child_head.copy(), 1], [child_bone] if child_bone is not None else [])
            else:
                connected[0][0] += child_head
                connected[0][1] += 1
                if child_bone is not None:
                    connected[1].append(child_bone)
        # Write state back through the mutable context list.
        connect_ctx[1] = connected

    def child_connect_finalize(par_bone, connect_ctx):
        force_connect_children, connected = connect_ctx
        if force_connect_children and connected is not None and connected is not ...:
            # Move the tail to the average child head position.
            par_tail = connected[0][0] / connected[0][1]
            if (par_tail - par_bone.head).magnitude < 1e-2:
                # Avoid a (near) zero-length bone.
                par_bone_vec = (par_bone.tail - par_bone.head).normalized()
                par_tail = par_bone.head + par_bone_vec * 0.01
            par_bone.tail = par_tail
            for child_bone in connected[1]:
                if similar_values_iter(par_tail, child_bone.head):
                    child_bone.use_connect = True

    bone = arm.bl_data.edit_bones.new(name=self.fbx_name)
    bone.select = True
    self.bl_obj = arm.bl_obj
    self.bl_data = arm.bl_data
    self.bl_bone = bone.name  # Blender may have renamed on collision.
    # Estimate bone length from average bone-child offset.
    bone_size = 0.0
    bone_count = 0
    for child in self.children:
        if child.is_bone:
            bone_size += child.get_bind_matrix().to_translation().magnitude
            bone_count += 1
    if bone_count > 0:
        bone_size /= bone_count
    else:
        bone_size = parent_bone_size
    # Tail must be set before the matrix (zero-length bones are invalid).
    bone_tail = Vector((0.0, 1.0, 0.0)) * max(0.01, bone_size)
    bone.tail = bone_tail
    bone_matrix = parent_matrix @ self.get_bind_matrix().normalized()
    bone.matrix = bone_matrix
    # Correction for objects parented to this bone (Blender attaches
    # children at the bone tail).
    self.bone_child_matrix = Matrix.Translation(-bone_tail)
    # Mutable [flag, state] context shared with the helpers above;
    # '...' marks "no candidate child seen yet".
    connect_ctx = [force_connect_children, ...]
    for child in self.children:
        if child.is_leaf and force_connect_children:
            # Leaf bones are not built; only their head position matters
            # for the tail-connection heuristic.
            child_head = (bone_matrix @ child.get_bind_matrix().normalized()).translation
            child_connect(bone, None, child_head, connect_ctx)
        elif child.is_bone and not child.ignore:
            child_bone = child.build_skeleton(arm, bone_matrix, bone_size,
                                              force_connect_children=force_connect_children)
            child_connect(bone, child_bone, None, connect_ctx)
    child_connect_finalize(bone, connect_ctx)
    return bone
def build_node_obj(self, fbx_tmpl, settings):
    """Create (once) and return the Blender Object for this node.

    Returns the cached object if already built, or None for bones and
    nodes without FBX data. Applies color, visibility, transform and
    custom properties from the FBX properties.
    """
    if self.bl_obj:
        return self.bl_obj
    if self.is_bone or not self.fbx_elem:
        return None
    elem_name_utf8 = self.fbx_name
    self.bl_obj = obj = bpy.data.objects.new(name=elem_name_utf8, object_data=self.bl_data)
    # Node properties, with the template as fallback for defaults.
    fbx_props = (elem_find_first(self.fbx_elem, b'Properties70'),
                 elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
    obj.color[0:3] = elem_props_get_color_rgb(fbx_props, b'Color', (0.8, 0.8, 0.8))
    obj.hide_viewport = not bool(elem_props_get_visibility(fbx_props, b'Visibility', 1.0))
    obj.matrix_basis = self.get_matrix()
    if settings.use_custom_props:
        blen_read_custom_properties(self.fbx_elem, obj, settings)
    return obj
def build_skeleton_children(self, fbx_tmpl, settings, scene, view_layer):
    """Create and link Blender objects for the non-bone nodes below an armature.

    Bones themselves only recurse; non-bone nodes get an object which is
    linked into the active collection and selected. Returns the created
    object, or None for bones / data-less nodes.
    """
    def recurse_children():
        for child in self.children:
            if not child.ignore:
                child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer)

    if self.is_bone:
        recurse_children()
        return None
    obj = self.build_node_obj(fbx_tmpl, settings)
    if obj is None:
        return None
    recurse_children()
    # Instance the new object in the scene.
    view_layer.active_layer_collection.collection.objects.link(obj)
    obj.select_set(True)
    return obj
def link_skeleton_children(self, fbx_tmpl, settings, scene):
    """Parent the objects created by build_skeleton_children.

    Objects under a bone are bone-parented to it (with a head/tail
    correction folded into their pre_matrix); objects under a non-bone
    node are object-parented. Returns this node's object, or None for
    bones.
    """
    if self.is_bone:
        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.bl_obj
            if child_obj and child_obj != self.bl_obj:
                child_obj.parent = self.bl_obj
                child_obj.parent_bone = self.bl_bone
                child_obj.parent_type = 'BONE'
                child_obj.matrix_parent_inverse = Matrix()
                # Fold the bone tail offset (bone_child_matrix, computed in
                # build_skeleton) into the child's pre_matrix, then refresh
                # its local matrix.
                if child.pre_matrix:
                    child.pre_matrix = self.bone_child_matrix @ child.pre_matrix
                else:
                    child.pre_matrix = self.bone_child_matrix
                child_obj.matrix_basis = child.get_matrix()
            child.link_skeleton_children(fbx_tmpl, settings, scene)
        return None
    else:
        obj = self.bl_obj
        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.link_skeleton_children(fbx_tmpl, settings, scene)
            if child_obj:
                child_obj.parent = obj
        return obj
def set_pose_matrix(self, arm):
    """Set the pose-bone transform to the delta between bind pose and
    current local transform, recursing into non-ignored bone children."""
    pose_bone = arm.bl_obj.pose.bones[self.bl_bone]
    bind_inv = self.get_bind_matrix().inverted_safe()
    pose_bone.matrix_basis = bind_inv @ self.get_matrix()
    for child in self.children:
        if child.is_bone and not child.ignore:
            child.set_pose_matrix(arm)
def merge_weights(self, combined_weights, fbx_cluster):
    """Merge the cluster's per-vertex weights into *combined_weights*,
    a dict mapping vertex index -> list of weights."""
    indices = elem_prop_first(elem_find_first(fbx_cluster, b'Indexes', default=None), default=())
    weights = elem_prop_first(elem_find_first(fbx_cluster, b'Weights', default=None), default=())
    for index, weight in zip(indices, weights):
        combined_weights.setdefault(index, []).append(weight)
def set_bone_weights(self):
    """Create vertex groups for this bone's clusters on the skinned meshes.

    Ignored bone children (e.g. leaf bones stripped from the skeleton) that
    still carry clusters have their weights merged into this bone, averaging
    where a vertex receives weights from several sources. Recurses into
    non-ignored bone children.
    """
    ignored_children = tuple(child for child in self.children
                             if child.is_bone and child.ignore and len(child.clusters) > 0)
    if len(ignored_children) > 0:
        # Merge weights of ignored children into this bone's weights.
        for fbx_cluster, meshes in self.clusters:
            combined_weights = {}
            self.merge_weights(combined_weights, fbx_cluster)
            for child in ignored_children:
                for child_cluster, child_meshes in child.clusters:
                    # Only merge clusters that deform the same meshes.
                    if not meshes.isdisjoint(child_meshes):
                        self.merge_weights(combined_weights, child_cluster)
            # Average vertices that got more than one weight.
            indices = []
            weights = []
            for i, w in combined_weights.items():
                indices.append(i)
                if len(w) > 1:
                    weights.append(sum(w) / len(w))
                else:
                    weights.append(w[0])
            add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in meshes])
        # Ignored-child clusters on meshes this bone does not touch at all
        # are applied verbatim (nothing to merge with).
        all_meshes = set().union(*[meshes for _, meshes in self.clusters])
        for child in ignored_children:
            for child_cluster, child_meshes in child.clusters:
                if all_meshes.isdisjoint(child_meshes):
                    indices = elem_prop_first(elem_find_first(child_cluster, b'Indexes', default=None), default=())
                    weights = elem_prop_first(elem_find_first(child_cluster, b'Weights', default=None), default=())
                    add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in child_meshes])
    else:
        # set the vertex weights on meshes
        for fbx_cluster, meshes in self.clusters:
            indices = elem_prop_first(elem_find_first(fbx_cluster, b'Indexes', default=None), default=())
            weights = elem_prop_first(elem_find_first(fbx_cluster, b'Weights', default=None), default=())
            add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in meshes])
    for child in self.children:
        if child.is_bone and not child.ignore:
            child.set_bone_weights()
def build_hierarchy(self, fbx_tmpl, settings, scene, view_layer):
    """Create Blender datablocks for this node and its subtree.

    Armature nodes get an Armature object, have their bones built in Edit
    mode, their pose applied, and their non-bone descendants instantiated.
    Plain data nodes get an object; data-less nodes only recurse. Returns
    the created object, or None.
    """
    if self.is_armature:
        # create when linking since we need object data
        elem_name_utf8 = self.fbx_name
        self.bl_data = arm_data = bpy.data.armatures.new(name=elem_name_utf8)
        # Object data must be created already
        self.bl_obj = arm = bpy.data.objects.new(name=elem_name_utf8, object_data=arm_data)
        arm.matrix_basis = self.get_matrix()
        if self.fbx_elem:
            fbx_props = (elem_find_first(self.fbx_elem, b'Properties70'),
                         elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
            if settings.use_custom_props:
                blen_read_custom_properties(self.fbx_elem, arm, settings)
        # Instance in the scene (must be linked before entering Edit mode).
        view_layer.active_layer_collection.collection.objects.link(arm)
        arm.select_set(True)
        # Add bones:
        # Switch to Edit mode.
        view_layer.objects.active = arm
        is_hidden = arm.hide_viewport
        arm.hide_viewport = False  # Can't switch to Edit mode hidden objects...
        bpy.ops.object.mode_set(mode='EDIT')
        for child in self.children:
            if child.ignore:
                continue
            if child.is_bone:
                child.build_skeleton(self, Matrix(), force_connect_children=settings.force_connect_children)
        bpy.ops.object.mode_set(mode='OBJECT')
        arm.hide_viewport = is_hidden
        # Apply pose matrices (must be done in Object mode).
        for child in self.children:
            if child.ignore:
                continue
            if child.is_bone:
                child.set_pose_matrix(self)
        # Create objects for the non-bone nodes below the armature.
        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer)
        return arm
    elif self.fbx_elem and not self.is_bone:
        obj = self.build_node_obj(fbx_tmpl, settings)
        for child in self.children:
            child.build_hierarchy(fbx_tmpl, settings, scene, view_layer)
        # Instance in the scene.
        view_layer.active_layer_collection.collection.objects.link(obj)
        obj.select_set(True)
        return obj
    else:
        # No data of its own: just recurse.
        for child in self.children:
            child.build_hierarchy(fbx_tmpl, settings, scene, view_layer)
        return None
def link_hierarchy(self, fbx_tmpl, settings, scene):
    """Parent the objects created by build_hierarchy and wire up skinning.

    For an armature: parents child objects, sets up each skinned mesh's
    parent-inverse from the stored bind matrices, adds Armature modifiers,
    and assigns bone vertex weights. Returns this node's object, or None.
    """
    if self.is_armature:
        arm = self.bl_obj
        # Parent the armature's non-bone descendants.
        for child in self.children:
            if child.ignore:
                continue
            child_obj = child.link_skeleton_children(fbx_tmpl, settings, scene)
            if child_obj:
                child_obj.parent = arm
        if self.meshes:
            for mesh in self.meshes:
                # (mesh bind matrix, armature bind matrix) captured at
                # cluster-processing time.
                (mmat, amat) = mesh.armature_setup[self]
                me_obj = mesh.bl_obj
                # Probably because org app (max) handles it completely aside from any parenting stuff,
                # which we obviously cannot do in Blender. :/
                if amat is None:
                    amat = self.bind_matrix
                amat = settings.global_matrix @ (Matrix() if amat is None else amat)
                if self.matrix_geom:
                    amat = amat @ self.matrix_geom
                mmat = settings.global_matrix @ mmat
                if mesh.matrix_geom:
                    mmat = mmat @ mesh.matrix_geom
                # Now that we have armature and mesh in there (global) bind 'state' (matrix),
                # we can compute inverse parenting matrix of the mesh.
                me_obj.matrix_parent_inverse = amat.inverted_safe() @ mmat @ me_obj.matrix_basis.inverted_safe()
                mod = mesh.bl_obj.modifiers.new(arm.name, 'ARMATURE')
                mod.object = arm
        # Add bone weights to the deformers
        for child in self.children:
            if child.ignore:
                continue
            if child.is_bone:
                child.set_bone_weights()
        return arm
    elif self.bl_obj:
        obj = self.bl_obj
        for child in self.children:
            child_obj = child.link_hierarchy(fbx_tmpl, settings, scene)
            if child_obj:
                child_obj.parent = obj
        return obj
    else:
        # No object of its own: just recurse.
        for child in self.children:
            child.link_hierarchy(fbx_tmpl, settings, scene)
        return None
def load(operator, context, filepath="",
use_manual_orientation=False,
axis_forward='-Z',
axis_up='Y',
global_scale=1.0,
bake_space_transform=False,
use_custom_normals=True,
use_image_search=False,
use_alpha_decals=False,
decal_offset=0.0,
use_anim=True,
anim_offset=1.0,
use_subsurf=False,
use_custom_props=True,
use_custom_props_enum_as_string=True,
ignore_leaf_bones=False,
force_connect_children=False,
automatic_bone_orientation=False,
primary_bone_axis='Y',
secondary_bone_axis='X',
use_prepost_rot=True):
global fbx_elem_nil
fbx_elem_nil = FBXElem('', (), (), ())
import os
import time
from bpy_extras.io_utils import axis_conversion
from . import parse_fbx
from .fbx_utils import RIGHT_HAND_AXES, FBX_FRAMERATES
start_time_proc = time.process_time()
start_time_sys = time.time()
perfmon = PerfMon()
perfmon.level_up()
perfmon.step("FBX Import: start importing %s" % filepath)
perfmon.level_up()
# Detect ASCII files.
# Typically it's bad practice to fail silently on any error,
try:
with open(filepath, 'r', encoding="utf-8") as fh:
fh.read(24)
is_ascii = True
except Exception:
is_ascii = False
if is_ascii:
operator.report({'ERROR'}, "ASCII FBX files are not supported %r" % filepath)
return {'CANCELLED'}
del is_ascii
try:
elem_root, version = parse_fbx.parse(filepath)
except Exception as e:
import traceback
traceback.print_exc()
operator.report({'ERROR'}, "Couldn't open file %r (%s)" % (filepath, e))
return {'CANCELLED'}
if version < 7100:
operator.report({'ERROR'}, "Version %r unsupported, must be %r or later" % (version, 7100))
return {'CANCELLED'}
print("FBX version: %r" % version)
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
# deselect all
if bpy.ops.object.select_all.poll():
bpy.ops.object.select_all(action='DESELECT')
basedir = os.path.dirname(filepath)
nodal_material_wrap_map = {}
image_cache = {}
# Tables: (FBX_byte_id -> [FBX_data, None or Blender_datablock])
fbx_table_nodes = {}
if use_alpha_decals:
material_decals = set()
else:
material_decals = None
scene = context.scene
view_layer = context.view_layer
# #### Get some info from GlobalSettings.
perfmon.step("FBX import: Prepare...")
fbx_settings = elem_find_first(elem_root, b'GlobalSettings')
fbx_settings_props = elem_find_first(fbx_settings, b'Properties70')
if fbx_settings is None or fbx_settings_props is None:
operator.report({'ERROR'}, "No 'GlobalSettings' found in file %r" % filepath)
return {'CANCELLED'}
# FBX default base unit seems to be the centimeter, while raw Blender Unit is equivalent to the meter...
unit_scale = elem_props_get_number(fbx_settings_props, b'UnitScaleFactor', 1.0)
unit_scale_org = elem_props_get_number(fbx_settings_props, b'OriginalUnitScaleFactor', 1.0)
global_scale *= (unit_scale / units_blender_to_fbx_factor(context.scene))
# Compute global matrix and scale.
if not use_manual_orientation:
axis_forward = (elem_props_get_integer(fbx_settings_props, b'FrontAxis', 1),
elem_props_get_integer(fbx_settings_props, b'FrontAxisSign', 1))
axis_up = (elem_props_get_integer(fbx_settings_props, b'UpAxis', 2),
elem_props_get_integer(fbx_settings_props, b'UpAxisSign', 1))
axis_coord = (elem_props_get_integer(fbx_settings_props, b'CoordAxis', 0),
elem_props_get_integer(fbx_settings_props, b'CoordAxisSign', 1))
axis_key = (axis_up, axis_forward, axis_coord)
axis_up, axis_forward = {v: k for k, v in RIGHT_HAND_AXES.items()}.get(axis_key, ('Z', 'Y'))
global_matrix = (Matrix.Scale(global_scale, 4) @
axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4())
# To cancel out unwanted rotation/scale on nodes.
global_matrix_inv = global_matrix.inverted()
# For transforming mesh normals.
global_matrix_inv_transposed = global_matrix_inv.transposed()
# Compute bone correction matrix
bone_correction_matrix = None # None means no correction/identity
if not automatic_bone_orientation:
if (primary_bone_axis, secondary_bone_axis) != ('Y', 'X'):
bone_correction_matrix = axis_conversion(from_forward='X',
from_up='Y',
to_forward=secondary_bone_axis,
to_up=primary_bone_axis,
).to_4x4()
# Compute framerate settings.
custom_fps = elem_props_get_number(fbx_settings_props, b'CustomFrameRate', 25.0)
time_mode = elem_props_get_enum(fbx_settings_props, b'TimeMode')
real_fps = {eid: val for val, eid in FBX_FRAMERATES[1:]}.get(time_mode, custom_fps)
if real_fps <= 0.0:
real_fps = 25.0
scene.render.fps = round(real_fps)
scene.render.fps_base = scene.render.fps / real_fps
# store global settings that need to be accessed during conversion
settings = FBXImportSettings(
operator.report, (axis_up, axis_forward), global_matrix, global_scale,
bake_space_transform, global_matrix_inv, global_matrix_inv_transposed,
use_custom_normals, use_image_search,
use_alpha_decals, decal_offset,
use_anim, anim_offset,
use_subsurf,
use_custom_props, use_custom_props_enum_as_string,
nodal_material_wrap_map, image_cache,
ignore_leaf_bones, force_connect_children, automatic_bone_orientation, bone_correction_matrix,
use_prepost_rot,
)
# #### And now, the "real" data.
perfmon.step("FBX import: Templates...")
fbx_defs = elem_find_first(elem_root, b'Definitions') # can be None
fbx_nodes = elem_find_first(elem_root, b'Objects')
fbx_connections = elem_find_first(elem_root, b'Connections')
if fbx_nodes is None:
operator.report({'ERROR'}, "No 'Objects' found in file %r" % filepath)
return {'CANCELLED'}
if fbx_connections is None:
operator.report({'ERROR'}, "No 'Connections' found in file %r" % filepath)
return {'CANCELLED'}
# ----
# First load property templates
# Load 'PropertyTemplate' values.
# Key is a tuple, (ObjectType, FBXNodeType)
# eg, (b'Texture', b'KFbxFileTexture')
# (b'Geometry', b'KFbxMesh')
fbx_templates = {}
def _():
if fbx_defs is not None:
for fbx_def in fbx_defs.elems:
if fbx_def.id == b'ObjectType':
for fbx_subdef in fbx_def.elems:
if fbx_subdef.id == b'PropertyTemplate':
assert(fbx_def.props_type == b'S')
assert(fbx_subdef.props_type == b'S')
# (b'Texture', b'KFbxFileTexture') - eg.
key = fbx_def.props[0], fbx_subdef.props[0]
fbx_templates[key] = fbx_subdef
_(); del _
def fbx_template_get(key):
ret = fbx_templates.get(key, fbx_elem_nil)
if ret is fbx_elem_nil:
# Newest FBX (7.4 and above) use no more 'K' in their type names...
key = (key[0], key[1][1:])
return fbx_templates.get(key, fbx_elem_nil)
return ret
perfmon.step("FBX import: Nodes...")
# ----
# Build FBX node-table
def _():
for fbx_obj in fbx_nodes.elems:
# TODO, investigate what other items after first 3 may be
assert(fbx_obj.props_type[:3] == b'LSS')
fbx_uuid = elem_uuid(fbx_obj)
fbx_table_nodes[fbx_uuid] = [fbx_obj, None]
_(); del _
# ----
# Load in the data
# http://download.autodesk.com/us/fbx/20112/FBX_SDK_HELP/index.html?url=
# WS73099cc142f487551fea285e1221e4f9ff8-7fda.htm,topicNumber=d0e6388
perfmon.step("FBX import: Connections...")
fbx_connection_map = {}
fbx_connection_map_reverse = {}
def _():
for fbx_link in fbx_connections.elems:
c_type = fbx_link.props[0]
if fbx_link.props_type[1:3] == b'LL':
c_src, c_dst = fbx_link.props[1:3]
fbx_connection_map.setdefault(c_src, []).append((c_dst, fbx_link))
fbx_connection_map_reverse.setdefault(c_dst, []).append((c_src, fbx_link))
_(); del _
perfmon.step("FBX import: Meshes...")
# ----
# Load mesh data
def _():
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxMesh'))
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Geometry':
continue
if fbx_obj.props[-1] == b'Mesh':
assert(blen_data is None)
fbx_item[1] = blen_read_geom(fbx_tmpl, fbx_obj, settings)
_(); del _
perfmon.step("FBX import: Materials & Textures...")
# ----
# Load material data
def _():
fbx_tmpl = fbx_template_get((b'Material', b'KFbxSurfacePhong'))
# b'KFbxSurfaceLambert'
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Material':
continue
assert(blen_data is None)
fbx_item[1] = blen_read_material(fbx_tmpl, fbx_obj, settings)
_(); del _
# ----
# Load image & textures data
def _():
fbx_tmpl_tex = fbx_template_get((b'Texture', b'KFbxFileTexture'))
fbx_tmpl_img = fbx_template_get((b'Video', b'KFbxVideo'))
# Important to run all 'Video' ones first, embedded images are stored in those nodes.
# XXX Note we simplify things here, assuming both matching Video and Texture will use same file path,
# this may be a bit weak, if issue arise we'll fallback to plain connection stuff...
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Video':
continue
fbx_item[1] = blen_read_texture_image(fbx_tmpl_img, fbx_obj, basedir, settings)
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Texture':
continue
fbx_item[1] = blen_read_texture_image(fbx_tmpl_tex, fbx_obj, basedir, settings)
_(); del _
perfmon.step("FBX import: Cameras & Lamps...")
def _():
fbx_tmpl = fbx_template_get((b'NodeAttribute', b'KFbxCamera'))
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'NodeAttribute':
continue
if fbx_obj.props[-1] == b'Camera':
assert(blen_data is None)
fbx_item[1] = blen_read_camera(fbx_tmpl, fbx_obj, global_scale)
_(); del _
def _():
fbx_tmpl = fbx_template_get((b'NodeAttribute', b'KFbxLight'))
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'NodeAttribute':
continue
if fbx_obj.props[-1] == b'Light':
assert(blen_data is None)
fbx_item[1] = blen_read_light(fbx_tmpl, fbx_obj, global_scale)
_(); del _
def connection_filter_ex(fbx_uuid, fbx_id, dct):
return [(c_found[0], c_found[1], c_type)
for (c_uuid, c_type) in dct.get(fbx_uuid, ())
for c_found in (() if c_uuid == 0 else (fbx_table_nodes.get(c_uuid, (None, None)),))
if (fbx_id is None) or (c_found[0] and c_found[0].id == fbx_id)]
def connection_filter_forward(fbx_uuid, fbx_id):
return connection_filter_ex(fbx_uuid, fbx_id, fbx_connection_map)
def connection_filter_reverse(fbx_uuid, fbx_id):
return connection_filter_ex(fbx_uuid, fbx_id, fbx_connection_map_reverse)
perfmon.step("FBX import: Objects & Armatures...")
fbx_helper_nodes = {}
def _():
fbx_helper_nodes[0] = root_helper = FbxImportHelperNode(None, None, None, False)
root_helper.is_root = True
fbx_tmpl = fbx_template_get((b'Model', b'KFbxNode'))
for a_uuid, a_item in fbx_table_nodes.items():
fbx_obj, bl_data = a_item
if fbx_obj is None or fbx_obj.id != b'Model':
continue
fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
transform_data = blen_read_object_transform_preprocess(fbx_props, fbx_obj, Matrix(), use_prepost_rot)
is_bone = fbx_obj.props[2] in {b'LimbNode', b'Limb'}
fbx_helper_nodes[a_uuid] = FbxImportHelperNode(fbx_obj, bl_data, transform_data, is_bone)
for fbx_link in fbx_connections.elems:
if fbx_link.props[0] != b'OO':
continue
if fbx_link.props_type[1:3] == b'LL':
c_src, c_dst = fbx_link.props[1:3]
parent = fbx_helper_nodes.get(c_dst)
if parent is None:
continue
child = fbx_helper_nodes.get(c_src)
if child is None:
fbx_sdata, bl_data = p_item = fbx_table_nodes.get(c_src, (None, None))
if fbx_sdata is None:
continue
if fbx_sdata.id not in {b'Geometry', b'NodeAttribute'}:
continue
parent.bl_data = bl_data
else:
child.parent = parent
root_helper.find_armatures()
root_helper.find_bone_children()
root_helper.find_fake_bones()
if settings.ignore_leaf_bones:
root_helper.mark_leaf_bones()
for a_uuid, a_item in fbx_table_nodes.items():
fbx_obj, bl_data = a_item
if fbx_obj is None:
continue
if fbx_obj.id != b'Pose':
continue
if fbx_obj.props[2] != b'BindPose':
continue
for fbx_pose_node in fbx_obj.elems:
if fbx_pose_node.id != b'PoseNode':
continue
node_elem = elem_find_first(fbx_pose_node, b'Node')
node = elem_uuid(node_elem)
matrix_elem = elem_find_first(fbx_pose_node, b'Matrix')
matrix = array_to_matrix4(matrix_elem.props[0]) if matrix_elem else None
bone = fbx_helper_nodes.get(node)
if bone and matrix:
bone.bind_matrix = matrix
for helper_uuid, helper_node in fbx_helper_nodes.items():
if not helper_node.is_bone:
continue
for cluster_uuid, cluster_link in fbx_connection_map.get(helper_uuid, ()):
if cluster_link.props[0] != b'OO':
continue
fbx_cluster, _ = fbx_table_nodes.get(cluster_uuid, (None, None))
if fbx_cluster is None or fbx_cluster.id != b'Deformer' or fbx_cluster.props[2] != b'Cluster':
continue
tx_mesh_elem = elem_find_first(fbx_cluster, b'Transform', default=None)
tx_mesh = array_to_matrix4(tx_mesh_elem.props[0]) if tx_mesh_elem else Matrix()
tx_bone_elem = elem_find_first(fbx_cluster, b'TransformLink', default=None)
tx_bone = array_to_matrix4(tx_bone_elem.props[0]) if tx_bone_elem else None
tx_arm_elem = elem_find_first(fbx_cluster, b'TransformAssociateModel', default=None)
tx_arm = array_to_matrix4(tx_arm_elem.props[0]) if tx_arm_elem else None
mesh_matrix = tx_mesh
armature_matrix = tx_arm
if tx_bone:
mesh_matrix = tx_bone @ mesh_matrix
helper_node.bind_matrix = tx_bone
meshes = set()
for skin_uuid, skin_link in fbx_connection_map.get(cluster_uuid):
if skin_link.props[0] != b'OO':
continue
fbx_skin, _ = fbx_table_nodes.get(skin_uuid, (None, None))
if fbx_skin is None or fbx_skin.id != b'Deformer' or fbx_skin.props[2] != b'Skin':
continue
for mesh_uuid, mesh_link in fbx_connection_map.get(skin_uuid):
if mesh_link.props[0] != b'OO':
continue
fbx_mesh, _ = fbx_table_nodes.get(mesh_uuid, (None, None))
if fbx_mesh is None or fbx_mesh.id != b'Geometry' or fbx_mesh.props[2] != b'Mesh':
continue
for object_uuid, object_link in fbx_connection_map.get(mesh_uuid):
if object_link.props[0] != b'OO':
continue
mesh_node = fbx_helper_nodes[object_uuid]
if mesh_node:
# ----
# If we get a valid mesh matrix (in bone space), store armature and
# mesh global matrices, we need them to compute mesh's matrix_parent_inverse
mesh_node.armature_setup[helper_node.armature] = (mesh_matrix, armature_matrix)
meshes.add(mesh_node)
helper_node.clusters.append((fbx_cluster, meshes))
root_helper.make_bind_pose_local()
root_helper.collect_armature_meshes()
root_helper.find_correction_matrix(settings)
root_helper.build_hierarchy(fbx_tmpl, settings, scene, view_layer)
root_helper.link_hierarchy(fbx_tmpl, settings, scene)
_(); del _
perfmon.step("FBX import: ShapeKeys...")
blend_shape_channels = {}
def _():
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape'))
for s_uuid, s_item in fbx_table_nodes.items():
fbx_sdata, bl_sdata = s_item = fbx_table_nodes.get(s_uuid, (None, None))
if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape':
continue
for bc_uuid, bc_ctype in fbx_connection_map.get(s_uuid, ()):
if bc_ctype.props[0] != b'OO':
continue
fbx_bcdata, _bl_bcdata = fbx_table_nodes.get(bc_uuid, (None, None))
if fbx_bcdata is None or fbx_bcdata.id != b'Deformer' or fbx_bcdata.props[2] != b'BlendShapeChannel':
continue
meshes = []
objects = []
for bs_uuid, bs_ctype in fbx_connection_map.get(bc_uuid, ()):
if bs_ctype.props[0] != b'OO':
continue
fbx_bsdata, _bl_bsdata = fbx_table_nodes.get(bs_uuid, (None, None))
if fbx_bsdata is None or fbx_bsdata.id != b'Deformer' or fbx_bsdata.props[2] != b'BlendShape':
continue
for m_uuid, m_ctype in fbx_connection_map.get(bs_uuid, ()):
if m_ctype.props[0] != b'OO':
continue
fbx_mdata, bl_mdata = fbx_table_nodes.get(m_uuid, (None, None))
if fbx_mdata is None or fbx_mdata.id != b'Geometry' or fbx_mdata.props[2] != b'Mesh':
continue
assert(isinstance(bl_mdata, bpy.types.Mesh))
objects = []
for o_uuid, o_ctype in fbx_connection_map.get(m_uuid, ()):
if o_ctype.props[0] != b'OO':
continue
node = fbx_helper_nodes[o_uuid]
if node:
objects.append(node)
meshes.append((bl_mdata, objects))
keyblocks = blen_read_shape(fbx_tmpl, fbx_sdata, fbx_bcdata, meshes, scene)
blend_shape_channels[bc_uuid] = keyblocks
_(); del _
if settings.use_subsurf:
perfmon.step("FBX import: Subdivision surfaces")
def _():
for fbx_link in fbx_connections.elems:
if fbx_link.props[0] != b'OO':
continue
if fbx_link.props_type[1:3] == b'LL':
c_src, c_dst = fbx_link.props[1:3]
parent = fbx_helper_nodes.get(c_dst)
if parent is None:
continue
child = fbx_helper_nodes.get(c_src)
if child is None:
fbx_sdata, bl_data = fbx_table_nodes.get(c_src, (None, None))
if fbx_sdata.id != b'Geometry':
continue
preview_levels = elem_prop_first(elem_find_first(fbx_sdata, b'PreviewDivisionLevels'))
render_levels = elem_prop_first(elem_find_first(fbx_sdata, b'RenderDivisionLevels'))
if isinstance(preview_levels, int) and isinstance(render_levels, int):
mod = parent.bl_obj.modifiers.new('subsurf', 'SUBSURF')
mod.levels = preview_levels
mod.render_levels = render_levels
_(); del _
if use_anim:
perfmon.step("FBX import: Animations...")
def _():
fbx_tmpl_astack = fbx_template_get((b'AnimationStack', b'FbxAnimStack'))
fbx_tmpl_alayer = fbx_template_get((b'AnimationLayer', b'FbxAnimLayer'))
stacks = {}
for as_uuid, fbx_asitem in fbx_table_nodes.items():
fbx_asdata, _blen_data = fbx_asitem
if fbx_asdata.id != b'AnimationStack' or fbx_asdata.props[2] != b'':
continue
stacks[as_uuid] = (fbx_asitem, {})
def get_astacks_from_alayer(al_uuid):
for as_uuid, as_ctype in fbx_connection_map.get(al_uuid, ()):
if as_ctype.props[0] != b'OO':
continue
fbx_asdata, _bl_asdata = fbx_table_nodes.get(as_uuid, (None, None))
if (fbx_asdata is None or fbx_asdata.id != b'AnimationStack' or
fbx_asdata.props[2] != b'' or as_uuid not in stacks):
continue
yield as_uuid
for al_uuid, fbx_alitem in fbx_table_nodes.items():
fbx_aldata, _blen_data = fbx_alitem
if fbx_aldata.id != b'AnimationLayer' or fbx_aldata.props[2] != b'':
continue
for as_uuid in get_astacks_from_alayer(al_uuid):
_fbx_asitem, alayers = stacks[as_uuid]
alayers[al_uuid] = (fbx_alitem, {})
curvenodes = {}
for acn_uuid, fbx_acnitem in fbx_table_nodes.items():
fbx_acndata, _blen_data = fbx_acnitem
if fbx_acndata.id != b'AnimationCurveNode' or fbx_acndata.props[2] != b'':
continue
cnode = curvenodes[acn_uuid] = {}
items = []
for n_uuid, n_ctype in fbx_connection_map.get(acn_uuid, ()):
if n_ctype.props[0] != b'OP':
continue
lnk_prop = n_ctype.props[3]
if lnk_prop in {b'Lcl Translation', b'Lcl Rotation', b'Lcl Scaling'}:
ob = fbx_helper_nodes.get(n_uuid, None)
if ob is None or ob.is_root:
continue
items.append((ob, lnk_prop))
elif lnk_prop == b'DeformPercent':
keyblocks = blend_shape_channels.get(n_uuid, None)
if keyblocks is None:
continue
items += [(kb, lnk_prop) for kb in keyblocks]
elif lnk_prop == b'FocalLength':
from bpy.types import Camera
fbx_item = fbx_table_nodes.get(n_uuid, None)
if fbx_item is None or not isinstance(fbx_item[1], Camera):
continue
cam = fbx_item[1]
items.append((cam, lnk_prop))
elif lnk_prop == b'DiffuseColor':
from bpy.types import Material
fbx_item = fbx_table_nodes.get(n_uuid, None)
if fbx_item is None or not isinstance(fbx_item[1], Material):
continue
mat = fbx_item[1]
items.append((mat, lnk_prop))
print("WARNING! Importing material's animation is not supported for Nodal materials...")
for al_uuid, al_ctype in fbx_connection_map.get(acn_uuid, ()):
if al_ctype.props[0] != b'OO':
continue
fbx_aldata, _blen_aldata = fbx_alitem = fbx_table_nodes.get(al_uuid, (None, None))
if fbx_aldata is None or fbx_aldata.id != b'AnimationLayer' or fbx_aldata.props[2] != b'':
continue
for as_uuid in get_astacks_from_alayer(al_uuid):
_fbx_alitem, anim_items = stacks[as_uuid][1][al_uuid]
assert(_fbx_alitem == fbx_alitem)
for item, item_prop in items:
# No need to keep curvenode FBX data here, contains nothing useful for us.
anim_items.setdefault(item, {})[acn_uuid] = (cnode, item_prop)
# AnimationCurves (real animation data).
for ac_uuid, fbx_acitem in fbx_table_nodes.items():
fbx_acdata, _blen_data = fbx_acitem
if fbx_acdata.id != b'AnimationCurve' or fbx_acdata.props[2] != b'':
continue
for acn_uuid, acn_ctype in fbx_connection_map.get(ac_uuid, ()):
if acn_ctype.props[0] != b'OP':
continue
fbx_acndata, _bl_acndata = fbx_table_nodes.get(acn_uuid, (None, None))
if (fbx_acndata is None or fbx_acndata.id != b'AnimationCurveNode' or
fbx_acndata.props[2] != b'' or acn_uuid not in curvenodes):
continue
# Note this is an infamous simplification of the compound props stuff,
# seems to be standard naming but we'll probably have to be smarter to handle more exotic files?
channel = {
b'd|X': 0, b'd|Y': 1, b'd|Z': 2,
b'd|DeformPercent': 0,
b'd|FocalLength': 0
}.get(acn_ctype.props[3], None)
if channel is None:
continue
curvenodes[acn_uuid][ac_uuid] = (fbx_acitem, channel)
blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, settings.anim_offset)
_(); del _
perfmon.step("FBX import: Assign materials...")
def _():
    """Assign imported materials to their meshes.

    For every FBX ``Geometry`` node, follow its ``Model`` connections
    forward and collect the materials linked to each model, appending
    each material to the Blender mesh at most once.
    """
    for fbx_uuid, fbx_item in fbx_table_nodes.items():
        fbx_obj, blen_data = fbx_item
        if fbx_obj.id != b'Geometry':
            continue

        # Geometry nodes that produced no Blender mesh are skipped.
        mesh = fbx_table_nodes.get(fbx_uuid, (None, None))[1]
        if mesh is None:
            continue

        # A mesh may be shared by several models; guard against adding
        # the same material twice.
        done_materials = set()

        for (fbx_lnk, fbx_lnk_item, fbx_lnk_type) in connection_filter_forward(fbx_uuid, b'Model'):
            fbx_lnk_uuid = elem_uuid(fbx_lnk)
            for (fbx_lnk_material, material, fbx_lnk_material_type) in connection_filter_reverse(fbx_lnk_uuid, b'Material'):
                if material not in done_materials:
                    mesh.materials.append(material)
                    done_materials.add(material)

        # Some FBX seem to have an extra 'default' material which is not defined in FBX file.
        if mesh.validate_material_indices():
            print("WARNING: mesh '%s' had invalid material indices, those were reset to first material" % mesh.name)
_(); del _
perfmon.step("FBX import: Assign textures...")
def _():
material_images = {}
fbx_tmpl = fbx_template_get((b'Material', b'KFbxSurfacePhong'))
# b'KFbxSurfaceLambert'
def texture_mapping_set(fbx_obj, node_texture):
assert(fbx_obj.id == b'Texture')
fbx_props = (elem_find_first(fbx_obj, b'Properties70'),
elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil))
loc = elem_props_get_vector_3d(fbx_props, b'Translation', (0.0, 0.0, 0.0))
rot = tuple(-r for r in elem_props_get_vector_3d(fbx_props, b'Rotation', (0.0, 0.0, 0.0)))
scale = tuple(((1.0 / s) if s != 0.0 else 1.0)
for s in elem_props_get_vector_3d(fbx_props, b'Scaling', (1.0, 1.0, 1.0)))
clamp = (bool(elem_props_get_enum(fbx_props, b'WrapModeU', 0)) or
bool(elem_props_get_enum(fbx_props, b'WrapModeV', 0)))
if (loc == (0.0, 0.0, 0.0) and
rot == (0.0, 0.0, 0.0) and
scale == (1.0, 1.0, 1.0) and
clamp == False):
return
node_texture.translation = loc
node_texture.rotation = rot
node_texture.scale = scale
if clamp:
node_texture.extension = 'EXTEND'
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Material':
continue
material = fbx_table_nodes.get(fbx_uuid, (None, None))[1]
for (fbx_lnk,
image,
fbx_lnk_type) in connection_filter_reverse(fbx_uuid, b'Texture'):
if fbx_lnk_type.props[0] == b'OP':
lnk_type = fbx_lnk_type.props[3]
ma_wrap = nodal_material_wrap_map[material]
if lnk_type in {b'DiffuseColor', b'3dsMax|maps|texmap_diffuse'}:
ma_wrap.base_color_texture.image = image
texture_mapping_set(fbx_lnk, ma_wrap.base_color_texture)
elif lnk_type in {b'SpecularColor', b'SpecularFactor'}:
# Intensity actually, not color...
ma_wrap.specular_texture.image = image
texture_mapping_set(fbx_lnk, ma_wrap.specular_texture)
elif lnk_type in {b'ReflectionColor', b'ReflectionFactor', b'3dsMax|maps|texmap_reflection'}:
# Intensity actually, not color...
ma_wrap.metallic_texture.image = image
texture_mapping_set(fbx_lnk, ma_wrap.metallic_texture)
elif lnk_type in {b'TransparentColor', b'TransparentFactor'}:
ma_wrap.alpha_texture.image = image
texture_mapping_set(fbx_lnk, ma_wrap.alpha_texture)
if use_alpha_decals:
material_decals.add(material)
elif lnk_type == b'ShininessExponent':
# That is probably reversed compared to expected results? TODO...
ma_wrap.roughness_texture.image = image
texture_mapping_set(fbx_lnk, ma_wrap.roughness_texture)
# XXX, applications abuse bump!
elif lnk_type in {b'NormalMap', b'Bump', b'3dsMax|maps|texmap_bump'}:
ma_wrap.normalmap_texture.image = image
texture_mapping_set(fbx_lnk, ma_wrap.normalmap_texture)
"""
elif lnk_type == b'Bump':
# TODO displacement...
"""
elif lnk_type in {b'EmissiveColor'}:
ma_wrap.emission_color_texture.image = image
texture_mapping_set(fbx_lnk, ma_wrap.emission_color_texture)
else:
print("WARNING: material link %r ignored" % lnk_type)
material_images.setdefault(material, {})[lnk_type] = image
# Check if the diffuse image has an alpha channel,
# if so, use the alpha channel.
# Note: this could be made optional since images may have alpha but be entirely opaque
for fbx_uuid, fbx_item in fbx_table_nodes.items():
fbx_obj, blen_data = fbx_item
if fbx_obj.id != b'Material':
continue
material = fbx_table_nodes.get(fbx_uuid, (None, None))[1]
image = material_images.get(material, {}).get(b'DiffuseColor', None)
# do we have alpha?
if image and image.depth == 32:
if use_alpha_decals:
material_decals.add(material)
ma_wrap = nodal_material_wrap_map[material]
ma_wrap.alpha_texture.use_alpha = True
ma_wrap.alpha_texture.copy_from(ma_wrap.base_color_texture)
# Propagate mapping from diffuse to all other channels which have none defined.
# XXX Commenting for now, I do not really understand the logic here, why should diffuse mapping
# be applied to all others if not defined for them???
# ~ ma_wrap = nodal_material_wrap_map[material]
# ~ ma_wrap.mapping_set_from_diffuse()
_(); del _
perfmon.step("FBX import: Cycles z-offset workaround...")
def _():
    """Workaround for Cycles having no material z-offset for decals.

    When alpha decals are enabled, physically push the vertices of every
    mesh that uses a decal material along their normals by ``decal_offset``
    and disable shadow casting on the objects that use that mesh.
    """
    # Annoying workaround for cycles having no z-offset
    if material_decals and use_alpha_decals:
        for fbx_uuid, fbx_item in fbx_table_nodes.items():
            fbx_obj, blen_data = fbx_item
            if fbx_obj.id != b'Geometry':
                continue
            if fbx_obj.props[-1] == b'Mesh':
                mesh = fbx_item[1]

                if decal_offset != 0.0:
                    for material in mesh.materials:
                        if material in material_decals:
                            for v in mesh.vertices:
                                v.co += v.normal * decal_offset
                            # offsetting once per mesh is enough — stop at
                            # the first decal material found
                            break

                for obj in (obj for obj in bpy.data.objects if obj.data == mesh):
                    obj.cycles_visibility.shadow = False
_(); del _
perfmon.level_down()
perfmon.level_down("Import finished.")
return {'FINISHED'}
| true | true |
1c3448577386eda5a0b2ccf0122ca2762a88e024 | 414 | py | Python | quokka/utils/translation.py | mysky528/quokka | d2c3c169f6b78cace154274747297f8e1dc56825 | [
"MIT"
] | null | null | null | quokka/utils/translation.py | mysky528/quokka | d2c3c169f6b78cace154274747297f8e1dc56825 | [
"MIT"
] | null | null | null | quokka/utils/translation.py | mysky528/quokka | d2c3c169f6b78cace154274747297f8e1dc56825 | [
"MIT"
] | null | null | null | from flask import g
from babel.support import LazyProxy
from flask_babelex import gettext, lazy_gettext, ngettext
# from quokka.utils.translations import ugettext_lazy as _
def ugettext(s):
    """Translate *s* with the request-scoped translations object.

    A ``before_request`` hook is assumed to have attached the correct
    user-specific translations to ``flask.g`` before this is called.
    """
    translations = g.translations
    return translations.ugettext(s)
def ugettext_lazy(s):
    """Return a lazy proxy that defers translating *s* until it is used.

    Babel's ``LazyProxy(func, *args)`` evaluates ``func(*args)`` on first
    access, so the string must be bound per call.  The previous
    module-level ``LazyProxy(ugettext)`` created a single proxy with no
    argument, and evaluating it raised ``TypeError`` because ``ugettext``
    requires one positional argument.
    """
    return LazyProxy(ugettext, s)
# Short aliases following the common Flask-Babel convention.
_ = gettext  # immediate translation
_l = lazy_gettext  # lazy translation, evaluated at render time
_n = ngettext  # plural-aware translation
| 21.789474 | 58 | 0.772947 | from flask import g
from babel.support import LazyProxy
from flask_babelex import gettext, lazy_gettext, ngettext
def ugettext(s):
return g.translations.ugettext(s)
ugettext_lazy = LazyProxy(ugettext)
_ = gettext
_l = lazy_gettext
_n = ngettext
| true | true |
1c344920d95c5e1359987c32f63769ecf95ca103 | 4,388 | py | Python | project/migrations/0001_initial.py | developerayyo/projectx | 5f67502cb96ac3bc59031d48440982b1c217d535 | [
"MIT"
] | null | null | null | project/migrations/0001_initial.py | developerayyo/projectx | 5f67502cb96ac3bc59031d48440982b1c217d535 | [
"MIT"
] | null | null | null | project/migrations/0001_initial.py | developerayyo/projectx | 5f67502cb96ac3bc59031d48440982b1c217d535 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-14 19:17
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated schema: Department, Faculty, Project and a custom User."""

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Faculty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topic', models.CharField(max_length=500)),
                ('student_name', models.CharField(max_length=500)),
                ('year', models.DateField()),
                ('supervisor_name', models.CharField(max_length=500)),
                ('document', models.FileField(upload_to='documents/')),
                # NOTE(review): field name 'darpartment' is misspelled ('department');
                # fixing it now would require a follow-up rename migration — left as-is.
                ('darpartment', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='project.department')),
                ('faculty', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='project.faculty')),
            ],
        ),
        # Custom user model mirroring django.contrib.auth's AbstractUser fields,
        # plus is_admin, picture and a nullable email.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('is_admin', models.BooleanField(default=False)),
                ('picture', models.ImageField(blank=True, null=True, upload_to='pictures/')),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 58.506667 | 329 | 0.628304 |
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Faculty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic', models.CharField(max_length=500)),
('student_name', models.CharField(max_length=500)),
('year', models.DateField()),
('supervisor_name', models.CharField(max_length=500)),
('document', models.FileField(upload_to='documents/')),
('darpartment', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='project.department')),
('faculty', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='project.faculty')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('is_admin', models.BooleanField(default=False)),
('picture', models.ImageField(blank=True, null=True, upload_to='pictures/')),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| true | true |
1c3449c19d9db174777b8c944dcf2839d91a05c4 | 555 | py | Python | manage.py | JennyRemolina/api-projex | 9c34a7dc0036c09215a15b9e1ad59dd025ef2705 | [
"MIT"
] | 1 | 2019-05-31T04:40:09.000Z | 2019-05-31T04:40:09.000Z | manage.py | JennyRemolina/api-projex | 9c34a7dc0036c09215a15b9e1ad59dd025ef2705 | [
"MIT"
] | 4 | 2020-06-05T20:41:34.000Z | 2021-09-08T00:58:10.000Z | manage.py | JennyRemolina/api-projex | 9c34a7dc0036c09215a15b9e1ad59dd025ef2705 | [
"MIT"
] | 3 | 2019-05-31T04:40:04.000Z | 2020-02-08T21:54:23.000Z | #!/usr/bin/env python
import os
import sys
def _main():
    """Select the development settings module and dispatch to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projexbackend.config.development')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    _main()
| 34.6875 | 87 | 0.693694 |
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projexbackend.config.development')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true | true |
1c344a24eb5a6f062aeec960eb783508f8f251c8 | 45 | py | Python | test/login.py | Super-ye/yeye | 11515021d837812bf816dcb08f0f965736863d75 | [
"MIT"
] | null | null | null | test/login.py | Super-ye/yeye | 11515021d837812bf816dcb08f0f965736863d75 | [
"MIT"
] | null | null | null | test/login.py | Super-ye/yeye | 11515021d837812bf816dcb08f0f965736863d75 | [
"MIT"
] | null | null | null | num1 = 10
num2 = 20
num3 = 300
num3 = 40
| 5 | 10 | 0.555556 | num1 = 10
num2 = 20
num3 = 300
num3 = 40
| true | true |
1c344a958ffe3e6892f6de64738dce43a9ec63e1 | 327 | py | Python | setup.py | m2man/pytorch-lung-segmentation | 1845841b1cc04f179ac9e7552b8b0865443f81f5 | [
"MIT"
] | 11 | 2018-10-23T08:50:54.000Z | 2022-01-11T07:18:02.000Z | setup.py | m2man/pytorch-lung-segmentation | 1845841b1cc04f179ac9e7552b8b0865443f81f5 | [
"MIT"
] | 1 | 2020-03-11T08:31:58.000Z | 2020-03-14T03:03:40.000Z | setup.py | m2man/pytorch-lung-segmentation | 1845841b1cc04f179ac9e7552b8b0865443f81f5 | [
"MIT"
] | 4 | 2019-10-02T12:34:12.000Z | 2021-07-16T14:11:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Distribution metadata.
# NOTE(review): the project name contains spaces, which setuptools/pip will
# normalize; confirm a dashed name is not preferred.
setup(name='Pytorch Lung Segmentation',
      version="1.0",
      description='CXR Lung Segmentation Tools for Pytorch',
      packages=find_packages(exclude=[]),  # pick up every package found in the tree
      entry_points={
      },  # no console-script entry points are registered
      include_package_data=True,  # also ship non-Python files declared for the package
      )
| 23.357143 | 60 | 0.651376 |
from setuptools import setup, find_packages
setup(name='Pytorch Lung Segmentation',
version="1.0",
description='CXR Lung Segmentation Tools for Pytorch',
packages=find_packages(exclude=[]),
entry_points={
},
include_package_data=True,
)
| true | true |
1c344aad8538ab1fa6efeddea18a32c48a3a3f05 | 8,335 | py | Python | Packs/Exabeam/Integrations/Exabeam/test_data/response_constants.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/Exabeam/Integrations/Exabeam/test_data/response_constants.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/Exabeam/Integrations/Exabeam/test_data/response_constants.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | RESPONSE_PEER_GROUPS = [
"Marketing",
"usa",
"101",
"Program Manager",
"Channel Administrator",
"Chief Marketing Officer",
"",
"Chief Strategy Officer",
"CN=Andrew",
"BitLockerUsersComputers"
]
RESPONSE_USER_LABELS = [
"privileged_user",
"service_account"
]
# Watchlist entries: each has a category (UserLabels/Users/PeerGroups),
# a display title, and a watchlist id.
RESPONSE_WATCHLISTS = [
    {
        "category": "UserLabels",
        "title": "Executive Users",
        "watchlistId": "1234"
    },
    {
        "category": "UserLabels",
        "title": "Service Accounts",
        "watchlistId": "1111"
    },
    {
        "category": "Users",
        "title": "user watchlist",
        "watchlistId": "2222"
    },
    {
        "category": "PeerGroups",
        "title": "VP Operations",
        "watchlistId": "3333"
    }
]
# Single-asset lookup response; timestamps are epoch milliseconds.
RESPONSE_ASSET_DATA = {
    "asset": {
        "assetType": "Windows",
        "compromisedTime": 0,  # 0 — presumably "never compromised"; confirm
        "firstSeen": 1530627660000,
        "hostName": "name",
        "ipAddress": "1.2.3.4",
        "lastSeen": 1538324597000
    }
}
RESPONSE_SESSION_INFO = { 'sessionInfo': {
"numOfAssets": 29,
"riskScore": 0,
"numOfAccounts": 1,
"accounts": [],
"zones": [],
"endTime": "1591071360000",
"numOfZones": 5,
"startTime": "1591021860000",
"loginHost": "lt-dummy-888",
"sessionId": "dummy-20200601143100",
"numOfReasons": 0,
"label": "",
"username": "dummy",
"numOfSecurityEvents": 0,
"numOfEvents": 62,
"initialRiskScore": 0
}
}
RESPONSE_MODEL_DATA = {
"agingWindow": 32,
"alpha": 0.8,
"binWidth": None,
"category": "Other",
"convergenceFilter": "confidence_factor>=0.8",
"cutOff": 5,
"description": "Models which security groups users are being added to in the organization",
"disabled": "FALSE",
"feature": "group_name",
"featureName": "group_name",
"featureType": "group_name",
"histogramEventTypes": "member-added",
"iconName": None,
"maxNumberOfBins": 1000000,
"modelTemplate": "Account management, groups which users are being added to",
"modelType": "CATEGORICAL",
"name": "dummy",
"scopeType": "ORG",
"scopeValue": "org",
"trainIf": "TRUE"
}
RESPONSE_NOTABLE_ASSET_DATA = {
'assets': [{
'asset': {
'hostName': 'host',
'ipAddress': '1.1.1.1',
'assetType': 'test',
'firstSeen': 1591022160000,
'lastSeen': 1593820320000
},
'highestRiskScore': 150,
'highestRiskSequence': {
'id': '1111',
'entityName': 'asset',
'entityValue': 'test',
'day': 1593648000000,
'triggeredRuleCountOpt': 15,
'riskScoreOpt': 150.0
},
'latestAssetComment': {
'commentId': 'test1111',
'commentType': 'asset',
'commentObjectId': 'test',
'text': 'test',
'exaUser': 'test',
'exaUserFullname': '',
'createTime': 1612275291188,
'updateTime': 1612275291188,
'edited': False
}
}]
}
RESPONSE_NOTABLE_SESSION_DETAILS = {
'totalCount': 2, 'sessions': [
{'sessionId': 'session1', 'username': 'username1', 'startTime': 1593704040000,
'endTime': 1593727380000, 'initialRiskScore': 0, 'riskScore': 110, 'numOfReasons': 9,
'loginHost': 'host1', 'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
'zones': ['zone1', 'zone2'], 'numOfZones': 2, 'numOfAssets': 7, 'numOfEvents': 6,
'numOfSecurityEvents': 0},
{'sessionId': 'session2', 'username': 'username2', 'startTime': 1593682380000,
'endTime': 1593727260000, 'initialRiskScore': 26, 'riskScore': 313, 'numOfReasons': 39, 'loginHost': 'host2',
'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
'zones': ['zone1', 'zone2', 'zone3', 'zone4'], 'numOfZones': 4,
'numOfAssets': 17, 'numOfEvents': 30, 'numOfSecurityEvents': 1, 'riskTransferScore': 126.0}],
'users': {
'username2': {'username': 'username2', 'riskScore': 313.18, 'averageRiskScore': 171.41,
'pastScores': [287.19, 218.36, 0.0, 0.0, 0.0, 0.0, 0.0], 'lastSessionId': 'session2',
'firstSeen': 1591021500000, 'lastSeen': 1593820320000, 'lastActivityType': 'Account is active',
'lastActivityTime': 1593818940000,
'info': {'location': 'us',
'photo': '',
'phoneCell': '1234567890',
'email': 'test@.com',
'employeeType': 'employee', 'fullName': 'user username2',
'departmentNumber': '000',
'dn': 'test',
'country': 'usa', 'division': 'division',
'department': 'department',
'manager': 'test',
'phoneOffice': '1234567890',
'employeeNumber': '1234',
'title': 'title',
'group': 'test'},
'labels': [],
'pendingRiskTransfers': []},
'mburgess': {'username': 'username1', 'riskScore': 109.73, 'averageRiskScore': 52.25,
'pastScores': [109.7382543963077], 'lastSessionId': 'session1',
'firstSeen': 1591025220000, 'lastSeen': 1593727380000, 'lastActivityType': 'Account is active',
'lastActivityTime': 1593704040000,
'info': {'location': 'us',
'photo': '',
'phoneCell': '1234567890',
'email': 'test@.com',
'employeeType': 'employee',
'fullName': 'user username1', 'departmentNumber': '000',
'dn': 'test',
'country': 'usa', 'division': 'division',
'department': 'department',
'manager': 'test',
'phoneOffice': '1234567890',
'employeeNumber': '1234',
'title': 'title',
'group': 'test'}, 'labels': [],
'pendingRiskTransfers': []}},
'executiveUserFlags': {'username1': False, 'username2': False}
}
RESPONSE_NOTABLE_SEQUENCE_DETAILS = [{
'sequenceId': 'ID',
'isWhitelisted': False,
'areAllTriggeredRulesWhiteListed': False,
'sequenceInfo': {
'startTime': 1593648000000,
'endTime': 1593734399999,
'riskScore': 150,
'numOfReasons': 8,
'numOfEvents': 18,
'numOfUsers': 4,
'numOfSecurityEvents': 0,
'numOfZones': 3,
'numOfAssets': 8,
'sequenceId': 'ID',
'assetId': 'ID'},
'hasBeenPartiallyWhiteListed': False
}]
RESPONSE_NOTABLE_SEQUENCE_EVENTS = [{
'eventType': 'type1',
'displayName': 'dn1',
'count': 1},
{'eventType': 'type2',
'displayName': 'dn2',
'count': 1},
{'eventType': 'type3',
'displayName': 'dn3',
'count': 1},
{'eventType': 'type4',
'displayName': 'dn4',
'count': 1},
{'eventType': 'type5',
'displayName': 'dn5',
'count': 2},
{'eventType': 'type6',
'displayName': 'dn6',
'count': 2},
{'eventType': 'type7',
'displayName': 'dn7',
'count': 8},
{'eventType': 'type8',
'displayName': 'dn8',
'count': 1},
{'eventType': 'type9',
'displayName': 'dn9',
'count': 1}
]
# Response to a context-table record deletion: one 'removed' change plus
# metadata summarizing created/updated/removed counts.
DELETE_RECORD_RESPONSE = {'sessionId': '56a5b19a-4193-4616-9978-0bbabb1e2d60',
                          'recordChanges': [{
                              'changeType': 'removed',
                              'changeId': '4aad5392-20e7-4423-abcb-a9680c566215',
                              'record': {'key': '', 'id': 'test_key'}
                          }],
                          'metadata': {'createdSize': 0, 'updatedSize': 0, 'removedSize': 1, 'duplicates': []}}
| 35.317797 | 118 | 0.486623 | RESPONSE_PEER_GROUPS = [
"Marketing",
"usa",
"101",
"Program Manager",
"Channel Administrator",
"Chief Marketing Officer",
"",
"Chief Strategy Officer",
"CN=Andrew",
"BitLockerUsersComputers"
]
RESPONSE_USER_LABELS = [
"privileged_user",
"service_account"
]
RESPONSE_WATCHLISTS = [
{
"category": "UserLabels",
"title": "Executive Users",
"watchlistId": "1234"
},
{
"category": "UserLabels",
"title": "Service Accounts",
"watchlistId": "1111"
},
{
"category": "Users",
"title": "user watchlist",
"watchlistId": "2222"
},
{
"category": "PeerGroups",
"title": "VP Operations",
"watchlistId": "3333"
}
]
RESPONSE_ASSET_DATA = {
"asset": {
"assetType": "Windows",
"compromisedTime": 0,
"firstSeen": 1530627660000,
"hostName": "name",
"ipAddress": "1.2.3.4",
"lastSeen": 1538324597000
}
}
RESPONSE_SESSION_INFO = { 'sessionInfo': {
"numOfAssets": 29,
"riskScore": 0,
"numOfAccounts": 1,
"accounts": [],
"zones": [],
"endTime": "1591071360000",
"numOfZones": 5,
"startTime": "1591021860000",
"loginHost": "lt-dummy-888",
"sessionId": "dummy-20200601143100",
"numOfReasons": 0,
"label": "",
"username": "dummy",
"numOfSecurityEvents": 0,
"numOfEvents": 62,
"initialRiskScore": 0
}
}
RESPONSE_MODEL_DATA = {
"agingWindow": 32,
"alpha": 0.8,
"binWidth": None,
"category": "Other",
"convergenceFilter": "confidence_factor>=0.8",
"cutOff": 5,
"description": "Models which security groups users are being added to in the organization",
"disabled": "FALSE",
"feature": "group_name",
"featureName": "group_name",
"featureType": "group_name",
"histogramEventTypes": "member-added",
"iconName": None,
"maxNumberOfBins": 1000000,
"modelTemplate": "Account management, groups which users are being added to",
"modelType": "CATEGORICAL",
"name": "dummy",
"scopeType": "ORG",
"scopeValue": "org",
"trainIf": "TRUE"
}
RESPONSE_NOTABLE_ASSET_DATA = {
'assets': [{
'asset': {
'hostName': 'host',
'ipAddress': '1.1.1.1',
'assetType': 'test',
'firstSeen': 1591022160000,
'lastSeen': 1593820320000
},
'highestRiskScore': 150,
'highestRiskSequence': {
'id': '1111',
'entityName': 'asset',
'entityValue': 'test',
'day': 1593648000000,
'triggeredRuleCountOpt': 15,
'riskScoreOpt': 150.0
},
'latestAssetComment': {
'commentId': 'test1111',
'commentType': 'asset',
'commentObjectId': 'test',
'text': 'test',
'exaUser': 'test',
'exaUserFullname': '',
'createTime': 1612275291188,
'updateTime': 1612275291188,
'edited': False
}
}]
}
RESPONSE_NOTABLE_SESSION_DETAILS = {
'totalCount': 2, 'sessions': [
{'sessionId': 'session1', 'username': 'username1', 'startTime': 1593704040000,
'endTime': 1593727380000, 'initialRiskScore': 0, 'riskScore': 110, 'numOfReasons': 9,
'loginHost': 'host1', 'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
'zones': ['zone1', 'zone2'], 'numOfZones': 2, 'numOfAssets': 7, 'numOfEvents': 6,
'numOfSecurityEvents': 0},
{'sessionId': 'session2', 'username': 'username2', 'startTime': 1593682380000,
'endTime': 1593727260000, 'initialRiskScore': 26, 'riskScore': 313, 'numOfReasons': 39, 'loginHost': 'host2',
'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
'zones': ['zone1', 'zone2', 'zone3', 'zone4'], 'numOfZones': 4,
'numOfAssets': 17, 'numOfEvents': 30, 'numOfSecurityEvents': 1, 'riskTransferScore': 126.0}],
'users': {
'username2': {'username': 'username2', 'riskScore': 313.18, 'averageRiskScore': 171.41,
'pastScores': [287.19, 218.36, 0.0, 0.0, 0.0, 0.0, 0.0], 'lastSessionId': 'session2',
'firstSeen': 1591021500000, 'lastSeen': 1593820320000, 'lastActivityType': 'Account is active',
'lastActivityTime': 1593818940000,
'info': {'location': 'us',
'photo': '',
'phoneCell': '1234567890',
'email': 'test@.com',
'employeeType': 'employee', 'fullName': 'user username2',
'departmentNumber': '000',
'dn': 'test',
'country': 'usa', 'division': 'division',
'department': 'department',
'manager': 'test',
'phoneOffice': '1234567890',
'employeeNumber': '1234',
'title': 'title',
'group': 'test'},
'labels': [],
'pendingRiskTransfers': []},
'mburgess': {'username': 'username1', 'riskScore': 109.73, 'averageRiskScore': 52.25,
'pastScores': [109.7382543963077], 'lastSessionId': 'session1',
'firstSeen': 1591025220000, 'lastSeen': 1593727380000, 'lastActivityType': 'Account is active',
'lastActivityTime': 1593704040000,
'info': {'location': 'us',
'photo': '',
'phoneCell': '1234567890',
'email': 'test@.com',
'employeeType': 'employee',
'fullName': 'user username1', 'departmentNumber': '000',
'dn': 'test',
'country': 'usa', 'division': 'division',
'department': 'department',
'manager': 'test',
'phoneOffice': '1234567890',
'employeeNumber': '1234',
'title': 'title',
'group': 'test'}, 'labels': [],
'pendingRiskTransfers': []}},
'executiveUserFlags': {'username1': False, 'username2': False}
}
RESPONSE_NOTABLE_SEQUENCE_DETAILS = [{
'sequenceId': 'ID',
'isWhitelisted': False,
'areAllTriggeredRulesWhiteListed': False,
'sequenceInfo': {
'startTime': 1593648000000,
'endTime': 1593734399999,
'riskScore': 150,
'numOfReasons': 8,
'numOfEvents': 18,
'numOfUsers': 4,
'numOfSecurityEvents': 0,
'numOfZones': 3,
'numOfAssets': 8,
'sequenceId': 'ID',
'assetId': 'ID'},
'hasBeenPartiallyWhiteListed': False
}]
RESPONSE_NOTABLE_SEQUENCE_EVENTS = [{
'eventType': 'type1',
'displayName': 'dn1',
'count': 1},
{'eventType': 'type2',
'displayName': 'dn2',
'count': 1},
{'eventType': 'type3',
'displayName': 'dn3',
'count': 1},
{'eventType': 'type4',
'displayName': 'dn4',
'count': 1},
{'eventType': 'type5',
'displayName': 'dn5',
'count': 2},
{'eventType': 'type6',
'displayName': 'dn6',
'count': 2},
{'eventType': 'type7',
'displayName': 'dn7',
'count': 8},
{'eventType': 'type8',
'displayName': 'dn8',
'count': 1},
{'eventType': 'type9',
'displayName': 'dn9',
'count': 1}
]
DELETE_RECORD_RESPONSE = {'sessionId': '56a5b19a-4193-4616-9978-0bbabb1e2d60',
'recordChanges': [{
'changeType': 'removed',
'changeId': '4aad5392-20e7-4423-abcb-a9680c566215',
'record': {'key': '', 'id': 'test_key'}
}],
'metadata': {'createdSize': 0, 'updatedSize': 0, 'removedSize': 1, 'duplicates': []}}
| true | true |
1c344b173215b8b1bac7e4170ab0865b4dceac29 | 2,083 | py | Python | graph_solver/solver.py | fleeb24/nlp_capstone | 79eb02fb7174a42ce66ef9121f39ff3d2ac76100 | [
"MIT"
] | null | null | null | graph_solver/solver.py | fleeb24/nlp_capstone | 79eb02fb7174a42ce66ef9121f39ff3d2ac76100 | [
"MIT"
] | null | null | null | graph_solver/solver.py | fleeb24/nlp_capstone | 79eb02fb7174a42ce66ef9121f39ff3d2ac76100 | [
"MIT"
] | null | null | null | """base class that solvers should inherit from"""
from typing import Any
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, \
SolverAnswer, parse_question
# built in `json` module doesn't serialize namedtuples correctly; `simplejson` does.
import simplejson as json
from flask import Flask, request
from flask_cors import CORS
import time
from memory_profiler import profile
class SolverBase:
    """
    Interface for solvers: inherit from this class, override
    `answer_question` and `solver_info`, then call `run()` to expose the
    solver over HTTP.
    """
    # Log target for memory_profiler output.
    # NOTE(review): this handle is opened at class-definition time and never
    # closed -- it lives for the whole process; confirm that is intentional.
    output = open("mem_usage.txt", "a+")
    @profile(stream=output)
    def run(self, host='localhost', port=8000) -> None:
        """Run the solver: start a blocking Flask server on host:port."""
        app = Flask(__name__)
        CORS(app)
        @app.route('/answer', methods=['GET', 'POST'])
        def solve() -> Any:  # pylint: disable=unused-variable
            """
            Parse a json-serialized MultipleChoiceQuestion from the request
            body, feed it to answer_question, and return the json-serialized
            SolverAnswer, including the wall-clock time spent answering.
            """
            body = request.get_json(force=True)
            question = parse_question(body)
            start_time = time.time()
            multiple_choice_answer = self.answer_question(question)
            time_elapsed = time.time() - start_time
            solver_answer = SolverAnswer(solverInfo=self.solver_info(),
                                         multipleChoiceAnswer=multiple_choice_answer,
                                         timeElapsed=time_elapsed)
            return json.dumps(solver_answer)
        @app.route('/solver-info')
        def info():  # pylint: disable=unused-variable
            """Return the solver name."""
            return self.solver_info()
        app.run(host=host, port=port)
    def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
        """Answer the question; subclasses must override."""
        raise NotImplementedError()
    def solver_info(self) -> str:
        """Info about the solver; subclasses must override."""
        raise NotImplementedError()
| 36.54386 | 92 | 0.636102 |
from typing import Any
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, \
SolverAnswer, parse_question
import simplejson as json
from flask import Flask, request
from flask_cors import CORS
import time
from memory_profiler import profile
class SolverBase:
output = open("mem_usage.txt", "a+")
@profile(stream=output)
def run(self, host='localhost', port=8000) -> None:
app = Flask(__name__)
CORS(app)
@app.route('/answer', methods=['GET', 'POST'])
def solve() -> Any: # pylint: disable=unused-variable
body = request.get_json(force=True)
question = parse_question(body)
start_time = time.time()
multiple_choice_answer = self.answer_question(question)
time_elapsed = time.time() - start_time
solver_answer = SolverAnswer(solverInfo=self.solver_info(),
multipleChoiceAnswer=multiple_choice_answer,
timeElapsed=time_elapsed)
return json.dumps(solver_answer)
@app.route('/solver-info')
def info(): # pylint: disable=unused-variable
return self.solver_info()
app.run(host=host, port=port)
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
raise NotImplementedError()
def solver_info(self) -> str:
raise NotImplementedError()
| true | true |
1c344b70c9edb692f04feab28d891f594e723e8d | 2,407 | py | Python | kumoslab/getServer.py | trand2/Discord-Levels-Bot | ab445ef3291efecf0f0ba36907eab99121d51b89 | [
"Apache-2.0"
] | 38 | 2021-07-10T07:02:58.000Z | 2022-03-30T20:06:58.000Z | kumoslab/getServer.py | trand2/Discord-Levels-Bot | ab445ef3291efecf0f0ba36907eab99121d51b89 | [
"Apache-2.0"
] | 6 | 2021-02-20T18:28:37.000Z | 2021-04-12T05:24:42.000Z | kumoslab/getServer.py | trand2/Discord-Levels-Bot | ab445ef3291efecf0f0ba36907eab99121d51b89 | [
"Apache-2.0"
] | 34 | 2021-07-05T04:31:16.000Z | 2022-03-29T16:28:02.000Z | import discord
from Systems.levelsys import levelling
async def xpPerMessage(guildID=None):
    """Return the guild's XP-per-message setting as a string.

    Prints a usage message when no guild id is given, and prints the error
    when the database lookup fails; in both cases None is returned.
    """
    # Guard clause: the lookup cannot run without a guild id.
    if guildID is None:
        print("xpPerMessage requires 'guildID'.")
        return
    try:
        stats = levelling.find_one({"server": guildID})
        return str(stats['xp_per_message'])
    except Exception as err:
        print(f"xpPerMessage ran into an error!\n\n{err}")
async def doubleXPRole(guildID=None):
    """Return the guild's double-XP role as a string.

    Prints a usage message when no guild id is given, and prints the error
    when the database lookup fails; in both cases None is returned.
    """
    # Guard clause: the lookup cannot run without a guild id.
    if guildID is None:
        print("doubleXPRole requires 'guildID'.")
        return
    try:
        stats = levelling.find_one({"server": guildID})
        return str(stats['double_xp_role'])
    except Exception as err:
        print(f"doubleXPRole ran into an error!\n\n{err}")
async def levelChannel(guildID=None):
    """Return the guild's level-up announcement channel as a string.

    Prints a usage message when no guild id is given, and prints the error
    when the database lookup fails; in both cases None is returned.
    """
    # Guard clause: the lookup cannot run without a guild id.
    if guildID is None:
        print("levelChannel requires 'guildID'.")
        return
    try:
        stats = levelling.find_one({"server": guildID})
        return str(stats['level_channel'])
    except Exception as err:
        print(f"levelChannel ran into an error!\n\n{err}")
async def getLevels(guildID=None):
    """Return the guild's configured levels as a string.

    Prints a usage message when no guild id is given, and prints the error
    when the database lookup fails; in both cases None is returned.
    """
    # Guard clause: the lookup cannot run without a guild id.
    if guildID is None:
        print("getLevels requires 'guildID'.")
        return
    try:
        stats = levelling.find_one({"server": guildID})
        return str(stats['level'])
    except Exception as err:
        print(f"getLevels ran into an error!\n\n{err}")
async def getRoles(guildID=None):
    """Return the guild's configured reward roles as a string.

    Prints a usage message when no guild id is given, and prints the error
    when the database lookup fails; in both cases None is returned.
    """
    # Guard clause: the lookup cannot run without a guild id.
    if guildID is None:
        print("getRoles requires 'guildID'.")
        return
    try:
        stats = levelling.find_one({"server": guildID})
        return str(stats['role'])
    except Exception as err:
        print(f"getRoles ran into an error!\n\n{err}")
async def ignoredRole(guildID=None):
    """Return the guild's XP-ignored role as a string.

    Prints a usage message when no guild id is given, and prints the error
    when the database lookup fails; in both cases None is returned.
    """
    # Guard clause: the lookup cannot run without a guild id.
    if guildID is None:
        print("ignoredRole requires 'guildID'.")
        return
    try:
        stats = levelling.find_one({"server": guildID})
        return str(stats['ignoredRole'])
    except Exception as err:
        print(f"ignoredRole ran into an error!\n\n{err}")
| 29.353659 | 61 | 0.561695 | import discord
from Systems.levelsys import levelling
async def xpPerMessage(guildID=None):
if guildID is None:
print("xpPerMessage requires 'guildID'.")
return
else:
try:
stats = levelling.find_one({"server": guildID})
xp_message = stats['xp_per_message']
return str(xp_message)
except Exception as e:
print(f"xpPerMessage ran into an error!\n\n{e}")
async def doubleXPRole(guildID=None):
if guildID is None:
print("doubleXPRole requires 'guildID'.")
return
else:
try:
stats = levelling.find_one({"server": guildID})
double_xp = stats['double_xp_role']
return str(double_xp)
except Exception as e:
print(f"doubleXPRole ran into an error!\n\n{e}")
async def levelChannel(guildID=None):
if guildID is None:
print("levelChannel requires 'guildID'.")
return
else:
try:
stats = levelling.find_one({"server": guildID})
level_channel = stats['level_channel']
return str(level_channel)
except Exception as e:
print(f"levelChannel ran into an error!\n\n{e}")
async def getLevels(guildID=None):
if guildID is None:
print("getLevels requires 'guildID'.")
return
else:
try:
stats = levelling.find_one({"server": guildID})
levels = stats['level']
return str(levels)
except Exception as e:
print(f"getLevels ran into an error!\n\n{e}")
async def getRoles(guildID=None):
if guildID is None:
print("getRoles requires 'guildID'.")
return
else:
try:
stats = levelling.find_one({"server": guildID})
roles = stats['role']
return str(roles)
except Exception as e:
print(f"getRoles ran into an error!\n\n{e}")
async def ignoredRole(guildID=None):
if guildID is None:
print("ignoredRole requires 'guildID'.")
return
else:
try:
stats = levelling.find_one({"server": guildID})
ignored_role = stats['ignoredRole']
return str(ignored_role)
except Exception as e:
print(f"ignoredRole ran into an error!\n\n{e}")
| true | true |
1c344c547f3ce1b47d143e427a99fd715626effb | 1,261 | py | Python | tests/fixtures.py | schinckel/pipeline-runner | 5642e3ce33ba21d42289bc6e3592e8286b7321d3 | [
"MIT"
] | 6 | 2021-04-23T20:28:24.000Z | 2022-02-12T14:55:27.000Z | tests/fixtures.py | schinckel/pipeline-runner | 5642e3ce33ba21d42289bc6e3592e8286b7321d3 | [
"MIT"
] | 1 | 2022-01-17T14:43:04.000Z | 2022-01-17T14:43:04.000Z | tests/fixtures.py | schinckel/pipeline-runner | 5642e3ce33ba21d42289bc6e3592e8286b7321d3 | [
"MIT"
] | 2 | 2022-01-16T23:32:11.000Z | 2022-02-08T20:39:22.000Z | import os.path
import pytest
from pipeline_runner.models import ProjectMetadata, Repository
@pytest.fixture(autouse=True)
def user_cache_directory(tmp_path, mocker):
    """Redirect the pipeline-runner cache directory into the test tmp dir."""
    cache_dir = os.path.join(tmp_path, "cache")
    m = mocker.patch("pipeline_runner.utils.get_cache_directory")
    m.return_value = cache_dir
    yield cache_dir
@pytest.fixture(autouse=True)
def user_data_directory(tmp_path, mocker):
    """Redirect the pipeline-runner data directory into the test tmp dir."""
    data_dir = os.path.join(tmp_path, "data")
    m = mocker.patch("pipeline_runner.utils.get_data_directory")
    m.return_value = data_dir
    yield data_dir
@pytest.fixture
def project_metadata(mocker):
    """Deterministic ProjectMetadata; load_from_file is patched to return it."""
    project_metadata = ProjectMetadata(
        name="SomeNiceProject",
        slug="some-nice-project",
        key="SNP",
        path_slug="some-nice-project-FOOBAR",
        build_number=451,
    )
    mocker.patch("pipeline_runner.models.ProjectMetadata.load_from_file", return_value=project_metadata)
    return project_metadata
@pytest.fixture
def repository():
    """Repository rooted at the pipeline_runner source checkout."""
    # Two dirname() calls: package __init__ file -> package dir -> repo root.
    from pipeline_runner import __file__ as root_file
    return Repository(os.path.dirname(os.path.dirname(root_file)))
@pytest.fixture
def tmp_path_chdir(request, tmp_path):
    """chdir into tmp_path for the test, restoring the invocation dir after."""
    os.chdir(tmp_path)
    yield tmp_path
    os.chdir(request.config.invocation_dir)
| 22.927273 | 104 | 0.738303 | import os.path
import pytest
from pipeline_runner.models import ProjectMetadata, Repository
@pytest.fixture(autouse=True)
def user_cache_directory(tmp_path, mocker):
cache_dir = os.path.join(tmp_path, "cache")
m = mocker.patch("pipeline_runner.utils.get_cache_directory")
m.return_value = cache_dir
yield cache_dir
@pytest.fixture(autouse=True)
def user_data_directory(tmp_path, mocker):
data_dir = os.path.join(tmp_path, "data")
m = mocker.patch("pipeline_runner.utils.get_data_directory")
m.return_value = data_dir
yield data_dir
@pytest.fixture
def project_metadata(mocker):
project_metadata = ProjectMetadata(
name="SomeNiceProject",
slug="some-nice-project",
key="SNP",
path_slug="some-nice-project-FOOBAR",
build_number=451,
)
mocker.patch("pipeline_runner.models.ProjectMetadata.load_from_file", return_value=project_metadata)
return project_metadata
@pytest.fixture
def repository():
from pipeline_runner import __file__ as root_file
return Repository(os.path.dirname(os.path.dirname(root_file)))
@pytest.fixture
def tmp_path_chdir(request, tmp_path):
os.chdir(tmp_path)
yield tmp_path
os.chdir(request.config.invocation_dir)
| true | true |
1c344cab00d4d50a9b24403c6e30792b5d058549 | 183 | py | Python | portfolio/apps.py | Nearlz/nearlz-portfolio | 7534457184d93e78adde84c439c8564826486fc1 | [
"MIT"
] | null | null | null | portfolio/apps.py | Nearlz/nearlz-portfolio | 7534457184d93e78adde84c439c8564826486fc1 | [
"MIT"
] | null | null | null | portfolio/apps.py | Nearlz/nearlz-portfolio | 7534457184d93e78adde84c439c8564826486fc1 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class PortfolioConfig(AppConfig):
    """Django app configuration for the portfolio app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'portfolio'
    verbose_name = 'Portafolio'  # admin display name (Spanish)
| 20.333333 | 56 | 0.748634 | from django.apps import AppConfig
class PortfolioConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'portfolio'
verbose_name = 'Portafolio'
| true | true |
1c344ec9d1dd5ffd4941ae24892de745f482dabe | 1,540 | py | Python | bci_framework/default_extensions/Car_Raicing_Motor_Imagery_Feedback/main.py | UN-GCPDS/bci-framework- | b51f530967561738dc34752acf6add20cbb02283 | [
"BSD-2-Clause"
] | null | null | null | bci_framework/default_extensions/Car_Raicing_Motor_Imagery_Feedback/main.py | UN-GCPDS/bci-framework- | b51f530967561738dc34752acf6add20cbb02283 | [
"BSD-2-Clause"
] | null | null | null | bci_framework/default_extensions/Car_Raicing_Motor_Imagery_Feedback/main.py | UN-GCPDS/bci-framework- | b51f530967561738dc34752acf6add20cbb02283 | [
"BSD-2-Clause"
] | null | null | null | from bci_framework.extensions.data_analysis import DataAnalysis, loop_consumer, fake_loop_consumer
import logging
import gym
import numpy as np
from predictor import predict
BUFFER = 3  # seconds of EEG signal kept in the analysis buffer
SLIDING_DATA = 300  # samples refreshed before each new classification
GAS = 0.01  # constant throttle applied on every simulator step
BREAK_SYSTEM = 0  # brake input (always 0: no braking)
########################################################################
class Analysis(DataAnalysis):
    """Closed-loop motor-imagery feedback: EEG predictions steer CarRacing."""
    # ----------------------------------------------------------------------
    def __init__(self, *args, **kwargs):
        """Set up the simulator and the EEG buffer, then start streaming."""
        super().__init__(*args, **kwargs)
        self.steering_wheel = 0
        # Car racing environment used to render the feedback to the subject.
        self.env = gym.make('CarRacing-v0')
        self.env.reset()
        # Rolling buffer: BUFFER seconds of EEG plus a width-3 aux channel.
        self.create_buffer(BUFFER, aux_shape=3, fill=0)
        self.stream()
    # ----------------------------------------------------------------------
    @fake_loop_consumer('eeg', package_size=SLIDING_DATA)
    def stream(self, frame):
        """Classify the current buffer and turn the wheel accordingly."""
        # NOTE(review): assumes the buffer holds 16 EEG channels -- confirm.
        action = predict(self.buffer_eeg.reshape(1, 16, -1))
        match action:
            case 'right':
                self.steering_wheel += 0.1
            case 'left':
                self.steering_wheel -= 0.1
        # Move the car with the accumulated steering angle.
        logging.warning(f'Action: {action}')
        self.env.render()
        self.env.step([self.steering_wheel, GAS, BREAK_SYSTEM])
if __name__ == '__main__':
    Analysis()
| 26.101695 | 98 | 0.487013 | from bci_framework.extensions.data_analysis import DataAnalysis, loop_consumer, fake_loop_consumer
import logging
import gym
import numpy as np
from predictor import predict
BUFFER = 3
SLIDING_DATA = 300
GAS = 0.01
BREAK_SYSTEM = 0
| true | true |
1c344ee38cb96fde115c113b5cfe7beb7a903d49 | 5,407 | py | Python | cvae.py | samsungsds-rnd/CADD-CVAE | b4a21f65440aaf7cb5c01f163356fb249a6cba30 | [
"MIT"
] | 4 | 2020-07-29T02:44:05.000Z | 2022-01-29T03:33:20.000Z | cvae.py | samsungsds-rnd/CADD-CVAE | b4a21f65440aaf7cb5c01f163356fb249a6cba30 | [
"MIT"
] | null | null | null | cvae.py | samsungsds-rnd/CADD-CVAE | b4a21f65440aaf7cb5c01f163356fb249a6cba30 | [
"MIT"
] | 2 | 2020-07-28T02:29:22.000Z | 2022-03-24T12:22:47.000Z | """
The MIT License
Copyright (c) 2020 Samsung SDS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
#
# Original code(This file is under Apache 2.0 License)
# https://github.com/SAP-samples/security-research-membership-inference-against-generative-networks/blob/master/Monte-Carlo-Attacks/Monte-Carlo-MNIST_CVAE/vae.py
# modified by Sunghoon Joo
#
import tensorflow as tf
# Gaussian MLP as conditional encoder
def gaussian_MLP_conditional_encoder(x, y, n_hidden, n_output, keep_prob):
    """Gaussian MLP encoder conditioned on label `y`.

    Maps [x, y] through two hidden layers (elu then tanh, each followed by
    dropout) to the mean and stddev of a diagonal Gaussian over the latent
    code. Variables live in the "gaussian_MLP_encoder" scope.

    Args:
        x: input batch, shape (batch, dim_x).
        y: condition vectors concatenated onto `x`.
        n_hidden: width of both hidden layers.
        n_output: latent dimensionality.
        keep_prob: dropout keep probability.

    Returns:
        (mean, stddev), each of shape (batch, n_output); stddev is kept
        strictly positive via softplus plus a small epsilon.
    """
    with tf.variable_scope("gaussian_MLP_encoder"):
        # concatenate condition and image
        dim_y = int(y.get_shape()[1])
        input = tf.concat(axis=1, values=[x, y])
        # initializers
        w_init = tf.contrib.layers.variance_scaling_initializer()
        b_init = tf.constant_initializer(0.)
        # 1st hidden layer (widened by dim_y to account for the condition)
        w0 = tf.get_variable('w0', [input.get_shape()[1], n_hidden+dim_y], initializer=w_init)
        b0 = tf.get_variable('b0', [n_hidden+dim_y], initializer=b_init)
        h0 = tf.matmul(input, w0) + b0
        h0 = tf.nn.elu(h0)
        h0 = tf.nn.dropout(h0, keep_prob)
        # 2nd hidden layer
        w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
        b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
        h1 = tf.matmul(h0, w1) + b1
        h1 = tf.nn.tanh(h1)
        h1 = tf.nn.dropout(h1, keep_prob)
        # output layer emits 2*n_output units: [mean | pre-softplus stddev]
        # borrowed from https: // github.com / altosaar / vae / blob / master / vae.py
        wo = tf.get_variable('wo', [h1.get_shape()[1], n_output * 2], initializer=w_init)
        bo = tf.get_variable('bo', [n_output * 2], initializer=b_init)
        gaussian_params = tf.matmul(h1, wo) + bo
        # The mean parameter is unconstrained
        mean = gaussian_params[:, :n_output]
        # The standard deviation must be positive. Parametrize with a softplus and
        # add a small epsilon for numerical stability
        stddev = 1e-6 + tf.nn.softplus(gaussian_params[:, n_output:])
        return mean, stddev
# Bernoulli MLP as conditional decoder
def bernoulli_MLP_conditional_decoder(z, y, n_hidden, n_output, keep_prob, reuse=False):
    """Bernoulli MLP decoder conditioned on label `y`.

    Maps [z, y] through two hidden layers (tanh then elu, each followed by
    dropout) to sigmoid-activated Bernoulli means in (0, 1). Variables live
    in the "bernoulli_MLP_decoder" scope.

    Args:
        z: latent codes, shape (batch, dim_z).
        y: condition vectors concatenated onto `z`.
        n_hidden: width of both hidden layers.
        n_output: dimensionality of the reconstruction.
        keep_prob: dropout keep probability.
        reuse: reuse existing variables in the decoder scope.

    Returns:
        Tensor of shape (batch, n_output) with values in (0, 1).
    """
    with tf.variable_scope("bernoulli_MLP_decoder", reuse=reuse):
        # Condition the decoder by feeding [z, y] jointly.
        joined = tf.concat(axis=1, values=[z, y])
        weight_init = tf.contrib.layers.variance_scaling_initializer()
        bias_init = tf.constant_initializer(0.)
        # First hidden layer: affine -> tanh -> dropout.
        w_h1 = tf.get_variable('w0', [joined.get_shape()[1], n_hidden], initializer=weight_init)
        b_h1 = tf.get_variable('b0', [n_hidden], initializer=bias_init)
        hidden1 = tf.nn.dropout(tf.nn.tanh(tf.matmul(joined, w_h1) + b_h1), keep_prob)
        # Second hidden layer: affine -> elu -> dropout.
        w_h2 = tf.get_variable('w1', [hidden1.get_shape()[1], n_hidden], initializer=weight_init)
        b_h2 = tf.get_variable('b1', [n_hidden], initializer=bias_init)
        hidden2 = tf.nn.dropout(tf.nn.elu(tf.matmul(hidden1, w_h2) + b_h2), keep_prob)
        # Output layer: sigmoid gives the Bernoulli mean for each unit.
        w_out = tf.get_variable('wo', [hidden2.get_shape()[1], n_output], initializer=weight_init)
        b_out = tf.get_variable('bo', [n_output], initializer=bias_init)
        return tf.sigmoid(tf.matmul(hidden2, w_out) + b_out)
# Gateway
def autoencoder(x_hat, x, y, dim_img, dim_z, n_hidden, keep_prob):
    """Build the CVAE graph: encode, reparameterize, decode, compute the loss.

    Args:
        x_hat: encoder input batch (possibly a corrupted copy of `x`).
        x: clean reconstruction target batch.
        y: condition vectors shared by encoder and decoder.
        dim_img: dimensionality of the image/output.
        dim_z: latent dimensionality.
        n_hidden: hidden-layer width for both networks.
        keep_prob: dropout keep probability.

    Returns:
        (x_, z, loss, neg_marginal_likelihood, KL_divergence), where
        loss = -ELBO is what the optimizer should minimize.
    """
    # encoding
    mu, sigma = gaussian_MLP_conditional_encoder(x_hat, y, n_hidden, dim_z, keep_prob)
    # sampling by re-parameterization technique
    z = mu + sigma * tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)
    # decoding (clipped away from 0/1 so the log-likelihood stays finite)
    x_ = bernoulli_MLP_conditional_decoder(z, y, n_hidden, dim_img, keep_prob)
    x_ = tf.clip_by_value(x_, 1e-6, 1 - 1e-6)
    # ELBO = E[log p(x|z,y)] - KL(q(z|x,y) || N(0, I)), averaged over the batch
    marginal_likelihood = tf.reduce_sum(x * tf.log(x_) + (1 - x) * tf.log(1 - x_), 1)
    KL_divergence = 0.5 * tf.reduce_sum(tf.square(mu) + tf.square(sigma) - tf.log(1e-6 + tf.square(sigma)) - 1, 1)
    marginal_likelihood = tf.reduce_mean(marginal_likelihood)
    KL_divergence = tf.reduce_mean(KL_divergence)
    ELBO = marginal_likelihood - KL_divergence
    # minimize loss instead of maximizing ELBO
    loss = -ELBO
    return x_, z, loss, -marginal_likelihood, KL_divergence
# Conditional Decoder (Generator)
def decoder(z, y, dim_img, n_hidden):
    """Conditional generator: decode latent `z` under condition `y`, reusing
    the trained decoder variables with dropout disabled (keep_prob=1.0)."""
    x_ = bernoulli_MLP_conditional_decoder(z, y, n_hidden, dim_img, 1.0, reuse=True)
    return x_
| 37.034247 | 161 | 0.682819 |
import tensorflow as tf
def gaussian_MLP_conditional_encoder(x, y, n_hidden, n_output, keep_prob):
with tf.variable_scope("gaussian_MLP_encoder"):
dim_y = int(y.get_shape()[1])
input = tf.concat(axis=1, values=[x, y])
w_init = tf.contrib.layers.variance_scaling_initializer()
b_init = tf.constant_initializer(0.)
w0 = tf.get_variable('w0', [input.get_shape()[1], n_hidden+dim_y], initializer=w_init)
b0 = tf.get_variable('b0', [n_hidden+dim_y], initializer=b_init)
h0 = tf.matmul(input, w0) + b0
h0 = tf.nn.elu(h0)
h0 = tf.nn.dropout(h0, keep_prob)
w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
h1 = tf.matmul(h0, w1) + b1
h1 = tf.nn.tanh(h1)
h1 = tf.nn.dropout(h1, keep_prob)
wo = tf.get_variable('wo', [h1.get_shape()[1], n_output * 2], initializer=w_init)
bo = tf.get_variable('bo', [n_output * 2], initializer=b_init)
gaussian_params = tf.matmul(h1, wo) + bo
mean = gaussian_params[:, :n_output]
stddev = 1e-6 + tf.nn.softplus(gaussian_params[:, n_output:])
return mean, stddev
def bernoulli_MLP_conditional_decoder(z, y, n_hidden, n_output, keep_prob, reuse=False):
with tf.variable_scope("bernoulli_MLP_decoder", reuse=reuse):
input = tf.concat(axis=1, values=[z, y])
w_init = tf.contrib.layers.variance_scaling_initializer()
b_init = tf.constant_initializer(0.)
w0 = tf.get_variable('w0', [input.get_shape()[1], n_hidden], initializer=w_init)
b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)
h0 = tf.matmul(input, w0) + b0
h0 = tf.nn.tanh(h0)
h0 = tf.nn.dropout(h0, keep_prob)
w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
h1 = tf.matmul(h0, w1) + b1
h1 = tf.nn.elu(h1)
h1 = tf.nn.dropout(h1, keep_prob)
wo = tf.get_variable('wo', [h1.get_shape()[1], n_output], initializer=w_init)
bo = tf.get_variable('bo', [n_output], initializer=b_init)
y = tf.sigmoid(tf.matmul(h1, wo) + bo)
return y
def autoencoder(x_hat, x, y, dim_img, dim_z, n_hidden, keep_prob):
mu, sigma = gaussian_MLP_conditional_encoder(x_hat, y, n_hidden, dim_z, keep_prob)
z = mu + sigma * tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)
x_ = bernoulli_MLP_conditional_decoder(z, y, n_hidden, dim_img, keep_prob)
x_ = tf.clip_by_value(x_, 1e-6, 1 - 1e-6)
marginal_likelihood = tf.reduce_sum(x * tf.log(x_) + (1 - x) * tf.log(1 - x_), 1)
KL_divergence = 0.5 * tf.reduce_sum(tf.square(mu) + tf.square(sigma) - tf.log(1e-6 + tf.square(sigma)) - 1, 1)
marginal_likelihood = tf.reduce_mean(marginal_likelihood)
KL_divergence = tf.reduce_mean(KL_divergence)
ELBO = marginal_likelihood - KL_divergence
loss = -ELBO
return x_, z, loss, -marginal_likelihood, KL_divergence
def decoder(z, y, dim_img, n_hidden):
x_ = bernoulli_MLP_conditional_decoder(z, y, n_hidden, dim_img, 1.0, reuse=True)
return x_
| true | true |
1c344f254d4c108da4e293b1f038a9f76e08022d | 7,072 | py | Python | databricks/koalas/usage_logging/__init__.py | charlesdong1991/koalas | 82e2e410817dc1728f97038f193d823f615d0d6a | [
"Apache-2.0"
] | 1 | 2019-09-18T02:36:19.000Z | 2019-09-18T02:36:19.000Z | databricks/koalas/usage_logging/__init__.py | yiming1012/koalas | 326a11c43bb30cb07063e5baf4dab21b4ec90b9d | [
"Apache-2.0"
] | null | null | null | databricks/koalas/usage_logging/__init__.py | yiming1012/koalas | 326a11c43bb30cb07063e5baf4dab21b4ec90b9d | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import importlib
import inspect
import threading
import time
from types import ModuleType
from typing import Union
import pandas as pd
from databricks.koalas import namespace, sql
from databricks.koalas.frame import DataFrame
from databricks.koalas.datetimes import DatetimeMethods
from databricks.koalas.groupby import DataFrameGroupBy, SeriesGroupBy
from databricks.koalas.indexes import Index, MultiIndex
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.missing.groupby import _MissingPandasLikeDataFrameGroupBy, \
_MissingPandasLikeSeriesGroupBy
from databricks.koalas.missing.indexes import _MissingPandasLikeIndex, \
_MissingPandasLikeMultiIndex
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.series import Series
from databricks.koalas.strings import StringMethods
def attach(logger_module: Union[str, ModuleType]) -> None:
    """
    Attach the usage logger by monkey-patching Koalas functions, methods,
    properties, and missing-member stubs with logging wrappers.
    Parameters
    ----------
    logger_module : the module, or module name, that contains the usage logger.
        The module needs to provide a `get_logger` function as the entry point
        of the plug-in, returning the usage logger.
    See Also
    --------
    usage_logger : the reference implementation of the usage logger.
    """
    if isinstance(logger_module, str):
        logger_module = importlib.import_module(logger_module)
    logger = getattr(logger_module, 'get_logger')()
    modules = [namespace]
    classes = [DataFrame, Series, Index, MultiIndex,
               DataFrameGroupBy, SeriesGroupBy, DatetimeMethods, StringMethods]
    try:
        # mlflow integration is optional; skip its instrumentation if absent.
        from databricks.koalas import mlflow
        modules.append(mlflow)
        classes.append(mlflow.PythonModelWrapper)
    except ImportError:
        pass
    sql._CAPTURE_SCOPES = 3  # type: ignore
    modules.append(sql)  # type: ignore
    # Modules: wrap every public function listed in each module's __all__.
    for target_module in modules:
        target_name = target_module.__name__.split('.')[-1]
        for name in getattr(target_module, '__all__'):
            func = getattr(target_module, name)
            if not inspect.isfunction(func):
                continue
            setattr(target_module, name, _wrap_function(target_name, name, func, logger))
    special_functions = set(['__init__', '__repr__', '__str__', '_repr_html_', '__len__',
                             '__getitem__', '__setitem__', '__getattr__'])
    # Classes: wrap public methods (plus selected dunders) and public properties.
    for target_class in classes:
        for name, func in inspect.getmembers(target_class, inspect.isfunction):
            if name.startswith('_') and name not in special_functions:
                continue
            setattr(target_class, name, _wrap_function(target_class.__name__, name, func, logger))
        for name, prop in inspect.getmembers(target_class, lambda o: isinstance(o, property)):
            if name.startswith('_'):
                continue
            setattr(target_class, name, _wrap_property(target_class.__name__, name, prop, logger))
    # Missings: wrap unsupported/deprecated stubs so their use is logged too.
    for original, missing in \
            [(pd.DataFrame, _MissingPandasLikeDataFrame),
             (pd.Series, _MissingPandasLikeSeries),
             (pd.Index, _MissingPandasLikeIndex),
             (pd.MultiIndex, _MissingPandasLikeMultiIndex),
             (pd.core.groupby.DataFrameGroupBy, _MissingPandasLikeDataFrameGroupBy),
             (pd.core.groupby.SeriesGroupBy, _MissingPandasLikeSeriesGroupBy)]:
        for name, func in inspect.getmembers(missing, inspect.isfunction):
            setattr(missing, name,
                    _wrap_missing_function(original.__name__, name, func, original, logger))
        for name, prop in inspect.getmembers(missing, lambda o: isinstance(o, property)):
            setattr(missing, name, _wrap_missing_property(original.__name__, name, prop, logger))
# Re-entrancy guard: `_local.logging` is set while a wrapped call is in
# flight so that nested wrapped calls (internal calls) are not logged again.
_local = threading.local()
def _wrap_function(class_name, function_name, func, logger):
    """Wrap `func` so each top-level call reports success or failure to
    `logger`, together with the elapsed time and the function signature."""
    signature = inspect.signature(func)
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if hasattr(_local, 'logging') and _local.logging:
            # no need to log since this should be internal call.
            return func(*args, **kwargs)
        _local.logging = True
        try:
            start = time.perf_counter()
            try:
                res = func(*args, **kwargs)
                logger.log_success(
                    class_name, function_name, time.perf_counter() - start, signature)
                return res
            except Exception as ex:
                logger.log_failure(
                    class_name, function_name, ex, time.perf_counter() - start, signature)
                raise
        finally:
            # Always clear the guard, even if logging itself raised.
            _local.logging = False
    return wrapper
def _wrap_property(class_name, property_name, prop, logger):
    """Wrap a property so top-level getter accesses are reported to `logger`
    with timing; an existing setter is wrapped like a plain function."""
    @property
    def wrapper(self):
        if hasattr(_local, 'logging') and _local.logging:
            # no need to log since this should be internal call.
            return prop.fget(self)
        _local.logging = True
        try:
            start = time.perf_counter()
            try:
                res = prop.fget(self)
                logger.log_success(
                    class_name, property_name, time.perf_counter() - start)
                return res
            except Exception as ex:
                logger.log_failure(
                    class_name, property_name, ex, time.perf_counter() - start)
                raise
        finally:
            # Always clear the guard, even if logging itself raised.
            _local.logging = False
    if prop.fset is not None:
        wrapper = wrapper.setter(_wrap_function(class_name, prop.fset.__name__, prop.fset, logger))
    return wrapper
def _wrap_missing_function(class_name, function_name, func, original, logger):
    """Wrap a missing/deprecated stub so every use is reported via
    `logger.log_missing`; members absent from the pandas `original` are
    returned unwrapped."""
    if not hasattr(original, function_name):
        return func
    signature = inspect.signature(getattr(original, function_name))
    is_deprecated = func.__name__ == 'deprecated_function'
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:
            # Log in `finally`: the stub is expected to raise.
            logger.log_missing(class_name, function_name, is_deprecated, signature)
    return wrapper
def _wrap_missing_property(class_name, property_name, prop, logger):
    """Wrap a missing/deprecated property stub so every access is reported
    via `logger.log_missing`."""
    is_deprecated = prop.fget.__name__ == 'deprecated_property'
    @property
    def wrapper(self):
        try:
            return prop.fget(self)
        finally:
            # Log in `finally`: the stub is expected to raise.
            logger.log_missing(class_name, property_name, is_deprecated)
    return wrapper
| 34.666667 | 99 | 0.670249 |
import functools
import importlib
import inspect
import threading
import time
from types import ModuleType
from typing import Union
import pandas as pd
from databricks.koalas import namespace, sql
from databricks.koalas.frame import DataFrame
from databricks.koalas.datetimes import DatetimeMethods
from databricks.koalas.groupby import DataFrameGroupBy, SeriesGroupBy
from databricks.koalas.indexes import Index, MultiIndex
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.missing.groupby import _MissingPandasLikeDataFrameGroupBy, \
_MissingPandasLikeSeriesGroupBy
from databricks.koalas.missing.indexes import _MissingPandasLikeIndex, \
_MissingPandasLikeMultiIndex
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.series import Series
from databricks.koalas.strings import StringMethods
def attach(logger_module: Union[str, ModuleType]) -> None:
if isinstance(logger_module, str):
logger_module = importlib.import_module(logger_module)
logger = getattr(logger_module, 'get_logger')()
modules = [namespace]
classes = [DataFrame, Series, Index, MultiIndex,
DataFrameGroupBy, SeriesGroupBy, DatetimeMethods, StringMethods]
try:
from databricks.koalas import mlflow
modules.append(mlflow)
classes.append(mlflow.PythonModelWrapper)
except ImportError:
pass
sql._CAPTURE_SCOPES = 3
modules.append(sql)
for target_module in modules:
target_name = target_module.__name__.split('.')[-1]
for name in getattr(target_module, '__all__'):
func = getattr(target_module, name)
if not inspect.isfunction(func):
continue
setattr(target_module, name, _wrap_function(target_name, name, func, logger))
special_functions = set(['__init__', '__repr__', '__str__', '_repr_html_', '__len__',
'__getitem__', '__setitem__', '__getattr__'])
for target_class in classes:
for name, func in inspect.getmembers(target_class, inspect.isfunction):
if name.startswith('_') and name not in special_functions:
continue
setattr(target_class, name, _wrap_function(target_class.__name__, name, func, logger))
for name, prop in inspect.getmembers(target_class, lambda o: isinstance(o, property)):
if name.startswith('_'):
continue
setattr(target_class, name, _wrap_property(target_class.__name__, name, prop, logger))
for original, missing in \
[(pd.DataFrame, _MissingPandasLikeDataFrame),
(pd.Series, _MissingPandasLikeSeries),
(pd.Index, _MissingPandasLikeIndex),
(pd.MultiIndex, _MissingPandasLikeMultiIndex),
(pd.core.groupby.DataFrameGroupBy, _MissingPandasLikeDataFrameGroupBy),
(pd.core.groupby.SeriesGroupBy, _MissingPandasLikeSeriesGroupBy)]:
for name, func in inspect.getmembers(missing, inspect.isfunction):
setattr(missing, name,
_wrap_missing_function(original.__name__, name, func, original, logger))
for name, prop in inspect.getmembers(missing, lambda o: isinstance(o, property)):
setattr(missing, name, _wrap_missing_property(original.__name__, name, prop, logger))
_local = threading.local()
def _wrap_function(class_name, function_name, func, logger):
signature = inspect.signature(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
if hasattr(_local, 'logging') and _local.logging:
return func(*args, **kwargs)
_local.logging = True
try:
start = time.perf_counter()
try:
res = func(*args, **kwargs)
logger.log_success(
class_name, function_name, time.perf_counter() - start, signature)
return res
except Exception as ex:
logger.log_failure(
class_name, function_name, ex, time.perf_counter() - start, signature)
raise
finally:
_local.logging = False
return wrapper
def _wrap_property(class_name, property_name, prop, logger):
@property
def wrapper(self):
if hasattr(_local, 'logging') and _local.logging:
return prop.fget(self)
_local.logging = True
try:
start = time.perf_counter()
try:
res = prop.fget(self)
logger.log_success(
class_name, property_name, time.perf_counter() - start)
return res
except Exception as ex:
logger.log_failure(
class_name, property_name, ex, time.perf_counter() - start)
raise
finally:
_local.logging = False
if prop.fset is not None:
wrapper = wrapper.setter(_wrap_function(class_name, prop.fset.__name__, prop.fset, logger))
return wrapper
def _wrap_missing_function(class_name, function_name, func, original, logger):
if not hasattr(original, function_name):
return func
signature = inspect.signature(getattr(original, function_name))
is_deprecated = func.__name__ == 'deprecated_function'
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
finally:
logger.log_missing(class_name, function_name, is_deprecated, signature)
return wrapper
def _wrap_missing_property(class_name, property_name, prop, logger):
is_deprecated = prop.fget.__name__ == 'deprecated_property'
@property
def wrapper(self):
try:
return prop.fget(self)
finally:
logger.log_missing(class_name, property_name, is_deprecated)
return wrapper
| true | true |
1c344fb799997b882b34a4550e32ec2812b402b1 | 1,427 | py | Python | Meiduo/apps/carts/utils.py | wanglijing615/python46 | 2c5f84ce79bf352b7a3c57be32f3210ce204c8c0 | [
"MIT"
] | null | null | null | Meiduo/apps/carts/utils.py | wanglijing615/python46 | 2c5f84ce79bf352b7a3c57be32f3210ce204c8c0 | [
"MIT"
] | null | null | null | Meiduo/apps/carts/utils.py | wanglijing615/python46 | 2c5f84ce79bf352b7a3c57be32f3210ce204c8c0 | [
"MIT"
] | null | null | null | # 合并购物车
import base64
import pickle
from django_redis import get_redis_connection
def merge_cart_cookie_to_redis(request, user, response):
    """Merge the anonymous cart stored in the 'carts' cookie into the
    logged-in user's cart in redis.

    For every sku present in the cookie, the cookie data wins: its count
    overwrites the redis hash entry (``carts_<user.id>``) and its selected
    flag adds/removes the sku from the redis selected-set
    (``selected_<user.id>``).  The cookie is deleted once merged.

    Parameters
    ----------
    request : HttpRequest
        Request possibly carrying the anonymous cart cookie.
    user : User
        Authenticated user whose redis cart is updated.
    response : HttpResponse
        Response object on which the cookie is cleared.

    Returns
    -------
    HttpResponse
        The same response, with the 'carts' cookie deleted when a merge
        happened.
    """
    cookie_data = request.COOKIES.get('carts')
    if cookie_data is None:
        # No anonymous cart; nothing to merge.
        return response

    # Cookie layout after decoding: {sku_id: {'count': n, 'selected': bool}, ...}
    cookie_dict = pickle.loads(base64.b64decode(cookie_data))

    redis_conn = get_redis_connection('carts')
    # Batch all writes in one round trip instead of one per sku.
    # (The previous version also read `selected_<id>` into an unused list;
    # that dead code has been removed.)
    pl = redis_conn.pipeline()
    for sku_id, value in cookie_dict.items():
        # Cookie count overwrites whatever redis currently stores.
        pl.hset('carts_%s' % user.id, sku_id, int(value['count']))
        if value['selected']:
            pl.sadd('selected_%s' % user.id, sku_id)
        else:
            pl.srem('selected_%s' % user.id, sku_id)
    pl.execute()

    # The anonymous cart has been absorbed into redis; drop the cookie.
    response.delete_cookie('carts')
    return response
| 33.97619 | 78 | 0.626489 |
import base64
import pickle
from django_redis import get_redis_connection
def merge_cart_cookie_to_redis(request, user, response):
    """Merge the anonymous cart from the 'carts' cookie into the logged-in
    user's redis cart; cookie entries overwrite redis entries.  The cookie
    is deleted once merged and the (possibly modified) response returned.
    """
    # Raw cookie value: base64-encoded pickle of {sku_id: {'count', 'selected'}}.
    cookie_data = request.COOKIES.get('carts')
    if cookie_data is not None:
        redis_conn = get_redis_connection('carts')
        # NOTE(review): `selected_data`/`redis_id_list` are computed but never
        # used below — looks like dead code; confirm before removing.
        selected_data = redis_conn.smembers('selected_%s' % user.id)
        redis_id_list = []
        for id in selected_data:
            redis_id_list.append(int(id))
        cookie_dict = pickle.loads(base64.b64decode(cookie_data))
        for sku_id, value in cookie_dict.items():
            # Cookie count wins over whatever redis already stores.
            redis_conn.hset('carts_%s' % user.id, sku_id, int(value['count']))
            if value['selected']:
                redis_conn.sadd('selected_%s' % user.id, sku_id)
            else:
                redis_conn.srem('selected_%s' % user.id, sku_id)
        # Cookie has been absorbed into redis; remove it from the client.
        response.delete_cookie('carts')
    return response
1c3452041494a823a35117badbed2c30dbd08c0a | 58,878 | py | Python | python/mxnet/rnn/rnn_cell.py | Najah-lshanableh/Deep_learning | 4b8235bdacd319843dda7b331f207808e4a90a93 | [
"Apache-2.0"
] | null | null | null | python/mxnet/rnn/rnn_cell.py | Najah-lshanableh/Deep_learning | 4b8235bdacd319843dda7b331f207808e4a90a93 | [
"Apache-2.0"
] | null | null | null | python/mxnet/rnn/rnn_cell.py | Najah-lshanableh/Deep_learning | 4b8235bdacd319843dda7b331f207808e4a90a93 | [
"Apache-2.0"
] | 1 | 2020-01-22T05:15:29.000Z | 2020-01-22T05:15:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=no-member, invalid-name, protected-access, no-self-use
# pylint: disable=too-many-branches, too-many-arguments, no-self-use
# pylint: disable=too-many-lines
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import warnings
import functools
from .. import symbol, init, ndarray
from ..base import string_types, numeric_types
def _cells_state_shape(cells):
return sum([c.state_shape for c in cells], [])
def _cells_state_info(cells):
return sum([c.state_info for c in cells], [])
def _cells_begin_state(cells, **kwargs):
return sum([c.begin_state(**kwargs) for c in cells], [])
def _cells_unpack_weights(cells, args):
for cell in cells:
args = cell.unpack_weights(args)
return args
def _cells_pack_weights(cells, args):
for cell in cells:
args = cell.pack_weights(args)
return args
def _normalize_sequence(length, inputs, layout, merge, in_layout=None):
    """Normalize ``inputs`` to either a list of per-step symbols
    (``merge is False``) or a single time-concatenated symbol
    (``merge is True``); ``merge is None`` leaves the form unchanged.
    Returns ``(inputs, axis)`` where ``axis`` is the time axis of ``layout``.
    """
    assert inputs is not None, \
        "unroll(inputs=None) has been deprecated. " \
        "Please create input variables outside unroll."
    # Time axis position in the requested layout, e.g. 'NTC' -> 1, 'TNC' -> 0.
    axis = layout.find('T')
    in_axis = in_layout.find('T') if in_layout is not None else axis
    if isinstance(inputs, symbol.Symbol):
        if merge is False:
            # Split the single symbol along the time axis into `length` steps.
            assert len(inputs.list_outputs()) == 1, \
                "unroll doesn't allow grouped symbol as input. Please convert " \
                "to list with list(inputs) first or let unroll handle splitting."
            inputs = list(symbol.split(inputs, axis=in_axis, num_outputs=length,
                                       squeeze_axis=1))
    else:
        assert length is None or len(inputs) == length
        if merge is True:
            # Re-introduce the time axis on each step, then concatenate.
            inputs = [symbol.expand_dims(i, axis=axis) for i in inputs]
            inputs = symbol.Concat(*inputs, dim=axis)
            in_axis = axis
    # If the merged symbol's time axis differs from the requested layout,
    # swap axes so the output matches `layout`.
    if isinstance(inputs, symbol.Symbol) and axis != in_axis:
        inputs = symbol.swapaxes(inputs, dim0=axis, dim1=in_axis)
    return inputs, axis
class RNNParams(object):
    """Container for holding variables.
    Used by RNN cells for parameter sharing between cells.

    Parameters
    ----------
    prefix : str
        Names of all variables created by this container will
        be prepended with prefix.
    """
    def __init__(self, prefix=''):
        self._prefix = prefix
        self._params = {}

    def get(self, name, **kwargs):
        """Get the variable given a name if one exists or create a new one if missing.

        Parameters
        ----------
        name : str
            name of the variable
        **kwargs :
            more arguments that's passed to symbol.Variable
        """
        full_name = self._prefix + name
        try:
            # Cached: kwargs only matter on first creation, as before.
            return self._params[full_name]
        except KeyError:
            var = symbol.Variable(full_name, **kwargs)
            self._params[full_name] = var
            return var
class BaseRNNCell(object):
    """Abstract base class for RNN cells

    Parameters
    ----------
    prefix : str, optional
        Prefix for names of layers
        (this prefix is also used for names of weights if `params` is None
        i.e. if `params` are being created and not reused)
    params : RNNParams, default None.
        Container for weight sharing between cells.
        A new RNNParams container is created if `params` is None.
    """
    def __init__(self, prefix='', params=None):
        if params is None:
            params = RNNParams(prefix)
            self._own_params = True
        else:
            # Params supplied by the caller are shared, not owned.
            self._own_params = False
        self._prefix = prefix
        self._params = params
        # Set to True by ModifierCell wrappers; then the base cell must not
        # be called directly any more (see begin_state's assert).
        self._modified = False
        self.reset()
    def reset(self):
        """Reset before re-using the cell for another graph."""
        self._init_counter = -1
        self._counter = -1
    def __call__(self, inputs, states):
        """Unroll the RNN for one time step.

        Parameters
        ----------
        inputs : sym.Variable
            input symbol, 2D, batch * num_units
        states : list of sym.Variable
            RNN state from previous step or the output of begin_state().

        Returns
        -------
        output : Symbol
            Symbol corresponding to the output from the RNN when unrolling
            for a single time step.
        states : nested list of Symbol
            The new state of this RNN after this unrolling.
            The type of this symbol is same as the output of begin_state().
            This can be used as input state to the next time step
            of this RNN.

        See Also
        --------
        begin_state: This function can provide the states for the first time step.
        unroll: This function unrolls an RNN for a given number of (>=1) time steps.
        """
        raise NotImplementedError()
    @property
    def params(self):
        """Parameters of this cell"""
        self._own_params = False
        return self._params
    @property
    def state_info(self):
        """shape and layout information of states"""
        raise NotImplementedError()
    @property
    def state_shape(self):
        """shape(s) of states"""
        return [ele['shape'] for ele in self.state_info]
    @property
    def _gate_names(self):
        """name(s) of gates"""
        return ()
    def begin_state(self, func=symbol.zeros, **kwargs):
        """Initial state for this cell.

        Parameters
        ----------
        func : callable, default symbol.zeros
            Function for creating initial state. Can be symbol.zeros,
            symbol.uniform, symbol.Variable etc.
            Use symbol.Variable if you want to directly
            feed input as states.
        **kwargs :
            more keyword arguments passed to func. For example
            mean, std, dtype, etc.

        Returns
        -------
        states : nested list of Symbol
            Starting states for the first RNN step.
        """
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        states = []
        for info in self.state_info:
            self._init_counter += 1
            if info is None:
                state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
                             **kwargs)
            else:
                # Fold the shape/layout hints from state_info into the call.
                kwargs.update(info)
                state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
                             **kwargs)
            states.append(state)
        return states
    def unpack_weights(self, args):
        """Unpack fused weight matrices into separate
        weight matrices.

        For example, say you use a module object `mod` to run a network that has an lstm cell.
        In `mod.get_params()[0]`, the lstm parameters are all represented as a single big vector.
        `cell.unpack_weights(mod.get_params()[0])` will unpack this vector into a dictionary of
        more readable lstm parameters - c, f, i, o gates for i2h (input to hidden) and
        h2h (hidden to hidden) weights.

        Parameters
        ----------
        args : dict of str -> NDArray
            Dictionary containing packed weights.
            usually from `Module.get_params()[0]`.

        Returns
        -------
        args : dict of str -> NDArray
            Dictionary with unpacked weights associated with
            this cell.

        See Also
        --------
        pack_weights: Performs the reverse operation of this function.
        """
        args = args.copy()
        if not self._gate_names:
            return args
        h = self._num_hidden
        for group_name in ['i2h', 'h2h']:
            weight = args.pop('%s%s_weight'%(self._prefix, group_name))
            bias = args.pop('%s%s_bias' % (self._prefix, group_name))
            # Gates are stacked along the first axis, `h` rows per gate.
            for j, gate in enumerate(self._gate_names):
                wname = '%s%s%s_weight' % (self._prefix, group_name, gate)
                args[wname] = weight[j*h:(j+1)*h].copy()
                bname = '%s%s%s_bias' % (self._prefix, group_name, gate)
                args[bname] = bias[j*h:(j+1)*h].copy()
        return args
    def pack_weights(self, args):
        """Pack separate weight matrices into a single packed
        weight.

        Parameters
        ----------
        args : dict of str -> NDArray
            Dictionary containing unpacked weights.

        Returns
        -------
        args : dict of str -> NDArray
            Dictionary with packed weights associated with
            this cell.
        """
        args = args.copy()
        if not self._gate_names:
            return args
        for group_name in ['i2h', 'h2h']:
            weight = []
            bias = []
            # Re-stack per-gate slices in gate order (inverse of unpack_weights).
            for gate in self._gate_names:
                wname = '%s%s%s_weight'%(self._prefix, group_name, gate)
                weight.append(args.pop(wname))
                bname = '%s%s%s_bias'%(self._prefix, group_name, gate)
                bias.append(args.pop(bname))
            args['%s%s_weight'%(self._prefix, group_name)] = ndarray.concatenate(weight)
            args['%s%s_bias'%(self._prefix, group_name)] = ndarray.concatenate(bias)
        return args
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        """Unroll an RNN cell across time steps.

        Parameters
        ----------
        length : int
            Number of steps to unroll.
        inputs : Symbol, list of Symbol, or None
            If `inputs` is a single Symbol (usually the output
            of Embedding symbol), it should have shape
            (batch_size, length, ...) if layout == 'NTC',
            or (length, batch_size, ...) if layout == 'TNC'.
            If `inputs` is a list of symbols (usually output of
            previous unroll), they should all have shape
            (batch_size, ...).
        begin_state : nested list of Symbol, default None
            Input states created by `begin_state()`
            or output state of another cell.
            Created from `begin_state()` if None.
        layout : str, optional
            `layout` of input symbol. Only used if inputs
            is a single Symbol.
        merge_outputs : bool, optional
            If False, return outputs as a list of Symbols.
            If True, concatenate output across time steps
            and return a single symbol with shape
            (batch_size, length, ...) if layout == 'NTC',
            or (length, batch_size, ...) if layout == 'TNC'.
            If None, output whatever is faster.

        Returns
        -------
        outputs : list of Symbol or Symbol
            Symbol (if `merge_outputs` is True) or list of Symbols
            (if `merge_outputs` is False) corresponding to the output from
            the RNN from this unrolling.
        states : nested list of Symbol
            The new state of this RNN after this unrolling.
            The type of this symbol is same as the output of begin_state().
        """
        self.reset()
        # Always step over a list of per-time-step symbols (merge=False).
        inputs, _ = _normalize_sequence(length, inputs, layout, False)
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        outputs = []
        for i in range(length):
            output, states = self(inputs[i], states)
            outputs.append(output)
        # Convert outputs back to the form the caller asked for.
        outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
        return outputs, states
    #pylint: disable=no-self-use
    def _get_activation(self, inputs, activation, **kwargs):
        """Get activation function. Convert if is string"""
        if isinstance(activation, string_types):
            return symbol.Activation(inputs, act_type=activation, **kwargs)
        else:
            return activation(inputs, **kwargs)
class RNNCell(BaseRNNCell):
    """Simple recurrent neural network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    activation : str or Symbol, default 'tanh'
        Type of activation function. Options are 'relu' and 'tanh'.
    prefix : str, default 'rnn_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, num_hidden, activation='tanh', prefix='rnn_', params=None):
        super(RNNCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._activation = activation
        # input->hidden and hidden->hidden weights/biases, shared via params.
        self._iW = self.params.get('i2h_weight')
        self._iB = self.params.get('i2h_bias')
        self._hW = self.params.get('h2h_weight')
        self._hB = self.params.get('h2h_bias')
    @property
    def state_info(self):
        # Single hidden state; the 0 batch dimension is inferred at bind time.
        return [{'shape': (0, self._num_hidden), '__layout__': 'NC'}]
    @property
    def _gate_names(self):
        return ('',)
    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        i2h = symbol.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
                                    num_hidden=self._num_hidden,
                                    name='%si2h'%name)
        h2h = symbol.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
                                    num_hidden=self._num_hidden,
                                    name='%sh2h'%name)
        # h_t = act(W_i x_t + b_i + W_h h_{t-1} + b_h)
        output = self._get_activation(i2h + h2h, self._activation,
                                      name='%sout'%name)
        return output, [output]
class LSTMCell(BaseRNNCell):
    """Long-Short Term Memory (LSTM) network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    prefix : str, default 'lstm_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    forget_bias : bias added to forget gate, default 1.0.
        Jozefowicz et al. 2015 recommends setting this to 1.0
    """
    def __init__(self, num_hidden, prefix='lstm_', params=None, forget_bias=1.0):
        super(LSTMCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._iW = self.params.get('i2h_weight')
        self._hW = self.params.get('h2h_weight')
        # we add the forget_bias to i2h_bias, this adds the bias to the forget gate activation
        self._iB = self.params.get('i2h_bias', init=init.LSTMBias(forget_bias=forget_bias))
        self._hB = self.params.get('h2h_bias')
    @property
    def state_info(self):
        # [hidden state h, cell state c]; 0 batch dim is inferred at bind time.
        return [{'shape': (0, self._num_hidden), '__layout__': 'NC'},
                {'shape': (0, self._num_hidden), '__layout__': 'NC'}]
    @property
    def _gate_names(self):
        return ['_i', '_f', '_c', '_o']
    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        # All four gates are produced by one fused FullyConnected each for
        # i2h and h2h, then split into i/f/c/o slices.
        i2h = symbol.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
                                    num_hidden=self._num_hidden*4,
                                    name='%si2h'%name)
        h2h = symbol.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
                                    num_hidden=self._num_hidden*4,
                                    name='%sh2h'%name)
        gates = i2h + h2h
        slice_gates = symbol.SliceChannel(gates, num_outputs=4,
                                          name="%sslice"%name)
        in_gate = symbol.Activation(slice_gates[0], act_type="sigmoid",
                                    name='%si'%name)
        forget_gate = symbol.Activation(slice_gates[1], act_type="sigmoid",
                                        name='%sf'%name)
        in_transform = symbol.Activation(slice_gates[2], act_type="tanh",
                                         name='%sc'%name)
        out_gate = symbol.Activation(slice_gates[3], act_type="sigmoid",
                                     name='%so'%name)
        # c_t = f * c_{t-1} + i * g ; `_internal._plus` is used so the op
        # can be given an explicit name.
        next_c = symbol._internal._plus(forget_gate * states[1], in_gate * in_transform,
                                        name='%sstate'%name)
        # h_t = o * tanh(c_t)
        next_h = symbol._internal._mul(out_gate, symbol.Activation(next_c, act_type="tanh"),
                                       name='%sout'%name)
        return next_h, [next_h, next_c]
class GRUCell(BaseRNNCell):
    """Gated Rectified Unit (GRU) network cell.
    Note: this is an implementation of the cuDNN version of GRUs
    (slight modification compared to Cho et al. 2014).

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    prefix : str, default 'gru_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, num_hidden, prefix='gru_', params=None):
        super(GRUCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._iW = self.params.get("i2h_weight")
        self._iB = self.params.get("i2h_bias")
        self._hW = self.params.get("h2h_weight")
        self._hB = self.params.get("h2h_bias")
    @property
    def state_info(self):
        return [{'shape': (0, self._num_hidden),
                 '__layout__': 'NC'}]
    @property
    def _gate_names(self):
        return ['_r', '_z', '_o']
    def __call__(self, inputs, states):
        # pylint: disable=too-many-locals
        self._counter += 1
        seq_idx = self._counter
        # NOTE(review): `name` already ends with '_', so "%s_i2h" yields a
        # double underscore in symbol names — cosmetic, but changing it
        # would break existing checkpoints; left as-is.
        name = '%st%d_' % (self._prefix, seq_idx)
        prev_state_h = states[0]
        # r/z/candidate projections computed in one fused FullyConnected each.
        i2h = symbol.FullyConnected(data=inputs,
                                    weight=self._iW,
                                    bias=self._iB,
                                    num_hidden=self._num_hidden * 3,
                                    name="%s_i2h" % name)
        h2h = symbol.FullyConnected(data=prev_state_h,
                                    weight=self._hW,
                                    bias=self._hB,
                                    num_hidden=self._num_hidden * 3,
                                    name="%s_h2h" % name)
        i2h_r, i2h_z, i2h = symbol.SliceChannel(i2h, num_outputs=3, name="%s_i2h_slice" % name)
        h2h_r, h2h_z, h2h = symbol.SliceChannel(h2h, num_outputs=3, name="%s_h2h_slice" % name)
        reset_gate = symbol.Activation(i2h_r + h2h_r, act_type="sigmoid",
                                       name="%s_r_act" % name)
        update_gate = symbol.Activation(i2h_z + h2h_z, act_type="sigmoid",
                                        name="%s_z_act" % name)
        # cuDNN variant: reset gate is applied to the h2h projection, not to
        # the previous state itself.
        next_h_tmp = symbol.Activation(i2h + reset_gate * h2h, act_type="tanh",
                                       name="%s_h_act" % name)
        # h_t = (1 - z) * candidate + z * h_{t-1}
        next_h = symbol._internal._plus((1. - update_gate) * next_h_tmp, update_gate * prev_state_h,
                                        name='%sout' % name)
        return next_h, [next_h]
class FusedRNNCell(BaseRNNCell):
    """Fusing RNN layers across time step into one kernel.
    Improves speed but is less flexible. Currently only
    supported if using cuDNN on GPU.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    num_layers : int, default 1
        Number of layers in the cell.
    mode : str, default 'lstm'
        Type of RNN. options are 'rnn_relu', 'rnn_tanh', 'lstm', 'gru'.
    bidirectional : bool, default False
        Whether to use bidirectional unroll. The output dimension size is doubled if bidrectional.
    dropout : float, default 0.
        Fraction of the input that gets dropped out during training time.
    get_next_state : bool, default False
        Whether to return the states that can be used as starting states next time.
    forget_bias : bias added to forget gate, default 1.0.
        Jozefowicz et al. 2015 recommends setting this to 1.0
    prefix : str, default '$mode_' such as 'lstm_'
        Prefix for names of layers
        (this prefix is also used for names of weights if `params` is None
        i.e. if `params` are being created and not reused)
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, num_hidden, num_layers=1, mode='lstm', bidirectional=False,
                 dropout=0., get_next_state=False, forget_bias=1.0,
                 prefix=None, params=None):
        if prefix is None:
            prefix = '%s_'%mode
        super(FusedRNNCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        self._mode = mode
        self._bidirectional = bidirectional
        self._dropout = dropout
        self._get_next_state = get_next_state
        self._directions = ['l', 'r'] if bidirectional else ['l']
        # All layer/direction/gate weights live in ONE flat parameter vector,
        # initialized by the FusedRNN initializer which knows the layout.
        initializer = init.FusedRNN(None, num_hidden, num_layers, mode,
                                    bidirectional, forget_bias)
        self._parameter = self.params.get('parameters', init=initializer)
    @property
    def state_info(self):
        b = self._bidirectional + 1
        # LSTM carries two state tensors (h and c), the others one.
        n = (self._mode == 'lstm') + 1
        return [{'shape': (b*self._num_layers, 0, self._num_hidden), '__layout__': 'LNC'}
                for _ in range(n)]
    @property
    def _gate_names(self):
        return {'rnn_relu': [''],
                'rnn_tanh': [''],
                'lstm': ['_i', '_f', '_c', '_o'],
                'gru': ['_r', '_z', '_o']}[self._mode]
    @property
    def _num_gates(self):
        return len(self._gate_names)
    def _slice_weights(self, arr, li, lh):
        """slice fused rnn weights

        Views into the flat parameter vector `arr` are returned as a dict of
        per-layer/direction/gate weight and bias arrays.  `li` is the input
        size of layer 0, `lh` the hidden size.  Layout: all weights first
        (per layer, per direction, i2h gates then h2h gates), then all
        biases in the same order — matching cuDNN's packing.
        """
        args = {}
        gate_names = self._gate_names
        directions = self._directions
        b = len(directions)
        p = 0
        for layer in range(self._num_layers):
            for direction in directions:
                for gate in gate_names:
                    name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
                    if layer > 0:
                        # Inner layers take the (possibly doubled) output of
                        # the previous layer as input.
                        size = b*lh*lh
                        args[name] = arr[p:p+size].reshape((lh, b*lh))
                    else:
                        size = li*lh
                        args[name] = arr[p:p+size].reshape((lh, li))
                    p += size
                for gate in gate_names:
                    name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
                    size = lh**2
                    args[name] = arr[p:p+size].reshape((lh, lh))
                    p += size
        for layer in range(self._num_layers):
            for direction in directions:
                for gate in gate_names:
                    name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
                    args[name] = arr[p:p+lh]
                    p += lh
                for gate in gate_names:
                    name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
                    args[name] = arr[p:p+lh]
                    p += lh
        assert p == arr.size, "Invalid parameters size for FusedRNNCell"
        return args
    def unpack_weights(self, args):
        args = args.copy()
        arr = args.pop(self._parameter.name)
        b = len(self._directions)
        m = self._num_gates
        h = self._num_hidden
        # Invert the total-size formula from pack_weights to recover the
        # input dimension of layer 0.
        num_input = arr.size//b//h//m - (self._num_layers - 1)*(h+b*h+2) - h - 2
        nargs = self._slice_weights(arr, num_input, self._num_hidden)
        args.update({name: nd.copy() for name, nd in nargs.items()})
        return args
    def pack_weights(self, args):
        args = args.copy()
        b = self._bidirectional + 1
        m = self._num_gates
        c = self._gate_names
        h = self._num_hidden
        # Layer-0 i2h weight fixes the input dimension.
        w0 = args['%sl0_i2h%s_weight'%(self._prefix, c[0])]
        num_input = w0.shape[1]
        total = (num_input+h+2)*h*m*b + (self._num_layers-1)*m*h*(h+b*h+2)*b
        arr = ndarray.zeros((total,), ctx=w0.context, dtype=w0.dtype)
        # Write each unpacked array into its view of the flat vector.
        for name, nd in self._slice_weights(arr, num_input, h).items():
            nd[:] = args.pop(name)
        args[self._parameter.name] = arr
        return args
    def __call__(self, inputs, states):
        raise NotImplementedError("FusedRNNCell cannot be stepped. Please use unroll")
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        # The fused kernel wants a single TNC symbol.
        inputs, axis = _normalize_sequence(length, inputs, layout, True)
        if axis == 1:
            warnings.warn("NTC layout detected. Consider using "
                          "TNC for FusedRNNCell for faster speed")
            inputs = symbol.swapaxes(inputs, dim1=0, dim2=1)
        else:
            assert axis == 0, "Unsupported layout %s"%layout
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        if self._mode == 'lstm':
            states = {'state': states[0], 'state_cell': states[1]} # pylint: disable=redefined-variable-type
        else:
            states = {'state': states[0]}
        rnn = symbol.RNN(data=inputs, parameters=self._parameter,
                         state_size=self._num_hidden, num_layers=self._num_layers,
                         bidirectional=self._bidirectional, p=self._dropout,
                         state_outputs=self._get_next_state,
                         mode=self._mode, name=self._prefix+'rnn',
                         **states)
        attr = {'__layout__' : 'LNC'}
        if not self._get_next_state:
            outputs, states = rnn, []
        elif self._mode == 'lstm':
            rnn[1]._set_attr(**attr)
            rnn[2]._set_attr(**attr)
            outputs, states = rnn[0], [rnn[1], rnn[2]]
        else:
            rnn[1]._set_attr(**attr)
            outputs, states = rnn[0], [rnn[1]]
        if axis == 1:
            # Swap back to the caller's NTC layout.
            outputs = symbol.swapaxes(outputs, dim1=0, dim2=1)
        outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
        return outputs, states
    def unfuse(self):
        """Unfuse the fused RNN in to a stack of rnn cells.

        Returns
        -------
        cell : SequentialRNNCell
            unfused cell that can be used for stepping, and can run on CPU.
        """
        stack = SequentialRNNCell()
        get_cell = {'rnn_relu': lambda cell_prefix: RNNCell(self._num_hidden,
                                                            activation='relu',
                                                            prefix=cell_prefix),
                    'rnn_tanh': lambda cell_prefix: RNNCell(self._num_hidden,
                                                            activation='tanh',
                                                            prefix=cell_prefix),
                    'lstm': lambda cell_prefix: LSTMCell(self._num_hidden,
                                                         prefix=cell_prefix),
                    'gru': lambda cell_prefix: GRUCell(self._num_hidden,
                                                       prefix=cell_prefix)}[self._mode]
        for i in range(self._num_layers):
            if self._bidirectional:
                stack.add(BidirectionalCell(
                    get_cell('%sl%d_'%(self._prefix, i)),
                    get_cell('%sr%d_'%(self._prefix, i)),
                    output_prefix='%sbi_l%d_'%(self._prefix, i)))
            else:
                stack.add(get_cell('%sl%d_'%(self._prefix, i)))
            # cuDNN applies dropout between layers, not after the last one.
            if self._dropout > 0 and i != self._num_layers - 1:
                stack.add(DropoutCell(self._dropout, prefix='%s_dropout%d_'%(self._prefix, i)))
        return stack
class SequentialRNNCell(BaseRNNCell):
    """Sequantially stacking multiple RNN cells.

    Parameters
    ----------
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, params=None):
        super(SequentialRNNCell, self).__init__(prefix='', params=params)
        # When params is given here, child cells must not bring their own.
        self._override_cell_params = params is not None
        self._cells = []
    def add(self, cell):
        """Append a cell into the stack.

        Parameters
        ----------
        cell : BaseRNNCell
            The cell to be appended. During unroll, previous cell's output (or raw inputs if
            no previous cell) is used as the input to this cell.
        """
        self._cells.append(cell)
        if self._override_cell_params:
            assert cell._own_params, \
                "Either specify params for SequentialRNNCell " \
                "or child cells, not both."
            cell.params._params.update(self.params._params)
        # Keep the stack's parameter dict a superset of every child's.
        self.params._params.update(cell.params._params)
    @property
    def state_info(self):
        return _cells_state_info(self._cells)
    def begin_state(self, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. ZoneoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._cells, **kwargs)
    def unpack_weights(self, args):
        return _cells_unpack_weights(self._cells, args)
    def pack_weights(self, args):
        return _cells_pack_weights(self._cells, args)
    def __call__(self, inputs, states):
        self._counter += 1
        next_states = []
        p = 0
        for cell in self._cells:
            # BidirectionalCell cannot be stepped one time step at a time.
            assert not isinstance(cell, BidirectionalCell)
            # Each cell consumes its own slice of the flat state list.
            n = len(cell.state_info)
            state = states[p:p+n]
            p += n
            inputs, state = cell(inputs, state)
            next_states.append(state)
        return inputs, sum(next_states, [])
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        num_cells = len(self._cells)
        if begin_state is None:
            begin_state = self.begin_state()
        p = 0
        next_states = []
        for i, cell in enumerate(self._cells):
            n = len(cell.state_info)
            states = begin_state[p:p+n]
            p += n
            # Only the last cell honors the caller's merge_outputs choice.
            inputs, states = cell.unroll(length, inputs=inputs, begin_state=states, layout=layout,
                                         merge_outputs=None if i < num_cells-1 else merge_outputs)
            next_states.extend(states)
        return inputs, next_states
class DropoutCell(BaseRNNCell):
    """Apply dropout on input.

    Parameters
    ----------
    dropout : float
        Percentage of elements to drop out, which
        is 1 - percentage to retain.
    prefix : str, default 'dropout_'
        Prefix for names of layers
        (this prefix is also used for names of weights if `params` is None
        i.e. if `params` are being created and not reused)
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, dropout, prefix='dropout_', params=None):
        super(DropoutCell, self).__init__(prefix, params)
        assert isinstance(dropout, numeric_types), "dropout probability must be a number"
        # Public attribute: callers/introspection read this directly.
        self.dropout = dropout
    @property
    def state_info(self):
        # Stateless cell.
        return []
    def __call__(self, inputs, states):
        if self.dropout > 0:
            inputs = symbol.Dropout(data=inputs, p=self.dropout)
        return inputs, states
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        inputs, _ = _normalize_sequence(length, inputs, layout, merge_outputs)
        if isinstance(inputs, symbol.Symbol):
            # Merged input: one Dropout op covers all time steps at once.
            return self(inputs, [])
        else:
            return super(DropoutCell, self).unroll(
                length, inputs, begin_state=begin_state, layout=layout,
                merge_outputs=merge_outputs)
class ModifierCell(BaseRNNCell):
    """Base class for modifier cells. A modifier
    cell takes a base cell, apply modifications
    on it (e.g. Zoneout), and returns a new cell.

    After applying modifiers the base cell should
    no longer be called directly. The modifer cell
    should be used instead.
    """
    def __init__(self, base_cell):
        super(ModifierCell, self).__init__()
        # Mark the base cell so its begin_state asserts on direct use.
        base_cell._modified = True
        self.base_cell = base_cell
    @property
    def params(self):
        # Delegate parameter ownership to the wrapped cell.
        self._own_params = False
        return self.base_cell.params
    @property
    def state_info(self):
        return self.base_cell.state_info
    def begin_state(self, init_sym=symbol.zeros, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        # Temporarily lift the guard so the base cell can create its states.
        self.base_cell._modified = False
        begin = self.base_cell.begin_state(init_sym, **kwargs)
        self.base_cell._modified = True
        return begin
    def unpack_weights(self, args):
        return self.base_cell.unpack_weights(args)
    def pack_weights(self, args):
        return self.base_cell.pack_weights(args)
    def __call__(self, inputs, states):
        raise NotImplementedError
class ZoneoutCell(ModifierCell):
    """Apply Zoneout on base cell.

    Parameters
    ----------
    base_cell : BaseRNNCell
        Cell on whose states to perform zoneout.
    zoneout_outputs : float, default 0.
        Fraction of the output that gets dropped out during training time.
    zoneout_states : float, default 0.
        Fraction of the states that gets dropped out during training time.
    """
    def __init__(self, base_cell, zoneout_outputs=0., zoneout_states=0.):
        assert not isinstance(base_cell, FusedRNNCell), \
            "FusedRNNCell doesn't support zoneout. " \
            "Please unfuse first."
        assert not isinstance(base_cell, BidirectionalCell), \
            "BidirectionalCell doesn't support zoneout since it doesn't support step. " \
            "Please add ZoneoutCell to the cells underneath instead."
        # BUGFIX: SequentialRNNCell never defines `_bidirectional`, so the
        # previous `base_cell._bidirectional` raised AttributeError for every
        # SequentialRNNCell.  getattr with a False default keeps the intent
        # (reject bidirectional stacks) without crashing on ordinary stacks.
        assert not isinstance(base_cell, SequentialRNNCell) or \
            not getattr(base_cell, '_bidirectional', False), \
            "Bidirectional SequentialRNNCell doesn't support zoneout. " \
            "Please add ZoneoutCell to the cells underneath instead."
        super(ZoneoutCell, self).__init__(base_cell)
        self.zoneout_outputs = zoneout_outputs
        self.zoneout_states = zoneout_states
        # Previous step's (possibly zoned-out) output, needed by the
        # zoneout mixing rule; cleared on reset().
        self.prev_output = None
    def reset(self):
        super(ZoneoutCell, self).reset()
        self.prev_output = None
    def __call__(self, inputs, states):
        cell, p_outputs, p_states = self.base_cell, self.zoneout_outputs, self.zoneout_states
        next_output, next_states = cell(inputs, states)
        # Bernoulli keep/drop mask shaped like its argument.
        mask = lambda p, like: symbol.Dropout(symbol.ones_like(like), p=p)
        # First step has no previous output; use an empty placeholder.
        prev_output = self.prev_output if self.prev_output is not None else symbol.zeros((0, 0))
        # Zoneout: where the mask fires keep the new value, otherwise reuse
        # the previous step's value.
        output = (symbol.where(mask(p_outputs, next_output), next_output, prev_output)
                  if p_outputs != 0. else next_output)
        states = ([symbol.where(mask(p_states, new_s), new_s, old_s) for new_s, old_s in
                   zip(next_states, states)] if p_states != 0. else next_states)
        self.prev_output = output
        return output, states
class ResidualCell(ModifierCell):
    """Adds residual connection as described in Wu et al, 2016
    (https://arxiv.org/abs/1609.08144).
    Output of the cell is output of the base cell plus input.

    Parameters
    ----------
    base_cell : BaseRNNCell
        Cell on whose outputs to add residual connection.
    """
    def __init__(self, base_cell):
        super(ResidualCell, self).__init__(base_cell)
    def __call__(self, inputs, states):
        output, states = self.base_cell(inputs, states)
        # output = cell(x) + x
        output = symbol.elemwise_add(output, inputs, name="%s_plus_residual" % output.name)
        return output, states
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        # Temporarily lift the modifier guard so the base cell may unroll.
        self.base_cell._modified = False
        outputs, states = self.base_cell.unroll(length, inputs=inputs, begin_state=begin_state,
                                                layout=layout, merge_outputs=merge_outputs)
        self.base_cell._modified = True
        # Mirror whatever output form the base cell chose when merge was None.
        merge_outputs = isinstance(outputs, symbol.Symbol) if merge_outputs is None else \
            merge_outputs
        inputs, _ = _normalize_sequence(length, inputs, layout, merge_outputs)
        if merge_outputs:
            outputs = symbol.elemwise_add(outputs, inputs, name="%s_plus_residual" % outputs.name)
        else:
            outputs = [symbol.elemwise_add(output_sym, input_sym,
                                           name="%s_plus_residual" % output_sym.name)
                       for output_sym, input_sym in zip(outputs, inputs)]
        return outputs, states
class BidirectionalCell(BaseRNNCell):
    """Bidirectional RNN cell.
    Parameters
    ----------
    l_cell : BaseRNNCell
        cell for forward unrolling
    r_cell : BaseRNNCell
        cell for backward unrolling
    params : RNNParams, default None.
        Container for weight sharing between cells.
        A new RNNParams container is created if `params` is None.
    output_prefix : str, default 'bi_'
        prefix for name of output
    """
    def __init__(self, l_cell, r_cell, params=None, output_prefix='bi_'):
        super(BidirectionalCell, self).__init__('', params=params)
        self._output_prefix = output_prefix
        self._override_cell_params = params is not None
        if self._override_cell_params:
            assert l_cell._own_params and r_cell._own_params, \
                "Either specify params for BidirectionalCell " \
                "or child cells, not both."
            l_cell.params._params.update(self.params._params)
            r_cell.params._params.update(self.params._params)
        # Keep this container in sync with both children so all weights
        # are reachable from a single params dict.
        self.params._params.update(l_cell.params._params)
        self.params._params.update(r_cell.params._params)
        self._cells = [l_cell, r_cell]
    def unpack_weights(self, args):
        """Delegate weight unpacking to both child cells."""
        return _cells_unpack_weights(self._cells, args)
    def pack_weights(self, args):
        """Delegate weight packing to both child cells."""
        return _cells_pack_weights(self._cells, args)
    def __call__(self, inputs, states):
        raise NotImplementedError("Bidirectional cannot be stepped. Please use unroll")
    @property
    def state_info(self):
        return _cells_state_info(self._cells)
    def begin_state(self, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._cells, **kwargs)
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        """Unroll forward over inputs and backward over reversed inputs,
        then concatenate per-step (or merged) outputs of both directions."""
        self.reset()
        inputs, axis = _normalize_sequence(length, inputs, layout, False)
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        l_cell, r_cell = self._cells
        # The flat begin_state list is split between the two children by
        # the length of the left cell's state_info.
        l_outputs, l_states = l_cell.unroll(length, inputs=inputs,
                                            begin_state=states[:len(l_cell.state_info)],
                                            layout=layout, merge_outputs=merge_outputs)
        r_outputs, r_states = r_cell.unroll(length,
                                            inputs=list(reversed(inputs)),
                                            begin_state=states[len(l_cell.state_info):],
                                            layout=layout, merge_outputs=merge_outputs)
        if merge_outputs is None:
            # Merge only if both children merged; otherwise split any
            # merged child back into a per-step list.
            merge_outputs = (isinstance(l_outputs, symbol.Symbol)
                             and isinstance(r_outputs, symbol.Symbol))
            if not merge_outputs:
                if isinstance(l_outputs, symbol.Symbol):
                    l_outputs = list(symbol.SliceChannel(l_outputs, axis=axis,
                                                         num_outputs=length, squeeze_axis=1))
                if isinstance(r_outputs, symbol.Symbol):
                    r_outputs = list(symbol.SliceChannel(r_outputs, axis=axis,
                                                         num_outputs=length, squeeze_axis=1))
        if merge_outputs:
            l_outputs = [l_outputs]
            # Backward outputs are in reverse time order; flip the time axis.
            r_outputs = [symbol.reverse(r_outputs, axis=axis)]
        else:
            r_outputs = list(reversed(r_outputs))
        outputs = [symbol.Concat(l_o, r_o, dim=1+merge_outputs,
                                 name=('%sout'%(self._output_prefix) if merge_outputs
                                       else '%st%d'%(self._output_prefix, i)))
                   for i, l_o, r_o in
                   zip(range(len(l_outputs)), l_outputs, r_outputs)]
        if merge_outputs:
            outputs = outputs[0]
        states = [l_states, r_states]
        return outputs, states
class BaseConvRNNCell(BaseRNNCell):
    """Abstract base class for Convolutional RNN cells"""
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel, h2h_dilate,
                 i2h_kernel, i2h_stride,
                 i2h_pad, i2h_dilate,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 activation, prefix='', params=None, conv_layout='NCHW'):
        super(BaseConvRNNCell, self).__init__(prefix=prefix, params=params)
        # Convolution setting
        self._h2h_kernel = h2h_kernel
        assert (self._h2h_kernel[0] % 2 == 1) and (self._h2h_kernel[1] % 2 == 1), \
            "Only support odd number, get h2h_kernel= %s" % str(h2h_kernel)
        # 'same' padding for the state-to-state convolution so the state
        # keeps its spatial size across steps.
        self._h2h_pad = (h2h_dilate[0] * (h2h_kernel[0] - 1) // 2,
                         h2h_dilate[1] * (h2h_kernel[1] - 1) // 2)
        self._h2h_dilate = h2h_dilate
        self._i2h_kernel = i2h_kernel
        self._i2h_stride = i2h_stride
        self._i2h_pad = i2h_pad
        self._i2h_dilate = i2h_dilate
        self._num_hidden = num_hidden
        self._input_shape = input_shape
        self._conv_layout = conv_layout
        self._activation = activation
        # Infer state shape: build a throwaway convolution and use shape
        # inference against the fixed input shape.
        data = symbol.Variable('data')
        self._state_shape = symbol.Convolution(data=data,
                                               num_filter=self._num_hidden,
                                               kernel=self._i2h_kernel,
                                               stride=self._i2h_stride,
                                               pad=self._i2h_pad,
                                               dilate=self._i2h_dilate,
                                               layout=conv_layout)
        self._state_shape = self._state_shape.infer_shape(data=input_shape)[1][0]
        # Leave the batch dimension unknown (0).
        self._state_shape = (0, ) + self._state_shape[1:]
        # Get params
        self._iW = self.params.get('i2h_weight', init=i2h_weight_initializer)
        self._hW = self.params.get('h2h_weight', init=h2h_weight_initializer)
        self._iB = self.params.get('i2h_bias', init=i2h_bias_initializer)
        self._hB = self.params.get('h2h_bias', init=h2h_bias_initializer)
    @property
    def _num_gates(self):
        # Number of gates, derived from the subclass's _gate_names.
        return len(self._gate_names)
    @property
    def state_info(self):
        return [{'shape': self._state_shape, '__layout__': self._conv_layout},
                {'shape': self._state_shape, '__layout__': self._conv_layout}]
    def _conv_forward(self, inputs, states, name):
        """Input-to-hidden and hidden-to-hidden convolutions computed for
        all gates at once (num_hidden * num_gates output filters)."""
        i2h = symbol.Convolution(name='%si2h'%name,
                                 data=inputs,
                                 num_filter=self._num_hidden*self._num_gates,
                                 kernel=self._i2h_kernel,
                                 stride=self._i2h_stride,
                                 pad=self._i2h_pad,
                                 dilate=self._i2h_dilate,
                                 weight=self._iW,
                                 bias=self._iB,
                                 layout=self._conv_layout)
        h2h = symbol.Convolution(name='%sh2h'%name,
                                 data=states[0],
                                 num_filter=self._num_hidden*self._num_gates,
                                 kernel=self._h2h_kernel,
                                 dilate=self._h2h_dilate,
                                 pad=self._h2h_pad,
                                 stride=(1, 1),
                                 weight=self._hW,
                                 bias=self._hB,
                                 layout=self._conv_layout)
        return i2h, h2h
    def __call__(self, inputs, states):
        raise NotImplementedError("BaseConvRNNCell is abstract class for convolutional RNN")
class ConvRNNCell(BaseConvRNNCell):
    """Convolutional RNN cells
    Parameters
    ----------
    input_shape : tuple of int
        Shape of input in single timestep.
    num_hidden : int
        Number of units in output symbol.
    h2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in state-to-state transitions.
    h2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in state-to-state transitions.
    i2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in input-to-state transitions.
    i2h_stride : tuple of int, default (1, 1)
        Stride of Convolution operator in input-to-state transitions.
    i2h_pad : tuple of int, default (1, 1)
        Pad of Convolution operator in input-to-state transitions.
    i2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in input-to-state transitions.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the convolution
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the convolution
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    activation : str or Symbol,
        default functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2)
        Type of activation function.
    prefix : str, default 'ConvRNN_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    conv_layout : str, , default 'NCHW'
        Layout of ConvolutionOp
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvRNN_', params=None, conv_layout='NCHW'):
        super(ConvRNNCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                          h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                          i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                          i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                          i2h_weight_initializer=i2h_weight_initializer,
                                          h2h_weight_initializer=h2h_weight_initializer,
                                          i2h_bias_initializer=i2h_bias_initializer,
                                          h2h_bias_initializer=h2h_bias_initializer,
                                          activation=activation, prefix=prefix,
                                          params=params, conv_layout=conv_layout)
    @property
    def _gate_names(self):
        # A plain RNN has a single, unnamed gate.
        return ('',)
    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        i2h, h2h = self._conv_forward(inputs, states, name)
        # h_t = act(conv(x_t) + conv(h_{t-1}))
        output = self._get_activation(i2h + h2h, self._activation,
                                      name='%sout'%name)
        return output, [output]
class ConvLSTMCell(BaseConvRNNCell):
    """Convolutional LSTM network cell.
    Reference:
        Xingjian et al. NIPS2015
    Parameters
    ----------
    input_shape : tuple of int
        Shape of input in single timestep.
    num_hidden : int
        Number of units in output symbol.
    h2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in state-to-state transitions.
    h2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in state-to-state transitions.
    i2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in input-to-state transitions.
    i2h_stride : tuple of int, default (1, 1)
        Stride of Convolution operator in input-to-state transitions.
    i2h_pad : tuple of int, default (1, 1)
        Pad of Convolution operator in input-to-state transitions.
    i2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in input-to-state transitions.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the convolution
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the convolution
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    activation : str or Symbol
        default functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2)
        Type of activation function.
    prefix : str, default 'ConvLSTM_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    conv_layout : str, , default 'NCHW'
        Layout of ConvolutionOp
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvLSTM_', params=None,
                 conv_layout='NCHW'):
        super(ConvLSTMCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                           h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                           i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                           i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                           i2h_weight_initializer=i2h_weight_initializer,
                                           h2h_weight_initializer=h2h_weight_initializer,
                                           i2h_bias_initializer=i2h_bias_initializer,
                                           h2h_bias_initializer=h2h_bias_initializer,
                                           activation=activation, prefix=prefix,
                                           params=params, conv_layout=conv_layout)
    @property
    def _gate_names(self):
        # input, forget, candidate, output gates.
        return ['_i', '_f', '_c', '_o']
    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        i2h, h2h = self._conv_forward(inputs, states, name)
        gates = i2h + h2h
        # Split the stacked gate maps along the channel axis of the layout.
        slice_gates = symbol.SliceChannel(gates, num_outputs=4, axis=self._conv_layout.find('C'),
                                          name="%sslice"%name)
        in_gate = symbol.Activation(slice_gates[0], act_type="sigmoid",
                                    name='%si'%name)
        forget_gate = symbol.Activation(slice_gates[1], act_type="sigmoid",
                                        name='%sf'%name)
        in_transform = self._get_activation(slice_gates[2], self._activation,
                                            name='%sc'%name)
        out_gate = symbol.Activation(slice_gates[3], act_type="sigmoid",
                                     name='%so'%name)
        # c_t = f * c_{t-1} + i * g ;  h_t = o * act(c_t)
        next_c = symbol._internal._plus(forget_gate * states[1], in_gate * in_transform,
                                        name='%sstate'%name)
        next_h = symbol._internal._mul(out_gate, self._get_activation(next_c, self._activation),
                                       name='%sout'%name)
        return next_h, [next_h, next_c]
class ConvGRUCell(BaseConvRNNCell):
    """Convolutional Gated Rectified Unit (GRU) network cell.
    Parameters
    ----------
    input_shape : tuple of int
        Shape of input in single timestep.
    num_hidden : int
        Number of units in output symbol.
    h2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in state-to-state transitions.
    h2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in state-to-state transitions.
    i2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in input-to-state transitions.
    i2h_stride : tuple of int, default (1, 1)
        Stride of Convolution operator in input-to-state transitions.
    i2h_pad : tuple of int, default (1, 1)
        Pad of Convolution operator in input-to-state transitions.
    i2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in input-to-state transitions.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the convolution
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the convolution
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    activation : str or Symbol,
        default functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2)
        Type of activation function.
    prefix : str, default 'ConvGRU_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    conv_layout : str, , default 'NCHW'
        Layout of ConvolutionOp
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvGRU_', params=None, conv_layout='NCHW'):
        super(ConvGRUCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                          h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                          i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                          i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                          i2h_weight_initializer=i2h_weight_initializer,
                                          h2h_weight_initializer=h2h_weight_initializer,
                                          i2h_bias_initializer=i2h_bias_initializer,
                                          h2h_bias_initializer=h2h_bias_initializer,
                                          activation=activation, prefix=prefix,
                                          params=params, conv_layout=conv_layout)
    @property
    def _gate_names(self):
        # reset, update, candidate ("output") gates.
        return ['_r', '_z', '_o']
    def __call__(self, inputs, states):
        self._counter += 1
        seq_idx = self._counter
        name = '%st%d_' % (self._prefix, seq_idx)
        i2h, h2h = self._conv_forward(inputs, states, name)
        i2h_r, i2h_z, i2h = symbol.SliceChannel(i2h, num_outputs=3, name="%s_i2h_slice" % name)
        h2h_r, h2h_z, h2h = symbol.SliceChannel(h2h, num_outputs=3, name="%s_h2h_slice" % name)
        reset_gate = symbol.Activation(i2h_r + h2h_r, act_type="sigmoid",
                                       name="%s_r_act" % name)
        update_gate = symbol.Activation(i2h_z + h2h_z, act_type="sigmoid",
                                        name="%s_z_act" % name)
        # Candidate state uses the reset-gated recurrent contribution.
        next_h_tmp = self._get_activation(i2h + reset_gate * h2h, self._activation,
                                          name="%s_h_act" % name)
        # h_t = (1 - z) * candidate + z * h_{t-1}
        next_h = symbol._internal._plus((1. - update_gate) * next_h_tmp, update_gate * states[0],
                                        name='%sout' % name)
        return next_h, [next_h]
| 41.434201 | 108 | 0.585125 |
from __future__ import print_function
import warnings
import functools
from .. import symbol, init, ndarray
from ..base import string_types, numeric_types
def _cells_state_shape(cells):
return sum([c.state_shape for c in cells], [])
def _cells_state_info(cells):
return sum([c.state_info for c in cells], [])
def _cells_begin_state(cells, **kwargs):
return sum([c.begin_state(**kwargs) for c in cells], [])
def _cells_unpack_weights(cells, args):
for cell in cells:
args = cell.unpack_weights(args)
return args
def _cells_pack_weights(cells, args):
for cell in cells:
args = cell.pack_weights(args)
return args
def _normalize_sequence(length, inputs, layout, merge, in_layout=None):
    """Normalize `inputs` to the requested form.

    `inputs` may be a single merged Symbol or a list of per-step symbols.
    With merge=False a merged Symbol is split along the time axis; with
    merge=True a list is stacked and concatenated along the time axis;
    with merge=None the form is left as-is. Returns (inputs, time_axis).
    """
    assert inputs is not None, \
        "unroll(inputs=None) has been deprecated. " \
        "Please create input variables outside unroll."
    # Position of the time dimension in the requested layout.
    axis = layout.find('T')
    in_axis = in_layout.find('T') if in_layout is not None else axis
    if isinstance(inputs, symbol.Symbol):
        if merge is False:
            assert len(inputs.list_outputs()) == 1, \
                "unroll doesn't allow grouped symbol as input. Please convert " \
                "to list with list(inputs) first or let unroll handle splitting."
            inputs = list(symbol.split(inputs, axis=in_axis, num_outputs=length,
                                       squeeze_axis=1))
    else:
        assert length is None or len(inputs) == length
        if merge is True:
            # Re-introduce the time dimension on each step, then concat.
            inputs = [symbol.expand_dims(i, axis=axis) for i in inputs]
            inputs = symbol.Concat(*inputs, dim=axis)
            in_axis = axis
    # If the merged symbol's time axis differs from the requested layout,
    # swap it into place.
    if isinstance(inputs, symbol.Symbol) and axis != in_axis:
        inputs = symbol.swapaxes(inputs, dim0=axis, dim1=in_axis)
    return inputs, axis
class RNNParams(object):
    """Container of Variables used by RNN cells for parameter sharing.

    Parameters
    ----------
    prefix : str
        Names of all variables created through this container are
        prepended with `prefix`.
    """
    def __init__(self, prefix=''):
        self._prefix = prefix
        self._params = {}
    def get(self, name, **kwargs):
        """Return the Variable for ``prefix + name``, creating it on first use.

        `kwargs` are forwarded to ``symbol.Variable`` only when the
        variable is first created.
        """
        full_name = self._prefix + name
        try:
            return self._params[full_name]
        except KeyError:
            var = symbol.Variable(full_name, **kwargs)
            self._params[full_name] = var
            return var
class BaseRNNCell(object):
    """Abstract base class for RNN cells.

    Parameters
    ----------
    prefix : str, default ''
        Prefix for names of layers (and weight variables when a new
        RNNParams container is created).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, prefix='', params=None):
        if params is None:
            params = RNNParams(prefix)
            self._own_params = True
        else:
            self._own_params = False
        self._prefix = prefix
        self._params = params
        self._modified = False
        self.reset()
    def reset(self):
        """Reset the step counters before building another graph."""
        self._init_counter = -1
        self._counter = -1
    def __call__(self, inputs, states):
        """Construct the symbol(s) for one time step. Subclasses implement."""
        raise NotImplementedError()
    @property
    def params(self):
        """Parameters of this cell; accessing them marks them as shared."""
        self._own_params = False
        return self._params
    @property
    def state_info(self):
        """Shape and layout information of states. Subclasses implement."""
        raise NotImplementedError()
    @property
    def state_shape(self):
        """Shape(s) of the states, derived from state_info."""
        return [ele['shape'] for ele in self.state_info]
    @property
    def _gate_names(self):
        """Gate name suffixes; empty tuple for cells without explicit gates."""
        return ()
    def begin_state(self, func=symbol.zeros, **kwargs):
        """Create initial-state symbols (one per state_info entry) with `func`."""
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        states = []
        for info in self.state_info:
            self._init_counter += 1
            if info is None:
                state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
                             **kwargs)
            else:
                # Pass the state's shape/layout info through to func.
                kwargs.update(info)
                state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
                             **kwargs)
            states.append(state)
        return states
    def unpack_weights(self, args):
        """Split fused i2h/h2h weight and bias arrays into one array per gate.

        Relies on ``self._num_hidden`` being set by the subclass.
        """
        args = args.copy()
        if not self._gate_names:
            return args
        h = self._num_hidden
        for group_name in ['i2h', 'h2h']:
            weight = args.pop('%s%s_weight'%(self._prefix, group_name))
            bias = args.pop('%s%s_bias' % (self._prefix, group_name))
            for j, gate in enumerate(self._gate_names):
                # Gate j owns rows [j*h, (j+1)*h) of the fused arrays.
                wname = '%s%s%s_weight' % (self._prefix, group_name, gate)
                args[wname] = weight[j*h:(j+1)*h].copy()
                bname = '%s%s%s_bias' % (self._prefix, group_name, gate)
                args[bname] = bias[j*h:(j+1)*h].copy()
        return args
    def pack_weights(self, args):
        """Concatenate per-gate weight/bias arrays back into fused form."""
        args = args.copy()
        if not self._gate_names:
            return args
        for group_name in ['i2h', 'h2h']:
            weight = []
            bias = []
            for gate in self._gate_names:
                wname = '%s%s%s_weight'%(self._prefix, group_name, gate)
                weight.append(args.pop(wname))
                bname = '%s%s%s_bias'%(self._prefix, group_name, gate)
                bias.append(args.pop(bname))
            args['%s%s_weight'%(self._prefix, group_name)] = ndarray.concatenate(weight)
            args['%s%s_bias'%(self._prefix, group_name)] = ndarray.concatenate(bias)
        return args
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        """Unroll the cell for `length` steps; return (outputs, states)."""
        self.reset()
        inputs, _ = _normalize_sequence(length, inputs, layout, False)
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        outputs = []
        for i in range(length):
            output, states = self(inputs[i], states)
            outputs.append(output)
        outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
        return outputs, states
    #pylint: disable=no-self-use
    def _get_activation(self, inputs, activation, **kwargs):
        """Apply `activation`: by name via symbol.Activation, or as a callable."""
        if isinstance(activation, string_types):
            return symbol.Activation(inputs, act_type=activation, **kwargs)
        else:
            return activation(inputs, **kwargs)
class RNNCell(BaseRNNCell):
    """Simple recurrent neural network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    activation : str or Symbol, default 'tanh'
        Type of activation function.
    prefix : str, default 'rnn_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, num_hidden, activation='tanh', prefix='rnn_', params=None):
        super(RNNCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._activation = activation
        self._iW = self.params.get('i2h_weight')
        self._iB = self.params.get('i2h_bias')
        self._hW = self.params.get('h2h_weight')
        self._hB = self.params.get('h2h_bias')
    @property
    def state_info(self):
        return [{'shape': (0, self._num_hidden), '__layout__': 'NC'}]
    @property
    def _gate_names(self):
        # A plain RNN has a single, unnamed gate.
        return ('',)
    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        i2h = symbol.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
                                    num_hidden=self._num_hidden,
                                    name='%si2h'%name)
        h2h = symbol.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
                                    num_hidden=self._num_hidden,
                                    name='%sh2h'%name)
        # h_t = act(W_ih x_t + b_ih + W_hh h_{t-1} + b_hh)
        output = self._get_activation(i2h + h2h, self._activation,
                                      name='%sout'%name)
        return output, [output]
class LSTMCell(BaseRNNCell):
    """Long-Short Term Memory (LSTM) network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    prefix : str, default 'lstm_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    forget_bias : float, default 1.0
        Bias added to the forget gate.
    """
    def __init__(self, num_hidden, prefix='lstm_', params=None, forget_bias=1.0):
        super(LSTMCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._iW = self.params.get('i2h_weight')
        self._hW = self.params.get('h2h_weight')
        # we add the forget_bias to i2h_bias, this adds the bias to the forget gate activation
        self._iB = self.params.get('i2h_bias', init=init.LSTMBias(forget_bias=forget_bias))
        self._hB = self.params.get('h2h_bias')
    @property
    def state_info(self):
        # Two states: hidden state h and cell state c.
        return [{'shape': (0, self._num_hidden), '__layout__': 'NC'},
                {'shape': (0, self._num_hidden), '__layout__': 'NC'}]
    @property
    def _gate_names(self):
        # input, forget, candidate, output gates.
        return ['_i', '_f', '_c', '_o']
    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        # All four gates computed with a single FullyConnected per source.
        i2h = symbol.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
                                    num_hidden=self._num_hidden*4,
                                    name='%si2h'%name)
        h2h = symbol.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
                                    num_hidden=self._num_hidden*4,
                                    name='%sh2h'%name)
        gates = i2h + h2h
        slice_gates = symbol.SliceChannel(gates, num_outputs=4,
                                          name="%sslice"%name)
        in_gate = symbol.Activation(slice_gates[0], act_type="sigmoid",
                                    name='%si'%name)
        forget_gate = symbol.Activation(slice_gates[1], act_type="sigmoid",
                                        name='%sf'%name)
        in_transform = symbol.Activation(slice_gates[2], act_type="tanh",
                                         name='%sc'%name)
        out_gate = symbol.Activation(slice_gates[3], act_type="sigmoid",
                                     name='%so'%name)
        # c_t = f * c_{t-1} + i * g ;  h_t = o * tanh(c_t)
        next_c = symbol._internal._plus(forget_gate * states[1], in_gate * in_transform,
                                        name='%sstate'%name)
        next_h = symbol._internal._mul(out_gate, symbol.Activation(next_c, act_type="tanh"),
                                       name='%sout'%name)
        return next_h, [next_h, next_c]
class GRUCell(BaseRNNCell):
    """Gated Rectified Unit (GRU) network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    prefix : str, default 'gru_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, num_hidden, prefix='gru_', params=None):
        super(GRUCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._iW = self.params.get("i2h_weight")
        self._iB = self.params.get("i2h_bias")
        self._hW = self.params.get("h2h_weight")
        self._hB = self.params.get("h2h_bias")
    @property
    def state_info(self):
        return [{'shape': (0, self._num_hidden),
                 '__layout__': 'NC'}]
    @property
    def _gate_names(self):
        # reset, update, candidate ("output") gates.
        return ['_r', '_z', '_o']
    def __call__(self, inputs, states):
        # pylint: disable=too-many-locals
        self._counter += 1
        seq_idx = self._counter
        name = '%st%d_' % (self._prefix, seq_idx)
        prev_state_h = states[0]
        # All three gates computed with one FullyConnected per source.
        i2h = symbol.FullyConnected(data=inputs,
                                    weight=self._iW,
                                    bias=self._iB,
                                    num_hidden=self._num_hidden * 3,
                                    name="%s_i2h" % name)
        h2h = symbol.FullyConnected(data=prev_state_h,
                                    weight=self._hW,
                                    bias=self._hB,
                                    num_hidden=self._num_hidden * 3,
                                    name="%s_h2h" % name)
        i2h_r, i2h_z, i2h = symbol.SliceChannel(i2h, num_outputs=3, name="%s_i2h_slice" % name)
        h2h_r, h2h_z, h2h = symbol.SliceChannel(h2h, num_outputs=3, name="%s_h2h_slice" % name)
        reset_gate = symbol.Activation(i2h_r + h2h_r, act_type="sigmoid",
                                       name="%s_r_act" % name)
        update_gate = symbol.Activation(i2h_z + h2h_z, act_type="sigmoid",
                                        name="%s_z_act" % name)
        # Candidate state uses the reset-gated recurrent contribution.
        next_h_tmp = symbol.Activation(i2h + reset_gate * h2h, act_type="tanh",
                                       name="%s_h_act" % name)
        # h_t = (1 - z) * candidate + z * h_{t-1}
        next_h = symbol._internal._plus((1. - update_gate) * next_h_tmp, update_gate * prev_state_h,
                                        name='%sout' % name)
        return next_h, [next_h]
class FusedRNNCell(BaseRNNCell):
    """Fused RNN cell: all layers and time steps run in one RNN operator.

    Can only be unrolled, not stepped.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    num_layers : int, default 1
        Number of stacked layers.
    mode : str, default 'lstm'
        One of 'rnn_relu', 'rnn_tanh', 'lstm', 'gru'.
    bidirectional : bool, default False
        Whether to unroll in both directions.
    dropout : float, default 0.
        Dropout applied between layers.
    get_next_state : bool, default False
        Whether to also return the final states from unroll.
    forget_bias : float, default 1.0
        Bias added to the forget gate ('lstm' mode).
    prefix : str, default None
        Prefix for name of layers; '<mode>_' when None.
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, num_hidden, num_layers=1, mode='lstm', bidirectional=False,
                 dropout=0., get_next_state=False, forget_bias=1.0,
                 prefix=None, params=None):
        if prefix is None:
            prefix = '%s_'%mode
        super(FusedRNNCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        self._mode = mode
        self._bidirectional = bidirectional
        self._dropout = dropout
        self._get_next_state = get_next_state
        self._directions = ['l', 'r'] if bidirectional else ['l']
        # All weights and biases live in one flat parameter array whose
        # layout is defined by the FusedRNN initializer.
        initializer = init.FusedRNN(None, num_hidden, num_layers, mode,
                                    bidirectional, forget_bias)
        self._parameter = self.params.get('parameters', init=initializer)
    @property
    def state_info(self):
        # b: number of directions; n: number of state tensors (2 for lstm).
        b = self._bidirectional + 1
        n = (self._mode == 'lstm') + 1
        return [{'shape': (b*self._num_layers, 0, self._num_hidden), '__layout__': 'LNC'}
                for _ in range(n)]
    @property
    def _gate_names(self):
        return {'rnn_relu': [''],
                'rnn_tanh': [''],
                'lstm': ['_i', '_f', '_c', '_o'],
                'gru': ['_r', '_z', '_o']}[self._mode]
    @property
    def _num_gates(self):
        return len(self._gate_names)
    def _slice_weights(self, arr, li, lh):
        """Slice the flat parameter array `arr` into per-layer, per-direction,
        per-gate weight and bias views.

        li: input size of layer 0; lh: hidden size. All weights come first,
        then all biases, in (layer, direction, gate) order.
        """
        args = {}
        gate_names = self._gate_names
        directions = self._directions
        b = len(directions)
        p = 0
        for layer in range(self._num_layers):
            for direction in directions:
                for gate in gate_names:
                    name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
                    if layer > 0:
                        # Upper layers take the (possibly bidirectional)
                        # output of the layer below as input.
                        size = b*lh*lh
                        args[name] = arr[p:p+size].reshape((lh, b*lh))
                    else:
                        size = li*lh
                        args[name] = arr[p:p+size].reshape((lh, li))
                    p += size
                for gate in gate_names:
                    name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
                    size = lh**2
                    args[name] = arr[p:p+size].reshape((lh, lh))
                    p += size
        for layer in range(self._num_layers):
            for direction in directions:
                for gate in gate_names:
                    name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
                    args[name] = arr[p:p+lh]
                    p += lh
                for gate in gate_names:
                    name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
                    args[name] = arr[p:p+lh]
                    p += lh
        assert p == arr.size, "Invalid parameters size for FusedRNNCell"
        return args
    def unpack_weights(self, args):
        """Split the fused parameter array into named per-gate arrays."""
        args = args.copy()
        arr = args.pop(self._parameter.name)
        b = len(self._directions)
        m = self._num_gates
        h = self._num_hidden
        # Recover layer-0 input size from the total parameter count.
        num_input = arr.size//b//h//m - (self._num_layers - 1)*(h+b*h+2) - h - 2
        nargs = self._slice_weights(arr, num_input, self._num_hidden)
        args.update({name: nd.copy() for name, nd in nargs.items()})
        return args
    def pack_weights(self, args):
        """Pack named per-gate arrays back into one fused parameter array."""
        args = args.copy()
        b = self._bidirectional + 1
        m = self._num_gates
        c = self._gate_names
        h = self._num_hidden
        w0 = args['%sl0_i2h%s_weight'%(self._prefix, c[0])]
        num_input = w0.shape[1]
        total = (num_input+h+2)*h*m*b + (self._num_layers-1)*m*h*(h+b*h+2)*b
        arr = ndarray.zeros((total,), ctx=w0.context, dtype=w0.dtype)
        # Writing through the slices returned by _slice_weights fills arr.
        for name, nd in self._slice_weights(arr, num_input, h).items():
            nd[:] = args.pop(name)
        args[self._parameter.name] = arr
        return args
    def __call__(self, inputs, states):
        raise NotImplementedError("FusedRNNCell cannot be stepped. Please use unroll")
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        """Unroll all steps with a single symbol.RNN operator."""
        self.reset()
        inputs, axis = _normalize_sequence(length, inputs, layout, True)
        if axis == 1:
            warnings.warn("NTC layout detected. Consider using "
                          "TNC for FusedRNNCell for faster speed")
            inputs = symbol.swapaxes(inputs, dim1=0, dim2=1)
        else:
            assert axis == 0, "Unsupported layout %s"%layout
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        if self._mode == 'lstm':
            states = {'state': states[0], 'state_cell': states[1]} # pylint: disable=redefined-variable-type
        else:
            states = {'state': states[0]}
        rnn = symbol.RNN(data=inputs, parameters=self._parameter,
                         state_size=self._num_hidden, num_layers=self._num_layers,
                         bidirectional=self._bidirectional, p=self._dropout,
                         state_outputs=self._get_next_state,
                         mode=self._mode, name=self._prefix+'rnn',
                         **states)
        attr = {'__layout__' : 'LNC'}
        if not self._get_next_state:
            outputs, states = rnn, []
        elif self._mode == 'lstm':
            rnn[1]._set_attr(**attr)
            rnn[2]._set_attr(**attr)
            outputs, states = rnn[0], [rnn[1], rnn[2]]
        else:
            rnn[1]._set_attr(**attr)
            outputs, states = rnn[0], [rnn[1]]
        if axis == 1:
            # Swap back to the caller's NTC layout.
            outputs = symbol.swapaxes(outputs, dim1=0, dim2=1)
        outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
        return outputs, states
    def unfuse(self):
        """Unfuse into an equivalent SequentialRNNCell of step-able cells,
        one (bidirectional) layer per entry, with dropout in between."""
        stack = SequentialRNNCell()
        get_cell = {'rnn_relu': lambda cell_prefix: RNNCell(self._num_hidden,
                                                            activation='relu',
                                                            prefix=cell_prefix),
                    'rnn_tanh': lambda cell_prefix: RNNCell(self._num_hidden,
                                                            activation='tanh',
                                                            prefix=cell_prefix),
                    'lstm': lambda cell_prefix: LSTMCell(self._num_hidden,
                                                         prefix=cell_prefix),
                    'gru': lambda cell_prefix: GRUCell(self._num_hidden,
                                                       prefix=cell_prefix)}[self._mode]
        for i in range(self._num_layers):
            if self._bidirectional:
                stack.add(BidirectionalCell(
                    get_cell('%sl%d_'%(self._prefix, i)),
                    get_cell('%sr%d_'%(self._prefix, i)),
                    output_prefix='%sbi_l%d_'%(self._prefix, i)))
            else:
                stack.add(get_cell('%sl%d_'%(self._prefix, i)))
            if self._dropout > 0 and i != self._num_layers - 1:
                stack.add(DropoutCell(self._dropout, prefix='%s_dropout%d_'%(self._prefix, i)))
        return stack
class SequentialRNNCell(BaseRNNCell):
    """Sequentially stacking multiple RNN cells.

    Parameters
    ----------
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, params=None):
        super(SequentialRNNCell, self).__init__(prefix='', params=params)
        self._override_cell_params = params is not None
        self._cells = []
    def add(self, cell):
        """Append `cell` to the stack and wire up parameter sharing."""
        self._cells.append(cell)
        if self._override_cell_params:
            assert cell._own_params, \
                "Either specify params for SequentialRNNCell " \
                "or child cells, not both."
            cell.params._params.update(self.params._params)
        self.params._params.update(cell.params._params)
    @property
    def state_info(self):
        return _cells_state_info(self._cells)
    def begin_state(self, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. ZoneoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._cells, **kwargs)
    def unpack_weights(self, args):
        return _cells_unpack_weights(self._cells, args)
    def pack_weights(self, args):
        return _cells_pack_weights(self._cells, args)
    def __call__(self, inputs, states):
        """Step all cells, feeding each cell's output into the next."""
        self._counter += 1
        next_states = []
        p = 0
        for cell in self._cells:
            assert not isinstance(cell, BidirectionalCell)
            # Slice this cell's share of the flat state list.
            n = len(cell.state_info)
            state = states[p:p+n]
            p += n
            inputs, state = cell(inputs, state)
            next_states.append(state)
        return inputs, sum(next_states, [])
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        num_cells = len(self._cells)
        if begin_state is None:
            begin_state = self.begin_state()
        p = 0
        next_states = []
        for i, cell in enumerate(self._cells):
            n = len(cell.state_info)
            states = begin_state[p:p+n]
            p += n
            # Only the last cell honors the caller's merge_outputs choice.
            inputs, states = cell.unroll(length, inputs=inputs, begin_state=states, layout=layout,
                                         merge_outputs=None if i < num_cells-1 else merge_outputs)
            next_states.extend(states)
        return inputs, next_states
class DropoutCell(BaseRNNCell):
    """Cell that applies dropout to its inputs; carries no recurrent state."""
    def __init__(self, dropout, prefix='dropout_', params=None):
        super(DropoutCell, self).__init__(prefix, params)
        assert isinstance(dropout, numeric_types), "dropout probability must be a number"
        self.dropout = dropout
    @property
    def state_info(self):
        # Stateless: nothing is carried across time steps.
        return []
    def __call__(self, inputs, states):
        dropped = inputs
        if self.dropout > 0:
            dropped = symbol.Dropout(data=inputs, p=self.dropout)
        return dropped, states
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        inputs, _ = _normalize_sequence(length, inputs, layout, merge_outputs)
        if not isinstance(inputs, symbol.Symbol):
            # Step-by-step unroll over a list of per-timestep symbols.
            return super(DropoutCell, self).unroll(
                length, inputs, begin_state=begin_state, layout=layout,
                merge_outputs=merge_outputs)
        # Merged input: a single Dropout op covers every time step at once.
        return self(inputs, [])
class ModifierCell(BaseRNNCell):
    """Base class for cells that wrap and modify another cell.

    Parameters and states belong to the wrapped base cell; subclasses
    only alter the computation (e.g. zoneout, residual connections).
    """
    def __init__(self, base_cell):
        super(ModifierCell, self).__init__()
        # Mark the base cell so direct calls on it are rejected from now on.
        base_cell._modified = True
        self.base_cell = base_cell
    @property
    def params(self):
        # Delegate parameter ownership to the wrapped cell.
        self._own_params = False
        return self.base_cell.params
    @property
    def state_info(self):
        return self.base_cell.state_info
    def begin_state(self, init_sym=symbol.zeros, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        # Temporarily lift the guard so the base cell can build its own state.
        self.base_cell._modified = False
        begin = self.base_cell.begin_state(init_sym, **kwargs)
        self.base_cell._modified = True
        return begin
    def unpack_weights(self, args):
        return self.base_cell.unpack_weights(args)
    def pack_weights(self, args):
        return self.base_cell.pack_weights(args)
    def __call__(self, inputs, states):
        # Subclasses must implement the modified step computation.
        raise NotImplementedError
class ZoneoutCell(ModifierCell):
    """Apply Zoneout on the outputs and states of a base cell.

    With probability ``zoneout_outputs`` (resp. ``zoneout_states``) each
    output (resp. state) unit keeps its previous-step value instead of
    being updated.
    """
    def __init__(self, base_cell, zoneout_outputs=0., zoneout_states=0.):
        assert not isinstance(base_cell, FusedRNNCell), \
            "FusedRNNCell doesn't support zoneout. " \
            "Please unfuse first."
        assert not isinstance(base_cell, BidirectionalCell), \
            "BidirectionalCell doesn't support zoneout since it doesn't support step. " \
            "Please add ZoneoutCell to the cells underneath instead."
        # BUGFIX: SequentialRNNCell does not define a _bidirectional
        # attribute in this file, so reading it directly raised
        # AttributeError instead of triggering the intended assert.
        # getattr with a False default keeps the original intent.
        assert not isinstance(base_cell, SequentialRNNCell) or \
            not getattr(base_cell, '_bidirectional', False), \
            "Bidirectional SequentialRNNCell doesn't support zoneout. " \
            "Please add ZoneoutCell to the cells underneath instead."
        super(ZoneoutCell, self).__init__(base_cell)
        self.zoneout_outputs = zoneout_outputs
        self.zoneout_states = zoneout_states
        self.prev_output = None
    def reset(self):
        super(ZoneoutCell, self).reset()
        self.prev_output = None
    def __call__(self, inputs, states):
        """One zoneout step: randomly retain previous outputs/states."""
        cell, p_outputs, p_states = self.base_cell, self.zoneout_outputs, self.zoneout_states
        next_output, next_states = cell(inputs, states)
        # Bernoulli keep-mask built by running Dropout over a tensor of ones.
        mask = lambda p, like: symbol.Dropout(symbol.ones_like(like), p=p)
        # First step has no previous output yet; use a zero placeholder.
        prev_output = self.prev_output if self.prev_output is not None else symbol.zeros((0, 0))
        output = (symbol.where(mask(p_outputs, next_output), next_output, prev_output)
                  if p_outputs != 0. else next_output)
        states = ([symbol.where(mask(p_states, new_s), new_s, old_s) for new_s, old_s in
                   zip(next_states, states)] if p_states != 0. else next_states)
        self.prev_output = output
        return output, states
class ResidualCell(ModifierCell):
    """Add a residual (skip) connection: output = base_cell(input) + input."""
    def __init__(self, base_cell):
        super(ResidualCell, self).__init__(base_cell)
    def __call__(self, inputs, states):
        output, states = self.base_cell(inputs, states)
        output = symbol.elemwise_add(output, inputs, name="%s_plus_residual" % output.name)
        return output, states
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        # Unroll the base cell directly, temporarily lifting its guard.
        self.base_cell._modified = False
        outputs, states = self.base_cell.unroll(length, inputs=inputs, begin_state=begin_state,
                                                layout=layout, merge_outputs=merge_outputs)
        self.base_cell._modified = True
        # If the caller did not choose a format, follow the base cell's.
        merge_outputs = isinstance(outputs, symbol.Symbol) if merge_outputs is None else \
            merge_outputs
        inputs, _ = _normalize_sequence(length, inputs, layout, merge_outputs)
        if merge_outputs:
            outputs = symbol.elemwise_add(outputs, inputs, name="%s_plus_residual" % outputs.name)
        else:
            # Per-timestep lists: add each output to its matching input.
            outputs = [symbol.elemwise_add(output_sym, input_sym,
                                           name="%s_plus_residual" % output_sym.name)
                       for output_sym, input_sym in zip(outputs, inputs)]
        return outputs, states
class BidirectionalCell(BaseRNNCell):
    """Run two cells over a sequence in opposite directions and
    concatenate their per-step outputs.

    Can only be unrolled; stepping one timestep at a time is impossible
    because the backward pass needs the whole sequence.
    """
    def __init__(self, l_cell, r_cell, params=None, output_prefix='bi_'):
        super(BidirectionalCell, self).__init__('', params=params)
        self._output_prefix = output_prefix
        self._override_cell_params = params is not None
        if self._override_cell_params:
            assert l_cell._own_params and r_cell._own_params, \
                "Either specify params for BidirectionalCell " \
                "or child cells, not both."
            l_cell.params._params.update(self.params._params)
            r_cell.params._params.update(self.params._params)
        # Expose both children's params through this container.
        self.params._params.update(l_cell.params._params)
        self.params._params.update(r_cell.params._params)
        self._cells = [l_cell, r_cell]
    def unpack_weights(self, args):
        return _cells_unpack_weights(self._cells, args)
    def pack_weights(self, args):
        return _cells_pack_weights(self._cells, args)
    def __call__(self, inputs, states):
        raise NotImplementedError("Bidirectional cannot be stepped. Please use unroll")
    @property
    def state_info(self):
        return _cells_state_info(self._cells)
    def begin_state(self, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._cells, **kwargs)
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        """Unroll both directions and concatenate outputs per time step."""
        self.reset()
        inputs, axis = _normalize_sequence(length, inputs, layout, False)
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        l_cell, r_cell = self._cells
        # Forward pass over the sequence as-is.
        l_outputs, l_states = l_cell.unroll(length, inputs=inputs,
                                            begin_state=states[:len(l_cell.state_info)],
                                            layout=layout, merge_outputs=merge_outputs)
        # Backward pass over the reversed sequence.
        r_outputs, r_states = r_cell.unroll(length,
                                            inputs=list(reversed(inputs)),
                                            begin_state=states[len(l_cell.state_info):],
                                            layout=layout, merge_outputs=merge_outputs)
        if merge_outputs is None:
            # Merge only when both children merged naturally.
            merge_outputs = (isinstance(l_outputs, symbol.Symbol)
                             and isinstance(r_outputs, symbol.Symbol))
        if not merge_outputs:
            # Normalize both sides to per-timestep lists.
            if isinstance(l_outputs, symbol.Symbol):
                l_outputs = list(symbol.SliceChannel(l_outputs, axis=axis,
                                                     num_outputs=length, squeeze_axis=1))
            if isinstance(r_outputs, symbol.Symbol):
                r_outputs = list(symbol.SliceChannel(r_outputs, axis=axis,
                                                     num_outputs=length, squeeze_axis=1))
        if merge_outputs:
            l_outputs = [l_outputs]
            # Undo the input reversal so time steps line up with l_outputs.
            r_outputs = [symbol.reverse(r_outputs, axis=axis)]
        else:
            r_outputs = list(reversed(r_outputs))
        outputs = [symbol.Concat(l_o, r_o, dim=1+merge_outputs,
                                 name=('%sout'%(self._output_prefix) if merge_outputs
                                       else '%st%d'%(self._output_prefix, i)))
                   for i, l_o, r_o in
                   zip(range(len(l_outputs)), l_outputs, r_outputs)]
        if merge_outputs:
            outputs = outputs[0]
        states = [l_states, r_states]
        return outputs, states
class BaseConvRNNCell(BaseRNNCell):
    """Abstract base class for convolutional RNN cells.

    Input-to-hidden and hidden-to-hidden transforms are 2D convolutions;
    subclasses define ``_gate_names`` and the step computation.
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel, h2h_dilate,
                 i2h_kernel, i2h_stride,
                 i2h_pad, i2h_dilate,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 activation, prefix='', params=None, conv_layout='NCHW'):
        super(BaseConvRNNCell, self).__init__(prefix=prefix, params=params)
        # Convolution setting
        self._h2h_kernel = h2h_kernel
        # Odd kernels let the h2h padding below keep spatial size unchanged.
        assert (self._h2h_kernel[0] % 2 == 1) and (self._h2h_kernel[1] % 2 == 1), \
            "Only support odd number, get h2h_kernel= %s" % str(h2h_kernel)
        self._h2h_pad = (h2h_dilate[0] * (h2h_kernel[0] - 1) // 2,
                         h2h_dilate[1] * (h2h_kernel[1] - 1) // 2)
        self._h2h_dilate = h2h_dilate
        self._i2h_kernel = i2h_kernel
        self._i2h_stride = i2h_stride
        self._i2h_pad = i2h_pad
        self._i2h_dilate = i2h_dilate
        self._num_hidden = num_hidden
        self._input_shape = input_shape
        self._conv_layout = conv_layout
        self._activation = activation
        # Infer state shape by shape-propagating a dummy convolution with
        # the i2h settings over the declared input shape.
        data = symbol.Variable('data')
        self._state_shape = symbol.Convolution(data=data,
                                               num_filter=self._num_hidden,
                                               kernel=self._i2h_kernel,
                                               stride=self._i2h_stride,
                                               pad=self._i2h_pad,
                                               dilate=self._i2h_dilate,
                                               layout=conv_layout)
        self._state_shape = self._state_shape.infer_shape(data=input_shape)[1][0]
        # Leading 0 means "any batch size".
        self._state_shape = (0, ) + self._state_shape[1:]
        # Get params
        self._iW = self.params.get('i2h_weight', init=i2h_weight_initializer)
        self._hW = self.params.get('h2h_weight', init=h2h_weight_initializer)
        self._iB = self.params.get('i2h_bias', init=i2h_bias_initializer)
        self._hB = self.params.get('h2h_bias', init=h2h_bias_initializer)
    @property
    def _num_gates(self):
        return len(self._gate_names)
    @property
    def state_info(self):
        # Two state tensors (e.g. h and c for LSTM) of identical shape.
        return [{'shape': self._state_shape, '__layout__': self._conv_layout},
                {'shape': self._state_shape, '__layout__': self._conv_layout}]
    def _conv_forward(self, inputs, states, name):
        """Return the stacked (all gates) i2h and h2h conv pre-activations."""
        i2h = symbol.Convolution(name='%si2h'%name,
                                 data=inputs,
                                 num_filter=self._num_hidden*self._num_gates,
                                 kernel=self._i2h_kernel,
                                 stride=self._i2h_stride,
                                 pad=self._i2h_pad,
                                 dilate=self._i2h_dilate,
                                 weight=self._iW,
                                 bias=self._iB,
                                 layout=self._conv_layout)
        h2h = symbol.Convolution(name='%sh2h'%name,
                                 data=states[0],
                                 num_filter=self._num_hidden*self._num_gates,
                                 kernel=self._h2h_kernel,
                                 dilate=self._h2h_dilate,
                                 pad=self._h2h_pad,
                                 stride=(1, 1),
                                 weight=self._hW,
                                 bias=self._hB,
                                 layout=self._conv_layout)
        return i2h, h2h
    def __call__(self, inputs, states):
        raise NotImplementedError("BaseConvRNNCell is abstract class for convolutional RNN")
class ConvRNNCell(BaseConvRNNCell):
    """Plain convolutional RNN cell: h_t = act(conv(x_t) + conv(h_{t-1}))."""
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvRNN_', params=None, conv_layout='NCHW'):
        super(ConvRNNCell, self).__init__(
            input_shape=input_shape, num_hidden=num_hidden,
            h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
            i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
            i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
            i2h_weight_initializer=i2h_weight_initializer,
            h2h_weight_initializer=h2h_weight_initializer,
            i2h_bias_initializer=i2h_bias_initializer,
            h2h_bias_initializer=h2h_bias_initializer,
            activation=activation, prefix=prefix,
            params=params, conv_layout=conv_layout)
    @property
    def _gate_names(self):
        # A plain RNN has a single, unnamed gate.
        return ('',)
    def __call__(self, inputs, states):
        """Perform one recurrent step."""
        self._counter += 1
        step_name = '%st%d_'%(self._prefix, self._counter)
        i2h, h2h = self._conv_forward(inputs, states, step_name)
        preactivation = i2h + h2h
        output = self._get_activation(preactivation, self._activation,
                                      name='%sout'%step_name)
        return output, [output]
class ConvLSTMCell(BaseConvRNNCell):
    """Convolutional LSTM cell: i/f/c/o gates computed by convolutions."""
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvLSTM_', params=None,
                 conv_layout='NCHW'):
        super(ConvLSTMCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                           h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                           i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                           i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                           i2h_weight_initializer=i2h_weight_initializer,
                                           h2h_weight_initializer=h2h_weight_initializer,
                                           i2h_bias_initializer=i2h_bias_initializer,
                                           h2h_bias_initializer=h2h_bias_initializer,
                                           activation=activation, prefix=prefix,
                                           params=params, conv_layout=conv_layout)
    @property
    def _gate_names(self):
        # input, forget, cell-candidate and output gates, in slice order.
        return ['_i', '_f', '_c', '_o']
    def __call__(self, inputs, states):
        """One LSTM step; states is [h, c], returns (h', [h', c'])."""
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        i2h, h2h = self._conv_forward(inputs, states, name)
        gates = i2h + h2h
        # Split the stacked gate pre-activations along the channel axis.
        slice_gates = symbol.SliceChannel(gates, num_outputs=4, axis=self._conv_layout.find('C'),
                                          name="%sslice"%name)
        in_gate = symbol.Activation(slice_gates[0], act_type="sigmoid",
                                    name='%si'%name)
        forget_gate = symbol.Activation(slice_gates[1], act_type="sigmoid",
                                        name='%sf'%name)
        in_transform = self._get_activation(slice_gates[2], self._activation,
                                            name='%sc'%name)
        out_gate = symbol.Activation(slice_gates[3], act_type="sigmoid",
                                     name='%so'%name)
        # c' = f * c + i * candidate; h' = o * act(c')
        next_c = symbol._internal._plus(forget_gate * states[1], in_gate * in_transform,
                                        name='%sstate'%name)
        next_h = symbol._internal._mul(out_gate, self._get_activation(next_c, self._activation),
                                       name='%sout'%name)
        return next_h, [next_h, next_c]
class ConvGRUCell(BaseConvRNNCell):
    """Convolutional GRU cell: reset/update gates computed by convolutions."""
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvGRU_', params=None, conv_layout='NCHW'):
        super(ConvGRUCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                          h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                          i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                          i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                          i2h_weight_initializer=i2h_weight_initializer,
                                          h2h_weight_initializer=h2h_weight_initializer,
                                          i2h_bias_initializer=i2h_bias_initializer,
                                          h2h_bias_initializer=h2h_bias_initializer,
                                          activation=activation, prefix=prefix,
                                          params=params, conv_layout=conv_layout)
    @property
    def _gate_names(self):
        # reset, update (z) and candidate (o) gates, in slice order.
        return ['_r', '_z', '_o']
    def __call__(self, inputs, states):
        """One GRU step; states is [h], returns (h', [h'])."""
        self._counter += 1
        seq_idx = self._counter
        name = '%st%d_' % (self._prefix, seq_idx)
        i2h, h2h = self._conv_forward(inputs, states, name)
        # Split the stacked pre-activations into r/z/candidate parts.
        i2h_r, i2h_z, i2h = symbol.SliceChannel(i2h, num_outputs=3, name="%s_i2h_slice" % name)
        h2h_r, h2h_z, h2h = symbol.SliceChannel(h2h, num_outputs=3, name="%s_h2h_slice" % name)
        reset_gate = symbol.Activation(i2h_r + h2h_r, act_type="sigmoid",
                                       name="%s_r_act" % name)
        update_gate = symbol.Activation(i2h_z + h2h_z, act_type="sigmoid",
                                        name="%s_z_act" % name)
        next_h_tmp = self._get_activation(i2h + reset_gate * h2h, self._activation,
                                          name="%s_h_act" % name)
        # h' = (1 - z) * candidate + z * h
        next_h = symbol._internal._plus((1. - update_gate) * next_h_tmp, update_gate * states[0],
                                        name='%sout' % name)
        return next_h, [next_h]
| true | true |
1c345256310ad0ce2cab26a85861ef0ba051d2a1 | 60,137 | py | Python | nova/network/neutronv2/api.py | rgerganov/nova | 57e546699eae500bbc0733e3415a65b486cd88c2 | [
"Apache-2.0"
] | 1 | 2021-09-10T15:29:02.000Z | 2021-09-10T15:29:02.000Z | nova/network/neutronv2/api.py | rgerganov/nova | 57e546699eae500bbc0733e3415a65b486cd88c2 | [
"Apache-2.0"
] | null | null | null | nova/network/neutronv2/api.py | rgerganov/nova | 57e546699eae500bbc0733e3415a65b486cd88c2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from neutronclient.common import exceptions as neutron_client_exc
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import conductor
from nova import exception
from nova.network import base_api
from nova.network import model as network_model
from nova.network import neutronv2
from nova.network.neutronv2 import constants
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
# Configuration options for talking to the neutron service. Registered
# below under the [neutron] group; the old DEFAULT-group names are kept
# as deprecated aliases.
neutron_opts = [
    cfg.StrOpt('url',
               default='http://127.0.0.1:9696',
               help='URL for connecting to neutron',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_url'),
    cfg.IntOpt('url_timeout',
               default=30,
               help='Timeout value for connecting to neutron in seconds',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_url_timeout'),
    cfg.StrOpt('admin_username',
               help='Username for connecting to neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_username'),
    cfg.StrOpt('admin_password',
               help='Password for connecting to neutron in admin context',
               secret=True,
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_password'),
    cfg.StrOpt('admin_tenant_id',
               help='Tenant id for connecting to neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_tenant_id'),
    cfg.StrOpt('admin_tenant_name',
               help='Tenant name for connecting to neutron in admin context. '
                    'This option is mutually exclusive with '
                    'admin_tenant_id. Note that with Keystone V3 '
                    'tenant names are only unique within a domain.',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_tenant_name'),
    cfg.StrOpt('region_name',
               help='Region name for connecting to neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_region_name'),
    cfg.StrOpt('admin_auth_url',
               default='http://localhost:5000/v2.0',
               help='Authorization URL for connecting to neutron in admin '
                    'context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_auth_url'),
    cfg.BoolOpt('api_insecure',
                default=False,
                help='If set, ignore any SSL validation issues',
                deprecated_group='DEFAULT',
                deprecated_name='neutron_api_insecure'),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               help='Authorization strategy for connecting to '
                    'neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_auth_strategy'),
    # TODO(berrange) temporary hack until Neutron can pass over the
    # name of the OVS bridge it is configured with
    cfg.StrOpt('ovs_bridge',
               default='br-int',
               help='Name of Integration Bridge used by Open vSwitch',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_ovs_bridge'),
    cfg.IntOpt('extension_sync_interval',
               default=600,
               help='Number of seconds before querying neutron for'
                    ' extensions',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_extension_sync_interval'),
    cfg.StrOpt('ca_certificates_file',
               help='Location of CA certificates file to use for '
                    'neutron client requests.',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_ca_certificates_file'),
]
CONF = cfg.CONF
# neutron_opts options in the DEFAULT group were deprecated in Juno
CONF.register_opts(neutron_opts, 'neutron')
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
    def __init__(self):
        """Initialize caches and collaborating APIs."""
        super(API, self).__init__()
        # Timestamp of the last extension sync with neutron (None = never).
        self.last_neutron_extension_sync = None
        # Cache of neutron extensions, keyed by extension name.
        self.extensions = {}
        self.conductor_api = conductor.API()
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
    def _get_available_networks(self, context, project_id,
                                net_ids=None, neutron=None):
        """Return a network list available for the tenant.
        The list contains networks owned by the tenant and public networks.
        If net_ids specified, it searches networks with requested IDs only.

        :param project_id: tenant whose private networks should be listed.
        :param net_ids: optional list of network IDs to restrict the search.
        :param neutron: optional pre-built neutron client to reuse.
        :raises ExternalNetworkAttachForbidden: for non-admin callers when
            a matched network is marked router:external.
        """
        if not neutron:
            neutron = neutronv2.get_client(context)
        if net_ids:
            # If user has specified to attach instance only to specific
            # networks then only add these to **search_opts. This search will
            # also include 'shared' networks.
            search_opts = {'id': net_ids}
            nets = neutron.list_networks(**search_opts).get('networks', [])
        else:
            # (1) Retrieve non-public network list owned by the tenant.
            search_opts = {'tenant_id': project_id, 'shared': False}
            nets = neutron.list_networks(**search_opts).get('networks', [])
            # (2) Retrieve public network list.
            search_opts = {'shared': True}
            nets += neutron.list_networks(**search_opts).get('networks', [])
        # Keep the result in the same order the caller requested the IDs.
        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)
        if not context.is_admin:
            for net in nets:
                # Perform this check here rather than in validate_networks to
                # ensure the check is performed every time
                # allocate_for_instance is invoked
                if net.get('router:external'):
                    raise exception.ExternalNetworkAttachForbidden(
                        network_uuid=net['id'])
        return nets
    def _create_port(self, port_client, instance, network_id, port_req_body,
                     fixed_ip=None, security_group_ids=None,
                     available_macs=None, dhcp_opts=None):
        """Attempts to create a port for the instance on the given network.
        :param port_client: The client to use to create the port.
        :param instance: Create the port for the given instance.
        :param network_id: Create the port on the given network.
        :param port_req_body: Pre-populated port request. Should have the
            device_id, device_owner, and any required neutron extension values.
        :param fixed_ip: Optional fixed IP to use from the given network.
        :param security_group_ids: Optional list of security group IDs to
            apply to the port.
        :param available_macs: Optional set of available MAC addresses to use.
        :param dhcp_opts: Optional DHCP options.
        :returns: ID of the created port.
        :raises PortLimitExceeded: If neutron fails with an OverQuota error.
        :raises PortNotFree: If available_macs is provided but empty.
        """
        try:
            if fixed_ip:
                port_req_body['port']['fixed_ips'] = [{'ip_address': fixed_ip}]
            port_req_body['port']['network_id'] = network_id
            port_req_body['port']['admin_state_up'] = True
            port_req_body['port']['tenant_id'] = instance['project_id']
            if security_group_ids:
                port_req_body['port']['security_groups'] = security_group_ids
            if available_macs is not None:
                if not available_macs:
                    raise exception.PortNotFree(
                        instance=instance['uuid'])
                # Consume one of the hypervisor-supplied MAC addresses.
                mac_address = available_macs.pop()
                port_req_body['port']['mac_address'] = mac_address
            if dhcp_opts is not None:
                port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
            port_id = port_client.create_port(port_req_body)['port']['id']
            LOG.debug('Successfully created port: %s', port_id,
                      instance=instance)
            return port_id
        except neutron_client_exc.NeutronClientException as e:
            # NOTE(mriedem): OverQuota in neutron is a 409
            if e.status_code == 409:
                LOG.warning(_('Neutron error: quota exceeded'))
                raise exception.PortLimitExceeded()
            with excutils.save_and_reraise_exception():
                LOG.exception(_('Neutron error creating port on network %s'),
                              network_id, instance=instance)
    def allocate_for_instance(self, context, instance, **kwargs):
        """Allocate network resources for the instance.
        :param requested_networks: optional value containing
            network_id, fixed_ip, and port_id
        :param security_groups: security groups to allocate for instance
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
            NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
            to arbitrary networks, which requires openflow switches to
            function correctly if more than one network is being used with
            the bare metal hypervisor (which is the only one known to limit
            MAC addresses).
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, eg. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        :returns: NetworkInfo containing only the ports created or updated
            by this call (see NOTE(danms) at the bottom).
        """
        hypervisor_macs = kwargs.get('macs', None)
        available_macs = None
        if hypervisor_macs is not None:
            # Make a copy we can mutate: records macs that have not been used
            # to create a port on a network. If we find a mac with a
            # pre-allocated port we also remove it from this set.
            available_macs = set(hypervisor_macs)
        neutron = neutronv2.get_client(context)
        LOG.debug('allocate_for_instance()', instance=instance)
        if not instance['project_id']:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance['uuid'])
        requested_networks = kwargs.get('requested_networks')
        dhcp_opts = kwargs.get('dhcp_options', None)
        ports = {}
        fixed_ips = {}
        net_ids = []
        # Pre-validate user-supplied ports and collect requested fixed IPs.
        if requested_networks:
            for network_id, fixed_ip, port_id in requested_networks:
                if port_id:
                    port = neutron.show_port(port_id)['port']
                    if port.get('device_id'):
                        raise exception.PortInUse(port_id=port_id)
                    if hypervisor_macs is not None:
                        if port['mac_address'] not in hypervisor_macs:
                            raise exception.PortNotUsable(port_id=port_id,
                                instance=instance['uuid'])
                        else:
                            # Don't try to use this MAC if we need to create a
                            # port on the fly later. Identical MACs may be
                            # configured by users into multiple ports so we
                            # discard rather than popping.
                            available_macs.discard(port['mac_address'])
                    network_id = port['network_id']
                    ports[network_id] = port
                elif fixed_ip and network_id:
                    fixed_ips[network_id] = fixed_ip
                if network_id:
                    net_ids.append(network_id)
        nets = self._get_available_networks(context, instance['project_id'],
                                            net_ids)
        if not nets:
            LOG.warn(_("No network configured!"), instance=instance)
            return network_model.NetworkInfo([])
        # Resolve requested security groups (names or UUIDs) to IDs.
        security_groups = kwargs.get('security_groups', [])
        security_group_ids = []
        # TODO(arosen) Should optimize more to do direct query for security
        # group if len(security_groups) == 1
        if len(security_groups):
            search_opts = {'tenant_id': instance['project_id']}
            user_security_groups = neutron.list_security_groups(
                **search_opts).get('security_groups')
            for security_group in security_groups:
                name_match = None
                uuid_match = None
                for user_security_group in user_security_groups:
                    if user_security_group['name'] == security_group:
                        if name_match:
                            raise exception.NoUniqueMatch(
                                _("Multiple security groups found matching"
                                  " '%s'. Use an ID to be more specific.") %
                                   security_group)
                        name_match = user_security_group['id']
                    if user_security_group['id'] == security_group:
                        uuid_match = user_security_group['id']
                # If a user names the security group the same as
                # another's security groups uuid, the name takes priority.
                if not name_match and not uuid_match:
                    raise exception.SecurityGroupNotFound(
                        security_group_id=security_group)
                elif name_match:
                    security_group_ids.append(name_match)
                elif uuid_match:
                    security_group_ids.append(uuid_match)
        touched_port_ids = []
        created_port_ids = []
        ports_in_requested_order = []
        # Update pre-existing ports or create new ones, one per network.
        for network in nets:
            # If security groups are requested on an instance then the
            # network must has a subnet associated with it. Some plugins
            # implement the port-security extension which requires
            # 'port_security_enabled' to be True for security groups.
            # That is why True is returned if 'port_security_enabled'
            # is not found.
            if (security_groups and not (
                    network['subnets']
                    and network.get('port_security_enabled', True))):
                raise exception.SecurityGroupCannotBeApplied()
            network_id = network['id']
            zone = 'compute:%s' % instance['availability_zone']
            port_req_body = {'port': {'device_id': instance['uuid'],
                                      'device_owner': zone}}
            try:
                port = ports.get(network_id)
                self._populate_neutron_extension_values(context, instance,
                                                        port_req_body)
                # Requires admin creds to set port bindings
                port_client = (neutron if not
                               self._has_port_binding_extension(context) else
                               neutronv2.get_client(context, admin=True))
                if port:
                    port_client.update_port(port['id'], port_req_body)
                    touched_port_ids.append(port['id'])
                    ports_in_requested_order.append(port['id'])
                else:
                    created_port = self._create_port(
                            port_client, instance, network_id,
                            port_req_body, fixed_ips.get(network_id),
                            security_group_ids, available_macs, dhcp_opts)
                    created_port_ids.append(created_port)
                    ports_in_requested_order.append(created_port)
            except Exception:
                # Roll back: detach touched ports, delete created ones,
                # then re-raise the original exception.
                with excutils.save_and_reraise_exception():
                    for port_id in touched_port_ids:
                        try:
                            port_req_body = {'port': {'device_id': ''}}
                            # Requires admin creds to set port bindings
                            if self._has_port_binding_extension(context):
                                port_req_body['port']['binding:host_id'] = None
                                port_client = neutronv2.get_client(
                                    context, admin=True)
                            else:
                                port_client = neutron
                            port_client.update_port(port_id, port_req_body)
                        except Exception:
                            msg = _("Failed to update port %s")
                            LOG.exception(msg, port_id)
                    for port_id in created_port_ids:
                        try:
                            neutron.delete_port(port_id)
                        except Exception:
                            msg = _("Failed to delete port %s")
                            LOG.exception(msg, port_id)
        nw_info = self.get_instance_nw_info(context, instance, networks=nets,
                                            port_ids=ports_in_requested_order)
        # NOTE(danms): Only return info about ports we created in this run.
        # In the initial allocation case, this will be everything we created,
        # and in later runs will only be what was created that time. Thus,
        # this only affects the attach case, not the original use for this
        # method.
        return network_model.NetworkInfo([port for port in nw_info
                                          if port['id'] in created_port_ids +
                                          touched_port_ids])
def _refresh_neutron_extensions_cache(self, context):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron.extension_sync_interval)):
neutron = neutronv2.get_client(context)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
def _has_port_binding_extension(self, context, refresh_cache=False):
if refresh_cache:
self._refresh_neutron_extensions_cache(context)
return constants.PORTBINDING_EXT in self.extensions
    def _populate_neutron_extension_values(self, context, instance,
                                           port_req_body):
        """Populate neutron extension values for the instance.
        If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.

        Mutates ``port_req_body`` in place; returns nothing.
        """
        self._refresh_neutron_extensions_cache(context)
        if constants.QOS_QUEUE in self.extensions:
            # Pass the flavor's bandwidth factor along to neutron's QoS queue.
            flavor = flavors.extract_flavor(instance)
            rxtx_factor = flavor.get('rxtx_factor')
            port_req_body['port']['rxtx_factor'] = rxtx_factor
        if self._has_port_binding_extension(context):
            port_req_body['port']['binding:host_id'] = instance.get('host')
    def deallocate_for_instance(self, context, instance, **kwargs):
        """Deallocate all network resources related to the instance.

        Ports listed in kwargs['requested_networks'] are detached
        (device_id/device_owner reset) rather than deleted.
        """
        LOG.debug('deallocate_for_instance()', instance=instance)
        search_opts = {'device_id': instance['uuid']}
        neutron = neutronv2.get_client(context)
        data = neutron.list_ports(**search_opts)
        ports = [port['id'] for port in data.get('ports', [])]
        requested_networks = kwargs.get('requested_networks') or {}
        ports_to_skip = [port_id for nets, fips, port_id in requested_networks]
        ports = set(ports) - set(ports_to_skip)
        # Reset device_id and device_owner for the ports that are skipped
        for port in ports_to_skip:
            port_req_body = {'port': {'device_id': '', 'device_owner': ''}}
            try:
                neutronv2.get_client(context).update_port(port,
                                                          port_req_body)
            except Exception:
                # Best-effort: a failed reset should not abort deallocation.
                LOG.info(_('Unable to reset device ID for port %s'), port,
                         instance=instance)
        for port in ports:
            try:
                neutron.delete_port(port)
            except neutronv2.exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    # Already gone; nothing to do.
                    LOG.warning(_("Port %s does not exist"), port)
                else:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Failed to delete neutron port %s"),
                                      port)
        # NOTE(arosen): This clears out the network_cache only if the instance
        # hasn't already been deleted. This is needed when an instance fails to
        # launch and is rescheduled onto another compute node. If the instance
        # has already been deleted this call does nothing.
        base_api.update_instance_cache_with_nw_info(self, context, instance,
            network_model.NetworkInfo([]))
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
"""Allocate a port for the instance."""
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)])
def deallocate_port_for_instance(self, context, instance, port_id):
"""Remove a specified port from the instance.
Return network information for the instance
"""
try:
neutronv2.get_client(context).delete_port(port_id)
except Exception:
LOG.exception(_("Failed to delete neutron port %s") %
port_id)
return self.get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return neutronv2.get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id."""
return neutronv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, use_slave=False):
"""Return network information for specified instance
and update cache.
"""
# NOTE(geekinutah): It would be nice if use_slave had us call
# special APIs that pummeled slaves instead of
# the master. For now we just ignore this arg.
result = self._get_instance_nw_info(context, instance, networks,
port_ids)
base_api.update_instance_cache_with_nw_info(self, context, instance,
result, update_cells=False)
return result
def _get_instance_nw_info(self, context, instance, networks=None,
port_ids=None):
# keep this caching-free version of the get_instance_nw_info method
# because it is used by the caching logic itself.
LOG.debug('get_instance_nw_info()', instance=instance)
nw_info = self._build_network_info_model(context, instance, networks,
port_ids)
return network_model.NetworkInfo.hydrate(nw_info)
def _gather_port_ids_and_networks(self, context, instance, networks=None,
port_ids=None):
"""Return an instance's complete list of port_ids and networks."""
if ((networks is None and port_ids is not None) or
(port_ids is None and networks is not None)):
message = ("This method needs to be called with either "
"networks=None and port_ids=None or port_ids and "
" networks as not none.")
raise exception.NovaException(message=message)
ifaces = compute_utils.get_nw_info_for_instance(instance)
# This code path is only done when refreshing the network_cache
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
if networks is None:
networks = self._get_available_networks(context,
instance['project_id'],
net_ids)
# an interface was added/removed from instance.
else:
# Since networks does not contain the existing networks on the
# instance we use their values from the cache and add it.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
# Include existing interfaces so they are not removed from the db.
port_ids = [iface['id'] for iface in ifaces] + port_ids
return networks, port_ids
    @base_api.refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Add a fixed ip to the instance from specified network.

        Tries every subnet of the network on every matching port; the
        first successful port update wins and its fresh network info is
        returned.  Raises NetworkNotFoundForInstance if the network has
        no subnets or no update succeeds.
        """
        search_opts = {'network_id': network_id}
        data = neutronv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        if not ipam_subnets:
            raise exception.NetworkNotFoundForInstance(
                instance_id=instance['uuid'])
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone,
                       'network_id': network_id}
        data = neutronv2.get_client(context).list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            for subnet in ipam_subnets:
                # Append an allocation request on this subnet to the
                # port's existing fixed IPs; neutron picks the address.
                fixed_ips = p['fixed_ips']
                fixed_ips.append({'subnet_id': subnet['id']})
                port_req_body = {'port': {'fixed_ips': fixed_ips}}
                try:
                    neutronv2.get_client(context).update_port(p['id'],
                                                              port_req_body)
                    # Success: return immediately with refreshed info.
                    return self._get_instance_nw_info(context, instance)
                except Exception as ex:
                    # Failed attempt is only logged; keep trying the
                    # remaining subnets/ports.
                    msg = _("Unable to update port %(portid)s on subnet "
                            "%(subnet_id)s with failure: %(exception)s")
                    LOG.debug(msg, {'portid': p['id'],
                                    'subnet_id': subnet['id'],
                                    'exception': ex})
        raise exception.NetworkNotFoundForInstance(
            instance_id=instance['uuid'])
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
msg = _("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance['uuid'], ip=address)
    def validate_networks(self, context, requested_networks, num_instances):
        """Validate that the tenant can use the requested networks.

        Return the number of instances than can be successfully allocated
        with the requested network configuration.

        :param requested_networks: list of (net_id, fixed_ip, port_id)
            tuples, or falsy to auto-select the tenant's single network.
        :param num_instances: number of instances the caller wants to boot.
        :returns: how many of num_instances fit within the port quota.
        """
        LOG.debug('validate_networks() for %s',
                  requested_networks)

        neutron = neutronv2.get_client(context)
        ports_needed_per_instance = 0

        if not requested_networks:
            nets = self._get_available_networks(context, context.project_id,
                                                neutron=neutron)
            if len(nets) > 1:
                # Attaching to more than one network by default doesn't
                # make sense, as the order will be arbitrary and the guest OS
                # won't know which to configure
                msg = _("Multiple possible networks found, use a Network "
                        "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
            else:
                ports_needed_per_instance = 1
        else:
            instance_on_net_ids = []
            net_ids_requested = []

            for (net_id, fixed_ip, port_id) in requested_networks:
                if port_id:
                    # Pre-created port: it must exist, be free, and have
                    # at least one fixed IP; its network wins over net_id.
                    try:
                        port = neutron.show_port(port_id).get('port')
                    except neutronv2.exceptions.NeutronClientException as e:
                        if e.status_code == 404:
                            port = None
                        else:
                            with excutils.save_and_reraise_exception():
                                LOG.exception(_("Failed to access port %s"),
                                              port_id)
                    if not port:
                        raise exception.PortNotFound(port_id=port_id)
                    if port.get('device_id', None):
                        raise exception.PortInUse(port_id=port_id)
                    if not port.get('fixed_ips'):
                        raise exception.PortRequiresFixedIP(port_id=port_id)
                    net_id = port['network_id']
                else:
                    ports_needed_per_instance += 1

                net_ids_requested.append(net_id)

                # NOTE(jecarey) There is currently a race condition.
                # That is, if you have more than one request for a specific
                # fixed IP at the same time then only one will be allocated
                # the ip. The fixed IP will be allocated to only one of the
                # instances that will run. The second instance will fail on
                # spawn. That instance will go into error state.
                # TODO(jecarey) Need to address this race condition once we
                # have the ability to update mac addresses in Neutron.
                if fixed_ip:
                    # TODO(jecarey) Need to look at consolidating list_port
                    # calls once able to OR filters.
                    search_opts = {'network_id': net_id,
                                   'fixed_ips': 'ip_address=%s' % fixed_ip,
                                   'fields': 'device_id'}
                    existing_ports = neutron.list_ports(
                        **search_opts)['ports']
                    if existing_ports:
                        i_uuid = existing_ports[0]['device_id']
                        raise exception.FixedIpAlreadyInUse(
                            address=fixed_ip,
                            instance_uuid=i_uuid)

                # The same network may not be requested twice.
                if net_id in instance_on_net_ids:
                    raise exception.NetworkDuplicated(network_id=net_id)
                instance_on_net_ids.append(net_id)

            # Now check to see if all requested networks exist
            if net_ids_requested:
                nets = self._get_available_networks(
                    context, context.project_id, net_ids_requested,
                    neutron=neutron)

                for net in nets:
                    if not net.get('subnets'):
                        raise exception.NetworkRequiresSubnet(
                            network_uuid=net['id'])

                if len(nets) != len(net_ids_requested):
                    requested_netid_set = set(net_ids_requested)
                    returned_netid_set = set([net['id'] for net in nets])
                    lostid_set = requested_netid_set - returned_netid_set
                    # Build a comma-separated list of the missing net ids
                    # (old-style and/or idiom: skips the separator for the
                    # first id).
                    id_str = ''
                    for _id in lostid_set:
                        id_str = id_str and id_str + ', ' + _id or _id
                    raise exception.NetworkNotFound(network_id=id_str)

        # Note(PhilD): Ideally Nova would create all required ports as part of
        # network validation, but port creation requires some details
        # from the hypervisor. So we just check the quota and return
        # how many of the requested number of instances can be created
        if ports_needed_per_instance:
            ports = neutron.list_ports(tenant_id=context.project_id)['ports']
            quotas = neutron.show_quota(tenant_id=context.project_id)['quota']
            if quotas.get('port') == -1:
                # Unlimited Port Quota
                return num_instances
            else:
                free_ports = quotas.get('port') - len(ports)
                ports_needed = ports_needed_per_instance * num_instances
                if free_ports >= ports_needed:
                    return num_instances
                else:
                    return free_ports // ports_needed_per_instance
        return num_instances
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given ip address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Return a list of dicts in the form of
[{'instance_uuid': uuid}] that matched the ip filter.
"""
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
# we remove ^$\ in the ip filer
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
    def _get_port_id_by_fixed_address(self, client,
                                      instance, address):
        """Return port_id from a fixed address.

        Scans the instance's compute-owned ports for one carrying the
        given fixed IP; raises FixedIpNotFoundForAddress if none match.
        """
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone}
        data = client.list_ports(**search_opts)
        ports = data['ports']
        port_id = None
        for p in ports:
            for ip in p['fixed_ips']:
                if ip['ip_address'] == address:
                    port_id = p['id']
                    # NOTE: only breaks the inner loop; the outer loop keeps
                    # scanning, so if several ports carried the same address
                    # the last match would win.
                    break
        if not port_id:
            raise exception.FixedIpNotFoundForAddress(address=address)
        return port_id
    @base_api.refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associate a floating ip with a fixed ip."""

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        client = neutronv2.get_client(context)
        port_id = self._get_port_id_by_fixed_address(client, instance,
                                                     fixed_address)
        fip = self._get_floating_ip_by_address(client, floating_address)
        param = {'port_id': port_id,
                 'fixed_ip_address': fixed_address}
        client.update_floatingip(fip['id'], {'floatingip': param})

        # If the floating IP was previously attached to another port,
        # invalidate the cached network info of that port's instance.
        if fip['port_id']:
            port = client.show_port(fip['port_id'])['port']
            orig_instance_uuid = port['device_id']

            msg_dict = dict(address=floating_address,
                            instance_id=orig_instance_uuid)
            LOG.info(_('re-assign floating IP %(address)s from '
                       'instance %(instance_id)s') % msg_dict)
            orig_instance = self.db.instance_get_by_uuid(context,
                                                         orig_instance_uuid)

            # purge cached nw info for the original instance
            base_api.update_instance_cache_with_nw_info(self, context,
                                                        orig_instance)
def get_all(self, context):
"""Get all networks for client."""
client = neutronv2.get_client(context)
networks = client.list_networks().get('networks')
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
"""Get specific network for client."""
client = neutronv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
    # NOTE: network lifecycle management is not supported through nova when
    # neutron is the backend; networks are managed via the neutron API.
    def delete(self, context, network_uuid):
        """Delete a network for client."""
        raise NotImplementedError()

    def disassociate(self, context, network_uuid):
        """Disassociate a network for client."""
        raise NotImplementedError()

    def associate(self, context, network_uuid, host=base_api.SENTINEL,
                  project=base_api.SENTINEL):
        """Associate a network for client."""
        raise NotImplementedError()

    def get_fixed_ip(self, context, id):
        """Get a fixed ip from the id."""
        raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
    def get_floating_ip(self, context, id):
        """Return floating ip object given the floating ip id."""
        client = neutronv2.get_client(context)
        try:
            fip = client.show_floatingip(id)['floatingip']
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                # Translate the neutron 404 into nova's exception type.
                raise exception.FloatingIpNotFound(id=id)
            else:
                # Any other neutron error: log and re-raise unchanged.
                with excutils.save_and_reraise_exception():
                    LOG.exception(_('Unable to access floating IP %s'), id)
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {constants.NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
"""Return floating ip pools."""
client = neutronv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Neutron v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Neutron v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
"""Return a floating ip given an address."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = neutronv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        # Not supported by the neutron driver; callers must look floating
        # IPs up through the floating-ip-centric methods instead.
        raise NotImplementedError()
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating ip's fixed ip is allocated to."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
    # VIF lookups are not implemented for the neutron driver; ports are
    # queried through the neutron client instead.
    def get_vifs_by_instance(self, context, instance):
        raise NotImplementedError()

    def get_vif_by_mac_address(self, context, mac_address):
        raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
    def allocate_floating_ip(self, context, pool=None):
        """Add a floating ip to a project from a pool.

        :param pool: pool name or id; defaults to CONF.default_floating_pool.
        :returns: the allocated floating IP address string.
        :raises NoMoreFloatingIps: when the pool has no free addresses.
        """
        client = neutronv2.get_client(context)
        pool = pool or CONF.default_floating_pool
        pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)

        # TODO(amotoki): handle exception during create_floatingip()
        # At this timing it is ensured that a network for pool exists.
        # quota error may be returned.
        param = {'floatingip': {'floating_network_id': pool_id}}
        try:
            fip = client.create_floatingip(param)
        except (neutron_client_exc.IpAddressGenerationFailureClient,
                neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
            # Translate neutron exhaustion errors into nova's exception.
            # (unicode() — this module still targets Python 2.)
            raise exception.NoMoreFloatingIps(unicode(e))
        return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
    def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
        """Get floatingips from fixed ip and port.

        Returns an empty list when the neutron plugin has no L3 support.
        """
        try:
            data = client.list_floatingips(fixed_ip_address=fixed_ip,
                                           port_id=port)
        # If a neutron plugin does not implement the L3 API a 404 from
        # list_floatingips will be raised.
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                return []
            # Other errors: log and re-raise unchanged.
            with excutils.save_and_reraise_exception():
                LOG.exception(_('Unable to access floating IP %(fixed_ip)s '
                                'for port %(port_id)s'),
                              {'fixed_ip': fixed_ip, 'port_id': port})
        return data['floatingips']
    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Remove a floating ip with the given address from a project."""

        # Note(amotoki): We cannot handle a case where multiple pools
        # have overlapping IP address range. In this case we cannot use
        # 'address' as a unique key.
        # This is a limitation of the current nova.

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        # An associated floating IP must be disassociated before release.
        if fip['port_id']:
            raise exception.FloatingIpAssociated(address=address)
        client.delete_floatingip(fip['id'])
    @base_api.refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociate a floating ip from the instance."""

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        # Clearing port_id detaches the floating IP from its fixed IP.
        client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass to make migrate instance doesn't
        # raise for now.
        # The actual port re-binding happens in migrate_instance_finish.
        pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
if not self._has_port_binding_extension(context, refresh_cache=True):
return
neutron = neutronv2.get_client(context, admin=True)
search_opts = {'device_id': instance['uuid'],
'tenant_id': instance['project_id']}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
port_req_body = {'port': {'binding:host_id':
migration['dest_compute']}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Unable to update host of port %s")
LOG.exception(msg, p['id'])
    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force add a network to the project."""
        # Not supported with neutron: project/network membership is
        # managed on the neutron side.
        raise NotImplementedError()
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs):
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
    def _nw_info_build_network(self, port, networks, subnets):
        """Build the Network model (and ovs interface id) for one port.

        :returns: (network_model.Network, ovs_interfaceid or None)
        """
        network_name = None
        # for/else: the else branch runs only when the port's network is
        # not in the supplied networks list.
        for net in networks:
            if port['network_id'] == net['id']:
                network_name = net['name']
                tenant_id = net['tenant_id']
                break
        else:
            tenant_id = port['tenant_id']
            LOG.warning(_("Network %(id)s not matched with the tenants "
                          "network! The ports tenant %(tenant_id)s will be "
                          "used."),
                        {'id': port['network_id'], 'tenant_id': tenant_id})

        bridge = None
        ovs_interfaceid = None
        # Network model metadata
        should_create_bridge = None
        vif_type = port.get('binding:vif_type')
        # TODO(berrange) Neutron should pass the bridge name
        # in another binding metadata field
        if vif_type == network_model.VIF_TYPE_OVS:
            bridge = CONF.neutron.ovs_bridge
            ovs_interfaceid = port['id']
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            bridge = "brq" + port['network_id']
            should_create_bridge = True

        # Bridge names are limited to the kernel interface name length.
        if bridge is not None:
            bridge = bridge[:network_model.NIC_NAME_LEN]

        network = network_model.Network(
            id=port['network_id'],
            bridge=bridge,
            injected=CONF.flat_injected,
            label=network_name,
            tenant_id=tenant_id
        )
        network['subnets'] = subnets
        port_profile = port.get('binding:profile')
        if port_profile:
            physical_network = port_profile.get('physical_network')
            if physical_network:
                network['physical_network'] = physical_network

        if should_create_bridge is not None:
            network['should_create_bridge'] = should_create_bridge
        return network, ovs_interfaceid
    def _build_network_info_model(self, context, instance, networks=None,
                                  port_ids=None):
        """Return list of ordered VIFs attached to instance.

        :param context - request context.
        :param instance - instance we are returning network info for.
        :param networks - List of networks being attached to an instance.
                          If value is None this value will be populated
                          from the existing cached value.
        :param port_ids - List of port_ids that are being attached to an
                          instance in order of attachment. If value is None
                          this value will be populated from the existing
                          cached value.
        """
        search_opts = {'tenant_id': instance['project_id'],
                       'device_id': instance['uuid'], }
        # Admin client: ports may live in a tenant nova cannot see.
        client = neutronv2.get_client(context, admin=True)
        data = client.list_ports(**search_opts)
        current_neutron_ports = data.get('ports', [])
        networks, port_ids = self._gather_port_ids_and_networks(
            context, instance, networks, port_ids)
        nw_info = network_model.NetworkInfo()

        # Index the live neutron ports by id for ordered lookup below.
        current_neutron_port_map = {}
        for current_neutron_port in current_neutron_ports:
            current_neutron_port_map[current_neutron_port['id']] = (
                current_neutron_port)

        # Iterate in attachment order; ports no longer in neutron are
        # silently dropped.
        for port_id in port_ids:
            current_neutron_port = current_neutron_port_map.get(port_id)
            if current_neutron_port:
                # NOTE(review): an admin-down port is also marked active
                # here — looks intentional upstream, but confirm.
                vif_active = False
                if (current_neutron_port['admin_state_up'] is False
                    or current_neutron_port['status'] == 'ACTIVE'):
                    vif_active = True
                network_IPs = self._nw_info_get_ips(client,
                                                    current_neutron_port)
                subnets = self._nw_info_get_subnets(context,
                                                    current_neutron_port,
                                                    network_IPs)
                # Device name is "tap" + port id, kernel-length limited.
                devname = "tap" + current_neutron_port['id']
                devname = devname[:network_model.NIC_NAME_LEN]
                network, ovs_interfaceid = (
                    self._nw_info_build_network(current_neutron_port,
                                                networks, subnets))
                nw_info.append(network_model.VIF(
                    id=current_neutron_port['id'],
                    address=current_neutron_port['mac_address'],
                    network=network,
                    type=current_neutron_port.get('binding:vif_type'),
                    details=current_neutron_port.get('binding:vif_details'),
                    ovs_interfaceid=ovs_interfaceid,
                    devname=devname,
                    active=vif_active))

        return nw_info
    def _get_subnets_from_port(self, context, port):
        """Return the subnets for a given port."""

        fixed_ips = port['fixed_ips']
        # No fixed_ips for the port means there is no subnet associated
        # with the network the port is created on.
        # Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which is not
        # related to the port. To avoid this, the method returns here.
        if not fixed_ips:
            return []
        search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
        data = neutronv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []

        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                               address=subnet['gateway_ip'],
                               type='gateway'),
                           }

            # attempt to populate DHCP server field
            search_opts = {'network_id': subnet['network_id'],
                           'device_owner': 'network:dhcp'}
            data = neutronv2.get_client(context).list_ports(**search_opts)
            dhcp_ports = data.get('ports', [])
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break

            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))

            # TODO(gongysh) get the routes for this subnet
            subnets.append(subnet_object)
        return subnets
    # NOTE: DNS management is not supported by the neutron driver; these
    # stubs satisfy the nova network API contract.
    def get_dns_domains(self, context):
        """Return a list of available dns domains.

        These can be used to create DNS entries for floating ips.
        """
        raise NotImplementedError()

    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def modify_dns_entry(self, context, name, address, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        raise NotImplementedError()

    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        raise NotImplementedError()

    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        raise NotImplementedError()

    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        raise NotImplementedError()

    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        raise NotImplementedError()

    def create_public_dns_domain(self, context, domain, project=None):
        """Create a private DNS domain with optional nova project."""
        raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
| 46.36623 | 79 | 0.583767 |
import time
from neutronclient.common import exceptions as neutron_client_exc
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import conductor
from nova import exception
from nova.network import base_api
from nova.network import model as network_model
from nova.network import neutronv2
from nova.network.neutronv2 import constants
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
# Configuration options for talking to the neutron service.  Each option
# carries a deprecated_name so legacy 'neutron_*' settings in [DEFAULT]
# keep working after the move to the [neutron] group.
neutron_opts = [
    cfg.StrOpt('url',
               default='http://127.0.0.1:9696',
               help='URL for connecting to neutron',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_url'),
    cfg.IntOpt('url_timeout',
               default=30,
               help='Timeout value for connecting to neutron in seconds',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_url_timeout'),
    cfg.StrOpt('admin_username',
               help='Username for connecting to neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_username'),
    cfg.StrOpt('admin_password',
               help='Password for connecting to neutron in admin context',
               secret=True,
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_password'),
    cfg.StrOpt('admin_tenant_id',
               help='Tenant id for connecting to neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_tenant_id'),
    cfg.StrOpt('admin_tenant_name',
               help='Tenant name for connecting to neutron in admin context. '
                    'This option is mutually exclusive with '
                    'admin_tenant_id. Note that with Keystone V3 '
                    'tenant names are only unique within a domain.',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_tenant_name'),
    cfg.StrOpt('region_name',
               help='Region name for connecting to neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_region_name'),
    cfg.StrOpt('admin_auth_url',
               default='http://localhost:5000/v2.0',
               help='Authorization URL for connecting to neutron in admin '
                    'context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_admin_auth_url'),
    cfg.BoolOpt('api_insecure',
                default=False,
                help='If set, ignore any SSL validation issues',
                deprecated_group='DEFAULT',
                deprecated_name='neutron_api_insecure'),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               help='Authorization strategy for connecting to '
                    'neutron in admin context',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_auth_strategy'),
    cfg.StrOpt('ovs_bridge',
               default='br-int',
               help='Name of Integration Bridge used by Open vSwitch',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_ovs_bridge'),
    cfg.IntOpt('extension_sync_interval',
               default=600,
               help='Number of seconds before querying neutron for'
                    ' extensions',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_extension_sync_interval'),
    cfg.StrOpt('ca_certificates_file',
               help='Location of CA certificates file to use for '
                    'neutron client requests.',
               deprecated_group='DEFAULT',
               deprecated_name='neutron_ca_certificates_file'),
]

# Register the options under the [neutron] group and pull in options
# owned by other nova network modules that this driver reads.
CONF = cfg.CONF
CONF.register_opts(neutron_opts, 'neutron')
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
class API(base_api.NetworkAPI):
    def __init__(self):
        """Initialize the neutron network API driver."""
        super(API, self).__init__()
        # Timestamp of the last neutron extension sync; None forces a
        # refresh on first use.
        self.last_neutron_extension_sync = None
        # Cache of neutron extensions, keyed per sync.
        self.extensions = {}
        self.conductor_api = conductor.API()
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures.

        No-op for the neutron driver: neutron owns the network plumbing,
        so there is nothing to set up on the compute host here.
        """
    def _get_available_networks(self, context, project_id,
                                net_ids=None, neutron=None):
        """Return the networks the project may attach to.

        With net_ids, fetch exactly those networks (in that order);
        otherwise fetch the project's own networks plus shared ones.
        Raises ExternalNetworkAttachForbidden for non-admins requesting
        an external network.
        """
        if not neutron:
            neutron = neutronv2.get_client(context)
        if net_ids:
            # Specific networks were requested.
            search_opts = {'id': net_ids}
            nets = neutron.list_networks(**search_opts).get('networks', [])
        else:
            # Tenant-owned networks first, then shared networks.
            search_opts = {'tenant_id': project_id, 'shared': False}
            nets = neutron.list_networks(**search_opts).get('networks', [])
            search_opts = {'shared': True}
            nets += neutron.list_networks(**search_opts).get('networks', [])
        # Preserve the caller's requested ordering.
        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)
        # Only admins may attach instances directly to external networks.
        if not context.is_admin:
            for net in nets:
                if net.get('router:external'):
                    raise exception.ExternalNetworkAttachForbidden(
                        network_uuid=net['id'])
        return nets
    def _create_port(self, port_client, instance, network_id, port_req_body,
                     fixed_ip=None, security_group_ids=None,
                     available_macs=None, dhcp_opts=None):
        """Create a neutron port for the instance on network_id.

        :param port_req_body: partially-filled request body; this method
            adds network id, tenant, admin state and optional fields.
        :param available_macs: optional set of MACs the hypervisor allows;
            one is popped and assigned to the port.
        :returns: the id of the created port.
        :raises PortNotFree: available_macs was provided but is empty.
        :raises PortLimitExceeded: neutron returned a 409 (quota).
        """
        try:
            if fixed_ip:
                port_req_body['port']['fixed_ips'] = [{'ip_address': fixed_ip}]
            port_req_body['port']['network_id'] = network_id
            port_req_body['port']['admin_state_up'] = True
            port_req_body['port']['tenant_id'] = instance['project_id']
            if security_group_ids:
                port_req_body['port']['security_groups'] = security_group_ids
            if available_macs is not None:
                if not available_macs:
                    # The hypervisor constrained the MACs and we have
                    # used them all up.
                    raise exception.PortNotFree(
                        instance=instance['uuid'])
                mac_address = available_macs.pop()
                port_req_body['port']['mac_address'] = mac_address
            if dhcp_opts is not None:
                port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
            port_id = port_client.create_port(port_req_body)['port']['id']
            LOG.debug('Successfully created port: %s', port_id,
                      instance=instance)
            return port_id
        except neutron_client_exc.NeutronClientException as e:
            # 409 Conflict maps to the tenant's port quota being exhausted.
            if e.status_code == 409:
                LOG.warning(_('Neutron error: quota exceeded'))
                raise exception.PortLimitExceeded()
            with excutils.save_and_reraise_exception():
                LOG.exception(_('Neutron error creating port on network %s'),
                              network_id, instance=instance)
    def allocate_for_instance(self, context, instance, **kwargs):
        """Allocate network resources (neutron ports) for the instance.

        Supported kwargs: requested_networks (list of
        (network_id, fixed_ip, port_id) triples), macs (hypervisor MAC
        constraint set), security_groups, dhcp_options.

        Existing ports referenced by port_id are claimed (device_id set);
        other requested networks get freshly created ports.  On any
        failure, ports touched or created so far are rolled back before
        the exception is re-raised.

        :returns: NetworkInfo limited to the ports handled by this call.
        """
        hypervisor_macs = kwargs.get('macs', None)
        available_macs = None
        if hypervisor_macs is not None:
            # Work on a mutable copy; MACs are consumed as ports are
            # claimed or created.
            available_macs = set(hypervisor_macs)
        neutron = neutronv2.get_client(context)
        LOG.debug('allocate_for_instance()', instance=instance)
        if not instance['project_id']:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance['uuid'])
        requested_networks = kwargs.get('requested_networks')
        dhcp_opts = kwargs.get('dhcp_options', None)
        ports = {}
        fixed_ips = {}
        net_ids = []
        if requested_networks:
            for network_id, fixed_ip, port_id in requested_networks:
                if port_id:
                    # Pre-existing port: validate it is unclaimed and
                    # (if constrained) uses an allowed MAC.
                    port = neutron.show_port(port_id)['port']
                    if port.get('device_id'):
                        raise exception.PortInUse(port_id=port_id)
                    if hypervisor_macs is not None:
                        if port['mac_address'] not in hypervisor_macs:
                            raise exception.PortNotUsable(port_id=port_id,
                                instance=instance['uuid'])
                        else:
                            # This port's MAC is now consumed; identical
                            # MACs may be configured by users into multiple
                            # ports so we discard rather than popping.
                            available_macs.discard(port['mac_address'])
                    network_id = port['network_id']
                    ports[network_id] = port
                elif fixed_ip and network_id:
                    fixed_ips[network_id] = fixed_ip
                if network_id:
                    net_ids.append(network_id)
        nets = self._get_available_networks(context, instance['project_id'],
                                            net_ids)
        if not nets:
            LOG.warn(_("No network configured!"), instance=instance)
            return network_model.NetworkInfo([])
        security_groups = kwargs.get('security_groups', [])
        security_group_ids = []
        # TODO(arosen) Should optimize more to do direct query for security
        # group if len(security_groups) == 1
        if len(security_groups):
            search_opts = {'tenant_id': instance['project_id']}
            user_security_groups = neutron.list_security_groups(
                **search_opts).get('security_groups')
            for security_group in security_groups:
                name_match = None
                uuid_match = None
                for user_security_group in user_security_groups:
                    if user_security_group['name'] == security_group:
                        if name_match:
                            raise exception.NoUniqueMatch(
                                _("Multiple security groups found matching"
                                  " '%s'. Use an ID to be more specific.") %
                                   security_group)
                        name_match = user_security_group['id']
                    if user_security_group['id'] == security_group:
                        uuid_match = user_security_group['id']
                # If a user names the security group the same as
                # another's security groups uuid, the name takes priority.
                if not name_match and not uuid_match:
                    raise exception.SecurityGroupNotFound(
                        security_group_id=security_group)
                elif name_match:
                    security_group_ids.append(name_match)
                elif uuid_match:
                    security_group_ids.append(uuid_match)
        touched_port_ids = []
        created_port_ids = []
        ports_in_requested_order = []
        for network in nets:
            # Security groups cannot be applied to networks without
            # subnets or with port security disabled.
            if (security_groups and not (
                    network['subnets']
                    and network.get('port_security_enabled', True))):
                raise exception.SecurityGroupCannotBeApplied()
            network_id = network['id']
            zone = 'compute:%s' % instance['availability_zone']
            port_req_body = {'port': {'device_id': instance['uuid'],
                                      'device_owner': zone}}
            try:
                port = ports.get(network_id)
                self._populate_neutron_extension_values(context, instance,
                                                        port_req_body)
                # Port binding updates require an admin client.
                port_client = (neutron if not
                               self._has_port_binding_extension(context) else
                               neutronv2.get_client(context, admin=True))
                if port:
                    # Claim the pre-existing port for this instance.
                    port_client.update_port(port['id'], port_req_body)
                    touched_port_ids.append(port['id'])
                    ports_in_requested_order.append(port['id'])
                else:
                    created_port = self._create_port(
                            port_client, instance, network_id,
                            port_req_body, fixed_ips.get(network_id),
                            security_group_ids, available_macs, dhcp_opts)
                    created_port_ids.append(created_port)
                    ports_in_requested_order.append(created_port)
            except Exception:
                # Roll back: unclaim touched ports, delete created ones,
                # then re-raise the original failure.
                with excutils.save_and_reraise_exception():
                    for port_id in touched_port_ids:
                        try:
                            port_req_body = {'port': {'device_id': ''}}
                            if self._has_port_binding_extension(context):
                                port_req_body['port']['binding:host_id'] = None
                                port_client = neutronv2.get_client(
                                    context, admin=True)
                            else:
                                port_client = neutron
                            port_client.update_port(port_id, port_req_body)
                        except Exception:
                            msg = _("Failed to update port %s")
                            LOG.exception(msg, port_id)
                    for port_id in created_port_ids:
                        try:
                            neutron.delete_port(port_id)
                        except Exception:
                            msg = _("Failed to delete port %s")
                            LOG.exception(msg, port_id)
        nw_info = self.get_instance_nw_info(context, instance, networks=nets,
                                            port_ids=ports_in_requested_order)
        # Only return info about ports this call actually handled; the
        # instance may have other ports attached elsewhere.
        return network_model.NetworkInfo([port for port in nw_info
                                          if port['id'] in created_port_ids +
                                          touched_port_ids])
def _refresh_neutron_extensions_cache(self, context):
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron.extension_sync_interval)):
neutron = neutronv2.get_client(context)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
    def _has_port_binding_extension(self, context, refresh_cache=False):
        """Return True when neutron advertises the port-binding extension.

        Consults the local extension cache; pass refresh_cache=True to
        force a (possibly rate-limited) re-sync first.
        """
        if refresh_cache:
            self._refresh_neutron_extensions_cache(context)
        return constants.PORTBINDING_EXT in self.extensions
    def _populate_neutron_extension_values(self, context, instance,
                                           port_req_body):
        """Fill extension-dependent fields into a port request body.

        Adds the flavor's rxtx_factor when the QoS-queue extension is
        present, and binding:host_id when port binding is supported.
        """
        self._refresh_neutron_extensions_cache(context)
        if constants.QOS_QUEUE in self.extensions:
            flavor = flavors.extract_flavor(instance)
            rxtx_factor = flavor.get('rxtx_factor')
            port_req_body['port']['rxtx_factor'] = rxtx_factor
        if self._has_port_binding_extension(context):
            port_req_body['port']['binding:host_id'] = instance.get('host')
    def deallocate_for_instance(self, context, instance, **kwargs):
        """Deallocate all network resources held by the instance.

        Ports that the caller listed in requested_networks are only
        unclaimed (device_id/device_owner cleared); every other port
        attached to the instance is deleted.
        """
        LOG.debug('deallocate_for_instance()', instance=instance)
        search_opts = {'device_id': instance['uuid']}
        neutron = neutronv2.get_client(context)
        data = neutron.list_ports(**search_opts)
        ports = [port['id'] for port in data.get('ports', [])]
        requested_networks = kwargs.get('requested_networks') or {}
        # Pre-existing user ports are kept (just unclaimed), not deleted.
        ports_to_skip = [port_id for nets, fips, port_id in requested_networks]
        ports = set(ports) - set(ports_to_skip)
        for port in ports_to_skip:
            port_req_body = {'port': {'device_id': '', 'device_owner': ''}}
            try:
                neutronv2.get_client(context).update_port(port,
                                                          port_req_body)
            except Exception:
                # Best effort: the port may already be gone or unreachable.
                LOG.info(_('Unable to reset device ID for port %s'), port,
                         instance=instance)
        for port in ports:
            try:
                neutron.delete_port(port)
            except neutronv2.exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    # Already deleted: that is the desired end state.
                    LOG.warning(_("Port %s does not exist"), port)
                else:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Failed to delete neutron port %s"),
                                      port)
        # NOTE(review): clears the instance's network cache.  This appears
        # to matter when an instance fails to
        # launch and is rescheduled onto another compute node. If the instance
        # has already been deleted this call does nothing.
        base_api.update_instance_cache_with_nw_info(self, context, instance,
                                            network_model.NetworkInfo([]))
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)])
def deallocate_port_for_instance(self, context, instance, port_id):
try:
neutronv2.get_client(context).delete_port(port_id)
except Exception:
LOG.exception(_("Failed to delete neutron port %s") %
port_id)
return self.get_instance_nw_info(context, instance)
    def list_ports(self, context, **search_opts):
        """Proxy a port list query directly to neutron."""
        return neutronv2.get_client(context).list_ports(**search_opts)
    def show_port(self, context, port_id):
        """Proxy a single-port lookup directly to neutron."""
        return neutronv2.get_client(context).show_port(port_id)
    def get_instance_nw_info(self, context, instance, networks=None,
                             port_ids=None, use_slave=False):
        """Return network info for the instance, refreshing the cache."""
        # NOTE(geekinutah): It would be nice if use_slave had us call
        #                   special APIs that pummeled slaves instead of
        #                   the master. For now we just ignore this arg.
        result = self._get_instance_nw_info(context, instance, networks,
                                            port_ids)
        base_api.update_instance_cache_with_nw_info(self, context, instance,
                                                    result, update_cells=False)
        return result
    def _get_instance_nw_info(self, context, instance, networks=None,
                              port_ids=None):
        """Build fresh network info without touching the instance cache."""
        # keep this caching-free version of the get_instance_nw_info method
        # because it is used by the caching logic itself.
        LOG.debug('get_instance_nw_info()', instance=instance)
        nw_info = self._build_network_info_model(context, instance, networks,
                                                 port_ids)
        return network_model.NetworkInfo.hydrate(nw_info)
    def _gather_port_ids_and_networks(self, context, instance, networks=None,
                                      port_ids=None):
        """Return the (networks, port_ids) pair to build nw info from.

        Either both arguments are None (full cache refresh: everything is
        derived from the instance's cached interfaces) or both are given
        (an interface was added/removed: the cached interfaces are merged
        in so they are not dropped from the db).

        :raises NovaException: exactly one of networks/port_ids was None.
        """
        if ((networks is None and port_ids is not None) or
            (port_ids is None and networks is not None)):
            message = ("This method needs to be called with either "
                       "networks=None and port_ids=None or port_ids and "
                       " networks as not none.")
            raise exception.NovaException(message=message)
        ifaces = compute_utils.get_nw_info_for_instance(instance)
        # This code path is only done when refreshing the network_cache
        if port_ids is None:
            port_ids = [iface['id'] for iface in ifaces]
            net_ids = [iface['network']['id'] for iface in ifaces]
        if networks is None:
            networks = self._get_available_networks(context,
                                                    instance['project_id'],
                                                    net_ids)
        # an interface was added/removed from instance.
        else:
            # Since networks does not contain the existing networks on the
            # instance we use their values from the cache and add it.
            networks = networks + [
                {'id': iface['network']['id'],
                 'name': iface['network']['label'],
                 'tenant_id': iface['network']['meta']['tenant_id']}
                for iface in ifaces]
            # Include existing interfaces so they are not removed from the db.
            port_ids = [iface['id'] for iface in ifaces] + port_ids
        return networks, port_ids
    @base_api.refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Add another fixed IP (from any subnet of network_id) to one of
        the instance's ports on that network.

        Tries each (port, subnet) combination until an update succeeds.

        :raises NetworkNotFoundForInstance: the network has no subnets or
            no port update succeeded.
        """
        search_opts = {'network_id': network_id}
        data = neutronv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        if not ipam_subnets:
            raise exception.NetworkNotFoundForInstance(
                instance_id=instance['uuid'])
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone,
                       'network_id': network_id}
        data = neutronv2.get_client(context).list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            for subnet in ipam_subnets:
                fixed_ips = p['fixed_ips']
                fixed_ips.append({'subnet_id': subnet['id']})
                port_req_body = {'port': {'fixed_ips': fixed_ips}}
                try:
                    neutronv2.get_client(context).update_port(p['id'],
                                                              port_req_body)
                    # First successful update wins.
                    return self._get_instance_nw_info(context, instance)
                except Exception as ex:
                    msg = _("Unable to update port %(portid)s on subnet "
                            "%(subnet_id)s with failure: %(exception)s")
                    LOG.debug(msg, {'portid': p['id'],
                                    'subnet_id': subnet['id'],
                                    'exception': ex})
        raise exception.NetworkNotFoundForInstance(
                instance_id=instance['uuid'])
    @base_api.refresh_cache
    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Remove the given fixed IP from the instance's port holding it.

        :raises FixedIpNotFoundForSpecificInstance: no port of the
            instance carries the address (or every update failed).
        """
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone,
                       'fixed_ips': 'ip_address=%s' % address}
        data = neutronv2.get_client(context).list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            fixed_ips = p['fixed_ips']
            new_fixed_ips = []
            # Rebuild the fixed-ip list without the target address.
            for fixed_ip in fixed_ips:
                if fixed_ip['ip_address'] != address:
                    new_fixed_ips.append(fixed_ip)
            port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
            try:
                neutronv2.get_client(context).update_port(p['id'],
                                                          port_req_body)
            except Exception as ex:
                msg = _("Unable to update port %(portid)s with"
                        " failure: %(exception)s")
                LOG.debug(msg, {'portid': p['id'], 'exception': ex})
            return self._get_instance_nw_info(context, instance)
        raise exception.FixedIpNotFoundForSpecificInstance(
                instance_uuid=instance['uuid'], ip=address)
    def validate_networks(self, context, requested_networks, num_instances):
        """Validate requested networks/ports and apply the port quota.

        Checks that referenced ports exist, are unused and have fixed
        IPs; that requested networks exist, have subnets and are not
        duplicated; and that requested fixed IPs are free.

        :returns: how many of num_instances can actually be created
            given the tenant's remaining neutron port quota.
        """
        LOG.debug('validate_networks() for %s',
                  requested_networks)
        neutron = neutronv2.get_client(context)
        ports_needed_per_instance = 0
        if not requested_networks:
            nets = self._get_available_networks(context, context.project_id,
                                                neutron=neutron)
            if len(nets) > 1:
                # Attaching to more than one network by default doesn't
                # have an obvious choice, so require an explicit network id.
                msg = _("Multiple possible networks found, use a Network "
                         "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
            else:
                ports_needed_per_instance = 1
        else:
            instance_on_net_ids = []
            net_ids_requested = []
            for (net_id, fixed_ip, port_id) in requested_networks:
                if port_id:
                    try:
                        port = neutron.show_port(port_id).get('port')
                    except neutronv2.exceptions.NeutronClientException as e:
                        if e.status_code == 404:
                            port = None
                        else:
                            with excutils.save_and_reraise_exception():
                                LOG.exception(_("Failed to access port %s"),
                                              port_id)
                    if not port:
                        raise exception.PortNotFound(port_id=port_id)
                    if port.get('device_id', None):
                        raise exception.PortInUse(port_id=port_id)
                    if not port.get('fixed_ips'):
                        raise exception.PortRequiresFixedIP(port_id=port_id)
                    net_id = port['network_id']
                else:
                    # A fresh port will have to be created for this entry.
                    ports_needed_per_instance += 1
                net_ids_requested.append(net_id)
                # NOTE(jecarey) There is currently a race condition.
                # That is, if you have more than one request for a specific
                # fixed IP at the same time then only one will be allocated
                # the ip. The fixed IP will be allocated to only one of the
                # instances that will run. The second instance will fail on
                # spawn. That instance will go into error state.
                # TODO(jecarey) Need to address this race condition once we
                # have the ability to update mac addresses in Neutron.
                if fixed_ip:
                    # TODO(jecarey) Need to look at consolidating list_port
                    # calls once able to OR filters.
                    search_opts = {'network_id': net_id,
                                   'fixed_ips': 'ip_address=%s' % fixed_ip,
                                   'fields': 'device_id'}
                    existing_ports = neutron.list_ports(
                                                    **search_opts)['ports']
                    if existing_ports:
                        i_uuid = existing_ports[0]['device_id']
                        raise exception.FixedIpAlreadyInUse(
                                                    address=fixed_ip,
                                                    instance_uuid=i_uuid)
                if net_id in instance_on_net_ids:
                    raise exception.NetworkDuplicated(network_id=net_id)
                instance_on_net_ids.append(net_id)
            # Now check to see if all requested networks exist
            if net_ids_requested:
                nets = self._get_available_networks(
                    context, context.project_id, net_ids_requested,
                    neutron=neutron)
                for net in nets:
                    if not net.get('subnets'):
                        raise exception.NetworkRequiresSubnet(
                            network_uuid=net['id'])
                if len(nets) != len(net_ids_requested):
                    # Some requested ids were not returned: report them.
                    requested_netid_set = set(net_ids_requested)
                    returned_netid_set = set([net['id'] for net in nets])
                    lostid_set = requested_netid_set - returned_netid_set
                    id_str = ''
                    for _id in lostid_set:
                        id_str = id_str and id_str + ', ' + _id or _id
                    raise exception.NetworkNotFound(network_id=id_str)
        # Note(PhilD): Ideally Nova would create all required ports as part of
        # network validation, but port creation requires some details
        # from the hypervisor. So we just check the quota and return
        # how many of the requested number of instances can be created
        if ports_needed_per_instance:
            ports = neutron.list_ports(tenant_id=context.project_id)['ports']
            quotas = neutron.show_quota(tenant_id=context.project_id)['quota']
            if quotas.get('port') == -1:
                # Unlimited Port Quota
                return num_instances
            else:
                free_ports = quotas.get('port') - len(ports)
                ports_needed = ports_needed_per_instance * num_instances
                if free_ports >= ports_needed:
                    return num_instances
                else:
                    return free_ports // ports_needed_per_instance
        return num_instances
def _get_instance_uuids_by_ip(self, context, address):
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
# we remove ^$\ in the ip filer
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
    def _get_port_id_by_fixed_address(self, client,
                                      instance, address):
        """Find the instance port whose fixed IPs include address.

        :raises FixedIpNotFoundForAddress: no port carries the address.
        """
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone}
        data = client.list_ports(**search_opts)
        ports = data['ports']
        port_id = None
        for p in ports:
            for ip in p['fixed_ips']:
                if ip['ip_address'] == address:
                    port_id = p['id']
                    # NOTE(review): this break only exits the inner loop;
                    # if several ports carried the same address the LAST
                    # match would win. Presumably addresses are unique
                    # per instance -- confirm before changing this.
                    break
        if not port_id:
            raise exception.FixedIpNotFoundForAddress(address=address)
        return port_id
    @base_api.refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Point floating_address at the instance port owning fixed_address.

        If the floating IP was already attached to another port, that
        original instance's network cache is purged after the re-assign.
        """
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = neutronv2.get_client(context)
        port_id = self._get_port_id_by_fixed_address(client, instance,
                                                     fixed_address)
        fip = self._get_floating_ip_by_address(client, floating_address)
        param = {'port_id': port_id,
                 'fixed_ip_address': fixed_address}
        client.update_floatingip(fip['id'], {'floatingip': param})
        if fip['port_id']:
            # The floating IP was previously bound elsewhere.
            port = client.show_port(fip['port_id'])['port']
            orig_instance_uuid = port['device_id']
            msg_dict = dict(address=floating_address,
                            instance_id=orig_instance_uuid)
            LOG.info(_('re-assign floating IP %(address)s from '
                       'instance %(instance_id)s') % msg_dict)
            orig_instance = self.db.instance_get_by_uuid(context,
                                                         orig_instance_uuid)
            # purge cached nw info for the original instance
            base_api.update_instance_cache_with_nw_info(self, context,
                                                        orig_instance)
def get_all(self, context):
client = neutronv2.get_client(context)
networks = client.list_networks().get('networks')
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
client = neutronv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
    def delete(self, context, network_uuid):
        """Deleting networks through nova is unsupported with neutron."""
        raise NotImplementedError()
    def disassociate(self, context, network_uuid):
        """Network disassociation is unsupported with neutron."""
        raise NotImplementedError()
    def associate(self, context, network_uuid, host=base_api.SENTINEL,
                  project=base_api.SENTINEL):
        """Network association is unsupported with neutron."""
        raise NotImplementedError()
    def get_fixed_ip(self, context, id):
        """Fixed-IP lookup by id is unsupported with neutron."""
        raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
    def get_floating_ip(self, context, id):
        """Return the nova-model floating IP identified by neutron id.

        :raises FloatingIpNotFound: neutron returned a 404 for the id.
        """
        client = neutronv2.get_client(context)
        try:
            fip = client.show_floatingip(id)['floatingip']
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                raise exception.FloatingIpNotFound(id=id)
            else:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_('Unable to access floating IP %s'), id)
        # Auxiliary lookups needed to render the nova model.
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {constants.NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
client = neutronv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Neutron v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Neutron v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = neutronv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        """Not implemented for the neutron driver."""
        raise NotImplementedError()
def get_instance_id_by_floating_address(self, context, address):
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
    def get_vifs_by_instance(self, context, instance):
        """Not implemented for the neutron driver."""
        raise NotImplementedError()
    def get_vif_by_mac_address(self, context, mac_address):
        """Not implemented for the neutron driver."""
        raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
    def allocate_floating_ip(self, context, pool=None):
        """Allocate a floating IP from the given (or default) pool.

        :returns: the allocated floating IP address (string).
        :raises NoMoreFloatingIps: the pool is exhausted.
        """
        client = neutronv2.get_client(context)
        pool = pool or CONF.default_floating_pool
        pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
        # TODO(amotoki): handle exception during create_floatingip()
        # At this timing it is ensured that a network for pool exists.
        # quota error may be returned.
        param = {'floatingip': {'floating_network_id': pool_id}}
        try:
            fip = client.create_floatingip(param)
        except (neutron_client_exc.IpAddressGenerationFailureClient,
                neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
            # NOTE: unicode() marks this as Python 2 code.
            raise exception.NoMoreFloatingIps(unicode(e))
        return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
    def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
        """Return floatingips bound to the (fixed_ip, port) pair.

        Returns [] when the neutron plugin lacks the L3 API (404).
        """
        try:
            data = client.list_floatingips(fixed_ip_address=fixed_ip,
                                           port_id=port)
        # If a neutron plugin does not implement the L3 API a 404 from
        # list_floatingips will be raised.
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                return []
            with excutils.save_and_reraise_exception():
                LOG.exception(_('Unable to access floating IP %(fixed_ip)s '
                                'for port %(port_id)s'),
                              {'fixed_ip': fixed_ip, 'port_id': port})
        return data['floatingips']
    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Delete an unassociated floating IP by address.

        :raises FloatingIpAssociated: the IP is still bound to a port.
        """
        # Note(amotoki): We cannot handle a case where multiple pools
        # have overlapping IP address range. In this case we cannot use
        # 'address' as a unique key.
        # This is a limitation of the current nova.
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if fip['port_id']:
            raise exception.FloatingIpAssociated(address=address)
        client.delete_floatingip(fip['id'])
    @base_api.refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Detach a floating IP from whatever port it is bound to."""
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
    def migrate_instance_start(self, context, instance, migration):
        """No-op: neutron needs no work at migration start."""
        # NOTE(wenjianhn): just pass so starting a migration does not
        # raise; the port binding is moved in migrate_instance_finish.
        pass
    def migrate_instance_finish(self, context, instance, migration):
        """Rebind the instance's ports to the migration's dest_compute.

        No-op when neutron lacks the port-binding extension.
        """
        if not self._has_port_binding_extension(context, refresh_cache=True):
            return
        # Binding updates require admin rights.
        neutron = neutronv2.get_client(context, admin=True)
        search_opts = {'device_id': instance['uuid'],
                       'tenant_id': instance['project_id']}
        data = neutron.list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            port_req_body = {'port': {'binding:host_id':
                                      migration['dest_compute']}}
            try:
                neutron.update_port(p['id'], port_req_body)
            except Exception:
                with excutils.save_and_reraise_exception():
                    msg = _("Unable to update host of port %s")
                    LOG.exception(msg, p['id'])
    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Not implemented for the neutron driver."""
        raise NotImplementedError()
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs):
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
    def _nw_info_build_network(self, port, networks, subnets):
        """Build the Network model (and ovs interface id) for a port.

        Chooses the bridge name from the port's binding:vif_type and
        resolves the network name/tenant from the networks list (falling
        back to the port's tenant on a miss).

        :returns: (network_model.Network, ovs_interfaceid or None)
        """
        network_name = None
        for net in networks:
            if port['network_id'] == net['id']:
                network_name = net['name']
                tenant_id = net['tenant_id']
                break
        else:
            # Port's network was not in the provided list.
            tenant_id = port['tenant_id']
            LOG.warning(_("Network %(id)s not matched with the tenants "
                          "network! The ports tenant %(tenant_id)s will be "
                          "used."),
                        {'id': port['network_id'], 'tenant_id': tenant_id})
        bridge = None
        ovs_interfaceid = None
        should_create_bridge = None
        vif_type = port.get('binding:vif_type')
        if vif_type == network_model.VIF_TYPE_OVS:
            bridge = CONF.neutron.ovs_bridge
            ovs_interfaceid = port['id']
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            bridge = "brq" + port['network_id']
            should_create_bridge = True
        if bridge is not None:
            # Linux interface names are length-limited.
            bridge = bridge[:network_model.NIC_NAME_LEN]
        network = network_model.Network(
            id=port['network_id'],
            bridge=bridge,
            injected=CONF.flat_injected,
            label=network_name,
            tenant_id=tenant_id
            )
        network['subnets'] = subnets
        port_profile = port.get('binding:profile')
        if port_profile:
            physical_network = port_profile.get('physical_network')
            if physical_network:
                network['physical_network'] = physical_network
        if should_create_bridge is not None:
            network['should_create_bridge'] = should_create_bridge
        return network, ovs_interfaceid
    def _build_network_info_model(self, context, instance, networks=None,
                                  port_ids=None):
        """Assemble the NetworkInfo (list of VIF models) for an instance.

        Fetches the instance's current neutron ports and emits one VIF
        per requested port id, in the requested order.
        """
        search_opts = {'tenant_id': instance['project_id'],
                       'device_id': instance['uuid'], }
        client = neutronv2.get_client(context, admin=True)
        data = client.list_ports(**search_opts)
        current_neutron_ports = data.get('ports', [])
        networks, port_ids = self._gather_port_ids_and_networks(
                context, instance, networks, port_ids)
        nw_info = network_model.NetworkInfo()
        # Index ports by id for ordered lookup below.
        current_neutron_port_map = {}
        for current_neutron_port in current_neutron_ports:
            current_neutron_port_map[current_neutron_port['id']] = (
                current_neutron_port)
        for port_id in port_ids:
            current_neutron_port = current_neutron_port_map.get(port_id)
            if current_neutron_port:
                vif_active = False
                if (current_neutron_port['admin_state_up'] is False
                    or current_neutron_port['status'] == 'ACTIVE'):
                    vif_active = True
                network_IPs = self._nw_info_get_ips(client,
                                                    current_neutron_port)
                subnets = self._nw_info_get_subnets(context,
                                                    current_neutron_port,
                                                    network_IPs)
                devname = "tap" + current_neutron_port['id']
                devname = devname[:network_model.NIC_NAME_LEN]
                network, ovs_interfaceid = (
                    self._nw_info_build_network(current_neutron_port,
                                                networks, subnets))
                nw_info.append(network_model.VIF(
                    id=current_neutron_port['id'],
                    address=current_neutron_port['mac_address'],
                    network=network,
                    type=current_neutron_port.get('binding:vif_type'),
                    details=current_neutron_port.get('binding:vif_details'),
                    ovs_interfaceid=ovs_interfaceid,
                    devname=devname,
                    active=vif_active))
        return nw_info
    def _get_subnets_from_port(self, context, port):
        """Return the Subnet models for a port's fixed-IP subnets.

        Each subnet carries its cidr, gateway, discovered DHCP server
        address (if a network:dhcp port exists) and DNS nameservers.
        """
        fixed_ips = port['fixed_ips']
        if not fixed_ips:
            return []
        search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
        data = neutronv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []
        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                                address=subnet['gateway_ip'],
                                type='gateway'),
            }
            # Locate the DHCP agent port on this subnet, if any, to
            # record the DHCP server address.
            search_opts = {'network_id': subnet['network_id'],
                           'device_owner': 'network:dhcp'}
            data = neutronv2.get_client(context).list_ports(**search_opts)
            dhcp_ports = data.get('ports', [])
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break
            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))
            subnets.append(subnet_object)
        return subnets
    def get_dns_domains(self, context):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def add_dns_entry(self, context, address, name, dns_type, domain):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def modify_dns_entry(self, context, name, address, domain):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def delete_dns_entry(self, context, name, domain):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def delete_dns_domain(self, context, domain):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def get_dns_entries_by_address(self, context, address, domain):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def get_dns_entries_by_name(self, context, name, domain):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def create_private_dns_domain(self, context, domain, availability_zone):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
    def create_public_dns_domain(self, context, domain, project=None):
        """DNS management is not implemented for the neutron driver."""
        raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
| true | true |
1c3452a3959b666a053422cb1b7c48ff627fee76 | 552 | py | Python | django_nginx_proxy/django_nginx_proxy/images/views.py | ghjan/blog-projects | aa6925724e457bec276d98cf7b55b5cdaf2ab5f4 | [
"MIT"
] | 66 | 2017-11-18T06:41:39.000Z | 2021-09-02T15:47:08.000Z | django_nginx_proxy/django_nginx_proxy/images/views.py | ghjan/blog-projects | aa6925724e457bec276d98cf7b55b5cdaf2ab5f4 | [
"MIT"
] | 2 | 2018-05-28T14:06:05.000Z | 2020-03-21T14:05:07.000Z | django_nginx_proxy/django_nginx_proxy/images/views.py | ghjan/blog-projects | aa6925724e457bec276d98cf7b55b5cdaf2ab5f4 | [
"MIT"
] | 35 | 2017-11-05T23:48:15.000Z | 2021-09-15T12:15:39.000Z | from django.http import HttpResponse
from rest_framework import viewsets
from .models import Image
from .serializers import ImageSerializer
class ImagesViewSet(viewsets.ModelViewSet):
    # Standard DRF ModelViewSet: exposes list/retrieve/create/update/destroy
    # endpoints over every Image row, serialized with ImageSerializer.
    queryset = Image.objects.all()
    serializer_class = ImageSerializer
def download_image_view(request, image_id):
    """Serve an image download via nginx's X-Accel-Redirect mechanism.

    The response body is empty; the X-Accel-Redirect header makes nginx
    (not Django) stream the file bytes, and Content-Disposition forces a
    download named after the stored file.
    """
    image = Image.objects.get(id=image_id)
    stored_file = image.image_file

    response = HttpResponse()
    response['X-Accel-Redirect'] = stored_file.url
    disposition = 'attachment; filename="{}"'.format(stored_file.name)
    response['Content-Disposition'] = disposition
    return response
| 29.052632 | 95 | 0.766304 | from django.http import HttpResponse
from rest_framework import viewsets
from .models import Image
from .serializers import ImageSerializer
class ImagesViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
def download_image_view(request, image_id):
image = Image.objects.get(id=image_id)
response = HttpResponse()
response['X-Accel-Redirect'] = image.image_file.url
response['Content-Disposition'] = 'attachment; filename="{}"'.format(image.image_file.name)
return response
| true | true |
1c345372755538992940b68fec8448c5fa57ad4b | 5,664 | py | Python | src/Python/Rendering/Rotations.py | cvandijck/VTKExamples | b6bb89414522afc1467be8a1f0089a37d0c16883 | [
"Apache-2.0"
] | 309 | 2017-05-21T09:07:19.000Z | 2022-03-15T09:18:55.000Z | src/Python/Rendering/Rotations.py | yijianmingliu/VTKExamples | dc8aac47c4384f9a2de9facbdd1ab3249f62ec99 | [
"Apache-2.0"
] | 379 | 2017-05-21T09:06:43.000Z | 2021-03-29T20:30:50.000Z | src/Python/Rendering/Rotations.py | yijianmingliu/VTKExamples | dc8aac47c4384f9a2de9facbdd1ab3249f62ec99 | [
"Apache-2.0"
] | 170 | 2017-05-17T14:47:41.000Z | 2022-03-31T13:16:26.000Z | #!/usr/local/bin/python
import vtk
def main():
    """Entry point: parse CLI arguments and run the requested rotation demo.

    To match the illustrations in VTKTextbook.pdf, use BkgColor as the
    background and Wheat as the cow colour.
    Also comment out the lines:
     modelActor->GetProperty()->SetSpecular(.6);
     modelActor->GetProperty()->SetSpecularPower(30);
    and use cow.g as the input data.
    """
    cli_args = get_program_parameters()
    rotate(*cli_args)
def rotate(file_name, figure, book_color):
    """Set up the render pipeline and run the rotation selected by `figure`.

    figure 1/2/3 rotates about x/y/z; anything else does the x-then-y demo.
    book_color selects the VTK-textbook colour scheme.
    """
    # Create renderer stuff
    #
    colors = vtk.vtkNamedColors()
    # Set the background color.
    colors.SetColor("BkgColor", [26, 51, 102, 255])
    # colors.SetColor("BkgColor", [60, 93, 144, 255])
    ren1 = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren1)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    # Create the pipeline: file reader -> mapper -> actor.
    #
    polyData = ReadPolyData(file_name)
    modelMapper = vtk.vtkPolyDataMapper()
    modelMapper.SetInputData(polyData)
    modelActor = vtk.vtkActor()
    modelActor.SetMapper(modelMapper)
    if book_color:
        modelActor.GetProperty().SetDiffuseColor(colors.GetColor3d("Wheat"))
    else:
        modelActor.GetProperty().SetDiffuseColor(colors.GetColor3d("Crimson"))
    modelActor.GetProperty().SetSpecular(.6)
    modelActor.GetProperty().SetSpecularPower(30)
    # A small axes marker at the origin, hidden until the camera is set up.
    modelAxesSource = vtk.vtkAxes()
    modelAxesSource.SetScaleFactor(10)
    modelAxesSource.SetOrigin(0, 0, 0)
    modelAxesMapper = vtk.vtkPolyDataMapper()
    modelAxesMapper.SetInputConnection(modelAxesSource.GetOutputPort())
    modelAxes = vtk.vtkActor()
    modelAxes.SetMapper(modelAxesMapper)
    ren1.AddActor(modelAxes)
    modelAxes.VisibilityOff()
    # Add the actors to the renderer, set the background and size.
    #
    ren1.AddActor(modelActor)
    if book_color:
        ren1.SetBackground(colors.GetColor3d("BkgColor"))
    else:
        ren1.SetBackground(colors.GetColor3d("Silver"))
    renWin.SetSize(640, 480)
    ren1.ResetCamera()
    ren1.GetActiveCamera().Azimuth(0)
    ren1.GetActiveCamera().SetClippingRange(.1, 1000.0)
    modelAxes.VisibilityOn()
    renWin.Render()
    renWin.Render()
    if figure == 1:
        RotateX(renWin, modelActor)
    elif figure == 2:
        RotateY(renWin, modelActor)
    elif figure == 3:
        RotateZ(renWin, modelActor)
    else:
        RotateXY(renWin, modelActor)
    # Leave erase off so all intermediate poses stay visible in the window.
    renWin.EraseOff()
    iren.Start()
def get_program_parameters():
    """Parse the command line.

    Returns (filename, figure, book_color) where figure defaults to 0 and
    book_color defaults to False.
    """
    import argparse

    def str_to_bool(value):
        # argparse's type=bool is a trap: bool('False') is True, because any
        # non-empty string is truthy.  Parse the text explicitly instead.
        return str(value).strip().lower() in ('1', 'true', 'yes', 'y', 'on')

    description = 'Perform rotations about the X, Y, Z and X then Y axes.'
    epilogue = '''
    Perform rotations about the X, Y, Z and X then Y axes.
    '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue)
    parser.add_argument('filename', help='The file cow.obj.')
    parser.add_argument('figure', default=0, type=int, nargs='?',
                        help='The particular rotation that you want to view.')
    parser.add_argument('book_color', default=False, type=str_to_bool, nargs='?',
                        help='If True then the vtk textbook colors are used.')
    args = parser.parse_args()
    return args.filename, args.figure, args.book_color
def RotateX(renWin, actor):
    """Spin the actor one full turn about the x axis in six 60-degree steps,
    rendering twice per step so every pose is drawn."""
    render = renWin.Render
    actor.SetOrientation(0, 0, 0)
    render()
    render()
    renWin.EraseOff()
    for _ in range(6):
        actor.RotateX(60)
        render()
        render()
    renWin.EraseOn()
def RotateY(renWin, actor):
    """Spin the actor one full turn about the y axis in six 60-degree steps."""
    actor.SetOrientation(0, 0, 0)
    renWin.Render()
    renWin.EraseOff()
    step = 0
    while step < 6:
        actor.RotateY(60)
        renWin.Render()
        renWin.Render()
        step += 1
    renWin.EraseOn()
def RotateZ(renWin, actor):
    """Spin the actor one full turn about the z axis in six 60-degree steps."""
    render = renWin.Render
    actor.SetOrientation(0, 0, 0)
    render()
    renWin.EraseOff()
    for _ in range(6):
        actor.RotateZ(60)
        render()
        render()
    renWin.EraseOn()
def RotateXY(renWin, actor):
    """Tilt the actor 60 degrees about x, then spin it one full turn about y
    in six 60-degree steps."""
    render = renWin.Render
    actor.SetOrientation(0, 0, 0)
    actor.RotateX(60)
    render()
    render()
    renWin.EraseOff()
    for _ in range(6):
        actor.RotateY(60)
        render()
        render()
    renWin.EraseOn()
def ReadPolyData(file_name):
    """Read polydata from file_name, choosing a reader by file extension.

    Supports .ply, .vtp, .obj, .stl, .vtk and .g (BYU).  Any other
    extension falls back to a unit sphere so callers always get geometry.

    Fixes: the original referenced vtk.vtkXMLpoly_dataReader and
    vtk.vtkpoly_dataReader, neither of which exists, so .vtp and .vtk
    inputs raised AttributeError.
    """
    import os
    extension = os.path.splitext(file_name)[1].lower()

    # Every reader in this table shares the SetFileName/Update/GetOutput
    # protocol; BYU is handled separately because its setter differs.
    readers = {
        '.ply': vtk.vtkPLYReader,
        '.vtp': vtk.vtkXMLPolyDataReader,
        '.obj': vtk.vtkOBJReader,
        '.stl': vtk.vtkSTLReader,
        '.vtk': vtk.vtkPolyDataReader,
    }
    if extension in readers:
        reader = readers[extension]()
        reader.SetFileName(file_name)
        reader.Update()
        return reader.GetOutput()
    if extension == '.g':
        reader = vtk.vtkBYUReader()
        reader.SetGeometryFileName(file_name)
        reader.Update()
        return reader.GetOutput()
    # Return a sphere if the extension is unknown.
    source = vtk.vtkSphereSource()
    source.Update()
    return source.GetOutput()
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 27.100478 | 89 | 0.640007 |
import vtk
def main():
file_name, figure, book_color = get_program_parameters()
rotate(file_name, figure, book_color)
def rotate(file_name, figure, book_color):
colors = vtk.vtkNamedColors()
colors.SetColor("BkgColor", [26, 51, 102, 255])
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
polyData = ReadPolyData(file_name)
modelMapper = vtk.vtkPolyDataMapper()
modelMapper.SetInputData(polyData)
modelActor = vtk.vtkActor()
modelActor.SetMapper(modelMapper)
if book_color:
modelActor.GetProperty().SetDiffuseColor(colors.GetColor3d("Wheat"))
else:
modelActor.GetProperty().SetDiffuseColor(colors.GetColor3d("Crimson"))
modelActor.GetProperty().SetSpecular(.6)
modelActor.GetProperty().SetSpecularPower(30)
modelAxesSource = vtk.vtkAxes()
modelAxesSource.SetScaleFactor(10)
modelAxesSource.SetOrigin(0, 0, 0)
modelAxesMapper = vtk.vtkPolyDataMapper()
modelAxesMapper.SetInputConnection(modelAxesSource.GetOutputPort())
modelAxes = vtk.vtkActor()
modelAxes.SetMapper(modelAxesMapper)
ren1.AddActor(modelAxes)
modelAxes.VisibilityOff()
ren1.AddActor(modelActor)
if book_color:
ren1.SetBackground(colors.GetColor3d("BkgColor"))
else:
ren1.SetBackground(colors.GetColor3d("Silver"))
renWin.SetSize(640, 480)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(0)
ren1.GetActiveCamera().SetClippingRange(.1, 1000.0)
modelAxes.VisibilityOn()
renWin.Render()
renWin.Render()
if figure == 1:
RotateX(renWin, modelActor)
elif figure == 2:
RotateY(renWin, modelActor)
elif figure == 3:
RotateZ(renWin, modelActor)
else:
RotateXY(renWin, modelActor)
renWin.EraseOff()
iren.Start()
def get_program_parameters():
import argparse
description = 'Perform rotations about the X, Y, Z and X then Y axes.'
epilogue = '''
Perform rotations about the X, Y, Z and X then Y axes.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue)
parser.add_argument('filename', help='The file cow.obj.')
parser.add_argument('figure', default=0, type=int, nargs='?',
help='The particular rotation that you want to view.')
parser.add_argument('book_color', default=False, type=bool, nargs='?',
help='If True then the vtk textbook colors are used.')
args = parser.parse_args()
return args.filename, args.figure, args.book_color
def RotateX(renWin, actor):
actor.SetOrientation(0, 0, 0)
renWin.Render()
renWin.Render()
renWin.EraseOff()
for i in range(0, 6):
actor.RotateX(60)
renWin.Render()
renWin.Render()
renWin.EraseOn()
def RotateY(renWin, actor):
actor.SetOrientation(0, 0, 0)
renWin.Render()
renWin.EraseOff()
for i in range(0, 6):
actor.RotateY(60)
renWin.Render()
renWin.Render()
renWin.EraseOn()
def RotateZ(renWin, actor):
actor.SetOrientation(0, 0, 0)
renWin.Render()
renWin.EraseOff()
for i in range(0, 6):
actor.RotateZ(60)
renWin.Render()
renWin.Render()
renWin.EraseOn()
def RotateXY(renWin, actor):
actor.SetOrientation(0, 0, 0)
actor.RotateX(60)
renWin.Render()
renWin.Render()
renWin.EraseOff()
for i in range(0, 6):
actor.RotateY(60)
renWin.Render()
renWin.Render()
renWin.EraseOn()
def ReadPolyData(file_name):
import os
path, extension = os.path.splitext(file_name)
extension = extension.lower()
if extension == ".ply":
reader = vtk.vtkPLYReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtp":
reader = vtk.vtkXMLpoly_dataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".obj":
reader = vtk.vtkOBJReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".stl":
reader = vtk.vtkSTLReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtk":
reader = vtk.vtkpoly_dataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".g":
reader = vtk.vtkBYUReader()
reader.SetGeometryFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
else:
source = vtk.vtkSphereSource()
source.Update()
poly_data = source.GetOutput()
return poly_data
if __name__ == '__main__':
main()
| true | true |
1c345500fafb45a8bdc24f42c1f4a6ff976133f4 | 125 | py | Python | Python/interview/qumulo/RangeQuery.py | darrencheng0817/AlgorithmLearning | aec1ddd0c51b619c1bae1e05f940d9ed587aa82f | [
"MIT"
] | 2 | 2015-12-02T06:44:01.000Z | 2016-05-04T21:40:54.000Z | Python/interview/qumulo/RangeQuery.py | darrencheng0817/AlgorithmLearning | aec1ddd0c51b619c1bae1e05f940d9ed587aa82f | [
"MIT"
] | null | null | null | Python/interview/qumulo/RangeQuery.py | darrencheng0817/AlgorithmLearning | aec1ddd0c51b619c1bae1e05f940d9ed587aa82f | [
"MIT"
] | null | null | null | '''
Created on 2016年2月3日
给一个stream的integer,然后要提供query返回当前所有的range,举例: 读了8, 6, 4, 7 这个时候query的结果是 4, 6-8
@author: Darren
'''
| 20.833333 | 79 | 0.744 | true | true | |
1c3455318e97efa1db73dd592fe94267ca56a011 | 341 | py | Python | GlobalDataset/scripts/runChi_ca.py | gehilley/GlobalSteepness | 62a1a5b66adb230d5bbbc004aa5d2c5b618a2fdd | [
"MIT"
] | 3 | 2019-09-19T00:04:27.000Z | 2020-02-17T16:17:55.000Z | GlobalDataset/scripts/runChi_ca.py | gehilley/GlobalSteepness | 62a1a5b66adb230d5bbbc004aa5d2c5b618a2fdd | [
"MIT"
] | null | null | null | GlobalDataset/scripts/runChi_ca.py | gehilley/GlobalSteepness | 62a1a5b66adb230d5bbbc004aa5d2c5b618a2fdd | [
"MIT"
] | 1 | 2020-12-17T07:35:23.000Z | 2020-12-17T07:35:23.000Z | from denudationRateAnalysis import create_chi_grid_for_geographic_prefix as create_chi
# Region prefix for the California dataset.
prefix = 'ca'
# First pass: a single reference concavity over the two largest basin scales.
thetas = [0.4]
# Ao: reference drainage area used by the chi computation -- units presumably
# m^2, TODO confirm against denudationRateAnalysis.
Ao = 1000000
basin_lengths = [200000, 400000]
create_chi(prefix, thetas, Ao, basin_lengths)
# Second pass: higher concavities over the full range of basin scales.
thetas = [0.5, 0.6]
Ao = 1000000
basin_lengths = [50000, 100000, 200000, 400000]
create_chi(prefix, thetas, Ao, basin_lengths)
| 21.3125 | 86 | 0.762463 | from denudationRateAnalysis import create_chi_grid_for_geographic_prefix as create_chi
prefix = 'ca'
thetas = [0.4]
Ao = 1000000
basin_lengths = [200000, 400000]
create_chi(prefix, thetas, Ao, basin_lengths)
thetas = [0.5, 0.6]
Ao = 1000000
basin_lengths = [50000, 100000, 200000, 400000]
create_chi(prefix, thetas, Ao, basin_lengths)
| true | true |
1c3455572835a396c5b9ce8dbc523e4c7cdab412 | 25,400 | py | Python | include/HydrusPaths.py | sorashi/hydrus | 0544a75d2117904b42e935d264ae35ded5cbf36a | [
"WTFPL"
] | null | null | null | include/HydrusPaths.py | sorashi/hydrus | 0544a75d2117904b42e935d264ae35ded5cbf36a | [
"WTFPL"
] | null | null | null | include/HydrusPaths.py | sorashi/hydrus | 0544a75d2117904b42e935d264ae35ded5cbf36a | [
"WTFPL"
] | null | null | null | import gc
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusGlobals as HG
from . import HydrusThreading
import os
import psutil
import re
import send2trash
import shlex
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import traceback
# Bookkeeping for temp files that could not be deleted on first attempt.
TEMP_PATH_LOCK = threading.Lock()
# Set of ( time_failed, temp_path ) tuples, retried by CleanUpOldTempPaths.
IN_USE_TEMP_PATHS = set()
def AddBaseDirToEnvPath():
    """Prepend the hydrus base directory to the PATH environment variable."""
    # this is a thing to get mpv working, loading the dll/so from the base dir using ctypes
    if 'PATH' in os.environ:
        os.environ[ 'PATH' ] = HC.BASE_DIR + os.pathsep + os.environ[ 'PATH' ]
def AppendPathUntilNoConflicts( path ):
    """Return path unchanged if it is free, otherwise the first variant with
    '_0', '_1', ... inserted before the extension that does not exist."""
    
    ( stem, ext ) = os.path.splitext( path )
    
    candidate = path
    suffix_index = 0
    
    while os.path.exists( candidate ):
        
        candidate = '{}_{}{}'.format( stem, suffix_index, ext )
        
        suffix_index += 1
        
    
    return candidate
def CleanUpTempPath( os_file_handle, temp_path ):
    """Close the os-level handle and delete the temp file.
    
    If the handle refuses to close, force a gc pass and retry once.  If
    deletion fails (file still in use), queue the path for a later retry
    by CleanUpOldTempPaths.
    """
    try:
        os.close( os_file_handle )
    except OSError:
        # a lingering reference may be holding the handle; collect and retry
        gc.collect()
        try:
            os.close( os_file_handle )
        except OSError:
            HydrusData.Print( 'Could not close the temporary file ' + temp_path )
            return
    try:
        os.remove( temp_path )
    except OSError:
        # remember the failure time so the retry logic can age it out
        with TEMP_PATH_LOCK:
            IN_USE_TEMP_PATHS.add( ( HydrusData.GetNow(), temp_path ) )
def CleanUpOldTempPaths():
    """Retry deletion of temp files that previously refused to delete.
    
    Paths older than 60 seconds are retried; after 600 seconds they are
    dropped from the queue even if deletion keeps failing.
    """
    with TEMP_PATH_LOCK:
        data = list( IN_USE_TEMP_PATHS )
        for row in data:
            ( time_failed, temp_path ) = row
            if HydrusData.TimeHasPassed( time_failed + 60 ):
                try:
                    os.remove( temp_path )
                    IN_USE_TEMP_PATHS.discard( row )
                except OSError:
                    # still stuck -- give up entirely after ten minutes
                    if HydrusData.TimeHasPassed( time_failed + 600 ):
                        IN_USE_TEMP_PATHS.discard( row )
def ConvertAbsPathToPortablePath( abs_path, base_dir_override = None ):
    """Express abs_path relative to the db dir (or override) when possible.
    
    Paths outside the base dir, or any relpath failure, stay absolute.
    """
    try:
        if base_dir_override is None:
            base_dir = HG.controller.GetDBDir()
        else:
            base_dir = base_dir_override
        portable_path = os.path.relpath( abs_path, base_dir )
        if portable_path.startswith( '..' ):
            portable_path = abs_path
    except:
        portable_path = abs_path
    if HC.PLATFORM_WINDOWS:
        portable_path = portable_path.replace( '\\', '/' ) # store seps as /, to maintain multiplatform uniformity
    return portable_path
def ConvertPortablePathToAbsPath( portable_path, base_dir_override = None ):
    """Resolve a stored portable path against the db dir (or override).
    
    Already-absolute paths pass through unchanged.
    """
    portable_path = os.path.normpath( portable_path ) # collapses .. stuff and converts / to \\ for windows only
    if os.path.isabs( portable_path ):
        abs_path = portable_path
    else:
        if base_dir_override is None:
            base_dir = HG.controller.GetDBDir()
        else:
            base_dir = base_dir_override
        abs_path = os.path.normpath( os.path.join( base_dir, portable_path ) )
    if not HC.PLATFORM_WINDOWS and not os.path.exists( abs_path ):
        # last resort: the path may have been stored with windows separators
        abs_path = abs_path.replace( '\\', '/' )
    return abs_path
def CopyAndMergeTree( source, dest ):
    """Recursively copy source into dest, overwriting differing files.
    
    Directory stat info is copied as well.  Aborts after more than five
    failed file copies.
    """
    pauser = HydrusData.BigJobPauser()
    MakeSureDirectoryExists( dest )
    num_errors = 0
    for ( root, dirnames, filenames ) in os.walk( source ):
        dest_root = root.replace( source, dest )
        for dirname in dirnames:
            pauser.Pause()
            source_path = os.path.join( root, dirname )
            dest_path = os.path.join( dest_root, dirname )
            MakeSureDirectoryExists( dest_path )
            shutil.copystat( source_path, dest_path )
        for filename in filenames:
            if num_errors > 5:
                raise Exception( 'Too many errors, directory copy abandoned.' )
            pauser.Pause()
            source_path = os.path.join( root, filename )
            dest_path = os.path.join( dest_root, filename )
            ok = MirrorFile( source_path, dest_path )
            if not ok:
                num_errors += 1
def CopyFileLikeToFileLike( f_source, f_dest ):
    """Stream f_source into f_dest in fixed-size blocks."""
    for block in ReadFileLikeAsBlocks( f_source ): f_dest.write( block )
def DeletePath( path ):
    """Delete a file or directory tree, making it writable first.
    
    Errors are reported to the user rather than raised; a Windows
    'file in use' error is logged quietly.
    """
    if HG.file_report_mode:
        HydrusData.ShowText( 'Deleting {}'.format( path ) )
        HydrusData.ShowText( ''.join( traceback.format_stack() ) )
    if os.path.exists( path ):
        MakeFileWritable( path )
        try:
            if os.path.isdir( path ):
                shutil.rmtree( path )
            else:
                os.remove( path )
        except Exception as e:
            if 'Error 32' in str( e ):
                # file in use by another process
                HydrusData.DebugPrint( 'Trying to delete ' + path + ' failed because it was in use by another process.' )
            else:
                HydrusData.ShowText( 'Trying to delete ' + path + ' caused the following error:' )
                HydrusData.ShowException( e )
def DirectoryIsWritable( path ):
    """Return True if a temporary file can be created inside path."""
    
    if not os.path.exists( path ):
        
        return False
        
    
    try:
        
        with tempfile.TemporaryFile( dir = path ):
            
            pass
            
        
        return True
        
    except:
        
        return False
def FilterFreePaths( paths ):
    """Return the subset of paths not currently locked by another process."""
    free_paths = []
    for path in paths:
        # potentially a long loop, so stay responsive to program shutdown
        HydrusThreading.CheckIfThreadShuttingDown()
        if PathIsFree( path ):
            free_paths.append( path )
    return free_paths
def GetCurrentTempDir():
    """Return the temp directory currently in use."""
    return tempfile.gettempdir()
def GetDefaultLaunchPath():
    """Return the platform's default 'open this file' command template.
    
    NOTE(review): implicitly returns None on unrecognised platforms.
    """
    if HC.PLATFORM_WINDOWS:
        return 'windows is called directly'
    elif HC.PLATFORM_MACOS:
        return 'open "%path%"'
    elif HC.PLATFORM_LINUX:
        return 'xdg-open "%path%"'
def GetDevice( path ):
    """Return the device string of the partition holding path, or None."""
    path = path.lower()
    try:
        partition_infos = psutil.disk_partitions( all = True )
        def sort_descending_mountpoint( partition_info ): # i.e. put '/home' before '/'
            return - len( partition_info.mountpoint )
        # longest mountpoint first so the most specific mount wins
        partition_infos.sort( key = sort_descending_mountpoint )
        for partition_info in partition_infos:
            if path.startswith( partition_info.mountpoint.lower() ):
                return partition_info.device
    except UnicodeDecodeError: # psutil can raise this on mountpoints with undecodable names
        return None
    return None
def GetFreeSpace( path ):
    """Return the free bytes on the partition holding path."""
    disk_usage = psutil.disk_usage( path )
    return disk_usage.free
def GetTempDir( dir = None ):
    """Create and return a new hydrus-prefixed temp directory."""
    return tempfile.mkdtemp( prefix = 'hydrus', dir = dir )
def SetEnvTempDir( path ):
    """Point the process's temp machinery (env vars and tempfile) at path.
    
    Raises if path is not a directory, cannot be created, or is not
    writable.
    """
    if os.path.exists( path ) and not os.path.isdir( path ):
        raise Exception( 'The given temp directory, "{}", does not seem to be a directory!'.format( path ) )
    try:
        MakeSureDirectoryExists( path )
    except Exception as e:
        raise Exception( 'Could not create the temp dir: {}'.format( e ) )
    if not DirectoryIsWritable( path ):
        raise Exception( 'The given temp directory, "{}", does not seem to be writable-to!'.format( path ) )
    # update the usual env vars so child processes inherit the choice
    for tmp_name in ( 'TMPDIR', 'TEMP', 'TMP' ):
        if tmp_name in os.environ:
            os.environ[ tmp_name ] = path
    tempfile.tempdir = path
def GetTempPath( suffix = '', dir = None ):
    """Create a hydrus-prefixed temp file; returns ( os_handle, path )."""
    return tempfile.mkstemp( suffix = suffix, prefix = 'hydrus', dir = dir )
def HasSpaceForDBTransaction( db_dir, num_bytes ):
    """Check there is room for a db transaction of num_bytes.
    
    Returns ( ok, message ).  With no_db_temp_files the journal lives in
    memory, so available RAM is checked; otherwise the temp partition is
    checked, with a doubled requirement when temp and db share a device.
    The 1.1 / 2.2 multipliers add headroom over the raw size.
    """
    if HG.no_db_temp_files:
        space_needed = int( num_bytes * 1.1 )
        # leave a fifth of available memory untouched
        approx_available_memory = psutil.virtual_memory().available * 4 / 5
        if approx_available_memory < num_bytes:
            return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' available memory, since you are running in no_db_temp_files mode, but you only seem to have ' + HydrusData.ToHumanBytes( approx_available_memory ) + '.' )
        db_disk_free_space = GetFreeSpace( db_dir )
        if db_disk_free_space < space_needed:
            return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, but you only seem to have ' + HydrusData.ToHumanBytes( db_disk_free_space ) + '.' )
    else:
        temp_dir = tempfile.gettempdir()
        temp_disk_free_space = GetFreeSpace( temp_dir )
        temp_and_db_on_same_device = GetDevice( temp_dir ) == GetDevice( db_dir )
        if temp_and_db_on_same_device:
            # one partition must hold both the journal and the db growth
            space_needed = int( num_bytes * 2.2 )
            if temp_disk_free_space < space_needed:
                return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, which I think also holds your temporary path, but you only seem to have ' + HydrusData.ToHumanBytes( temp_disk_free_space ) + '.' )
        else:
            space_needed = int( num_bytes * 1.1 )
            if temp_disk_free_space < space_needed:
                return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your temporary path\'s partition, which I think is ' + temp_dir + ', but you only seem to have ' + HydrusData.ToHumanBytes( temp_disk_free_space ) + '.' )
            db_disk_free_space = GetFreeSpace( db_dir )
            if db_disk_free_space < space_needed:
                return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, but you only seem to have ' + HydrusData.ToHumanBytes( db_disk_free_space ) + '.' )
    return ( True, 'You seem to have enough space!' )
def LaunchDirectory( path ):
    """Open path in the platform's file manager, on a background thread."""
    def do_it():
        if HC.PLATFORM_WINDOWS:
            os.startfile( path )
        else:
            if HC.PLATFORM_MACOS:
                cmd = [ 'open', path ]
            elif HC.PLATFORM_LINUX:
                cmd = [ 'xdg-open', path ]
            # setsid call un-childs this new process
            sbp_kwargs = HydrusData.GetSubprocessKWArgs()
            process = subprocess.Popen( cmd, preexec_fn = os.setsid, **sbp_kwargs )
            process.communicate()
    thread = threading.Thread( target = do_it )
    thread.daemon = True
    thread.start()
def LaunchFile( path, launch_path = None ):
    """Open path with the default or user-supplied launch command.
    
    launch_path, if given, is a template where '%path%' is replaced by
    the file path.  The launch happens on a background daemon thread.
    """
    def do_it( launch_path ):
        if HC.PLATFORM_WINDOWS and launch_path is None:
            os.startfile( path )
        else:
            if launch_path is None:
                launch_path = GetDefaultLaunchPath()
            complete_launch_path = launch_path.replace( '%path%', path )
            hide_terminal = False
            if HC.PLATFORM_WINDOWS:
                cmd = complete_launch_path
                preexec_fn = None
            else:
                cmd = shlex.split( complete_launch_path )
                # un-childs this new process
                preexec_fn = os.setsid
            if HG.subprocess_report_mode:
                message = 'Attempting to launch ' + path + ' using command ' + repr( cmd ) + '.'
                HydrusData.ShowText( message )
            try:
                sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = hide_terminal, text = True )
                process = subprocess.Popen( cmd, preexec_fn = preexec_fn, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, **sbp_kwargs )
                ( stdout, stderr ) = process.communicate()
                if HG.subprocess_report_mode:
                    if stdout is None and stderr is None:
                        HydrusData.ShowText( 'No stdout or stderr came back.' )
                    if stdout is not None:
                        HydrusData.ShowText( 'stdout: ' + repr( stdout ) )
                    if stderr is not None:
                        HydrusData.ShowText( 'stderr: ' + repr( stderr ) )
            except Exception as e:
                HydrusData.ShowText( 'Could not launch a file! Command used was:' + os.linesep + str( cmd ) )
                HydrusData.ShowException( e )
    thread = threading.Thread( target = do_it, args = ( launch_path, ) )
    thread.daemon = True
    thread.start()
def MakeSureDirectoryExists( path ):
    """Create path (and any missing parents); a no-op if it already exists."""
    
    if not os.path.isdir( path ):
        
        os.makedirs( path, exist_ok = True )
def MakeFileWritable( path ):
    """Best-effort chmod so the current user can write (and read) path.
    
    Missing paths are ignored; chmod errors are logged, not raised.
    """
    if not os.path.exists( path ):
        return
    try:
        stat_result = os.stat( path )
        current_bits = stat_result.st_mode
        if HC.PLATFORM_WINDOWS:
            # this is actually the same value as S_IWUSR, but let's not try to second guess ourselves
            desired_bits = stat.S_IREAD | stat.S_IWRITE
        else:
            # guarantee 644 for regular files
            desired_bits = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        if not ( desired_bits & current_bits ) == desired_bits:
            os.chmod( path, current_bits | desired_bits )
    except Exception as e:
        HydrusData.Print( 'Wanted to add write permission to "{}", but had an error: {}'.format( path, str( e ) ) )
def MergeFile( source, dest ):
    """Move source onto dest, or just delete source if they already match.
    
    Returns True on success, False if the move raised.
    """
    if not os.path.isdir( source ):
        MakeFileWritable( source )
    if PathsHaveSameSizeAndDate( source, dest ):
        # dest already has this content; drop the duplicate source
        DeletePath( source )
    else:
        try:
            # this overwrites on conflict without hassle
            shutil.move( source, dest )
        except Exception as e:
            HydrusData.ShowText( 'Trying to move ' + source + ' to ' + dest + ' caused the following problem:' )
            HydrusData.ShowException( e )
            return False
    return True
def MergeTree( source, dest, text_update_hook = None ):
    """Move the source tree into dest, merging file-by-file when needed.
    
    Fast path: a straight shutil.move when dest is absent (or a move of
    each child when dest is empty).  Otherwise walks source, merging each
    file and aborting after more than five failures; source is deleted
    only if everything merged cleanly.
    """
    pauser = HydrusData.BigJobPauser()
    if not os.path.exists( dest ):
        try:
            shutil.move( source, dest )
        except OSError:
            # if there were read only files in source and this was partition to partition, the copy2 goes ok but the subsequent source unlink fails
            # so, if it seems this has happened, let's just try a walking mergetree, which should be able to deal with these readonlies on a file-by-file basis
            if os.path.exists( dest ):
                MergeTree( source, dest, text_update_hook = text_update_hook )
    else:
        if len( os.listdir( dest ) ) == 0:
            for filename in os.listdir( source ):
                source_path = os.path.join( source, filename )
                dest_path = os.path.join( dest, filename )
                if not os.path.isdir( source_path ):
                    MakeFileWritable( source_path )
                shutil.move( source_path, dest_path )
        else:
            num_errors = 0
            for ( root, dirnames, filenames ) in os.walk( source ):
                if text_update_hook is not None:
                    text_update_hook( 'Copying ' + root + '.' )
                dest_root = root.replace( source, dest )
                for dirname in dirnames:
                    pauser.Pause()
                    source_path = os.path.join( root, dirname )
                    dest_path = os.path.join( dest_root, dirname )
                    MakeSureDirectoryExists( dest_path )
                    shutil.copystat( source_path, dest_path )
                for filename in filenames:
                    if num_errors > 5:
                        raise Exception( 'Too many errors, directory move abandoned.' )
                    pauser.Pause()
                    source_path = os.path.join( root, filename )
                    dest_path = os.path.join( dest_root, filename )
                    ok = MergeFile( source_path, dest_path )
                    if not ok:
                        num_errors += 1
            if num_errors == 0:
                DeletePath( source )
def MirrorFile( source, dest ):
    """Copy source over dest unless they already match by size and mtime.
    
    Returns True on success, False if the copy raised.
    """
    if not PathsHaveSameSizeAndDate( source, dest ):
        try:
            MakeFileWritable( dest )
            # this overwrites on conflict without hassle
            shutil.copy2( source, dest )
        except Exception as e:
            HydrusData.ShowText( 'Trying to copy ' + source + ' to ' + dest + ' caused the following problem:' )
            HydrusData.ShowException( e )
            return False
    return True
def MirrorTree( source, dest, text_update_hook = None, is_cancelled_hook = None ):
    """Make dest an exact mirror of source.
    
    Files/dirs missing from source are deleted from dest; differing files
    are copied.  Aborts after more than five copy failures; the optional
    hooks provide progress text and cancellation.
    """
    pauser = HydrusData.BigJobPauser()
    MakeSureDirectoryExists( dest )
    num_errors = 0
    for ( root, dirnames, filenames ) in os.walk( source ):
        if is_cancelled_hook is not None and is_cancelled_hook():
            return
        if text_update_hook is not None:
            text_update_hook( 'Copying ' + root + '.' )
        dest_root = root.replace( source, dest )
        # anything left in this set after the walk exists only in dest
        surplus_dest_paths = { os.path.join( dest_root, dest_filename ) for dest_filename in os.listdir( dest_root ) }
        for dirname in dirnames:
            pauser.Pause()
            source_path = os.path.join( root, dirname )
            dest_path = os.path.join( dest_root, dirname )
            surplus_dest_paths.discard( dest_path )
            MakeSureDirectoryExists( dest_path )
            shutil.copystat( source_path, dest_path )
        for filename in filenames:
            if num_errors > 5:
                raise Exception( 'Too many errors, directory copy abandoned.' )
            pauser.Pause()
            source_path = os.path.join( root, filename )
            dest_path = os.path.join( dest_root, filename )
            surplus_dest_paths.discard( dest_path )
            ok = MirrorFile( source_path, dest_path )
            if not ok:
                num_errors += 1
        for dest_path in surplus_dest_paths:
            pauser.Pause()
            DeletePath( dest_path )
def OpenFileLocation( path ):
    """Reveal path in the platform file manager, on a background thread.
    
    Not implemented on Linux.
    """
    def do_it():
        if HC.PLATFORM_WINDOWS:
            cmd = [ 'explorer', '/select,', path ]
        elif HC.PLATFORM_MACOS:
            cmd = [ 'open', '-R', path ]
        elif HC.PLATFORM_LINUX:
            raise NotImplementedError( 'Linux cannot open file locations!' )
        sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = False )
        process = subprocess.Popen( cmd, **sbp_kwargs )
        process.communicate()
    thread = threading.Thread( target = do_it )
    thread.daemon = True
    thread.start()
def PathsHaveSameSizeAndDate( path1, path2 ):
    """Return True only when both paths exist and share byte size and
    integer-second modification time."""
    
    if not ( os.path.exists( path1 ) and os.path.exists( path2 ) ):
        
        return False
        
    
    sizes_match = os.path.getsize( path1 ) == os.path.getsize( path2 )
    mtimes_match = int( os.path.getmtime( path1 ) ) == int( os.path.getmtime( path2 ) )
    
    return sizes_match and mtimes_match
def PathIsFree( path ):
    """Return True if path appears unlocked by other processes.
    
    Uses a rename-to-itself probe, which fails on Windows while another
    process holds the file open.  Read-only files skip the probe.
    """
    try:
        stat_result = os.stat( path )
        current_bits = stat_result.st_mode
        if not current_bits & stat.S_IWRITE:
            # read-only file, cannot do the rename check
            return True
        os.rename( path, path ) # rename a path to itself
        return True
    except OSError as e: # 'already in use by another process' or an odd filename too long error
        HydrusData.Print( 'Already in use/inaccessible: ' + path )
        return False
def ReadFileLikeAsBlocks( f ):
    """Generator yielding successive READ_BLOCK_SIZE chunks of f until EOF."""
    
    while True:
        
        block = f.read( HC.READ_BLOCK_SIZE )
        
        if len( block ) == 0:
            
            return
            
        
        yield block
def RecyclePath( path ):
    """Send path to the OS recycle bin, falling back to full deletion.
    
    The path is made writable first; recycle failures are logged and the
    path is then hard-deleted.
    """
    if HG.file_report_mode:
        HydrusData.ShowText( 'Recycling {}'.format( path ) )
        HydrusData.ShowText( ''.join( traceback.format_stack() ) )
    if os.path.exists( path ):
        MakeFileWritable( path )
        try:
            send2trash.send2trash( path )
        except:
            # recycling can fail for many platform reasons; delete outright
            HydrusData.Print( 'Trying to recycle ' + path + ' created this error:' )
            HydrusData.DebugPrint( traceback.format_exc() )
            HydrusData.Print( 'It has been fully deleted instead.' )
            DeletePath( path )
def SanitizeFilename( filename ):
    """Replace characters that are illegal in filenames with underscores."""
    
    if HC.PLATFORM_WINDOWS:
        
        # Windows forbids \ / : * ? " < > |
        illegal_pattern = r'\\|/|:|\*|\?|"|<|>|\|'
        
    else:
        
        illegal_pattern = '/'
        
    
    return re.sub( illegal_pattern, '_', filename )
| 27.021277 | 263 | 0.474685 | import gc
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusGlobals as HG
from . import HydrusThreading
import os
import psutil
import re
import send2trash
import shlex
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import traceback
TEMP_PATH_LOCK = threading.Lock()
IN_USE_TEMP_PATHS = set()
def AddBaseDirToEnvPath():
if 'PATH' in os.environ:
os.environ[ 'PATH' ] = HC.BASE_DIR + os.pathsep + os.environ[ 'PATH' ]
def AppendPathUntilNoConflicts( path ):
( path_absent_ext, ext ) = os.path.splitext( path )
good_path_absent_ext = path_absent_ext
i = 0
while os.path.exists( good_path_absent_ext + ext ):
good_path_absent_ext = path_absent_ext + '_' + str( i )
i += 1
return good_path_absent_ext + ext
def CleanUpTempPath( os_file_handle, temp_path ):
try:
os.close( os_file_handle )
except OSError:
gc.collect()
try:
os.close( os_file_handle )
except OSError:
HydrusData.Print( 'Could not close the temporary file ' + temp_path )
return
try:
os.remove( temp_path )
except OSError:
with TEMP_PATH_LOCK:
IN_USE_TEMP_PATHS.add( ( HydrusData.GetNow(), temp_path ) )
def CleanUpOldTempPaths():
with TEMP_PATH_LOCK:
data = list( IN_USE_TEMP_PATHS )
for row in data:
( time_failed, temp_path ) = row
if HydrusData.TimeHasPassed( time_failed + 60 ):
try:
os.remove( temp_path )
IN_USE_TEMP_PATHS.discard( row )
except OSError:
if HydrusData.TimeHasPassed( time_failed + 600 ):
IN_USE_TEMP_PATHS.discard( row )
def ConvertAbsPathToPortablePath( abs_path, base_dir_override = None ):
try:
if base_dir_override is None:
base_dir = HG.controller.GetDBDir()
else:
base_dir = base_dir_override
portable_path = os.path.relpath( abs_path, base_dir )
if portable_path.startswith( '..' ):
portable_path = abs_path
except:
portable_path = abs_path
if HC.PLATFORM_WINDOWS:
portable_path = portable_path.replace( '\\', '/' )
return portable_path
def ConvertPortablePathToAbsPath( portable_path, base_dir_override = None ):
portable_path = os.path.normpath( portable_path )
if os.path.isabs( portable_path ):
abs_path = portable_path
else:
if base_dir_override is None:
base_dir = HG.controller.GetDBDir()
else:
base_dir = base_dir_override
abs_path = os.path.normpath( os.path.join( base_dir, portable_path ) )
if not HC.PLATFORM_WINDOWS and not os.path.exists( abs_path ):
abs_path = abs_path.replace( '\\', '/' )
return abs_path
def CopyAndMergeTree( source, dest ):
pauser = HydrusData.BigJobPauser()
MakeSureDirectoryExists( dest )
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
dest_root = root.replace( source, dest )
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory copy abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
ok = MirrorFile( source_path, dest_path )
if not ok:
num_errors += 1
def CopyFileLikeToFileLike( f_source, f_dest ):
    """Stream the readable *f_source* into the writable *f_dest*, one block at a time."""
    for chunk in ReadFileLikeAsBlocks( f_source ):
        f_dest.write( chunk )
def DeletePath( path ):
if HG.file_report_mode:
HydrusData.ShowText( 'Deleting {}'.format( path ) )
HydrusData.ShowText( ''.join( traceback.format_stack() ) )
if os.path.exists( path ):
MakeFileWritable( path )
try:
if os.path.isdir( path ):
shutil.rmtree( path )
else:
os.remove( path )
except Exception as e:
if 'Error 32' in str( e ):
HydrusData.DebugPrint( 'Trying to delete ' + path + ' failed because it was in use by another process.' )
else:
HydrusData.ShowText( 'Trying to delete ' + path + ' caused the following error:' )
HydrusData.ShowException( e )
def DirectoryIsWritable( path ):
    """Return True if *path* is an existing directory we can create files in.

    Probes by opening (and immediately discarding) a temporary file inside the
    directory, which tests real filesystem permissions rather than mode bits.
    """
    if not os.path.exists( path ):
        return False
    try:
        t = tempfile.TemporaryFile( dir = path )
        t.close()
        return True
    except OSError:
        # was a bare 'except:'; any filesystem failure (permissions, read-only
        # mount, path not a directory) surfaces as OSError here
        return False
def FilterFreePaths( paths ):
free_paths = []
for path in paths:
HydrusThreading.CheckIfThreadShuttingDown()
if PathIsFree( path ):
free_paths.append( path )
return free_paths
def GetCurrentTempDir():
    """Return the directory the tempfile module currently writes temporary files to."""
    return tempfile.gettempdir()
def GetDefaultLaunchPath():
if HC.PLATFORM_WINDOWS:
return 'windows is called directly'
elif HC.PLATFORM_MACOS:
return 'open "%path%"'
elif HC.PLATFORM_LINUX:
return 'xdg-open "%path%"'
def GetDevice( path ):
path = path.lower()
try:
partition_infos = psutil.disk_partitions( all = True )
def sort_descending_mountpoint( partition_info ):
return - len( partition_info.mountpoint )
partition_infos.sort( key = sort_descending_mountpoint )
for partition_info in partition_infos:
if path.startswith( partition_info.mountpoint.lower() ):
return partition_info.device
except UnicodeDecodeError:
return None
return None
def GetFreeSpace( path ):
disk_usage = psutil.disk_usage( path )
return disk_usage.free
def GetTempDir( dir = None ):
return tempfile.mkdtemp( prefix = 'hydrus', dir = dir )
def SetEnvTempDir( path ):
if os.path.exists( path ) and not os.path.isdir( path ):
raise Exception( 'The given temp directory, "{}", does not seem to be a directory!'.format( path ) )
try:
MakeSureDirectoryExists( path )
except Exception as e:
raise Exception( 'Could not create the temp dir: {}'.format( e ) )
if not DirectoryIsWritable( path ):
raise Exception( 'The given temp directory, "{}", does not seem to be writable-to!'.format( path ) )
for tmp_name in ( 'TMPDIR', 'TEMP', 'TMP' ):
if tmp_name in os.environ:
os.environ[ tmp_name ] = path
tempfile.tempdir = path
def GetTempPath( suffix = '', dir = None ):
return tempfile.mkstemp( suffix = suffix, prefix = 'hydrus', dir = dir )
def HasSpaceForDBTransaction( db_dir, num_bytes ):
if HG.no_db_temp_files:
space_needed = int( num_bytes * 1.1 )
approx_available_memory = psutil.virtual_memory().available * 4 / 5
if approx_available_memory < num_bytes:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' available memory, since you are running in no_db_temp_files mode, but you only seem to have ' + HydrusData.ToHumanBytes( approx_available_memory ) + '.' )
db_disk_free_space = GetFreeSpace( db_dir )
if db_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, but you only seem to have ' + HydrusData.ToHumanBytes( db_disk_free_space ) + '.' )
else:
temp_dir = tempfile.gettempdir()
temp_disk_free_space = GetFreeSpace( temp_dir )
temp_and_db_on_same_device = GetDevice( temp_dir ) == GetDevice( db_dir )
if temp_and_db_on_same_device:
space_needed = int( num_bytes * 2.2 )
if temp_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, which I think also holds your temporary path, but you only seem to have ' + HydrusData.ToHumanBytes( temp_disk_free_space ) + '.' )
else:
space_needed = int( num_bytes * 1.1 )
if temp_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your temporary path\'s partition, which I think is ' + temp_dir + ', but you only seem to have ' + HydrusData.ToHumanBytes( temp_disk_free_space ) + '.' )
db_disk_free_space = GetFreeSpace( db_dir )
if db_disk_free_space < space_needed:
return ( False, 'I believe you need about ' + HydrusData.ToHumanBytes( space_needed ) + ' on your db\'s partition, but you only seem to have ' + HydrusData.ToHumanBytes( db_disk_free_space ) + '.' )
return ( True, 'You seem to have enough space!' )
def LaunchDirectory( path ):
def do_it():
if HC.PLATFORM_WINDOWS:
os.startfile( path )
else:
if HC.PLATFORM_MACOS:
cmd = [ 'open', path ]
elif HC.PLATFORM_LINUX:
cmd = [ 'xdg-open', path ]
# setsid call un-childs this new process
sbp_kwargs = HydrusData.GetSubprocessKWArgs()
process = subprocess.Popen( cmd, preexec_fn = os.setsid, **sbp_kwargs )
process.communicate()
thread = threading.Thread( target = do_it )
thread.daemon = True
thread.start()
def LaunchFile( path, launch_path = None ):
def do_it( launch_path ):
if HC.PLATFORM_WINDOWS and launch_path is None:
os.startfile( path )
else:
if launch_path is None:
launch_path = GetDefaultLaunchPath()
complete_launch_path = launch_path.replace( '%path%', path )
hide_terminal = False
if HC.PLATFORM_WINDOWS:
cmd = complete_launch_path
preexec_fn = None
else:
cmd = shlex.split( complete_launch_path )
# un-childs this new process
preexec_fn = os.setsid
if HG.subprocess_report_mode:
message = 'Attempting to launch ' + path + ' using command ' + repr( cmd ) + '.'
HydrusData.ShowText( message )
try:
sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = hide_terminal, text = True )
process = subprocess.Popen( cmd, preexec_fn = preexec_fn, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, **sbp_kwargs )
( stdout, stderr ) = process.communicate()
if HG.subprocess_report_mode:
if stdout is None and stderr is None:
HydrusData.ShowText( 'No stdout or stderr came back.' )
if stdout is not None:
HydrusData.ShowText( 'stdout: ' + repr( stdout ) )
if stderr is not None:
HydrusData.ShowText( 'stderr: ' + repr( stderr ) )
except Exception as e:
HydrusData.ShowText( 'Could not launch a file! Command used was:' + os.linesep + str( cmd ) )
HydrusData.ShowException( e )
thread = threading.Thread( target = do_it, args = ( launch_path, ) )
thread.daemon = True
thread.start()
def MakeSureDirectoryExists( path ):
    """Create *path* and any missing parent directories; a no-op if it already exists."""
    os.makedirs( path, exist_ok = True )
def MakeFileWritable( path ):
if not os.path.exists( path ):
return
try:
stat_result = os.stat( path )
current_bits = stat_result.st_mode
if HC.PLATFORM_WINDOWS:
# this is actually the same value as S_IWUSR, but let's not try to second guess ourselves
desired_bits = stat.S_IREAD | stat.S_IWRITE
else:
desired_bits = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if not ( desired_bits & current_bits ) == desired_bits:
os.chmod( path, current_bits | desired_bits )
except Exception as e:
HydrusData.Print( 'Wanted to add write permission to "{}", but had an error: {}'.format( path, str( e ) ) )
def MergeFile( source, dest ):
if not os.path.isdir( source ):
MakeFileWritable( source )
if PathsHaveSameSizeAndDate( source, dest ):
DeletePath( source )
else:
try:
shutil.move( source, dest )
except Exception as e:
HydrusData.ShowText( 'Trying to move ' + source + ' to ' + dest + ' caused the following problem:' )
HydrusData.ShowException( e )
return False
return True
def MergeTree( source, dest, text_update_hook = None ):
pauser = HydrusData.BigJobPauser()
if not os.path.exists( dest ):
try:
shutil.move( source, dest )
except OSError:
if os.path.exists( dest ):
MergeTree( source, dest, text_update_hook = text_update_hook )
else:
if len( os.listdir( dest ) ) == 0:
for filename in os.listdir( source ):
source_path = os.path.join( source, filename )
dest_path = os.path.join( dest, filename )
if not os.path.isdir( source_path ):
MakeFileWritable( source_path )
shutil.move( source_path, dest_path )
else:
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
if text_update_hook is not None:
text_update_hook( 'Copying ' + root + '.' )
dest_root = root.replace( source, dest )
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory move abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
ok = MergeFile( source_path, dest_path )
if not ok:
num_errors += 1
if num_errors == 0:
DeletePath( source )
def MirrorFile( source, dest ):
    """Copy *source* over *dest* unless they already match by size and mtime.

    Returns True on success (or when no copy was needed), False if the copy
    failed; failures are shown to the user rather than raised.
    """
    if not PathsHaveSameSizeAndDate( source, dest ):
        try:
            # clear any read-only bit so the overwrite can succeed
            MakeFileWritable( dest )
            # this overwrites on conflict without hassle
            shutil.copy2( source, dest )
        except Exception as e:
            HydrusData.ShowText( 'Trying to copy ' + source + ' to ' + dest + ' caused the following problem:' )
            HydrusData.ShowException( e )
            return False
    return True
def MirrorTree( source, dest, text_update_hook = None, is_cancelled_hook = None ):
pauser = HydrusData.BigJobPauser()
MakeSureDirectoryExists( dest )
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
if is_cancelled_hook is not None and is_cancelled_hook():
return
if text_update_hook is not None:
text_update_hook( 'Copying ' + root + '.' )
dest_root = root.replace( source, dest )
surplus_dest_paths = { os.path.join( dest_root, dest_filename ) for dest_filename in os.listdir( dest_root ) }
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
surplus_dest_paths.discard( dest_path )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory copy abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
surplus_dest_paths.discard( dest_path )
ok = MirrorFile( source_path, dest_path )
if not ok:
num_errors += 1
for dest_path in surplus_dest_paths:
pauser.Pause()
DeletePath( dest_path )
def OpenFileLocation( path ):
def do_it():
if HC.PLATFORM_WINDOWS:
cmd = [ 'explorer', '/select,', path ]
elif HC.PLATFORM_MACOS:
cmd = [ 'open', '-R', path ]
elif HC.PLATFORM_LINUX:
raise NotImplementedError( 'Linux cannot open file locations!' )
sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = False )
process = subprocess.Popen( cmd, **sbp_kwargs )
process.communicate()
thread = threading.Thread( target = do_it )
thread.daemon = True
thread.start()
def PathsHaveSameSizeAndDate( path1, path2 ):
    """Return True when both paths exist with equal byte size and equal mtime (whole seconds)."""
    if not ( os.path.exists( path1 ) and os.path.exists( path2 ) ):
        return False
    sizes_match = os.path.getsize( path1 ) == os.path.getsize( path2 )
    # mtimes are truncated to whole seconds to tolerate filesystem granularity
    times_match = int( os.path.getmtime( path1 ) ) == int( os.path.getmtime( path2 ) )
    return sizes_match and times_match
def PathIsFree( path ):
    """Best-effort check that *path* is not currently locked or open elsewhere.

    Renaming a file onto itself is the probe: it fails while another process
    holds the file (presumably a Windows sharing violation -- confirm), and
    succeeds otherwise. Read-only files skip the probe and are assumed free.
    """
    try:
        stat_result = os.stat( path )
        current_bits = stat_result.st_mode
        if not current_bits & stat.S_IWRITE:
            # read-only file, cannot do the rename check
            return True
        os.rename( path, path ) # rename a path to itself
        return True
    except OSError as e: # 'already in use by another process' or an odd filename too long error
        HydrusData.Print( 'Already in use/inaccessible: ' + path )
        return False
def ReadFileLikeAsBlocks( f ):
    """Yield successive chunks of up to HC.READ_BLOCK_SIZE bytes read from the file-like *f*."""
    while True:
        block = f.read( HC.READ_BLOCK_SIZE )
        if len( block ) == 0:
            return
        yield block
def RecyclePath( path ):
if HG.file_report_mode:
HydrusData.ShowText( 'Recycling {}'.format( path ) )
HydrusData.ShowText( ''.join( traceback.format_stack() ) )
if os.path.exists( path ):
MakeFileWritable( path )
try:
send2trash.send2trash( path )
except:
HydrusData.Print( 'Trying to recycle ' + path + ' created this error:' )
HydrusData.DebugPrint( traceback.format_exc() )
HydrusData.Print( 'It has been fully deleted instead.' )
DeletePath( path )
def SanitizeFilename( filename ):
    """Replace characters that are illegal in filenames on the current platform with underscores."""
    if HC.PLATFORM_WINDOWS:
        # Windows forbids: \, /, :, *, ?, ", <, >, |
        return re.sub( r'[\\/:*?"<>|]', '_', filename )
    # other platforms only need the path separator replaced
    return filename.replace( '/', '_' )
| true | true |
1c3455ca9c971c916f4933e0da598e6c42614779 | 839 | py | Python | redis/komand_redis/connection/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | redis/komand_redis/connection/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | redis/komand_redis/connection/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    # String constants naming the connection's input fields (keys of the JSON schema below).
    DB = "db"
    HOST = "host"
    PORT = "port"
class ConnectionSchema(komand.Input):
    """Connection input schema for the Redis plugin (host, port, db number)."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "db": {
      "type": "integer",
      "title": "Db",
      "description": "Db to use usually (0-15)",
      "default": 0,
      "order": 3
    },
    "host": {
      "type": "string",
      "title": "Host",
      "description": "Host, e.g. 10.4.4.4",
      "order": 1
    },
    "port": {
      "type": "integer",
      "title": "Port",
      "description": "Port",
      "default": 6379,
      "order": 2
    }
  },
  "required": [
    "host",
    "port",
    "db"
  ]
}
    """)

    def __init__(self):
        # NOTE: the generated `super(self.__class__, self)` form recurses
        # forever if this class is ever subclassed; name the class explicitly.
        super(ConnectionSchema, self).__init__(self.schema)
| 17.122449 | 57 | 0.481526 |
import komand
import json
class Input:
DB = "db"
HOST = "host"
PORT = "port"
class ConnectionSchema(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"db": {
"type": "integer",
"title": "Db",
"description": "Db to use usually (0-15)",
"default": 0,
"order": 3
},
"host": {
"type": "string",
"title": "Host",
"description": "Host, e.g. 10.4.4.4",
"order": 1
},
"port": {
"type": "integer",
"title": "Port",
"description": "Port",
"default": 6379,
"order": 2
}
},
"required": [
"host",
"port",
"db"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| true | true |
1c3457250f101412380d1086885f23899c6583c6 | 3,076 | py | Python | tests/test_meta_info_in_datapipe_events.py | epoch8/datapipe | 2358e13f6699b44950dc87fda6136f34fa719094 | [
"BSD-3-Clause"
] | 2 | 2021-12-20T10:17:25.000Z | 2022-02-16T09:00:51.000Z | tests/test_meta_info_in_datapipe_events.py | epoch8/datapipe | 2358e13f6699b44950dc87fda6136f34fa719094 | [
"BSD-3-Clause"
] | 25 | 2021-12-18T21:19:19.000Z | 2022-03-30T18:53:22.000Z | tests/test_meta_info_in_datapipe_events.py | epoch8/datapipe | 2358e13f6699b44950dc87fda6136f34fa719094 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
from sqlalchemy.sql.expression import select
from datapipe.run_config import RunConfig
from datapipe.store.database import TableStoreDB
from datapipe.datatable import DataStore
from datapipe.compute import Catalog, Pipeline,\
Table
from datapipe.core_steps import BatchTransform, BatchGenerate
from datapipe.compute import run_pipeline
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer, JSON
TEST_SCHEMA = [
Column('pipeline_id', Integer(), primary_key=True),
Column('offer_id', Integer(), primary_key=True),
Column('test_field', JSON)
]
def generate_data():
    """Yield a single one-row DataFrame that seeds the test pipeline."""
    seed_row = {
        "pipeline_id": 1,
        "offer_id": 1,
        "test_field": {"a": 1},
    }
    yield pd.DataFrame(data=[seed_row])
def update_data(df: pd.DataFrame) -> pd.DataFrame:
    """Add ``"b": 2`` to every row's ``test_field`` dict and stringify the index.

    Bug fix: the original called ``.apply`` but discarded its result, so the
    enrichment was a no-op ({**x, ...} builds a new dict); assign it back so
    the returned frame actually carries the extra key.
    """
    df["test_field"] = df["test_field"].apply(lambda x: {**x, "b": 2})
    df.index = df.index.astype('str')
    return df
def test_meta_info_in_datapipe_events(dbconn) -> None:
ds = DataStore(dbconn)
run_config = RunConfig(
filters={
"pipeline_id": 1
},
labels={
"pipeline_name": 'test_name',
"pipeline_id": 1
}
)
catalog = Catalog({
'test_generate': Table(
store=TableStoreDB(
dbconn,
'test_generate_data',
TEST_SCHEMA
)
),
'test_transform': Table(
store=TableStoreDB(
dbconn,
'test_transform_data',
TEST_SCHEMA
)
)
})
pipeline = Pipeline([
BatchGenerate(
generate_data,
outputs=["test_generate"],
),
BatchTransform(
update_data,
inputs=["test_generate"],
outputs=["test_transform"],
)
])
run_pipeline(ds, catalog, pipeline, run_config)
df_events = pd.read_sql_query(select(catalog.get_datatable(ds, 'test_generate').event_logger.events_table), dbconn.con)
assert df_events.loc[0]["event"] == {
"meta": {
"labels": {
"step_name": "generate_data",
"pipeline_name": "test_name",
"pipeline_id": 1,
},
"filters": {
"pipeline_id": 1,
}
},
"data": {
"table_name": "test_generate",
"added_count": 1,
"updated_count": 0,
"deleted_count": 0,
"processed_count": 1
}
}
assert df_events.loc[1]["event"] == {
"meta": {
"labels": {
"step_name": "update_data",
"pipeline_name": "test_name",
"pipeline_id": 1,
},
"filters": {
"pipeline_id": 1,
}
},
"data": {
"table_name": "test_transform",
"added_count": 1,
"updated_count": 0,
"deleted_count": 0,
"processed_count": 1
}
}
| 25.00813 | 123 | 0.525033 | import pandas as pd
from sqlalchemy.sql.expression import select
from datapipe.run_config import RunConfig
from datapipe.store.database import TableStoreDB
from datapipe.datatable import DataStore
from datapipe.compute import Catalog, Pipeline,\
Table
from datapipe.core_steps import BatchTransform, BatchGenerate
from datapipe.compute import run_pipeline
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer, JSON
TEST_SCHEMA = [
Column('pipeline_id', Integer(), primary_key=True),
Column('offer_id', Integer(), primary_key=True),
Column('test_field', JSON)
]
def generate_data():
df_data = [{
"pipeline_id": 1,
"offer_id": 1,
"test_field": {"a": 1}
}]
yield pd.DataFrame(data=df_data)
def update_data(df: pd.DataFrame) -> pd.DataFrame:
df["test_field"].apply(lambda x: {**x, "b": 2})
df.index = df.index.astype('str')
return df
def test_meta_info_in_datapipe_events(dbconn) -> None:
ds = DataStore(dbconn)
run_config = RunConfig(
filters={
"pipeline_id": 1
},
labels={
"pipeline_name": 'test_name',
"pipeline_id": 1
}
)
catalog = Catalog({
'test_generate': Table(
store=TableStoreDB(
dbconn,
'test_generate_data',
TEST_SCHEMA
)
),
'test_transform': Table(
store=TableStoreDB(
dbconn,
'test_transform_data',
TEST_SCHEMA
)
)
})
pipeline = Pipeline([
BatchGenerate(
generate_data,
outputs=["test_generate"],
),
BatchTransform(
update_data,
inputs=["test_generate"],
outputs=["test_transform"],
)
])
run_pipeline(ds, catalog, pipeline, run_config)
df_events = pd.read_sql_query(select(catalog.get_datatable(ds, 'test_generate').event_logger.events_table), dbconn.con)
assert df_events.loc[0]["event"] == {
"meta": {
"labels": {
"step_name": "generate_data",
"pipeline_name": "test_name",
"pipeline_id": 1,
},
"filters": {
"pipeline_id": 1,
}
},
"data": {
"table_name": "test_generate",
"added_count": 1,
"updated_count": 0,
"deleted_count": 0,
"processed_count": 1
}
}
assert df_events.loc[1]["event"] == {
"meta": {
"labels": {
"step_name": "update_data",
"pipeline_name": "test_name",
"pipeline_id": 1,
},
"filters": {
"pipeline_id": 1,
}
},
"data": {
"table_name": "test_transform",
"added_count": 1,
"updated_count": 0,
"deleted_count": 0,
"processed_count": 1
}
}
| true | true |
1c3457b3679d6e51416fa4b3b071fcd14187e1ea | 50 | py | Python | final_project/machinetranslation/__init__.py | aneeshmraj/xzceb-flask_eng_fr | 0c39955f8d478b9a39c4699ee5a34a0358567fe2 | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/__init__.py | aneeshmraj/xzceb-flask_eng_fr | 0c39955f8d478b9a39c4699ee5a34a0358567fe2 | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/__init__.py | aneeshmraj/xzceb-flask_eng_fr | 0c39955f8d478b9a39c4699ee5a34a0358567fe2 | [
"Apache-2.0"
] | null | null | null | from . import translator
# Re-export every public name from the sibling translator module at package level.
from .translator import *
from .translator import * | true | true |
1c3457fc93584b1aaf78aaf5fba0b0196d489046 | 5,561 | py | Python | ckanext/stats/tests/test_stats_lib.py | Gnafu/ckan | d81f69b90291e50ef7e85821ccb83daa94eb3bb7 | [
"BSD-3-Clause"
] | 2 | 2021-02-19T20:06:52.000Z | 2021-04-15T20:42:11.000Z | ckanext/stats/tests/test_stats_lib.py | Gnafu/ckan | d81f69b90291e50ef7e85821ccb83daa94eb3bb7 | [
"BSD-3-Clause"
] | null | null | null | ckanext/stats/tests/test_stats_lib.py | Gnafu/ckan | d81f69b90291e50ef7e85821ccb83daa94eb3bb7 | [
"BSD-3-Clause"
] | 4 | 2016-12-17T22:26:06.000Z | 2017-01-20T21:51:24.000Z | import datetime
from nose.tools import assert_equal
from ckan.lib.create_test_data import CreateTestData
from ckan import model
from ckanext.stats.stats import Stats, RevisionStats
from ckanext.stats.tests import StatsFixture
class TestStatsPlugin(StatsFixture):
@classmethod
def setup_class(cls):
super(TestStatsPlugin, cls).setup_class()
CreateTestData.create_arbitrary([
{'name':'test1', 'groups':['grp1'], 'tags':['tag1']},
{'name':'test2', 'groups':['grp1', 'grp2'], 'tags':['tag1']},
{'name':'test3', 'groups':['grp1', 'grp2'], 'tags':['tag1', 'tag2']},
{'name':'test4'},
],
extra_user_names=['bob'],
admins=['bob'],
)
# hack revision timestamps to be this date
week1 = datetime.datetime(2011, 1, 5)
for rev in model.Session.query(model.Revision):
rev.timestamp = week1 + datetime.timedelta(seconds=1)
# week 2
rev = model.repo.new_revision()
rev.author = 'bob'
rev.timestamp = datetime.datetime(2011, 1, 12)
model.Package.by_name(u'test2').delete()
model.repo.commit_and_remove()
# week 3
rev = model.repo.new_revision()
rev.author = 'sandra'
rev.timestamp = datetime.datetime(2011, 1, 19)
model.Package.by_name(u'test3').title = 'Test 3'
model.repo.commit_and_remove()
rev = model.repo.new_revision()
rev.author = 'sandra'
rev.timestamp = datetime.datetime(2011, 1, 20)
model.Package.by_name(u'test4').title = 'Test 4'
model.repo.commit_and_remove()
# week 4
rev = model.repo.new_revision()
rev.author = 'bob'
rev.timestamp = datetime.datetime(2011, 1, 26)
model.Package.by_name(u'test3').notes = 'Test 3 notes'
model.repo.commit_and_remove()
def test_top_rated_packages(self):
pkgs = Stats.top_rated_packages()
assert pkgs == []
def test_most_edited_packages(self):
pkgs = Stats.most_edited_packages()
pkgs = [(pkg.name, count) for pkg, count in pkgs]
assert_equal(pkgs[0], ('test3', 3))
assert_equal(pkgs[1][1], 2)
assert_equal(pkgs[2][1], 2)
assert_equal(pkgs[3], ('test1', 1))
def test_largest_groups(self):
grps = Stats.largest_groups()
grps = [(grp.name, count) for grp, count in grps]
assert_equal(grps, [('grp1', 3),
('grp2', 2)])
def test_top_tags(self):
tags = Stats.top_tags()
tags = [(tag.name, count) for tag, count in tags]
assert_equal(tags, [('tag1', 3),
('tag2', 1)])
def test_top_package_owners(self):
owners = Stats.top_package_owners()
owners = [(owner.name, count) for owner, count in owners]
assert_equal(owners, [('bob', 4)])
def test_new_packages_by_week(self):
new_packages_by_week = RevisionStats.get_by_week('new_packages')
def get_results(week_number):
date, ids, num, cumulative = new_packages_by_week[week_number]
return (date, set([model.Session.query(model.Package).get(id).name for id in ids]), num, cumulative)
assert_equal(get_results(0),
('2011-01-03', set((u'test1', u'test2', u'test3', u'test4')), 4, 4))
assert_equal(get_results(1),
('2011-01-10', set([]), 0, 4))
assert_equal(get_results(2),
('2011-01-17', set([]), 0, 4))
assert_equal(get_results(3),
('2011-01-24', set([]), 0, 4))
def test_deleted_packages_by_week(self):
deleted_packages_by_week = RevisionStats.get_by_week('deleted_packages')
def get_results(week_number):
date, ids, num, cumulative = deleted_packages_by_week[week_number]
return (date, [model.Session.query(model.Package).get(id).name for id in ids], num, cumulative)
assert_equal(get_results(0),
('2011-01-10', [u'test2'], 1, 1))
assert_equal(get_results(1),
('2011-01-17', [], 0, 1))
assert_equal(get_results(2),
('2011-01-24', [], 0, 1))
assert_equal(get_results(3),
('2011-01-31', [], 0, 1))
def test_revisions_by_week(self):
revisions_by_week = RevisionStats.get_by_week('package_revisions')
def get_results(week_number):
date, ids, num, cumulative = revisions_by_week[week_number]
return (date, num, cumulative)
num_setup_revs = revisions_by_week[0][2]
assert 6 > num_setup_revs > 2, num_setup_revs
assert_equal(get_results(0),
('2011-01-03', num_setup_revs, num_setup_revs))
assert_equal(get_results(1),
('2011-01-10', 1, num_setup_revs+1))
assert_equal(get_results(2),
('2011-01-17', 2, num_setup_revs+3))
assert_equal(get_results(3),
('2011-01-24', 1, num_setup_revs+4))
def test_num_packages_by_week(self):
num_packages_by_week = RevisionStats.get_num_packages_by_week()
# e.g. [('2011-05-30', 3, 3)]
assert_equal(num_packages_by_week[0], ('2011-01-03', 4, 4))
assert_equal(num_packages_by_week[1], ('2011-01-10', -1, 3))
assert_equal(num_packages_by_week[2], ('2011-01-17', 0, 3))
assert_equal(num_packages_by_week[3], ('2011-01-24', 0, 3))
| 41.192593 | 112 | 0.583168 | import datetime
from nose.tools import assert_equal
from ckan.lib.create_test_data import CreateTestData
from ckan import model
from ckanext.stats.stats import Stats, RevisionStats
from ckanext.stats.tests import StatsFixture
class TestStatsPlugin(StatsFixture):
@classmethod
def setup_class(cls):
super(TestStatsPlugin, cls).setup_class()
CreateTestData.create_arbitrary([
{'name':'test1', 'groups':['grp1'], 'tags':['tag1']},
{'name':'test2', 'groups':['grp1', 'grp2'], 'tags':['tag1']},
{'name':'test3', 'groups':['grp1', 'grp2'], 'tags':['tag1', 'tag2']},
{'name':'test4'},
],
extra_user_names=['bob'],
admins=['bob'],
)
week1 = datetime.datetime(2011, 1, 5)
for rev in model.Session.query(model.Revision):
rev.timestamp = week1 + datetime.timedelta(seconds=1)
rev = model.repo.new_revision()
rev.author = 'bob'
rev.timestamp = datetime.datetime(2011, 1, 12)
model.Package.by_name(u'test2').delete()
model.repo.commit_and_remove()
rev = model.repo.new_revision()
rev.author = 'sandra'
rev.timestamp = datetime.datetime(2011, 1, 19)
model.Package.by_name(u'test3').title = 'Test 3'
model.repo.commit_and_remove()
rev = model.repo.new_revision()
rev.author = 'sandra'
rev.timestamp = datetime.datetime(2011, 1, 20)
model.Package.by_name(u'test4').title = 'Test 4'
model.repo.commit_and_remove()
rev = model.repo.new_revision()
rev.author = 'bob'
rev.timestamp = datetime.datetime(2011, 1, 26)
model.Package.by_name(u'test3').notes = 'Test 3 notes'
model.repo.commit_and_remove()
def test_top_rated_packages(self):
pkgs = Stats.top_rated_packages()
assert pkgs == []
def test_most_edited_packages(self):
pkgs = Stats.most_edited_packages()
pkgs = [(pkg.name, count) for pkg, count in pkgs]
assert_equal(pkgs[0], ('test3', 3))
assert_equal(pkgs[1][1], 2)
assert_equal(pkgs[2][1], 2)
assert_equal(pkgs[3], ('test1', 1))
def test_largest_groups(self):
grps = Stats.largest_groups()
grps = [(grp.name, count) for grp, count in grps]
assert_equal(grps, [('grp1', 3),
('grp2', 2)])
def test_top_tags(self):
tags = Stats.top_tags()
tags = [(tag.name, count) for tag, count in tags]
assert_equal(tags, [('tag1', 3),
('tag2', 1)])
def test_top_package_owners(self):
owners = Stats.top_package_owners()
owners = [(owner.name, count) for owner, count in owners]
assert_equal(owners, [('bob', 4)])
def test_new_packages_by_week(self):
new_packages_by_week = RevisionStats.get_by_week('new_packages')
def get_results(week_number):
date, ids, num, cumulative = new_packages_by_week[week_number]
return (date, set([model.Session.query(model.Package).get(id).name for id in ids]), num, cumulative)
assert_equal(get_results(0),
('2011-01-03', set((u'test1', u'test2', u'test3', u'test4')), 4, 4))
assert_equal(get_results(1),
('2011-01-10', set([]), 0, 4))
assert_equal(get_results(2),
('2011-01-17', set([]), 0, 4))
assert_equal(get_results(3),
('2011-01-24', set([]), 0, 4))
def test_deleted_packages_by_week(self):
deleted_packages_by_week = RevisionStats.get_by_week('deleted_packages')
def get_results(week_number):
date, ids, num, cumulative = deleted_packages_by_week[week_number]
return (date, [model.Session.query(model.Package).get(id).name for id in ids], num, cumulative)
assert_equal(get_results(0),
('2011-01-10', [u'test2'], 1, 1))
assert_equal(get_results(1),
('2011-01-17', [], 0, 1))
assert_equal(get_results(2),
('2011-01-24', [], 0, 1))
assert_equal(get_results(3),
('2011-01-31', [], 0, 1))
def test_revisions_by_week(self):
revisions_by_week = RevisionStats.get_by_week('package_revisions')
def get_results(week_number):
date, ids, num, cumulative = revisions_by_week[week_number]
return (date, num, cumulative)
num_setup_revs = revisions_by_week[0][2]
assert 6 > num_setup_revs > 2, num_setup_revs
assert_equal(get_results(0),
('2011-01-03', num_setup_revs, num_setup_revs))
assert_equal(get_results(1),
('2011-01-10', 1, num_setup_revs+1))
assert_equal(get_results(2),
('2011-01-17', 2, num_setup_revs+3))
assert_equal(get_results(3),
('2011-01-24', 1, num_setup_revs+4))
def test_num_packages_by_week(self):
num_packages_by_week = RevisionStats.get_num_packages_by_week()
assert_equal(num_packages_by_week[0], ('2011-01-03', 4, 4))
assert_equal(num_packages_by_week[1], ('2011-01-10', -1, 3))
assert_equal(num_packages_by_week[2], ('2011-01-17', 0, 3))
assert_equal(num_packages_by_week[3], ('2011-01-24', 0, 3))
| true | true |
1c345875ce04261a77333e4c94e02945a5609af8 | 856 | py | Python | test/test_ndb.py | jinglundong/GuessGame | d6953d279d476c1281d15369ee18135c241441a5 | [
"MIT"
] | null | null | null | test/test_ndb.py | jinglundong/GuessGame | d6953d279d476c1281d15369ee18135c241441a5 | [
"MIT"
] | null | null | null | test/test_ndb.py | jinglundong/GuessGame | d6953d279d476c1281d15369ee18135c241441a5 | [
"MIT"
] | null | null | null | import unittest
import cgi
import os
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.ext import testbed
class Account(ndb.Model):
    # Minimal datastore model exercised by the read-after-write test below.
    username = ndb.StringProperty()
    userid = ndb.IntegerProperty()
    email = ndb.StringProperty()
class TestAccount(unittest.TestCase):
    """Exercises an ndb datastore write/read cycle against the App Engine testbed."""

    def setUp(self):
        # Activate an in-memory testbed so ndb calls never touch real services.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        # ndb caches aggressively; start each test with a cold cache so the
        # get() below genuinely round-trips through the datastore stub.
        ndb.get_context().clear_cache()

    def tearDown(self):
        # Deactivate so stubs do not leak into other test cases.
        self.testbed.deactivate()

    def test_read_after_write(self):
        # Write an entity, then fetch it back by key and verify a field survived.
        sandy = Account(
            username='Sandy', userid=123, email='sandy@example.com')
        sandy_key = sandy.put()
        sandy = sandy_key.get()
        self.assertEqual('Sandy', sandy.username)
| 27.612903 | 68 | 0.682243 | import unittest
import cgi
import os
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.ext import testbed
class Account(ndb.Model):
username = ndb.StringProperty()
userid = ndb.IntegerProperty()
email = ndb.StringProperty()
class TestAccount(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache()
def tearDown(self):
self.testbed.deactivate()
def test_read_after_write(self):
sandy = Account(
username='Sandy', userid=123, email='sandy@example.com')
sandy_key = sandy.put()
sandy = sandy_key.get()
self.assertEqual('Sandy', sandy.username)
| true | true |
1c345971a15ac04b067a535bad2f3fc25ddfe4ab | 4,667 | py | Python | server/auvsi_suas/views/map.py | UnmannedAerialSystems/interop | cb36506f7fb795ef2432a5e5a0b7b29917eddbdc | [
"Apache-2.0"
] | 175 | 2015-09-15T15:37:06.000Z | 2022-02-14T23:21:48.000Z | server/auvsi_suas/views/map.py | UnmannedAerialSystems/interop | cb36506f7fb795ef2432a5e5a0b7b29917eddbdc | [
"Apache-2.0"
] | 376 | 2015-09-16T19:34:15.000Z | 2022-02-19T12:55:55.000Z | server/auvsi_suas/views/map.py | UnmannedAerialSystems/interop | cb36506f7fb795ef2432a5e5a0b7b29917eddbdc | [
"Apache-2.0"
] | 109 | 2015-09-16T17:05:14.000Z | 2022-01-26T12:49:38.000Z | """Map view."""
from PIL import Image
import io
import json
import logging
import os
import os.path
from auvsi_suas.models.map import Map
from auvsi_suas.models.mission_config import MissionConfig
from auvsi_suas.proto import interop_admin_api_pb2
from auvsi_suas.views.decorators import require_login
from auvsi_suas.views.decorators import require_superuser
from django.contrib.auth.models import User
from django.core.files.images import ImageFile
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseNotFound
from django.http import HttpResponseServerError
from django.utils.decorators import method_decorator
from django.views.generic import View
from sendfile import sendfile
logger = logging.getLogger(__name__)
def find_map(mission_pk, user_pk):
    """Return the Map owned by ``user_pk`` for mission ``mission_pk``.

    Args:
        mission_pk: Primary key of the mission the map belongs to.
        user_pk: Primary key of the user that owns the map.
    Raises:
        Map.DoesNotExist: No matching map exists.
    """
    lookup = {'mission_id': mission_pk, 'user_id': user_pk}
    return Map.objects.get(**lookup)
class MapImage(View):
    """Serve, upload, or delete a team's map image for a mission.

    All handlers require login. A user may only access their own map,
    except superusers, who are allowed through the permission check.
    """

    @method_decorator(require_login)
    def dispatch(self, *args, **kwargs):
        return super(MapImage, self).dispatch(*args, **kwargs)

    def get(self, request, mission_pk, username):
        """Return the stored map image, or 404 if none exists."""
        mission_pk = int(mission_pk)
        if username != request.user.username and not request.user.is_superuser:
            return HttpResponseForbidden(
                'User [%s] is not able to access maps owned by user [%s]' %
                (request.user.username, username))

        # NOTE(review): the lookup uses request.user.pk, so a superuser
        # requesting another user's map via `username` gets their own map
        # (or a 404) instead -- presumably it should resolve the pk of
        # `username`; confirm intended semantics. Same applies to
        # put()/delete() below.
        try:
            m = find_map(mission_pk, request.user.pk)
        except Map.DoesNotExist:
            return HttpResponseNotFound('Map not found.')

        if not m.uploaded_map or not m.uploaded_map.name:
            return HttpResponseNotFound('Map not found.')

        # Tell sendfile to serve the map.
        return sendfile(request, m.uploaded_map.path)

    def put(self, request, mission_pk, username):
        """Create or replace the map; the request body is the raw image."""
        mission_pk = int(mission_pk)
        if username != request.user.username and not request.user.is_superuser:
            return HttpResponseForbidden(
                'User [%s] is not able to access maps owned by user [%s]' %
                (request.user.username, username))

        # Fetch the existing map, or start a new one for this mission/user.
        # Catch only DoesNotExist: a bare `except:` would also turn
        # unrelated failures (e.g. database errors) into a silent
        # "create new map" path.
        try:
            m = find_map(mission_pk, request.user.pk)
        except Map.DoesNotExist:
            m = Map()
            m.mission_id = mission_pk
            m.user = request.user

        # Request body is the file
        f = io.BytesIO(request.body)

        # Verify that this is a valid image
        try:
            i = Image.open(f)
            i.verify()
        except IOError as e:
            return HttpResponseBadRequest(str(e))
        if i.format not in ['JPEG', 'PNG']:
            return HttpResponseBadRequest(
                'Invalid image format %s, only JPEG and PNG allowed' %
                (i.format))

        # Clear review state.
        if m.quality is not None:
            m.quality = None

        # Save the map, note old path.
        old_path = m.uploaded_map.path if m.uploaded_map else None
        m.uploaded_map.save(
            '%d-%d.%s' % (mission_pk, request.user.pk, i.format), ImageFile(f))

        # Map has been updated.
        m.save()

        # Check whether old map should be deleted. Ignore errors.
        if old_path and m.uploaded_map.path != old_path:
            try:
                os.remove(old_path)
            except OSError as e:
                logger.warning("Unable to delete old map: %s", e)

        return HttpResponse("Map uploaded.")

    def delete(self, request, mission_pk, username):
        """Delete the stored map and its backing image file."""
        mission_pk = int(mission_pk)
        if username != request.user.username and not request.user.is_superuser:
            return HttpResponseForbidden(
                'User [%s] is not able to access maps owned by user [%s]' %
                (request.user.username, username))

        # Catch only DoesNotExist so real errors are not masked as a 404
        # (the previous bare `except:` swallowed everything).
        try:
            m = find_map(mission_pk, request.user.pk)
        except Map.DoesNotExist:
            return HttpResponseNotFound('Map not found.')

        if not m.uploaded_map or not m.uploaded_map.path:
            return HttpResponseNotFound('Map not found.')
        path = m.uploaded_map.path

        # Delete the map. Note this does not cleanup the image.
        m.delete()

        # Delete the image file.
        try:
            os.remove(path)
        except OSError as e:
            logger.warning("Unable to delete map: %s", e)

        return HttpResponse("Map deleted.")
| 31.748299 | 79 | 0.635526 |
from PIL import Image
import io
import json
import logging
import os
import os.path
from auvsi_suas.models.map import Map
from auvsi_suas.models.mission_config import MissionConfig
from auvsi_suas.proto import interop_admin_api_pb2
from auvsi_suas.views.decorators import require_login
from auvsi_suas.views.decorators import require_superuser
from django.contrib.auth.models import User
from django.core.files.images import ImageFile
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseNotFound
from django.http import HttpResponseServerError
from django.utils.decorators import method_decorator
from django.views.generic import View
from sendfile import sendfile
logger = logging.getLogger(__name__)
def find_map(mission_pk, user_pk):
return Map.objects.get(mission_id=mission_pk, user_id=user_pk)
class MapImage(View):
@method_decorator(require_login)
def dispatch(self, *args, **kwargs):
return super(MapImage, self).dispatch(*args, **kwargs)
def get(self, request, mission_pk, username):
mission_pk = int(mission_pk)
if username != request.user.username and not request.user.is_superuser:
return HttpResponseForbidden(
'User [%s] is not able to access maps owned by user [%s]' %
(request.user.username, username))
try:
m = find_map(mission_pk, request.user.pk)
except Map.DoesNotExist:
return HttpResponseNotFound('Map not found.')
if not m.uploaded_map or not m.uploaded_map.name:
return HttpResponseNotFound('Map not found.')
return sendfile(request, m.uploaded_map.path)
def put(self, request, mission_pk, username):
mission_pk = int(mission_pk)
if username != request.user.username and not request.user.is_superuser:
return HttpResponseForbidden(
'User [%s] is not able to access maps owned by user [%s]' %
(request.user.username, username))
try:
m = find_map(mission_pk, request.user.pk)
except:
m = Map()
m.mission_id = mission_pk
m.user = request.user
f = io.BytesIO(request.body)
try:
i = Image.open(f)
i.verify()
except IOError as e:
return HttpResponseBadRequest(str(e))
if i.format not in ['JPEG', 'PNG']:
return HttpResponseBadRequest(
'Invalid image format %s, only JPEG and PNG allowed' %
(i.format))
if m.quality is not None:
m.quality = None
old_path = m.uploaded_map.path if m.uploaded_map else None
m.uploaded_map.save(
'%d-%d.%s' % (mission_pk, request.user.pk, i.format), ImageFile(f))
m.save()
if old_path and m.uploaded_map.path != old_path:
try:
os.remove(old_path)
except OSError as e:
logger.warning("Unable to delete old map: %s", e)
return HttpResponse("Map uploaded.")
def delete(self, request, mission_pk, username):
mission_pk = int(mission_pk)
if username != request.user.username and not request.user.is_superuser:
return HttpResponseForbidden(
'User [%s] is not able to access maps owned by user [%s]' %
(request.user.username, username))
try:
m = find_map(mission_pk, request.user.pk)
except:
return HttpResponseNotFound('Map not found.')
if not m.uploaded_map or not m.uploaded_map.path:
return HttpResponseNotFound('Map not found.')
path = m.uploaded_map.path
m.delete()
try:
os.remove(path)
except OSError as e:
logger.warning("Unable to delete map: %s", e)
return HttpResponse("Map deleted.")
| true | true |
1c3459e9b4d5a65bbdbfb0b4c76dbef94560f5e8 | 2,409 | py | Python | studies/tests/tests_fonctionnal/test_selenium.py | tbuglioni/unlimited-studies | 8d5e75d1c3767b7e108e6cf8737462f891410af0 | [
"MIT"
] | null | null | null | studies/tests/tests_fonctionnal/test_selenium.py | tbuglioni/unlimited-studies | 8d5e75d1c3767b7e108e6cf8737462f891410af0 | [
"MIT"
] | 25 | 2021-07-07T15:05:15.000Z | 2021-11-22T13:46:37.000Z | studies/tests/tests_fonctionnal/test_selenium.py | tbuglioni/unlimited-studies | 8d5e75d1c3767b7e108e6cf8737462f891410af0 | [
"MIT"
] | null | null | null | import time
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from studies.tests.speed_set_up import SpeedSetUP
from unlimited_studies.settings import BASE_DIR
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("window-size=1920x1080")
User = get_user_model()
class MySeleniumTests(StaticLiveServerTestCase):
    """Browser-level navigation tests run against a live test server."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # One shared Chrome instance (configured via the module-level
        # chrome_options, i.e. headless at 1920x1080) for the whole class.
        cls.browser = webdriver.Chrome(
            executable_path=str(BASE_DIR / "webdrivers" / "chromedriver"),
            options=chrome_options,
        )
        # Poll up to 30s for elements before failing a lookup.
        cls.browser.implicitly_wait(30)
        cls.browser.maximize_window()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        cls.browser.quit()

    def setUp(self):
        # Seed a known user and one owned book before each test.
        speed_set_up = SpeedSetUP()
        self.user_a = speed_set_up.set_up_user_a()
        self.book_1 = speed_set_up.create_book_owner(self.user_a, order_book=1)

    def test_nav(self):
        """Log in and follow the main navigation links, checking each URL."""
        # Start from a clean session, then load the login form.
        self.browser.get(("%s%s" % (self.live_server_url, "/logout/")))
        self.browser.get(("%s%s" % (self.live_server_url, "/login/")))
        mail = self.browser.find_element_by_id("id_email")
        password = self.browser.find_element_by_id("id_password")
        submit = self.browser.find_element_by_id("submit_login")
        mail.send_keys("john@invalid.com")
        password.send_keys("some_123_password")
        submit.click()
        time.sleep(1)
        # Successful login redirects to the site root.
        cur_url = self.browser.current_url
        self.assertEqual(cur_url, (self.live_server_url + "/"))
        perso_home = self.browser.find_element_by_id("button_personal_home")
        perso_home.click()
        time.sleep(3)
        cur_url = self.browser.current_url
        self.assertEqual(cur_url, (self.live_server_url + "/studies/"))
        notice_page = self.browser.find_element_by_id("button_notice_page")
        notice_page.click()
        time.sleep(3)
        cur_url = self.browser.current_url
        self.assertEqual(cur_url, (self.live_server_url + "/#help-section"))
        account_page = self.browser.find_element_by_id("account_page")
        account_page.click()
        time.sleep(3)
        cur_url = self.browser.current_url
        self.assertEqual(cur_url, (self.live_server_url + "/account/"))
| 35.426471 | 79 | 0.686177 | import time
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from studies.tests.speed_set_up import SpeedSetUP
from unlimited_studies.settings import BASE_DIR
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("window-size=1920x1080")
User = get_user_model()
class MySeleniumTests(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser = webdriver.Chrome(
executable_path=str(BASE_DIR / "webdrivers" / "chromedriver"),
options=chrome_options,
)
cls.browser.implicitly_wait(30)
cls.browser.maximize_window()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.browser.quit()
def setUp(self):
speed_set_up = SpeedSetUP()
self.user_a = speed_set_up.set_up_user_a()
self.book_1 = speed_set_up.create_book_owner(self.user_a, order_book=1)
def test_nav(self):
self.browser.get(("%s%s" % (self.live_server_url, "/logout/")))
self.browser.get(("%s%s" % (self.live_server_url, "/login/")))
mail = self.browser.find_element_by_id("id_email")
password = self.browser.find_element_by_id("id_password")
submit = self.browser.find_element_by_id("submit_login")
mail.send_keys("john@invalid.com")
password.send_keys("some_123_password")
submit.click()
time.sleep(1)
cur_url = self.browser.current_url
self.assertEqual(cur_url, (self.live_server_url + "/"))
perso_home = self.browser.find_element_by_id("button_personal_home")
perso_home.click()
time.sleep(3)
cur_url = self.browser.current_url
self.assertEqual(cur_url, (self.live_server_url + "/studies/"))
notice_page = self.browser.find_element_by_id("button_notice_page")
notice_page.click()
time.sleep(3)
cur_url = self.browser.current_url
self.assertEqual(cur_url, (self.live_server_url + "/#help-section"))
account_page = self.browser.find_element_by_id("account_page")
account_page.click()
time.sleep(3)
cur_url = self.browser.current_url
self.assertEqual(cur_url, (self.live_server_url + "/account/"))
| true | true |
1c345a091eae0ec0dabc1e2a8d867aff74d232ac | 998 | py | Python | ex2_4.py | leo-gal/pyplus_exercise | 223d3c16fe485a0ee99c3ab7d161a758975a9d7b | [
"Apache-2.0"
] | null | null | null | ex2_4.py | leo-gal/pyplus_exercise | 223d3c16fe485a0ee99c3ab7d161a758975a9d7b | [
"Apache-2.0"
] | null | null | null | ex2_4.py | leo-gal/pyplus_exercise | 223d3c16fe485a0ee99c3ab7d161a758975a9d7b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
from datetime import datetime

# Device connection parameters; the password is prompted at runtime so it
# is never stored in the script.
device1 = {
    "host": "cisco3.lasthop.io",
    "username": "pyclass",
    "password": getpass(),
    "device_type": "cisco_ios",
    # "session_log": "session_log.txt",
}

# Configuration to push: replace the name server and enable DNS lookups.
cfg = [
    "ip name-server 1.1.1.1",
    "no ip name-server 1.0.0.1",
    "ip domain-lookup",
]

start_time = datetime.now()
net_connect = ConnectHandler(**device1)
try:
    print(net_connect.find_prompt())
    output = net_connect.send_config_set(cfg)
    pprint(output)

    # Verify DNS now works by pinging a hostname; '!!' in the output
    # indicates successful echo replies.
    ping_output = net_connect.send_command("ping google.com")
    if "!!" in ping_output:
        print("Ping Successful:")
        print("\n\nPing Output: {}\n\n".format(ping_output))
    else:
        raise ValueError("\n\nPing Failed: {}\n\n".format(ping_output))
finally:
    # Always close the SSH session, even when the ping check raises.
    # (The original called disconnect() twice and leaked the connection
    # on failure.)
    net_connect.disconnect()

end_time = datetime.now()
print("Total Execution Time: {}\n\n".format(end_time - start_time))
| 23.209302 | 67 | 0.675351 |
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
from datetime import datetime
device1 = {
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": getpass(),
"device_type": "cisco_ios",
}
cfg = [
"ip name-server 1.1.1.1",
"no ip name-server 1.0.0.1",
"ip domain-lookup",
]
start_time = datetime.now()
net_connect = ConnectHandler(**device1)
print(net_connect.find_prompt())
output = net_connect.send_config_set(cfg)
pprint(output)
ping_output= net_connect.send_command("ping google.com")
if "!!" in ping_output:
print("Ping Successful:")
print("\n\nPing Output: {}\n\n".format(ping_output))
else:
raise ValueError("\n\nPing Failed: {}\n\n".format(ping_output))
net_connect.disconnect()
end_time = datetime.now()
net_connect.disconnect()
print("Total Execution Time: {}\n\n".format(end_time - start_time))
| true | true |
1c345b10dc647f9eb8d6b8cf83725d3b7601e99a | 21,926 | py | Python | sunpy/image/coalignment.py | jgieseler/sunpy | 9eb01ce9eea43512cc928b17c6d79ac06dce0ece | [
"BSD-2-Clause"
] | null | null | null | sunpy/image/coalignment.py | jgieseler/sunpy | 9eb01ce9eea43512cc928b17c6d79ac06dce0ece | [
"BSD-2-Clause"
] | null | null | null | sunpy/image/coalignment.py | jgieseler/sunpy | 9eb01ce9eea43512cc928b17c6d79ac06dce0ece | [
"BSD-2-Clause"
] | null | null | null | """
This module provides routines for the co-alignment of images and
`~sunpy.map.mapsequence.MapSequence`.
Currently this module provides image co-alignment by template matching.
Which is partially inspired by the SSWIDL routine
`tr_get_disp.pro <http://www.heliodocs.com/php/xdoc_print.php?file=$SSW/trace/idl/util/tr_get_disp.pro>`__.
In this implementation, the template matching is handled via the scikit-image
routine `skimage.feature.match_template`.
References
----------
* http://scribblethink.org/Work/nvisionInterface/nip.html
* J.P. Lewis, Fast Template Matching, Vision Interface 95, Canadian Image
Processing and Pattern Recognition Society, Quebec City, Canada, May 15-19,
1995, p. 120-123 http://www.scribblethink.org/Work/nvisionInterface/vi95_lewis.pdf.
"""
from copy import deepcopy
import numpy as np
from scipy.ndimage import shift
from skimage.feature import match_template
import astropy.units as u
import sunpy.map
from sunpy.map.mapbase import GenericMap
from sunpy.util.exceptions import warn_user
__all__ = ['calculate_shift', 'clip_edges', 'calculate_clipping',
'match_template_to_layer', 'find_best_match_location',
'get_correlation_shifts', 'parabolic_turning_point',
'check_for_nonfinite_entries',
'apply_shifts', 'mapsequence_coalign_by_match_template',
'calculate_match_template_shift']
def _default_fmap_function(data):
    """
    This function ensures that the data are floats.

    It is the default data manipulation function for the coalignment
    method.
    """
    # np.float64 acts as a dtype constructor here: array-like input is
    # converted to a 64-bit floating point ndarray.
    return np.float64(data)
def calculate_shift(this_layer, template):
    """
    Compute the pixel offset that best aligns ``template`` with
    ``this_layer``.

    Parameters
    ----------
    this_layer : `numpy.ndarray`
        A 2D array of size ``(ny, nx)``.
    template : `numpy.ndarray`
        A 2D array of size ``(N, M)`` with ``N < ny`` and ``M < nx``.

    Returns
    -------
    `tuple`
        Pixel shifts ``(yshift, xshift)`` of the template relative to the
        offset within the input array.
    """
    # Emit warnings for NaN/inf values, which would corrupt the matching.
    check_for_nonfinite_entries(this_layer, template)
    # Cross-correlate the template against the layer, then locate the
    # peak of the correlation surface in (y, x) pixel coordinates.
    correlation = match_template_to_layer(this_layer, template)
    return find_best_match_location(correlation)
@u.quantity_input
def clip_edges(data, yclips: u.pix, xclips: u.pix):
    """
    Remove rows and columns from the edges of a 2D image.

    Useful for discarding edge pixels contaminated by shifts arising from
    solar derotation and layer co-registration.

    Parameters
    ----------
    data : `numpy.ndarray`
        Image of shape ``(ny, nx)``.
    yclips : `astropy.units.Quantity`
        Pixels to clip at the (lower, upper) edges in the y-direction;
        values should be whole non-negative numbers.
    xclips : `astropy.units.Quantity`
        Pixels to clip at the (lower, upper) edges in the x-direction;
        values should be whole non-negative numbers.

    Returns
    -------
    `numpy.ndarray`
        The input image with the requested edges removed.
    """
    ny, nx = data.shape[0], data.shape[1]
    # Quantity values come back as floats by default; cast to int so the
    # slice indices are valid.
    y_lo, y_hi = int(yclips[0].value), int(yclips[1].value)
    x_lo, x_hi = int(xclips[0].value), int(xclips[1].value)
    return data[y_lo:ny - y_hi, x_lo:nx - x_hi]
@u.quantity_input
def calculate_clipping(y: u.pix, x: u.pix):
    """
    Return the (lower, upper) clipping amounts for the y and x directions.

    Parameters
    ----------
    y : `astropy.units.Quantity`
        Array of per-image pixel shifts in the y-direction.
    x : `astropy.units.Quantity`
        Array of per-image pixel shifts in the x-direction.

    Returns
    -------
    `tuple`
        ``([y0, y1], [x0, x1])`` as pixel `~astropy.units.Quantity`
        objects: the integer number of pixels to clip at each edge.
        ``y0``/``y1`` are the rows removed from the lower/upper edge of
        the image in y; ``x0``/``x1`` apply likewise to the columns.
    """
    yclip = [_lower_clip(y.value), _upper_clip(y.value)] * u.pix
    xclip = [_lower_clip(x.value), _upper_clip(x.value)] * u.pix
    return yclip, xclip
def _upper_clip(z):
"""
Find smallest integer bigger than all the positive entries in the input
array.
"""
zupper = 0
zcond = z >= 0
if np.any(zcond):
zupper = int(np.max(np.ceil(z[zcond])))
return zupper
def _lower_clip(z):
"""
Find smallest positive integer bigger than the absolute values of the
negative entries in the input array.
"""
zlower = 0
zcond = z <= 0
if np.any(zcond):
zlower = int(np.max(np.ceil(-z[zcond])))
return zlower
def match_template_to_layer(layer, template):
    """
    Calculate the correlation array that describes how well the template
    matches the layer. All inputs are assumed to be numpy arrays.

    Parameters
    ----------
    layer : `numpy.ndarray`
        A numpy array of size ``(ny, nx)``.
    template : `numpy.ndarray`
        A numpy array of size ``(N, M)`` where ``N < ny`` and ``M < nx``.

    Returns
    -------
    `numpy.ndarray`
        A correlation array between the layer and the template.
        Values are normalized cross-correlation coefficients in the
        interval ``[-1, 1]`` (see `skimage.feature.match_template`),
        peaking where the template matches best.
    """
    # Thin wrapper so the matching backend is isolated in one place.
    return match_template(layer, template)
def find_best_match_location(corr):
    """
    Estimate the ``(y, x)`` location, in image pixels, of the peak of a
    correlation surface, to subpixel precision.

    Parameters
    ----------
    corr : `numpy.ndarray`
        A 2D correlation array.

    Returns
    -------
    `~astropy.units.Quantity`
        The shift amounts ``(y, x)`` in image pixels; subpixel values are
        possible.
    """
    # Integer (row, col) location of the correlation maximum.
    peak_y, peak_x = np.unravel_index(np.argmax(corr), corr.shape)

    # Take a (up to) 3x3 neighbourhood around the peak, clipped to the
    # array bounds, and refine the peak position with a parabolic fit.
    ny, nx = corr.shape
    rows = slice(max(0, peak_y - 1), min(peak_y + 2, ny - 1))
    cols = slice(max(0, peak_x - 1), min(peak_x + 2, nx - 1))
    subpixel_y, subpixel_x = get_correlation_shifts(corr[rows, cols])

    # Express the refined shift relative to the full correlation array.
    return subpixel_y + peak_y * u.pix, subpixel_x + peak_x * u.pix
def get_correlation_shifts(array):
    """
    Estimate the location of the maximum of a parabolic fit to ``array``,
    treating the y and x directions independently. The estimates can be
    used to implement subpixel shifts between two images.

    Parameters
    ----------
    array : `numpy.ndarray`
        At most a 3x3 array of correlation values obtained by matching a
        template to an image; at least one dimension has three elements.

    Returns
    -------
    `~astropy.units.Quantity`
        The ``(y, x)`` location of the parabolic peak, in image pixels.

    Raises
    ------
    ValueError
        If either dimension of ``array`` exceeds 3.
    """
    ny, nx = array.shape
    if nx > 3 or ny > 3:
        raise ValueError("Input array dimension should not be greater than 3 in any dimension.")

    # Integer (row, col) position of the maximum entry.
    row_max, col_max = np.unravel_index(np.argmax(array), array.shape)

    # Refine with a parabolic fit only when three samples are available in
    # a direction; otherwise fall back to the integer location.
    if ny == 3:
        y_location = parabolic_turning_point(array[:, col_max])
    else:
        y_location = 1.0 * row_max

    if nx == 3:
        x_location = parabolic_turning_point(array[row_max, :])
    else:
        x_location = 1.0 * col_max

    return y_location * u.pix, x_location * u.pix
def parabolic_turning_point(y):
    """
    Locate the turning point of a parabola sampled at ``x = -1, 0, 1``.

    For ``y(x) = ax^2 + bx + c`` sampled at the equally spaced points
    ``y(-1), y(0), y(1)``, the extremum sits at ``x0 = -b / (2a)``.

    Parameters
    ----------
    y : `numpy.ndarray`
        One-dimensional array of length 3 holding ``y(-1), y(0), y(1)``.

    Returns
    -------
    `float`
        The x-location of the parabola's turning point.
    """
    # Finite differences recover -b (numerator) and 2a (denominator).
    numerator = -0.5 * (y[2] - y[0])
    denominator = y[0] - 2 * y[1] + y[2]
    return numerator / denominator
def check_for_nonfinite_entries(layer_image, template_image):
    """
    Warn the user if the layer or template image contains nonfinite
    (NaN/inf) entries.

    Parameters
    ----------
    layer_image : `numpy.ndarray`
        A two-dimensional `numpy.ndarray`.
    template_image : `numpy.ndarray`
        A two-dimensional `numpy.ndarray`.
    """
    layer_is_finite = np.isfinite(layer_image).all()
    template_is_finite = np.isfinite(template_image).all()
    if not layer_is_finite:
        warn_user('The layer image has nonfinite entries. '
                  'This could cause errors when calculating shift between two '
                  'images. Please make sure there are no infinity or '
                  'Not a Number values. For instance, replacing them with a '
                  'local mean.')
    if not template_is_finite:
        warn_user('The template image has nonfinite entries. '
                  'This could cause errors when calculating shift between two '
                  'images. Please make sure there are no infinity or '
                  'Not a Number values. For instance, replacing them with a '
                  'local mean.')
@u.quantity_input
def apply_shifts(mc, yshift: u.pix, xshift: u.pix, clip=True, **kwargs):
    """
    Apply a set of pixel shifts to a `~sunpy.map.MapSequence`, and return a new
    `~sunpy.map.MapSequence`.

    Parameters
    ----------
    mc : `sunpy.map.MapSequence`
        A `~sunpy.map.MapSequence` of shape ``(ny, nx, nt)``, where ``nt`` is the number of
        layers in the `~sunpy.map.MapSequence`. ``ny`` is the number of pixels in the
        "y" direction, ``nx`` is the number of pixels in the "x" direction.
    yshift : `~astropy.units.Quantity`
        An array of pixel shifts in the y-direction for an image.
    xshift : `~astropy.units.Quantity`
        An array of pixel shifts in the x-direction for an image.
    clip : `bool`, optional
        If `True` (default), then clip off "x", "y" edges of the maps in the sequence that are
        potentially affected by edges effects.

    Notes
    -----
    All other keywords are passed to `scipy.ndimage.shift`.

    Returns
    -------
    `sunpy.map.MapSequence`
        A `~sunpy.map.MapSequence` of the same shape as the input. All layers in
        the `~sunpy.map.MapSequence` have been shifted according to the input shifts.
    """
    # New mapsequence will be constructed from this list
    new_mc = []

    # Calculate the clipping.
    # NOTE(review): the shifts are negated here, presumably so the clip
    # amounts correspond to the edge regions vacated by the shifted data
    # -- confirm against calculate_clipping/clip_edges semantics.
    if clip:
        yclips, xclips = calculate_clipping(-yshift, -xshift)

    # Shift the data and construct the mapsequence
    for i, m in enumerate(mc):
        # Shift a deep copy of the data so the input sequence is untouched.
        shifted_data = shift(deepcopy(m.data), [yshift[i].value, xshift[i].value], **kwargs)
        new_meta = deepcopy(m.meta)
        # Clip if required, using clip_edges to keep the appropriate
        # portion of the data, and update the FITS metadata to match.
        if clip:
            shifted_data = clip_edges(shifted_data, yclips, xclips)
            new_meta['naxis1'] = shifted_data.shape[1]
            new_meta['naxis2'] = shifted_data.shape[0]
            # Add one to go from zero-based to one-based indexing
            new_meta['crpix1'] = m.reference_pixel.x.value + 1 + xshift[i].value - xshift[0].value
            new_meta['crpix2'] = m.reference_pixel.y.value + 1 + yshift[i].value - yshift[0].value
        new_map = sunpy.map.Map(shifted_data, new_meta)

        # Append to the list
        new_mc.append(new_map)

    return sunpy.map.Map(new_mc, sequence=True)
def calculate_match_template_shift(mc, template=None, layer_index=0,
                                   func=_default_fmap_function):
    """
    Compute the arcsecond displacements needed to co-register the layers
    of a `~sunpy.map.MapSequence` against a template drawn from that
    sequence.

    It is good practice to verify the computed shifts, e.g. by animating
    the original and the co-aligned sequences and comparing the apparent
    motion with the returned values.

    Parameters
    ----------
    mc : `sunpy.map.MapSequence`
        A `~sunpy.map.MapSequence` of shape ``(ny, nx, nt)``, where ``nt``
        is the number of layers in the sequence.
    template : {`None` | `~sunpy.map.Map` | `numpy.ndarray`}, optional
        The template used in the matching. If `None`, the central half of
        the ``layer_index`` map is used; a 2D `numpy.ndarray` or a map may
        also be given.
    layer_index : `int`, optional
        Index of the reference layer. All displacements are relative to
        this layer, so the template's displacement against it is
        ``(0, 0)``.
    func : function, optional
        Function ``F(data)`` applied to the data before matching (e.g.
        logarithm or square root), which can improve co-alignment for EUV
        images; the computed shifts still apply to the original data. The
        default simply casts the data to floats.

    Returns
    -------
    `dict`
        ``{"x": ..., "y": ...}`` where each value is an
        `~astropy.units.Quantity` array of per-layer shifts in arcseconds.
    """
    reference = mc.maps[layer_index]
    ny, nx = reference.data.shape[0], reference.data.shape[1]
    nt = len(mc.maps)

    # Build the template: default to the central half of the reference
    # layer when none is supplied.
    if template is None:
        tplate = reference.data[int(ny / 4): int(3 * ny / 4),
                                int(nx / 4): int(3 * nx / 4)]
    elif isinstance(template, GenericMap):
        tplate = template.data
    elif isinstance(template, np.ndarray):
        tplate = template
    else:
        raise ValueError('Invalid template.')

    # Pre-process the template the same way as the layers.
    tplate = func(tplate)

    # Per-layer pixel shifts found by template matching.
    xshift_keep = np.zeros(nt) * u.pix
    yshift_keep = np.zeros_like(xshift_keep)
    for i, m in enumerate(mc.maps):
        yshift, xshift = calculate_shift(func(m.data), tplate)
        yshift_keep[i] = yshift
        xshift_keep[i] = xshift

    # Re-reference the pixel shifts to the chosen layer.
    yshift_keep = yshift_keep - yshift_keep[layer_index]
    xshift_keep = xshift_keep - xshift_keep[layer_index]

    # Convert pixel shifts to physical units (assumed arcseconds) using
    # each map's plate scale.
    xshift_arcseconds = np.zeros(nt) * u.arcsec
    yshift_arcseconds = np.zeros_like(xshift_arcseconds)
    for i, m in enumerate(mc.maps):
        xshift_arcseconds[i] = xshift_keep[i] * m.scale[0]
        yshift_arcseconds[i] = yshift_keep[i] * m.scale[1]

    return {"x": xshift_arcseconds, "y": yshift_arcseconds}
# Coalignment by matching a template
def mapsequence_coalign_by_match_template(mc, template=None, layer_index=0,
                                          func=_default_fmap_function, clip=True,
                                          shift=None, **kwargs):
    """
    Co-register the layers of a `~sunpy.map.MapSequence` by matching a template
    taken from (or supplied for) the sequence.  Requires scikit-image.  It is a
    good idea to animate the input and the output sequences and compare what you
    see against the calculated shifts.

    Parameters
    ----------
    mc : `sunpy.map.MapSequence`
        Sequence to co-register.
    template : {None | sunpy.map.Map | `numpy.ndarray`}, optional
        Template used for the matching; an ndarray must be two-dimensional.
        Defaults to the centre of the layer at ``layer_index``.
    layer_index : `int`, optional
        All displacements are measured relative to this layer; the template's
        displacement relative to it is therefore ``(0, 0)``.
    func : function, optional
        Pre-processing ``F(data)`` applied before matching (e.g. log or sqrt
        for EUV images); the computed shifts are applied to the original data.
        The default merely ensures the data are floats.
    clip : bool, optional
        If True, clip off the x/y edges potentially affected by edge effects.
    shift : dict, optional
        Pre-computed shifts with keys 'x' and 'y' (Quantity arrays in
        arcseconds, one entry per map).  When supplied, template matching is
        skipped and these shifts are applied directly.

    Notes
    -----
    Remaining keyword arguments are sent to `sunpy.image.coalignment.apply_shifts`.

    Returns
    -------
    `sunpy.map.MapSequence`
        The co-aligned sequence.
    """
    # Per-layer displacement accumulators, in pixels.
    n_maps = len(mc.maps)
    x_displacement_pix = np.zeros(n_maps) * u.pix
    y_displacement_pix = np.zeros_like(x_displacement_pix)
    # Either use caller-supplied shifts or measure them by template matching.
    if shift is None:
        measured = calculate_match_template_shift(mc, template=template,
                                                  layer_index=layer_index,
                                                  func=func)
        x_arcsec = measured['x']
        y_arcsec = measured['y']
    else:
        x_arcsec = shift['x']
        y_arcsec = shift['y']
    # Convert the angular displacements to pixels via each map's plate scale.
    for index, amap in enumerate(mc):
        x_displacement_pix[index] = x_arcsec[index] / amap.scale[0]
        y_displacement_pix[index] = y_arcsec[index] / amap.scale[1]
    # Shift each layer by the negative displacement to bring it into register.
    return apply_shifts(mc, -y_displacement_pix, -x_displacement_pix, clip=clip, **kwargs)
| 39.223614 | 107 | 0.654793 | from copy import deepcopy
import numpy as np
from scipy.ndimage import shift
from skimage.feature import match_template
import astropy.units as u
import sunpy.map
from sunpy.map.mapbase import GenericMap
from sunpy.util.exceptions import warn_user
__all__ = ['calculate_shift', 'clip_edges', 'calculate_clipping',
'match_template_to_layer', 'find_best_match_location',
'get_correlation_shifts', 'parabolic_turning_point',
'check_for_nonfinite_entries',
'apply_shifts', 'mapsequence_coalign_by_match_template',
'calculate_match_template_shift']
def _default_fmap_function(data):
    """Default pre-processing for coalignment: coerce *data* to 64-bit floats."""
    as_float64 = np.float64(data)
    return as_float64
def calculate_shift(this_layer, template):
    """Return the (y, x) pixel shift of *template*'s best match inside *this_layer*.

    Non-finite values in either input produce a warning (not an error) before
    the cross-correlation is computed.
    """
    check_for_nonfinite_entries(this_layer, template)
    return find_best_match_location(match_template_to_layer(this_layer, template))
@u.quantity_input
def clip_edges(data, yclips: u.pix, xclips: u.pix):
    """Trim *data* by (lower, upper) pixel amounts along rows (y) and columns (x)."""
    n_rows = data.shape[0]
    n_cols = data.shape[1]
    row_start, row_stop = int(yclips[0].value), n_rows - int(yclips[1].value)
    col_start, col_stop = int(xclips[0].value), n_cols - int(xclips[1].value)
    return data[row_start:row_stop, col_start:col_stop]
@u.quantity_input
def calculate_clipping(y: u.pix, x: u.pix):
    """Return ([ylow, yhigh], [xlow, xhigh]) clipping amounts, in pixels,
    large enough to remove the edges touched by any of the given shifts."""
    y_clips = [_lower_clip(y.value), _upper_clip(y.value)] * u.pix
    x_clips = [_lower_clip(x.value), _upper_clip(x.value)] * u.pix
    return y_clips, x_clips
def _upper_clip(z):
    """Ceiling of the largest non-negative entry of *z*; 0 when there is none."""
    nonneg = z >= 0
    if not np.any(nonneg):
        return 0
    return int(np.max(np.ceil(z[nonneg])))
def _lower_clip(z):
    """Ceiling of the magnitude of the most negative entry of *z*; 0 when no
    entry is <= 0."""
    nonpos = z <= 0
    if not np.any(nonpos):
        return 0
    return int(np.max(np.ceil(-z[nonpos])))
def match_template_to_layer(layer, template):
    """Cross-correlate *template* against *layer* (scikit-image match_template)."""
    correlation_surface = match_template(layer, template)
    return correlation_surface
def find_best_match_location(corr):
    """Sub-pixel (y, x) location, in pixels, of the peak of the correlation
    surface *corr*."""
    # Integer location of the global maximum (row, column).
    peak_y, peak_x = np.unravel_index(np.argmax(corr), corr.shape)
    # Up-to-3x3 neighbourhood around the peak for the parabolic refinement.
    # NOTE(review): the upper bounds use shape-1, so the very last row/column
    # can never enter the neighbourhood -- verify this is intentional.
    neighbourhood = corr[np.max([0, peak_y - 1]): np.min([peak_y + 2, corr.shape[0] - 1]),
                         np.max([0, peak_x - 1]): np.min([peak_x + 2, corr.shape[1] - 1])]
    dy, dx = get_correlation_shifts(neighbourhood)
    return dy + peak_y * u.pix, dx + peak_x * u.pix
def get_correlation_shifts(array):
    """Estimate the (y, x) peak position of a correlation patch of at most 3x3.

    Along each axis of length exactly 3 a parabola is fitted through the
    samples crossing the maximum; shorter axes fall back to the integer argmax.

    Raises
    ------
    ValueError
        If either dimension of *array* exceeds 3.
    """
    ny = array.shape[0]
    nx = array.shape[1]
    if nx > 3 or ny > 3:
        raise ValueError("Input array dimension should not be greater than 3 in any dimension.")
    # Integer position of the maximum (row, column).
    y_max, x_max = np.unravel_index(np.argmax(array), array.shape)
    y_location = parabolic_turning_point(array[:, x_max]) if ny == 3 else 1.0 * y_max
    x_location = parabolic_turning_point(array[y_max, :]) if nx == 3 else 1.0 * x_max
    return y_location * u.pix, x_location * u.pix
def parabolic_turning_point(y):
    """Vertex position of the parabola through three equally spaced samples *y*,
    relative to the middle sample (so the result lies in roughly [-1, 1])."""
    # Central first derivative and second difference of the three samples.
    half_slope = -0.5 * y.dot([-1, 0, 1])
    curvature = y.dot([1, -2, 1])
    return half_slope / curvature
def check_for_nonfinite_entries(layer_image, template_image):
    """Warn (without raising) when either image contains NaN or infinite values."""
    for label, image in (('layer', layer_image), ('template', template_image)):
        if np.all(np.isfinite(image)):
            continue
        warn_user('The ' + label + ' image has nonfinite entries. '
                  'This could cause errors when calculating shift between two '
                  'images. Please make sure there are no infinity or '
                  'Not a Number values. For instance, replacing them with a '
                  'local mean.')
@u.quantity_input
def apply_shifts(mc, yshift: u.pix, xshift: u.pix, clip=True, **kwargs):
    """Apply per-layer (y, x) pixel shifts to a MapSequence.

    Parameters
    ----------
    mc : `sunpy.map.MapSequence`
        Sequence whose layers are shifted; one entry of each shift array per layer.
    yshift, xshift : `~astropy.units.Quantity`
        Pixel shifts applied to each layer along y and x.
    clip : bool, optional
        If True, crop every output map by the largest shifts so no layer keeps
        pixels affected by the interpolation at the edges.
    kwargs :
        Forwarded to `scipy.ndimage.shift` (imported at module level as ``shift``).

    Returns
    -------
    `sunpy.map.MapSequence`
        New sequence built from the shifted (and optionally clipped) layers.
    """
    new_mc = []
    # Compute a common crop large enough for the biggest shift in each direction.
    if clip:
        yclips, xclips = calculate_clipping(-yshift, -xshift)
    for i, m in enumerate(mc):
        # Interpolate the layer onto the shifted grid (deepcopy keeps the input intact).
        shifted_data = shift(deepcopy(m.data), [yshift[i].value, xshift[i].value], **kwargs)
        new_meta = deepcopy(m.meta)
        if clip:
            shifted_data = clip_edges(shifted_data, yclips, xclips)
            new_meta['naxis1'] = shifted_data.shape[1]
            new_meta['naxis2'] = shifted_data.shape[0]
        # Adjust the (1-based FITS) reference pixel; offsets are taken relative
        # to the first map's shift -- presumably so layer 0 keeps its original
        # world coordinates.  TODO(review): confirm that intent.
        new_meta['crpix1'] = m.reference_pixel.x.value + 1 + xshift[i].value - xshift[0].value
        new_meta['crpix2'] = m.reference_pixel.y.value + 1 + yshift[i].value - yshift[0].value
        new_map = sunpy.map.Map(shifted_data, new_meta)
        new_mc.append(new_map)
    return sunpy.map.Map(new_mc, sequence=True)
def calculate_match_template_shift(mc, template=None, layer_index=0,
                                   func=_default_fmap_function):
    """Measure per-layer displacements of a MapSequence by template matching.

    Parameters
    ----------
    mc : `sunpy.map.MapSequence`
        Sequence whose layers are measured.
    template : None, `~sunpy.map.GenericMap` or `numpy.ndarray`, optional
        Template to locate in every layer; ``None`` uses the central half of
        the layer at ``layer_index``.
    layer_index : int, optional
        Reference layer; all returned shifts are relative to it.
    func : callable, optional
        Pre-processing applied to both template and layers before matching.

    Returns
    -------
    dict
        Keys ``'x'`` and ``'y'``: Quantity arrays of shifts in arcseconds,
        one entry per map, zero at ``layer_index``.
    """
    ny = mc.maps[layer_index].data.shape[0]
    nx = mc.maps[layer_index].data.shape[1]
    nt = len(mc.maps)
    # Default template: the central half of the reference layer.
    if template is None:
        tplate = mc.maps[layer_index].data[int(ny/4): int(3*ny/4),
                                           int(nx/4): int(3*nx/4)]
    elif isinstance(template, GenericMap):
        tplate = template.data
    elif isinstance(template, np.ndarray):
        tplate = template
    else:
        raise ValueError('Invalid template.')
    # Apply the same pre-processing to the template as to the layers.
    tplate = func(tplate)
    xshift_keep = np.zeros(nt) * u.pix
    yshift_keep = np.zeros_like(xshift_keep)
    xshift_arcseconds = np.zeros(nt) * u.arcsec
    yshift_arcseconds = np.zeros_like(xshift_arcseconds)
    # Locate the template in every layer (shifts in pixels).
    for i, m in enumerate(mc.maps):
        this_layer = func(m.data)
        yshift, xshift = calculate_shift(this_layer, tplate)
        yshift_keep[i] = yshift
        xshift_keep[i] = xshift
    # Make the shifts relative to the reference layer.
    yshift_keep = yshift_keep - yshift_keep[layer_index]
    xshift_keep = xshift_keep - xshift_keep[layer_index]
    # Convert pixels to arcseconds via each map's plate scale.
    for i, m in enumerate(mc.maps):
        xshift_arcseconds[i] = xshift_keep[i] * m.scale[0]
        yshift_arcseconds[i] = yshift_keep[i] * m.scale[1]
    return {"x": xshift_arcseconds, "y": yshift_arcseconds}
def mapsequence_coalign_by_match_template(mc, template=None, layer_index=0,
                                          func=_default_fmap_function, clip=True,
                                          shift=None, **kwargs):
    """Co-align a MapSequence via template matching (or caller-supplied shifts).

    Parameters
    ----------
    mc : `sunpy.map.MapSequence`
        Sequence to co-register.
    template : None, `~sunpy.map.GenericMap` or `numpy.ndarray`, optional
        Template matched in every layer; default is the centre of the layer
        at ``layer_index``.
    layer_index : int, optional
        Reference layer; all shifts are relative to it.
    func : callable, optional
        Pre-processing applied to the data before matching.
    clip : bool, optional
        If True, clip the map edges affected by the shifting.
    shift : dict, optional
        Pre-computed ``{'x': ..., 'y': ...}`` arcsecond shifts; when given,
        template matching is skipped.  NOTE: this parameter shadows the
        module-level ``scipy.ndimage.shift`` import inside this function.
    kwargs :
        Forwarded to `apply_shifts`.

    Returns
    -------
    `sunpy.map.MapSequence`
        The co-aligned sequence.
    """
    nt = len(mc.maps)
    xshift_keep = np.zeros(nt) * u.pix
    yshift_keep = np.zeros_like(xshift_keep)
    if shift is None:
        shifts = calculate_match_template_shift(mc, template=template,
                                                layer_index=layer_index,
                                                func=func)
        xshift_arcseconds = shifts['x']
        yshift_arcseconds = shifts['y']
    else:
        xshift_arcseconds = shift['x']
        yshift_arcseconds = shift['y']
    # Convert arcsecond shifts to pixels using each map's plate scale.
    for i, m in enumerate(mc):
        xshift_keep[i] = (xshift_arcseconds[i] / m.scale[0])
        yshift_keep[i] = (yshift_arcseconds[i] / m.scale[1])
    # Shift by the negative displacement to bring the layers into register.
    return apply_shifts(mc, -yshift_keep, -xshift_keep, clip=clip, **kwargs)
| true | true |
1c345be2960605a8a694db8b16371dd381f0a450 | 294 | py | Python | Books/GodOfPython/P13_Exception/Error7.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | 2 | 2020-12-05T07:42:55.000Z | 2021-01-06T23:23:18.000Z | Books/GodOfPython/P13_Exception/Error7.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | Books/GodOfPython/P13_Exception/Error7.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | dic = {'apple':2, 'banana':10, 'fineapple':5}
# Interactive lookup loop: print the stored count for each entered key.
while True:
    try:
        # BUGFIX: the prompt now sits inside the try, so a Ctrl-C pressed
        # while waiting for input raises KeyboardInterrupt *inside* the try
        # and cleanly breaks the loop (previously it crashed the script).
        data = input('>')
        # Single lookup; the original looked the key up twice.
        count = dic[data]
    except KeyError:
        print('There is no data.')
    except KeyboardInterrupt:
        break
    else:
        print('{} : {}개'.format(data, count))
    print('continue...')
while True:
data = input('>')
try:
dic[data]
except KeyError:
print('There is no data.')
except KeyboardInterrupt:
break
else:
print('{} : {}개'.format(data, dic[data]))
print('continue...') | true | true |
1c345ccde033993b4622a4caa582ed4f70588da8 | 2,065 | py | Python | pyusermanager/Token/token_activation_class.py | Aurvandill137/pyusermanager | 56bb16b3ed510eee70ff33ccafdc0a9b0fc673b0 | [
"MIT"
] | 3 | 2022-02-13T14:10:35.000Z | 2022-02-14T00:20:02.000Z | pyusermanager/Token/token_activation_class.py | Aurvandill137/pyusermanager | 56bb16b3ed510eee70ff33ccafdc0a9b0fc673b0 | [
"MIT"
] | null | null | null | pyusermanager/Token/token_activation_class.py | Aurvandill137/pyusermanager | 56bb16b3ed510eee70ff33ccafdc0a9b0fc673b0 | [
"MIT"
] | 1 | 2022-02-13T14:10:03.000Z | 2022-02-13T14:10:03.000Z | from pony.orm import db_session
from .. import custom_exceptions as PyUserExceptions
from .token_base_class import Token
import datetime
###########################
#
# Activation Token
#
###########################
class Activation(Token):
"""For Activation Tokens"""
def __init__(self, config, token=None, username=None):
self.config = config
self.type = self.config.db.ActivationCode
super().__init__(token,username)
def verify(self, ip=None):
with db_session:
try:
now = datetime.datetime.now()
valid_until = found_token.valid_until
found_token = self.type.get(token=self.token)
user = found_token.user
if now <= valid_until:
user.activated = True
found_token.delete()
self.token = None
self.username = user.username
return True
else:
return False
# no token given or no token found
except (ValueError, AttributeError):
return False
def create(self, valid_days=1):
valid_until = datetime.datetime.now() + datetime.timedelta(days=valid_days)
token_to_hash = f"{self.username}-activation;valid_until:{str(valid_until)}"
print(token_to_hash)
super().hash(token_to_hash)
with db_session:
try:
found_user = self.config.db.User[self.username]
except Exception:
raise PyUserExceptions.MissingUserException
token = self.config.db.ActivationCode.get(user=self.username)
# create new token if no token exists
if token is None:
self.config.db.ActivationCode(user=found_user.username, token=self.token, valid_until=valid_until)
# if token exists update it
else:
token.token = self.token
token.valid_until = valid_until
return True
| 32.777778 | 114 | 0.565617 | from pony.orm import db_session
from .. import custom_exceptions as PyUserExceptions
from .token_base_class import Token
import datetime
return True
else:
return False
except (ValueError, AttributeError):
return False
def create(self, valid_days=1):
valid_until = datetime.datetime.now() + datetime.timedelta(days=valid_days)
token_to_hash = f"{self.username}-activation;valid_until:{str(valid_until)}"
print(token_to_hash)
super().hash(token_to_hash)
with db_session:
try:
found_user = self.config.db.User[self.username]
except Exception:
raise PyUserExceptions.MissingUserException
token = self.config.db.ActivationCode.get(user=self.username)
if token is None:
self.config.db.ActivationCode(user=found_user.username, token=self.token, valid_until=valid_until)
else:
token.token = self.token
token.valid_until = valid_until
return True
| true | true |
1c345ce488eb0af0c8bfd2eb609b9a26bd994d93 | 7,157 | py | Python | syph_visualizer.py | michellejlin/tprk | 04758e40c7dd9060d4d613c52b650e250297cb7a | [
"MIT"
] | 1 | 2020-08-26T22:27:17.000Z | 2020-08-26T22:27:17.000Z | syph_visualizer.py | michellejlin/tprk | 04758e40c7dd9060d4d613c52b650e250297cb7a | [
"MIT"
] | null | null | null | syph_visualizer.py | michellejlin/tprk | 04758e40c7dd9060d4d613c52b650e250297cb7a | [
"MIT"
] | 3 | 2019-11-01T00:46:06.000Z | 2020-08-26T22:27:23.000Z | import numpy as np
import pandas as pd
import argparse
import sys
from bokeh.palettes import brewer
from bokeh import events
from bokeh.io import export_png, save, export_svgs
from bokeh.resources import CDN
from bokeh.embed import components, file_html, autoload_static
from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, Jitter, HoverTool, Slider, CustomJS, Label, WheelZoomTool, ResetTool, Button, TextInput
from bokeh.transform import jitter, factor_cmap
from bokeh.models.widgets import Panel, Tabs, Paragraph, Div, CheckboxGroup
from bokeh.layouts import column, layout, widgetbox, row
import subprocess
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Syph visualizer for single isolates')
parser.add_argument('final_data', help='data table from syph.py, in csv format.')
parser.add_argument('-t', '--title', help='name of sample to use as plot title.')
parser.add_argument('-o', '--output', help='output folder to export plots.')
parser.add_argument('-svg', action='store_true', help='Use this flag to output graphs in .svg format. '
'By default, plots will be in .html.')
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(0)
# Setting variables
# Reads data from syph.py output.
strain = args.title
table = pd.read_csv(args.final_data,index_col=False)
source = ColumnDataSource(table)
#TOOLTIPS = [
# ("Read", "@Read"),
# ("Count", "@Count"),
# ("Relative Frequency", "@RelativeFreq"+"%"),
#]
plot_title = strain
output_path = args.output
if args.svg:
fig1 = figure(x_range = (0,80), y_range = (0,105), plot_height = 330, plot_width = 540, title = strain, toolbar_location = None)
else:
fig1 = figure(x_range = (0,80), y_range = (0,105), plot_height = 1000, plot_width = 1600, title = strain, toolbar_location = None)
#TODO: Fix 0 on x-axis
fig1.xaxis.major_label_overrides = dict(zip([10,20,30,40,50,60,70,80], ["V1", "V2", "V3", "V4", "V5", "V6", "V7",""]))
fig1.xaxis.minor_tick_line_color = None
fig1.title.text_font_size = "18pt"
fig1.xaxis.major_label_text_font_size = '16pt'
fig1.yaxis.major_label_text_font_size = '16pt'
fig1.yaxis.axis_label = "Relative Frequency"
fig1.yaxis.axis_label_text_font_size = '18pt'
blues = brewer['Blues'][9]
bugn = brewer['BuGn'][9]
bupu = brewer['Purples'][8]
orrd = brewer['OrRd'][9]
gnbu = brewer['GnBu'][9]
purd = brewer['PuRd'][9]
ylgn = brewer['YlGn'][9]
v1_bottom = 0
v2_bottom = 0
v3_bottom = 0
v4_bottom = 0
v5_bottom = 0
v6_bottom = 0
v7_bottom = 0
region_totals = [0,0,0,0,0,0,0]
region_counts = [0,0,0,0,0,0,0]
for line in open(args.final_data):
region, read, relativefreq, count = line.split(',')
if (region == "V1"):
bar = fig1.vbar(x=10, width = 6, bottom = v1_bottom, top = [v1_bottom + float(relativefreq)],
color=blues[region_counts[0]%9])
hover = HoverTool(renderers = [bar], toggleable = False,
tooltips=[("Read", read), ("Count", count), ("Relative Frequency", relativefreq)])
fig1.add_tools(hover)
v1_bottom = v1_bottom + float(relativefreq)
region_totals[0] = region_totals[0] + int(count)
region_counts[0] = region_counts[0] + 1
elif (region == "V2"):
bar = fig1.vbar(x=20, width = 6, bottom = v2_bottom, top = [v2_bottom + float(relativefreq)],
color=bugn[region_counts[1]%9])
hover = HoverTool(renderers = [bar], toggleable = False,
tooltips=[("Read", read), ("Count", count), ("Relative Frequency", relativefreq)])
fig1.add_tools(hover)
v2_bottom = v2_bottom + float(relativefreq)
region_totals[1] = region_totals[1] + int(count)
region_counts[1] = region_counts[1] + 1
elif (region == "V3"):
bar = fig1.vbar(x=30, width = 6, bottom = v3_bottom, top = [v3_bottom + float(relativefreq)],
color=bupu[region_counts[2]%8])
hover = HoverTool(renderers = [bar], toggleable = False,
tooltips=[("Read", read), ("Count", count), ("Relative Frequency", relativefreq)])
fig1.add_tools(hover)
v3_bottom = v3_bottom + float(relativefreq)
region_totals[2] = region_totals[2] + int(count)
region_counts[2] = region_counts[2] + 1
elif (region == "V4"):
bar = fig1.vbar(x=40, width = 6, bottom = v4_bottom, top = [v4_bottom + float(relativefreq)],
color=orrd[region_counts[3]%9])
hover = HoverTool(renderers = [bar],
tooltips=[("Read", read), ("Count", count), ("Relative Frequency", relativefreq)])
fig1.add_tools(hover)
v4_bottom = v4_bottom + float(relativefreq)
region_totals[3] = region_totals[3] + int(count)
region_counts[3] = region_counts[3] + 1
elif (region == "V5"):
bar = fig1.vbar(x=50, width = 6, bottom = v5_bottom, top = [v5_bottom + float(relativefreq)],
color=gnbu[region_counts[4]%9])
hover = HoverTool(renderers = [bar], toggleable = False,
tooltips=[("Read", read), ("Count", count), ("Relative Frequency", relativefreq)])
fig1.add_tools(hover)
v5_bottom = v5_bottom + float(relativefreq)
region_totals[4] = region_totals[4] + int(count)
region_counts[4] = region_counts[4] + 1
elif (region == "V6"):
bar = fig1.vbar(x=60, width = 6, bottom = v6_bottom, top = [v6_bottom + float(relativefreq)],
color=ylgn[region_counts[5]%9])
hover = HoverTool(renderers = [bar], toggleable = False,
tooltips=[("Read", read), ("Count", count), ("Relative Frequency", relativefreq)])
fig1.add_tools(hover)
v6_bottom = v6_bottom + float(relativefreq)
region_totals[5] = region_totals[5] + int(count)
region_counts[5] = region_counts[5] + 1
elif (region == "V7"):
bar = fig1.vbar(x=70, width = 6, bottom = v7_bottom, top = [v7_bottom + float(relativefreq)],
color=purd[region_counts[6]%9])
hover = HoverTool(renderers = [bar], toggleable = False,
tooltips=[("Read", read), ("Count", count), ("Relative Frequency", relativefreq)])
fig1.add_tools(hover)
v7_bottom = v7_bottom + float(relativefreq)
region_totals[6] = region_totals[6] + int(count)
region_counts[6] = region_counts[6] + 1
for index, total in enumerate(region_totals):
label = Label(x = (index + 1) * 10, y = 101, text = str(region_counts[index]) + ", " + str(total), border_line_color = None, text_align = 'center', text_font_size = '11pt')
fig1.add_layout(label)
# fig2 = figure(plot_width = 1600, plot_height = 1000, title = "Fig 2", y_range = (0,105), x_range = table.Region.unique(), tooltips=TOOLTIPS)
# circle = fig2.circle(x=jitter('Region', width = 0.3, range=fig2.x_range), y='RelativeFreq', size = 15, alpha = 0.8, source=source, color='cornflowerblue')
# fig2.title.text_font_size = "18pt"
# fig2.xaxis.major_label_text_font_size = '12pt'
# fig2.yaxis.major_label_text_font_size = '12pt'
# fig2.yaxis.axis_label = "Relative Frequency"
if args.svg:
fig1.output_backend = "svg"
output_filename = (strain + "_RelativeFreqPlot.svg")
output_file(output_filename)
export_svgs(fig1, filename=output_filename)
else:
output_filename = (strain + "_RelativeFreqPlot.html")
output_file(output_filename, title=strain)
save(fig1)
# subprocess.call("mv " + output_filename + " " + args.output + "/" + output_filename, shell=True)
| 42.349112 | 174 | 0.691072 | import numpy as np
import pandas as pd
import argparse
import sys
from bokeh.palettes import brewer
from bokeh import events
from bokeh.io import export_png, save, export_svgs
from bokeh.resources import CDN
from bokeh.embed import components, file_html, autoload_static
from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, Jitter, HoverTool, Slider, CustomJS, Label, WheelZoomTool, ResetTool, Button, TextInput
from bokeh.transform import jitter, factor_cmap
from bokeh.models.widgets import Panel, Tabs, Paragraph, Div, CheckboxGroup
from bokeh.layouts import column, layout, widgetbox, row
import subprocess
import os
if __name__ == '__main__':
    # Build a stacked bar chart of read frequencies per variable region
    # (V1-V7) from the CSV produced by syph.py, exported as .html or .svg.
    parser = argparse.ArgumentParser(description='Syph visualizer for single isolates')
    parser.add_argument('final_data', help='data table from syph.py, in csv format.')
    parser.add_argument('-t', '--title', help='name of sample to use as plot title.')
    parser.add_argument('-o', '--output', help='output folder to export plots.')
    parser.add_argument('-svg', action='store_true', help='Use this flag to output graphs in .svg format. '
                        'By default, plots will be in .html.')
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse signals usage errors via SystemExit; show the full help
        # instead (narrowed from the original bare `except:`).
        parser.print_help()
        sys.exit(0)
    strain = args.title
    table = pd.read_csv(args.final_data, index_col=False)
    # NOTE(review): `source` and `plot_title` are currently unused; kept for
    # parity with the original script.
    source = ColumnDataSource(table)
    plot_title = strain
    output_path = args.output
    # Smaller canvas for static .svg export, larger for interactive .html.
    if args.svg:
        fig1 = figure(x_range=(0, 80), y_range=(0, 105), plot_height=330,
                      plot_width=540, title=strain, toolbar_location=None)
    else:
        fig1 = figure(x_range=(0, 80), y_range=(0, 105), plot_height=1000,
                      plot_width=1600, title=strain, toolbar_location=None)
    # Show region names instead of the numeric bar positions on the x axis.
    fig1.xaxis.major_label_overrides = dict(zip([10, 20, 30, 40, 50, 60, 70, 80],
                                                ["V1", "V2", "V3", "V4", "V5", "V6", "V7", ""]))
    fig1.xaxis.minor_tick_line_color = None
    fig1.title.text_font_size = "18pt"
    fig1.xaxis.major_label_text_font_size = '16pt'
    fig1.yaxis.major_label_text_font_size = '16pt'
    fig1.yaxis.axis_label = "Relative Frequency"
    fig1.yaxis.axis_label_text_font_size = '18pt'
    # One (x position, colour palette) pair per region; replaces seven
    # near-identical copy-pasted if/elif branches.
    region_styles = {
        "V1": (10, brewer['Blues'][9]),
        "V2": (20, brewer['BuGn'][9]),
        "V3": (30, brewer['Purples'][8]),
        "V4": (40, brewer['OrRd'][9]),
        "V5": (50, brewer['GnBu'][9]),
        "V6": (60, brewer['YlGn'][9]),
        "V7": (70, brewer['PuRd'][9]),
    }
    bottoms = {name: 0 for name in region_styles}        # running stack heights
    region_totals = {name: 0 for name in region_styles}  # summed read counts
    region_counts = {name: 0 for name in region_styles}  # distinct reads per region
    # One stacked vbar segment, with its own hover tooltip, per CSV row.
    # (`with` fixes the original's leaked file handle.)
    with open(args.final_data) as data_file:
        for line in data_file:
            region, read, relativefreq, count = line.split(',')
            if region not in region_styles:
                continue  # header line or unknown region
            x_pos, palette = region_styles[region]
            bar = fig1.vbar(x=x_pos, width=6, bottom=bottoms[region],
                            top=[bottoms[region] + float(relativefreq)],
                            color=palette[region_counts[region] % len(palette)])
            # toggleable=False everywhere: the original omitted it only for
            # the V4 branch, which looked like a copy-paste slip.
            hover = HoverTool(renderers=[bar], toggleable=False,
                              tooltips=[("Read", read), ("Count", count),
                                        ("Relative Frequency", relativefreq)])
            fig1.add_tools(hover)
            bottoms[region] += float(relativefreq)
            region_totals[region] += int(count)
            region_counts[region] += 1
    # Summary label above each stack: "<distinct reads>, <total count>".
    for index, name in enumerate(region_styles):
        label = Label(x=(index + 1) * 10, y=101,
                      text=str(region_counts[name]) + ", " + str(region_totals[name]),
                      border_line_color=None, text_align='center', text_font_size='11pt')
        fig1.add_layout(label)
    if args.svg:
        fig1.output_backend = "svg"
        output_filename = (strain + "_RelativeFreqPlot.svg")
        output_file(output_filename)
        export_svgs(fig1, filename=output_filename)
    else:
        output_filename = (strain + "_RelativeFreqPlot.html")
        output_file(output_filename, title=strain)
        save(fig1)
| true | true |
1c345d218b09b979ba38f61cf27f3d2c15376caf | 55 | py | Python | src/whylogs/src/whylabs/logs/_version.py | bernease/cli-demo-1 | 895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5 | [
"Apache-2.0"
] | null | null | null | src/whylogs/src/whylabs/logs/_version.py | bernease/cli-demo-1 | 895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5 | [
"Apache-2.0"
] | null | null | null | src/whylogs/src/whylabs/logs/_version.py | bernease/cli-demo-1 | 895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5 | [
"Apache-2.0"
] | null | null | null | """WhyLabs version number."""
# Canonical package version (PEP 440 pre-release: 0.0.2, beta 3).
__version__ = "0.0.2b3"
| 13.75 | 29 | 0.654545 |
# Canonical package version (PEP 440 pre-release: 0.0.2, beta 3).
__version__ = "0.0.2b3"
| true | true |
1c345db80ab641d6f598e92745e655a04bceda85 | 3,339 | py | Python | critiquebrainz/frontend/external/musicbrainz_db/event.py | shagun6/critiquebrainz | b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5 | [
"Apache-2.0"
] | null | null | null | critiquebrainz/frontend/external/musicbrainz_db/event.py | shagun6/critiquebrainz | b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5 | [
"Apache-2.0"
] | null | null | null | critiquebrainz/frontend/external/musicbrainz_db/event.py | shagun6/critiquebrainz | b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5 | [
"Apache-2.0"
] | 1 | 2020-02-06T19:26:10.000Z | 2020-02-06T19:26:10.000Z | from collections import defaultdict
from mbdata import models
from critiquebrainz.frontend.external.musicbrainz_db import mb_session, DEFAULT_CACHE_EXPIRATION
from critiquebrainz.frontend.external.musicbrainz_db.utils import get_entities_by_gids
from critiquebrainz.frontend.external.musicbrainz_db.includes import check_includes
from critiquebrainz.frontend.external.musicbrainz_db.serialize import to_dict_events
from critiquebrainz.frontend.external.musicbrainz_db.helpers import get_relationship_info
from brainzutils import cache
def get_event_by_id(mbid):
    """Get event with the MusicBrainz ID, using the cache when possible.

    Args:
        mbid (uuid): MBID (gid) of the event.

    Returns:
        Dictionary containing the event information.
    """
    cache_key = cache.gen_key(mbid)
    cached = cache.get(cache_key)
    if cached:
        return cached
    # Cache miss: fetch from the database and populate the cache.
    event = _get_event_by_id(mbid)
    cache.set(key=cache_key, val=event, time=DEFAULT_CACHE_EXPIRATION)
    return event
def _get_event_by_id(mbid):
    """Fetch one event, with the standard relationship includes, straight from the DB."""
    standard_includes = ['artist-rels', 'place-rels', 'series-rels', 'url-rels', 'release-group-rels']
    return fetch_multiple_events([mbid], includes=standard_includes).get(mbid)
def fetch_multiple_events(mbids, *, includes=None):
    """Get info related to multiple events using their MusicBrainz IDs.

    Args:
        mbids (list): List of MBIDs of events.
        includes (list): List of information to be included.

    Returns:
        Dictionary containing info of multiple events keyed by their mbid.
    """
    if includes is None:
        includes = []
    includes_data = defaultdict(dict)
    check_includes('event', includes)
    # Include flag -> relationship target type; one query per requested flag.
    # (DRY: replaces five copy-pasted get_relationship_info blocks; dict
    # insertion order preserves the original query order.)
    relationship_targets = {
        'artist-rels': 'artist',
        'place-rels': 'place',
        'series-rels': 'series',
        'url-rels': 'url',
        'release-group-rels': 'release_group',
    }
    with mb_session() as db:
        query = db.query(models.Event)
        events = get_entities_by_gids(
            query=query,
            entity_type='event',
            mbids=mbids,
        )
        event_ids = [event.id for event in events.values()]
        for include_flag, target_type in relationship_targets.items():
            if include_flag in includes:
                get_relationship_info(
                    db=db,
                    target_type=target_type,
                    source_type='event',
                    source_entity_ids=event_ids,
                    includes_data=includes_data,
                )
    return {str(mbid): to_dict_events(events[mbid], includes_data[events[mbid].id]) for mbid in mbids}
| 34.071429 | 102 | 0.614256 | from collections import defaultdict
from mbdata import models
from critiquebrainz.frontend.external.musicbrainz_db import mb_session, DEFAULT_CACHE_EXPIRATION
from critiquebrainz.frontend.external.musicbrainz_db.utils import get_entities_by_gids
from critiquebrainz.frontend.external.musicbrainz_db.includes import check_includes
from critiquebrainz.frontend.external.musicbrainz_db.serialize import to_dict_events
from critiquebrainz.frontend.external.musicbrainz_db.helpers import get_relationship_info
from brainzutils import cache
def get_event_by_id(mbid):
    """Get event with the MusicBrainz ID, using the cache when possible.

    Args:
        mbid (uuid): MBID (gid) of the event.

    Returns:
        Dictionary containing the event information.
    """
    key = cache.gen_key(mbid)
    event = cache.get(key)
    if not event:
        # Cache miss: fetch from the database, then populate the cache.
        event = _get_event_by_id(mbid)
        cache.set(key=key, val=event, time=DEFAULT_CACHE_EXPIRATION)
    return event
def _get_event_by_id(mbid):
    """Fetch one event, with the standard relationship includes, straight from the DB."""
    return fetch_multiple_events(
        [mbid],
        includes=['artist-rels', 'place-rels', 'series-rels', 'url-rels', 'release-group-rels'],
    ).get(mbid)
def fetch_multiple_events(mbids, *, includes=None):
    """Get info related to multiple events using their MusicBrainz IDs.

    Args:
        mbids (list): List of MBIDs of events.
        includes (list): List of information to be included.

    Returns:
        Dictionary containing info of multiple events keyed by their mbid.
    """
    if includes is None:
        includes = []
    includes_data = defaultdict(dict)
    check_includes('event', includes)
    # Include flag -> relationship target type; one query per requested flag.
    # (DRY: replaces five copy-pasted get_relationship_info blocks; dict
    # insertion order preserves the original query order.)
    relationship_targets = {
        'artist-rels': 'artist',
        'place-rels': 'place',
        'series-rels': 'series',
        'url-rels': 'url',
        'release-group-rels': 'release_group',
    }
    with mb_session() as db:
        query = db.query(models.Event)
        events = get_entities_by_gids(
            query=query,
            entity_type='event',
            mbids=mbids,
        )
        event_ids = [event.id for event in events.values()]
        for include_flag, target_type in relationship_targets.items():
            if include_flag in includes:
                get_relationship_info(
                    db=db,
                    target_type=target_type,
                    source_type='event',
                    source_entity_ids=event_ids,
                    includes_data=includes_data,
                )
    return {str(mbid): to_dict_events(events[mbid], includes_data[events[mbid].id]) for mbid in mbids}
| true | true |
1c345f209c6a268c29238a3acd4162bb45be0880 | 1,141 | py | Python | akamaiopen/cloudlets/matches/GeoMatches.py | lukaszczerpak/vpf-cli | fb572152fcce934cb4a1718a788b0b6402e83e83 | [
"MIT"
] | 2 | 2021-02-04T20:38:25.000Z | 2021-09-24T09:18:10.000Z | akamaiopen/cloudlets/matches/GeoMatches.py | lukaszczerpak/vpf-cli | fb572152fcce934cb4a1718a788b0b6402e83e83 | [
"MIT"
] | null | null | null | akamaiopen/cloudlets/matches/GeoMatches.py | lukaszczerpak/vpf-cli | fb572152fcce934cb4a1718a788b0b6402e83e83 | [
"MIT"
] | null | null | null | import abc
from enum import Enum
from akamaiopen.cloudlets.matches.Match import Match, MatchOperator
class CheckIPsType(Enum):
CONNECTING = 'CONNECTING_IP'
XFF = 'XFF_HEADERS'
BOTH = 'CONNECTING_IP XFF_HEADERS'
class GeoMatch(Match, metaclass=abc.ABCMeta):
def __init__(self, match_value=None, match_operator: MatchOperator = MatchOperator.EQUALS, negate=False, case_sensitive=False, checkips=CheckIPsType.BOTH):
super().__init__(match_value, match_operator, negate, case_sensitive)
self.checkips = checkips
def to_json(self):
o = super().to_json()
o['checkIPs'] = self.checkips.value
return o
def from_csv(self, value):
super().from_csv(value)
# region_info = record['region'].split(';', 1)
# region = region_info[0]
# checkips = CheckIPsType.BOTH if len(region_info) == 1 else CheckIPsType(region_info[1])
return self
class CountryCodeMatch(GeoMatch):
@staticmethod
def match_type():
return 'countrycode'
class RegionCodeMatch(GeoMatch):
@staticmethod
def match_type():
return 'regioncode'
| 26.534884 | 159 | 0.684487 | import abc
from enum import Enum
from akamaiopen.cloudlets.matches.Match import Match, MatchOperator
class CheckIPsType(Enum):
CONNECTING = 'CONNECTING_IP'
XFF = 'XFF_HEADERS'
BOTH = 'CONNECTING_IP XFF_HEADERS'
class GeoMatch(Match, metaclass=abc.ABCMeta):
def __init__(self, match_value=None, match_operator: MatchOperator = MatchOperator.EQUALS, negate=False, case_sensitive=False, checkips=CheckIPsType.BOTH):
super().__init__(match_value, match_operator, negate, case_sensitive)
self.checkips = checkips
def to_json(self):
o = super().to_json()
o['checkIPs'] = self.checkips.value
return o
def from_csv(self, value):
super().from_csv(value)
return self
class CountryCodeMatch(GeoMatch):
@staticmethod
def match_type():
return 'countrycode'
class RegionCodeMatch(GeoMatch):
@staticmethod
def match_type():
return 'regioncode'
| true | true |
1c345f360494b8f42d09ea0515866ef611bb87e4 | 3,860 | py | Python | data_layers/toi_reg_data_layer.py | ilikepistachio/TCNN_STCNN | 925939adfb009bee55add0a7ae9cf5db29c83871 | [
"MIT"
] | 33 | 2018-06-19T08:50:09.000Z | 2021-10-03T07:18:34.000Z | data_layers/toi_reg_data_layer.py | ilikepistachio/TCNN_STCNN | 925939adfb009bee55add0a7ae9cf5db29c83871 | [
"MIT"
] | 7 | 2018-07-20T06:31:39.000Z | 2020-06-30T03:39:52.000Z | data_layers/toi_reg_data_layer.py | ilikepistachio/TCNN_STCNN | 925939adfb009bee55add0a7ae9cf5db29c83871 | [
"MIT"
] | 14 | 2018-07-10T06:32:34.000Z | 2022-03-17T04:01:15.000Z | '''
The Caffe data layer for training label classifier.
This layer will parse pixel values and actionness labels to the network.
'''
import sys
sys.path.insert(0, '/home/rhou/caffe/python')
import caffe
from dataset.ucf_sports import UcfSports
import numpy as np
from utils.cython_bbox import bbox_overlaps
from utils.bbox_transform import bbox_transform
class RegDataLayer(caffe.Layer):
def setup(self, bottom, top):
self._batch_size = 1
self._depth = 8
self._height = 300
self._width = 400
self._num_anchors = 10
self.dataset = UcfSports('train', [self._height, self._width],
'/home/rhou/ucf_sports')
self.anchors = self.dataset.get_anchors()
def reshape(self, bottom, top):
# Clip data.
top[0].reshape(self._batch_size, 3, self._depth, self._height, self._width)
# Ground truth labels.
top[1].reshape(self._batch_size * 32, 40)
# Ground truth tois.
top[2].reshape(self._batch_size * 32, 5)
# Mask
top[3].reshape(self._batch_size * 32, 40)
# GT labels
#top[4].reshape(self._batch_size * 32)
def forward(self, bottom, top):
[clips, labels, tmp_bboxes, _] \
= self.dataset.next_batch(self._batch_size, self._depth)
batch_clip = clips.transpose((0, 4, 1, 2, 3))
batch_tois = np.empty((0, 5))
batch_targets = np.empty((0, 40))
batch_masks = np.empty((0, 40))
batch_labels = np.empty((0))
i = 0
for box in tmp_bboxes:
gt_bboxes = np.mean(box, axis=1) / 16
overlaps = bbox_overlaps(
np.ascontiguousarray(self.anchors, dtype=np.float),
np.ascontiguousarray(gt_bboxes, dtype=np.float))
max_overlaps = overlaps.max(axis=1)
gt_argmax_overlaps = overlaps.argmax(axis=0)
argmax_overlaps = overlaps.argmax(axis=1)
curr_labels = np.ones(self.anchors.shape[0]) * (-1)
curr_labels[max_overlaps < 0.3] = 0
curr_labels[max_overlaps >= 0.7] = labels[i]
curr_labels[gt_argmax_overlaps] = labels[i]
fg_inds = np.where(curr_labels > 0)[0]
num_fg = len(fg_inds)
if len(fg_inds) > 16:
fg_inds = np.random.choice(fg_inds, size=(16))
num_fg = 16
bg_inds = np.where(curr_labels == 0)[0]
num_bg = num_fg
bg_inds = np.random.choice(bg_inds, size=(num_bg))
inds = np.hstack((fg_inds, bg_inds))
curr_bboxes = np.hstack((np.ones((len(inds), 1)) * i,
self.anchors[inds]))
[curr_targets, masks] = _map(curr_labels[fg_inds], self.anchors[fg_inds],
gt_bboxes[argmax_overlaps[fg_inds]],
10, len(inds))
batch_tois = np.vstack((batch_tois, curr_bboxes))
batch_targets = np.vstack((batch_targets, curr_targets))
batch_masks = np.vstack((batch_masks, masks))
batch_labels = np.hstack((batch_labels, curr_labels[inds]))
i += 1
top[1].reshape(*batch_targets.shape)
top[2].reshape(*batch_tois.shape)
top[3].reshape(*batch_masks.shape)
#top[4].reshape(*batch_labels.shape)
top[0].data[...] = batch_clip.astype(np.float32, copy=False)
top[1].data[...] = batch_targets.astype(np.float32, copy=False)
top[2].data[...] = batch_tois.astype(np.float32, copy=False)
top[3].data[...] = batch_masks.astype(np.float32, copy=False)
#top[4].data[...] = batch_labels.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def _map(label, target, gt_bbox, l, n):
diff = bbox_transform(target, gt_bbox)
r_diff = np.zeros((n, l * 4))
mask = np.zeros((n, l * 4))
for i in xrange(len(label)):
curr_label = int(label[i] - 1)
r_diff[i, curr_label * 4 : curr_label * 4 + 4] = diff[i]
mask[i, curr_label * 4 : curr_label * 4 + 4] = 1
return r_diff, mask | 35.740741 | 79 | 0.637824 | import sys
sys.path.insert(0, '/home/rhou/caffe/python')
import caffe
from dataset.ucf_sports import UcfSports
import numpy as np
from utils.cython_bbox import bbox_overlaps
from utils.bbox_transform import bbox_transform
class RegDataLayer(caffe.Layer):
def setup(self, bottom, top):
self._batch_size = 1
self._depth = 8
self._height = 300
self._width = 400
self._num_anchors = 10
self.dataset = UcfSports('train', [self._height, self._width],
'/home/rhou/ucf_sports')
self.anchors = self.dataset.get_anchors()
def reshape(self, bottom, top):
top[0].reshape(self._batch_size, 3, self._depth, self._height, self._width)
top[1].reshape(self._batch_size * 32, 40)
top[2].reshape(self._batch_size * 32, 5)
top[3].reshape(self._batch_size * 32, 40)
def forward(self, bottom, top):
[clips, labels, tmp_bboxes, _] \
= self.dataset.next_batch(self._batch_size, self._depth)
batch_clip = clips.transpose((0, 4, 1, 2, 3))
batch_tois = np.empty((0, 5))
batch_targets = np.empty((0, 40))
batch_masks = np.empty((0, 40))
batch_labels = np.empty((0))
i = 0
for box in tmp_bboxes:
gt_bboxes = np.mean(box, axis=1) / 16
overlaps = bbox_overlaps(
np.ascontiguousarray(self.anchors, dtype=np.float),
np.ascontiguousarray(gt_bboxes, dtype=np.float))
max_overlaps = overlaps.max(axis=1)
gt_argmax_overlaps = overlaps.argmax(axis=0)
argmax_overlaps = overlaps.argmax(axis=1)
curr_labels = np.ones(self.anchors.shape[0]) * (-1)
curr_labels[max_overlaps < 0.3] = 0
curr_labels[max_overlaps >= 0.7] = labels[i]
curr_labels[gt_argmax_overlaps] = labels[i]
fg_inds = np.where(curr_labels > 0)[0]
num_fg = len(fg_inds)
if len(fg_inds) > 16:
fg_inds = np.random.choice(fg_inds, size=(16))
num_fg = 16
bg_inds = np.where(curr_labels == 0)[0]
num_bg = num_fg
bg_inds = np.random.choice(bg_inds, size=(num_bg))
inds = np.hstack((fg_inds, bg_inds))
curr_bboxes = np.hstack((np.ones((len(inds), 1)) * i,
self.anchors[inds]))
[curr_targets, masks] = _map(curr_labels[fg_inds], self.anchors[fg_inds],
gt_bboxes[argmax_overlaps[fg_inds]],
10, len(inds))
batch_tois = np.vstack((batch_tois, curr_bboxes))
batch_targets = np.vstack((batch_targets, curr_targets))
batch_masks = np.vstack((batch_masks, masks))
batch_labels = np.hstack((batch_labels, curr_labels[inds]))
i += 1
top[1].reshape(*batch_targets.shape)
top[2].reshape(*batch_tois.shape)
top[3].reshape(*batch_masks.shape)
top[0].data[...] = batch_clip.astype(np.float32, copy=False)
top[1].data[...] = batch_targets.astype(np.float32, copy=False)
top[2].data[...] = batch_tois.astype(np.float32, copy=False)
top[3].data[...] = batch_masks.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
pass
def _map(label, target, gt_bbox, l, n):
diff = bbox_transform(target, gt_bbox)
r_diff = np.zeros((n, l * 4))
mask = np.zeros((n, l * 4))
for i in xrange(len(label)):
curr_label = int(label[i] - 1)
r_diff[i, curr_label * 4 : curr_label * 4 + 4] = diff[i]
mask[i, curr_label * 4 : curr_label * 4 + 4] = 1
return r_diff, mask | true | true |
1c345f40e98a5bcddc9dc85d980806bc458085e0 | 5,428 | py | Python | setup.py | hellvix/django-ses | 90ec1147ed1ef7bbf5e29f4b20775768d1b270ec | [
"MIT"
] | 522 | 2015-05-06T00:43:47.000Z | 2022-03-31T21:22:02.000Z | setup.py | hellvix/django-ses | 90ec1147ed1ef7bbf5e29f4b20775768d1b270ec | [
"MIT"
] | 143 | 2015-04-24T14:02:18.000Z | 2022-03-31T23:58:26.000Z | setup.py | hellvix/django-ses | 90ec1147ed1ef7bbf5e29f4b20775768d1b270ec | [
"MIT"
] | 160 | 2015-04-30T20:37:21.000Z | 2022-03-20T03:11:44.000Z | import ast
import os
import re
import sys
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def read(*path):
return open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
*path)).read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build",
"./dist", "EGG-INFO", "*.egg-info"
]
# Copied from paste/util/finddata.py
def find_package_data(where=".", package="", exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True, show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern) or
fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
sys.stderr.write("Directory %s ignored by pattern %s" % (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, "__init__.py")) \
and not prefix:
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package,
only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern) or
fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
sys.stderr.write("File %s ignored by pattern %s" % (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
excluded_directories = standard_exclude_directories + ["example", "tests"]
package_data = find_package_data(exclude_directories=excluded_directories)
DESCRIPTION = "A Django email backend for Amazon's Simple Email Service"
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except Exception:
pass
# Parse version
_version_re = re.compile(r"VERSION\s+=\s+(.*)")
with open("django_ses/__init__.py", "rb") as f:
version = ".".join(
map(str, ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)))
)
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
setup(
name='django-ses',
version=version,
packages=find_packages(exclude=['example', 'tests']),
package_data=package_data,
python_requires='>=3.5',
author='Harry Marr',
author_email='harry@hmarr.com',
url='https://github.com/django-ses/django-ses',
license='MIT',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=CLASSIFIERS,
install_requires=["boto3>=1.0.0", "pytz>=2016.10", "future>=0.16.0", "django>=2.2"],
include_package_data=True,
extras_require={
'bounce': ['requests<3', 'M2Crypto'],
'events': ['requests<3', 'M2Crypto'],
},
)
| 34.138365 | 98 | 0.577561 | import ast
import os
import re
import sys
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def read(*path):
return open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
*path)).read()
standard_exclude = ["*.py", "*.pyc", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build",
"./dist", "EGG-INFO", "*.egg-info"
]
def find_package_data(where=".", package="", exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True, show_ignored=False):
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern) or
fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
sys.stderr.write("Directory %s ignored by pattern %s" % (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, "__init__.py")) \
and not prefix:
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package,
only_in_packages))
elif package or not only_in_packages:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern) or
fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
sys.stderr.write("File %s ignored by pattern %s" % (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
excluded_directories = standard_exclude_directories + ["example", "tests"]
package_data = find_package_data(exclude_directories=excluded_directories)
DESCRIPTION = "A Django email backend for Amazon's Simple Email Service"
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except Exception:
pass
# Parse version
_version_re = re.compile(r"VERSION\s+=\s+(.*)")
with open("django_ses/__init__.py", "rb") as f:
version = ".".join(
map(str, ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)))
)
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
setup(
name='django-ses',
version=version,
packages=find_packages(exclude=['example', 'tests']),
package_data=package_data,
python_requires='>=3.5',
author='Harry Marr',
author_email='harry@hmarr.com',
url='https://github.com/django-ses/django-ses',
license='MIT',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=CLASSIFIERS,
install_requires=["boto3>=1.0.0", "pytz>=2016.10", "future>=0.16.0", "django>=2.2"],
include_package_data=True,
extras_require={
'bounce': ['requests<3', 'M2Crypto'],
'events': ['requests<3', 'M2Crypto'],
},
)
| true | true |
1c34606f23133870724c0dd1f7e3327f5acd55d9 | 956 | py | Python | NFL_Projects/app.py | lpdabest1/NFL_Scraper_App | a396a5071257e43bca6f681f393f249a05f4bb1f | [
"MIT"
] | null | null | null | NFL_Projects/app.py | lpdabest1/NFL_Scraper_App | a396a5071257e43bca6f681f393f249a05f4bb1f | [
"MIT"
] | 1 | 2022-02-10T03:42:23.000Z | 2022-02-10T03:42:23.000Z | NFL_Projects/app.py | lpdabest1/NFL_Scraper_App | a396a5071257e43bca6f681f393f249a05f4bb1f | [
"MIT"
] | 1 | 2021-09-27T20:37:27.000Z | 2021-09-27T20:37:27.000Z | import Passing_Stats
import Rushing_Stats
import Receiving_Stats
import Defensive_Player_Stats
import Kicking_Stats
import Kick_Returning_Stats
import Scoring_Stats
import Fantasy_Stats
import streamlit as st
import streamlit.components.v1 as components
st.set_page_config(
page_title="NFL Web Scraper App",
layout="centered",
initial_sidebar_state="expanded",
)
Pages = {
"Passing Stats": Passing_Stats,
"Rushing Stats": Rushing_Stats,
"Receiving Stats": Receiving_Stats,
"Defensive Stats": Defensive_Player_Stats,
"Kicking Stats": Kicking_Stats,
"Kick/Punt Returner Stats": Kick_Returning_Stats,
"Player Scoring Stats": Scoring_Stats,
"Fantasy Stats": Fantasy_Stats
}
st.title('Pro Football Web Scraper')
st.sidebar.title('Pro Football Statistics')
selection = st.sidebar.selectbox("Select One Of The Following Individual Categories",list(Pages.keys()))
page = Pages[selection]
if page:
page.app()
| 23.9 | 104 | 0.766736 | import Passing_Stats
import Rushing_Stats
import Receiving_Stats
import Defensive_Player_Stats
import Kicking_Stats
import Kick_Returning_Stats
import Scoring_Stats
import Fantasy_Stats
import streamlit as st
import streamlit.components.v1 as components
st.set_page_config(
page_title="NFL Web Scraper App",
layout="centered",
initial_sidebar_state="expanded",
)
Pages = {
"Passing Stats": Passing_Stats,
"Rushing Stats": Rushing_Stats,
"Receiving Stats": Receiving_Stats,
"Defensive Stats": Defensive_Player_Stats,
"Kicking Stats": Kicking_Stats,
"Kick/Punt Returner Stats": Kick_Returning_Stats,
"Player Scoring Stats": Scoring_Stats,
"Fantasy Stats": Fantasy_Stats
}
st.title('Pro Football Web Scraper')
st.sidebar.title('Pro Football Statistics')
selection = st.sidebar.selectbox("Select One Of The Following Individual Categories",list(Pages.keys()))
page = Pages[selection]
if page:
page.app()
| true | true |
1c3462783ef0c7362128a92c7bda56697b39a2e8 | 722 | py | Python | src/genie/libs/parser/bigip/get_ltm_data_groupinternal.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/bigip/get_ltm_data_groupinternal.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/bigip/get_ltm_data_groupinternal.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | # Global Imports
import json
from collections import defaultdict
# Metaparser
from genie.metaparser import MetaParser
# =============================================
# Collection for '/mgmt/tm/ltm/data-group/internal' resources
# =============================================
class LtmDatagroupInternalSchema(MetaParser):
schema = {}
class LtmDatagroupInternal(LtmDatagroupInternalSchema):
""" To F5 resource for /mgmt/tm/ltm/data-group/internal
"""
cli_command = "/mgmt/tm/ltm/data-group/internal"
def rest(self):
response = self.device.get(self.cli_command)
response_json = response.json()
if not response_json:
return {}
return response_json
| 21.235294 | 61 | 0.608033 |
import json
from collections import defaultdict
from genie.metaparser import MetaParser
class LtmDatagroupInternalSchema(MetaParser):
schema = {}
class LtmDatagroupInternal(LtmDatagroupInternalSchema):
cli_command = "/mgmt/tm/ltm/data-group/internal"
def rest(self):
response = self.device.get(self.cli_command)
response_json = response.json()
if not response_json:
return {}
return response_json
| true | true |
1c3463602eca234c2e4b004621182211828f78ee | 948,897 | py | Python | test/test_nn.py | VincentLeeMax/pytorch | 5e6f29661254e5ebc97fac3d829e5d455cda5864 | [
"Intel"
] | 1 | 2022-02-05T18:15:29.000Z | 2022-02-05T18:15:29.000Z | test/test_nn.py | VincentLeeMax/pytorch | 5e6f29661254e5ebc97fac3d829e5d455cda5864 | [
"Intel"
] | 1 | 2022-02-03T12:43:23.000Z | 2022-02-03T12:47:53.000Z | test/test_nn.py | VincentLeeMax/pytorch | 5e6f29661254e5ebc97fac3d829e5d455cda5864 | [
"Intel"
] | null | null | null | # Owner(s): ["module: nn"]
import math
import random
import string
import unittest
import io
import unittest.mock as mock
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import repeat, product
from functools import reduce, partial
from operator import mul
from collections import OrderedDict
import torch
# TODO: remove this global setting
# NN tests use double as the default dtype
torch.set_default_dtype(torch.double)
from torch._six import inf, nan
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
import torch.nn.utils.parametrize as parametrize
import torch.nn.utils.prune as prune
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn import Parameter
from torch.nn.parameter import UninitializedParameter, UninitializedBuffer
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_fp_dtypes, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
skipIfRocmVersionLessThan, skipIfNotMiopenSuggestNHWC, TEST_NUMPY, TEST_SCIPY, TEST_WITH_ROCM, download_file, \
get_function_arglist, load_tests, \
suppress_warnings, TemporaryFileName, TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, \
ctcloss_reference, new_module_tests, single_batch_reference_fn
from torch.testing._internal.common_device_type import expectedFailureXLA, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfNoCudnn, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, skipCUDAIfRocmVersionLessThan, skipCUDAIfNotMiopenSuggestNHWC, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types, \
disableMkldnn, skipCPUIfNoMkldnn, disablecuDNN, skipCUDAIfMiopen, skipCUDAIfNoMiopen
from torch.nn import MultiheadAttention
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if TEST_SCIPY:
from scipy import stats
import scipy.ndimage
if TEST_NUMPY:
import numpy as np
# WARNING: If you add a new top-level test case to this file, you MUST
# update test/run_test.py to list it, otherwise it will NOT be run in
# CI.
class PackedSequenceTest(TestCase):
    """Tests for ``torch.nn.utils.rnn.PackedSequence`` and the pack/pad
    helper functions: type casts, device/dtype moves and ``total_length``
    handling of ``pad_packed_sequence``.
    """

    # Maps a tensor type-string to a pair of
    # (tensor constructor, name of the corresponding cast method, e.g. ``double``).
    # HalfTensor is intentionally absent -- see the comment inside the dict.
    _type_by_name = {
        'torch.DoubleTensor': (torch.DoubleTensor, 'double'),
        'torch.FloatTensor': (torch.FloatTensor, 'float'),
        # We leave out `'torch.HalfTensor': (torch.HalfTensor, 'half'),`
        # because of an error in `pad_packed_sequence`
        # > AttributeError: 'torch.HalfTensor' object has no attribute 'fill_'
        'torch.LongTensor': (torch.LongTensor, 'long'),
        'torch.IntTensor': (torch.IntTensor, 'int'),
        'torch.ShortTensor': (torch.ShortTensor, 'short'),
        'torch.CharTensor': (torch.CharTensor, 'char'),
        'torch.ByteTensor': (torch.ByteTensor, 'byte'),
    }
    def __init__(self, *args, **kwargs):
        super(PackedSequenceTest, self).__init__(*args, **kwargs)
        self.batch_size = 5  # number of sequences per generated batch
        self.max_length = 6  # upper bound (inclusive) on generated sequence length
    def _ordered_sequence(self, tensor_type):
        """Create ordered list of random sequences.

        Returns ``batch_size`` 1-D tensors of random length in
        ``[1, max_length]``, sorted by decreasing length.
        """
        seqs = [tensor_type(random.randint(1, self.max_length))
                for _ in range(self.batch_size)]
        # Fill in-place with random values in the dtype's representable range.
        if tensor_type == torch.ByteTensor:
            seqs = [s.random_(0, 256) for s in seqs]
        else:
            seqs = [s.random_(-128, 128) for s in seqs]
        ordered = sorted(seqs, key=len, reverse=True)
        return ordered
    def _padded_sequence(self, tensor_type):
        """Create Tensor of random padded sequences.

        Returns a ``(padded_tensor, lengths)`` pair, with ``lengths``
        in decreasing order (matching :meth:`_ordered_sequence`).
        """
        ordered = self._ordered_sequence(tensor_type)
        lengths = [len(i) for i in ordered]
        padded_tensor = rnn_utils.pad_sequence(ordered)
        return padded_tensor, lengths
    def test_type_casts(self):
        """Test type casting of `PackedSequence` against type casting of tensor"""
        for _, (input_type, _) in self._type_by_name.items():
            for expected_type_str, (_, cast_str) in self._type_by_name.items():
                for enforce_sorted in [True, False]:
                    padded, lengths = self._padded_sequence(input_type)
                    packed = rnn_utils.pack_padded_sequence(
                        padded, lengths, enforce_sorted=enforce_sorted)
                    # Apply cast to `PackedSequence` instance and unpack
                    masked = getattr(packed, cast_str)()
                    unpacked, lengths_out = rnn_utils.pad_packed_sequence(masked)
                    self.assertEqual(unpacked.type(), expected_type_str)
    def test_wrong_order(self):
        # Packing sequences that are not sorted by decreasing length must
        # fail when enforce_sorted=True.
        a = torch.ones(25, 300)
        b = torch.ones(22, 300)
        b_a = rnn_utils.pad_sequence([b, a])
        self.assertRaises(
            RuntimeError,
            lambda: rnn_utils.pack_padded_sequence(b_a, [22, 25], enforce_sorted=True))
    def test_total_length(self):
        """``pad_packed_sequence(..., total_length=N)`` validates ``N`` and
        pads the output along the time dimension up to ``N``."""
        padded, lengths = self._padded_sequence(torch.FloatTensor)
        max_length = max(lengths)
        packed = rnn_utils.pack_padded_sequence(padded, lengths)
        # test ValueError if total_length < max_length
        for total_length in (-1, 0, max_length - 1):
            for batch_first in (True, False):
                def err_fn():
                    rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                  total_length=total_length)
                self.assertRaisesRegex(ValueError,
                                       r'Expected total_length to be at least the '
                                       r'length of the longest sequence in input',
                                       err_fn)
        # test that pad_packed_sequence returns results of correct length
        for batch_first in (True, False):
            no_extra_pad, _ = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
            for total_length_delta in (0, 1, 8):
                total_length = max_length + total_length_delta
                unpacked, lengths_out = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                                      total_length=total_length)
                self.assertEqual(lengths, lengths_out)
                self.assertEqual(unpacked.size(1 if batch_first else 0), total_length)
                # Reference output: the un-extended result plus explicit zero
                # padding appended along the time dimension.
                if total_length_delta == 0:
                    ref_output = no_extra_pad
                elif batch_first:
                    extra_pad = no_extra_pad.new_zeros(self.batch_size, total_length_delta)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 1)
                else:
                    extra_pad = no_extra_pad.new_zeros(total_length_delta, self.batch_size)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 0)
                self.assertEqual(unpacked, ref_output)
    def test_to(self):
        """``PackedSequence.to``/``cpu``/``cuda`` return ``self`` when the
        requested move is a no-op, and an equal copy otherwise."""
        for enforce_sorted in (True, False):
            padded, lengths = self._padded_sequence(torch.IntTensor)
            a = rnn_utils.pack_padded_sequence(
                padded, lengths, enforce_sorted=enforce_sorted).cpu()
            # No-op conversions must return the very same object (assertIs).
            self.assertIs(a, a.to('cpu'))
            self.assertIs(a, a.cpu())
            self.assertIs(a, a.to('cpu', dtype=torch.int32))
            self.assertEqual(a.long(), a.to(torch.int64))
            if torch.cuda.is_available():
                for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                    b = a.cuda(device=cuda)
                    self.assertIs(b, b.to(cuda))
                    self.assertIs(b, b.cuda())
                    self.assertEqual(a, b.to('cpu'))
                    self.assertEqual(b, a.to(cuda))
                    self.assertEqual(a, b.to('cpu', dtype=torch.int32))
                    self.assertIs(b, b.to(dtype=torch.int32))
                    self.assertEqual(b.long(), b.to(dtype=torch.int64))
    def test_to_memory_format(self):
        # Module.to(memory_format=...) should convert the module's 4-D
        # parameters to channels_last layout.
        m = torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2, bias=True)
        m = m.to(memory_format=torch.channels_last)
        for param in m.parameters():
            if param.dim() == 4:
                self.assertTrue(param.is_contiguous(memory_format=torch.channels_last))
class TestAvgPool(TestCase):
def _sum_pool2d(self, x, kernel_size):
windows = torch.nn.functional.unfold(x, kernel_size=kernel_size, stride=kernel_size)
return torch.sum(windows, dim=1)
def _sum_pool3d(self, x, kernel_size):
# Because unfold does not support 3D sliding window we will split tensor to multiple tensors and calculate sum
h = kernel_size[0]
splited_x = [t.sum(0) for t in x.split(h) if t.size(0) == h]
# sum_pool2d assumes tensor in (1, 1, n, m) view, so unsqueeze two times
splited_x = [self._sum_pool2d(t.unsqueeze(0).unsqueeze(0), kernel_size[1:]) for t in splited_x]
joined_x = torch.cat(splited_x)
return joined_x.view(1, joined_x.numel())
def _avg_pool2d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool2d(x, kernel_size) / size
def _avg_pool3d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool3d(x, kernel_size) / size
    def test_doubletensor_avg_pool2d(self):
        """F.avg_pool2d matches the reference implementation for every window size."""
        n, m = 5, 8
        input = torch.rand(1, 1, n, m)
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                actual = torch.nn.functional.avg_pool2d(input[0], (i, j))
                actual = actual.view(1, actual.numel())
                expected = self._avg_pool2d(input, (i, j))
                self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_avg_pool2d_with_zero_divisor(self):
        """divisor_override=0 must be rejected with a RuntimeError."""
        self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
                               lambda: F.avg_pool2d(torch.zeros(3, 3, 3), (2, 2), divisor_override=0))
    def test_doubletensor_avg_pool2d_with_divisor(self):
        """avg_pool2d with divisor_override equals sum-pool divided by the override."""
        n, m = 3, 3
        input = torch.rand(1, 1, n, m)
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                for divisor in [1, 7, i * j]:
                    actual = F.avg_pool2d(input[0], (i, j), divisor_override=divisor)
                    actual = actual.view(1, actual.numel())
                    expected = self._sum_pool2d(input, (i, j)) / divisor
                    self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_doubletensor_avg_pool3d(self):
        """F.avg_pool3d matches the reference implementation for every window size."""
        h, w, d = 5, 6, 7
        input = torch.rand(h, w, d)
        for i in range(1, h + 1):
            for j in range(1, w + 1):
                for k in range(1, d + 1):
                    actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k))
                    actual = actual.view(1, actual.numel())
                    expected = self._avg_pool3d(input, (i, j, k))
                    self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_doubletensor_avg_pool3d_with_divisor(self):
        """avg_pool3d with divisor_override equals sum-pool divided by the override."""
        h, w, d = 6, 5, 7
        input = torch.rand(h, w, d)
        for i in range(1, h + 1):
            for j in range(1, w + 1):
                for k in range(1, d + 1):
                    for divisor in [1, 7, i * j]:
                        actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k), divisor_override=divisor)
                        actual = actual.view(1, actual.numel())
                        expected = self._sum_pool3d(input, (i, j, k)) / divisor
                        self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_avg_pool3d_with_zero_divisor(self):
        """divisor_override=0 must be rejected with a RuntimeError."""
        self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
                               lambda: F.avg_pool3d(torch.zeros(3, 3, 3, 3), (2, 2, 2), divisor_override=0))
    def test_avg_pool1d_ceil_mode(self):
        """ceil_mode avg_pool1d must not produce NaNs from empty/padded windows."""
        # Regression test for gh-36977
        x = 10 * torch.randn((1, 16, 4))
        y = torch.nn.functional.avg_pool1d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool1d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
            self.assertTrue(not torch.isnan(y).any())
    def test_avg_pool2d_ceil_mode(self):
        """ceil_mode avg_pool2d must not produce NaNs from empty/padded windows."""
        # Regression test for gh-36977
        x = 10 * torch.randn((1, 16, 4, 4))
        y = torch.nn.functional.avg_pool2d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
            padding=(0, 1), stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool2d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
                padding=(0, 1), stride=2)
            self.assertTrue(not torch.isnan(y).any())
    def test_avg_pool3d_ceil_mode(self):
        """ceil_mode avg_pool3d must not produce NaNs from empty/padded windows."""
        # Regression test for gh-36977
        x = 10 * torch.randn((1, 16, 4, 4, 4))
        y = torch.nn.functional.avg_pool3d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool3d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
            self.assertTrue(not torch.isnan(y).any())
class TestNN(NNTestCase):
    """Tests for core torch.nn.Module behavior: hooks, parameters, buffers, serialization."""
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
    def _forward(self, module, input: _TensorOrTensors):
        """Run `module` on `input` under a frozen RNG state for reproducibility."""
        with freeze_rng_state():
            if isinstance(input, tuple):
                return module(*input)
            else:
                return module(input)
    def _backward(self, module, input: _TensorOrTensors, output, grad_output, create_graph=False):
        """Backprop `grad_output` through `output`; return the gradient(s) w.r.t. `input`."""
        output.backward(grad_output, retain_graph=True, create_graph=create_graph)
        if isinstance(input, tuple):
            return tuple(i.grad.data if i.grad is not None else None for i in input)
        else:
            return input.grad.data if input.grad is not None else None
    def _forward_criterion(self, criterion, input, target, extra_args=None):
        """Evaluate a loss `criterion` on `input`/`target`, forwarding any extra args."""
        if extra_args is None:
            extra_args = tuple()
        if isinstance(input, tuple):
            args = input + (target,) + extra_args
            output = criterion(*args)
        else:
            output = criterion(input, target, *extra_args)
        return output
    def _backward_criterion(self, criterion, input, output, target, gradOutput=None, extra_args=None):
        """Backprop through a loss `criterion`; return the gradient(s) w.r.t. `input`."""
        if extra_args is None:
            extra_args = tuple()
        input_tuple = input if isinstance(input, tuple) else (input,)
        output_tuple = output if isinstance(output, tuple) else (output,)
        for i in input_tuple:
            # Clear stale gradients so the returned grads come from this pass only.
            if i.grad is not None:
                i.grad.data.zero_()
        args = input_tuple + (target,) + extra_args
        if gradOutput is None:
            gradOutput = torch.ones(())
        criterion(*args).backward(gradOutput.to(output_tuple[0]))
        if isinstance(input, tuple):
            return tuple(i.grad.data for i in input)
        else:
            return input.grad.data
    def _zero_grad_parameters(self, module):
        """Zero and detach the gradients of all of `module`'s parameters."""
        for p in module.parameters():
            if p.grad is not None:
                with torch.no_grad():
                    p.grad.zero_()
                p.grad.detach_()
    def _get_parameters(self, module):
        """Return parallel lists (parameters, gradients) for `module`."""
        params = []
        d_params = []
        for p in module.parameters():
            params.append(p)
            d_params.append(p.grad)
        return params, d_params
    def _create_basic_net(self):
        """Fixture builder: returns (Layer, Net containing a Layer, Sequential of the Net)."""
        class Layer(nn.Module):
            def __init__(self):
                super(Layer, self).__init__()
                self.layer_dummy_param = Parameter(torch.empty(3, 5))
                self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7))
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.l1 = Layer()
                self.dummy_param = Parameter(torch.empty(3, 5))
                self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1))
        l = Layer()
        n = Net()
        s = nn.Sequential(n, n)
        return l, n, s
def test_requires_grad_(self):
m = self._create_basic_net()[-1]
assert len(list(m.buffers())) > 0, 'invalid test'
assert all(not b.requires_grad for b in m.buffers()) > 0, 'invalid test'
assert len(list(m.parameters())) > 0, 'invalid test'
assert all(p.requires_grad for p in m.parameters()) > 0, 'invalid test'
for requires_grad in (False, True):
self.assertIs(m.requires_grad_(requires_grad), m)
for p in m.parameters():
self.assertEqual(p.requires_grad, requires_grad)
for b in m.buffers():
self.assertFalse(b.requires_grad)
    def test_module_backcompat(self):
        """A module serialized by an older PyTorch still loads and runs forward."""
        from torch.serialization import SourceChangeWarning
        path = download_file('https://download.pytorch.org/test_data/linear.pt')
        with warnings.catch_warnings():
            # The pickled source no longer matches the current class source.
            warnings.simplefilter('ignore', SourceChangeWarning)
            m = torch.load(path)
        input = torch.randn(2, 3, dtype=torch.float)
        self.assertEqual(m(input).size(), (2, 5))
    def test_conv_backcompat(self):
        """A Conv2d pickled by PyTorch 1.0.1 under Python 2 still deserializes and runs."""
        from torch.serialization import SourceChangeWarning
        # This file was generated by running on PyTorch 1.0.1 on Python 2:
        #
        # import torch
        # from torch import nn
        # m = nn.Conv2d(1, 1, 1)
        # torch.save(m, 'legacy_conv2d.pt')
        #
        # NB: This Pickle also contains some Unicode data!
        path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', SourceChangeWarning)
            m = torch.load(path, encoding='utf-8')
        input = torch.randn((1, 1, 1, 1), dtype=torch.float)
        self.assertEqual(m(input).size(), (1, 1, 1, 1))
    def test_share_memory(self):
        """share_memory() moves every parameter and buffer into shared storage."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.p = nn.Parameter(torch.eye(5))
                self.par = nn.ParameterList()
                self.par.append(nn.Parameter(torch.randn(10)))
            def forward(self, inp):
                # NB: dead code
                return inp.clone()
        net = Net()
        # Freshly-constructed tensors must not be in shared memory.
        for p in net.parameters():
            self.assertFalse(p.storage().is_shared())
        for b in net.buffers():
            self.assertFalse(b.storage().is_shared())
        net.share_memory()
        # After share_memory(), every storage must be shared.
        for p in net.parameters():
            self.assertTrue(p.storage().is_shared())
        for b in net.buffers():
            self.assertTrue(b.storage().is_shared())
    def _test_hooks(self, backward_register_fn):
        """Forward/backward hooks fire once per call, stack, and can be removed.

        `backward_register_fn` names the backward registration method to test
        ('register_backward_hook' or 'register_full_backward_hook').
        """
        module = nn.Sigmoid()
        input = torch.ones(5, 5, requires_grad=True)
        # Each hook bumps a counter by its own increment; the expected totals below
        # track exactly which hooks were registered when each call ran.
        counter = {
            'forwards': 0,
            'backwards': 0
        }
        def fw_hook(inc, h_module, input, output):
            self.assertIsInstance(input, tuple)
            self.assertTrue(isinstance(output, torch.Tensor))
            self.assertTrue(h_module is module)
            self.assertEqual(input[0], torch.ones(5, 5))
            # sigmoid(1) == 1 / (1 + e^-1)
            self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
            counter['forwards'] += inc
        def bw_hook(inc, h_module, grad_input, grad_output):
            self.assertIsInstance(grad_input, tuple)
            self.assertIsInstance(grad_output, tuple)
            self.assertTrue(h_module is module)
            self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
            counter['backwards'] += inc
        test_fwd = module.register_forward_hook(lambda *args: fw_hook(1, *args))
        module(input)
        module(input)
        self.assertEqual(counter['forwards'], 2)
        self.assertEqual(counter['backwards'], 0)
        test_bwd = getattr(module, backward_register_fn)(
            lambda *args: bw_hook(1, *args))
        output = module(input)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 0)
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 1)
        # The backward hook re-fires on a second backward through the same graph.
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 2)
        # A second forward hook (increment 2) stacks with the first: +3 per call.
        test2_fwd = module.register_forward_hook(lambda *args: fw_hook(2, *args))
        output = module(input)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 2)
        test2_bwd = getattr(module, backward_register_fn)(lambda *args: bw_hook(2, *args))
        module(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 9)
        self.assertEqual(counter['backwards'], 5)
        # Removing a hook handle stops only that hook from firing.
        test2_bwd.remove()
        module(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 12)
        self.assertEqual(counter['backwards'], 6)
        test2_fwd.remove()
        module(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 13)
        self.assertEqual(counter['backwards'], 7)
        test_fwd.remove()
        test_bwd.remove()
    def test_hooks(self):
        """Exercise hook behavior for both backward-hook registration APIs."""
        self._test_hooks("register_backward_hook")
        self._test_hooks("register_full_backward_hook")
    def test_hook_cpp(self):
        """Full backward hooks work on modules backed by C++ autograd functions."""
        bn = nn.BatchNorm1d(5)
        def hook(module, grad_inputs, grad_outputs):
            self.assertEqual(len(grad_inputs), 1)
            self.assertEqual(len(grad_outputs), 1)
            self.assertEqual(module, bn)
        bn.register_full_backward_hook(hook)
        output = bn(torch.randn(5, 5, requires_grad=True))
        output.sum().backward()
    def test_hook_invalid_outputs(self):
        """A backward hook returning the wrong number of gradients raises."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        def bw_fail1(self, grad_input, grad_output):
            # Returns one gradient too few.
            return grad_input[:-1]
        def bw_fail2(self, grad_input, grad_output):
            # Returns one gradient too many.
            return grad_input + (torch.randn(2, 2),)
        with module.register_backward_hook(bw_fail1):
            with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
                module(input).sum().backward()
        with module.register_backward_hook(bw_fail2):
            with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
                module(input).sum().backward()
    def test_hook_requires_grad(self):
        """Registering a full backward hook must not change inputs' requires_grad flags."""
        test_self = self
        class MyModule(nn.Module):
            def forward(self, arg1, arg2, arg3):
                test_self.assertTrue(arg1.requires_grad)
                test_self.assertFalse(arg2.requires_grad)
                test_self.assertTrue(arg3.requires_grad)
                return arg1.sum() + arg2.sum() + arg3.sum()
        inp = torch.rand(2, requires_grad=True)
        mod = MyModule()
        mod(inp, inp.detach(), inp)
        # Ensure that requires grad is properly propagated
        mod.register_full_backward_hook(lambda mod, gI, gO: None)
        mod(inp, inp.detach(), inp)
    def test_hook_no_requires_grad(self):
        """Full backward hooks see None grad_inputs when no input requires grad."""
        mod = nn.Linear(2, 3)
        inp = torch.rand(1, 2)
        # `return_val` selects the hook's return value for each scenario below.
        return_val = "None"
        hook_called = [0]
        def hook(mod, grad_input, grad_output):
            hook_called[0] += 1
            for gI in grad_input:
                self.assertIsNone(gI)
            for gO in grad_output:
                self.assertEqual(gO.size(), (1, 3))
            if return_val == "grad_input":
                return grad_input
            elif return_val == "invalid":
                # If the inputs were requiring gradients, this would be
                # a valid return
                return inp
            elif return_val == "None":
                return None
            else:
                raise RuntimeError("Invalid return_val string")
        mod.register_full_backward_hook(hook)
        # This should run and trigger the hook properly
        mod(inp).sum().backward()
        self.assertEqual(hook_called[0], 1)
        return_val = "grad_input"
        mod(inp).sum().backward()
        self.assertEqual(hook_called[0], 2)
        return_val = "invalid"
        with self.assertRaisesRegex(RuntimeError, "where no input requires gradient"):
            mod(inp).sum().backward()
    def test_hook_last_arg_requires_grad(self):
        """A full backward hook works when only the last argument requires grad."""
        mod = nn.L1Loss()
        inp = torch.rand(1, requires_grad=True)
        mod.register_full_backward_hook(lambda m, gI, gO: None)
        try:
            mod(inp.detach(), inp)
        except Exception as ex:
            self.fail("Unexpected exception: %s" % ex)
    def test_hook_extra_input(self):
        """Non-tensor inputs/outputs map to None entries in the hook's gradients."""
        class MyModule(nn.Module):
            def forward(self, non_tensor, tensor):
                return tensor.clone(), non_tensor
        inp = torch.rand(2, requires_grad=True)
        mod = MyModule()
        def hook(mod, grad_input, grad_output):
            self.assertIsNone(grad_input[0])
            self.assertIsInstance(grad_input[1], torch.Tensor)
            self.assertIsInstance(grad_output[0], torch.Tensor)
            self.assertIsNone(grad_output[1])
        mod.register_full_backward_hook(hook)
        out, _ = mod(True, inp)
        out.sum().backward()
    def test_hook_inplace(self):
        """In-place mutation of tensors wrapped by a full backward hook must error."""
        class MyModule(nn.Module):
            def forward(self, inp, do_inplace):
                self.inp = inp
                if do_inplace:
                    inp += 1
                return inp.clone()
        hook_called = [0]
        def hook(mod, grad_input, grad_output):
            hook_called[0] += 1
        inp = torch.rand(10, requires_grad=True)
        mod = MyModule()
        mod.register_full_backward_hook(hook)
        # No inplace should work
        mod(inp, False).sum().backward()
        self.assertEqual(hook_called[0], 1)
        # Input inplace error should throw an error
        with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
                                    "a view and is being modified inplace."):
            mod(inp.clone(), True)
        # Input inplace error should throw an error if we try to re-use the view after they have
        # been modified
        local_inp = inp.clone()
        out = mod(local_inp, False)
        local_inp[0] *= 1
        with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
                                    "a view and its base or another view"):
            # Any operation involving the view will fail here
            mod.inp + 2
        # Output inplace error should throw an error
        out = mod(inp, False)
        with self.assertRaisesRegex(RuntimeError, "BackwardHookFunctionBackward is a view "
                                    "and is being modified inplace."):
            out += 1
    def test_hook_non_full_warning(self):
        """Non-full backward hooks warn on module shapes they cannot handle."""
        def noop(*args):
            pass
        a = torch.rand(2, requires_grad=True)
        b = torch.rand(2, requires_grad=True)
        # Check invalid input container
        class MyModule(nn.Module):
            def forward(self, l):
                return l[0].clone(), l[1].clone()
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "does not take as input a single Tensor or a tuple of Tensors"):
            m([a, b])
        # Check invalid output container
        class MyModule(nn.Module):
            def forward(self, a, b):
                return [a.clone(), b.clone()]
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "does not return a single Tensor or a tuple of Tensors"):
            m(a, b)
        # Check invalid output from different Nodes
        class MyModule(nn.Module):
            def forward(self, a, b):
                return a.clone(), b.clone()
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "outputs are generated by different autograd Nodes"):
            m(a, b)
        # Check invalid forward with multiple Nodes
        class MyModule(nn.Module):
            def forward(self, a):
                return a.clone().clone()
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "the forward contains multiple autograd Nodes"):
            m(a)
    def test_hook_backward_size(self):
        """Full backward hooks receive gradients shaped like the module's inputs/outputs."""
        # Make module with multiple operations in forward
        # And different size for input and outputs
        class MyModule(nn.Module):
            def forward(self, arg1, arg2):
                tmp = arg1.sum() * arg2
                tmp = tmp + arg2.sum() * arg1.sum()
                tmp = tmp.sum().view(1)
                tmp = tmp.expand(8).contiguous()
                return tmp
        module = MyModule()
        inp1 = torch.randn(5, 5, requires_grad=True)
        inp2 = torch.randn(10, 10, requires_grad=True)
        def bw_hook(module, grad_input, grad_output):
            self.assertEqual(len(grad_input), 2)
            self.assertEqual(grad_input[0].size(), torch.Size([5, 5]))
            self.assertEqual(grad_input[1].size(), torch.Size([10, 10]))
            self.assertEqual(len(grad_output), 1)
            self.assertEqual(grad_output[0].size(), torch.Size([8]))
        with module.register_full_backward_hook(bw_hook):
            module(inp1, inp2).sum().backward()
    def test_hook_backward_writeable(self):
        """Gradients returned from a backward hook replace the originals (here: doubled)."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.nn.functional.sigmoid(input)
        def bw_hook(module, grad_input, grad_output):
            for grad in grad_input:
                self.assertTrue(isinstance(grad, torch.Tensor))
            for grad in grad_output:
                self.assertTrue(isinstance(grad, torch.Tensor))
            return tuple(gi * 2 for gi in grad_input)
        module.register_backward_hook(bw_hook)
        module(input).backward(torch.ones(5, 5))
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); hook doubles it.
        expected_grad = sig_x * (1 - sig_x) * 2
        self.assertEqual(input.grad, expected_grad)
    def test_hook_forward_preforward_writable(self):
        """Forward pre-hooks and forward hooks can rewrite the module's input/output."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.nn.functional.sigmoid(input)
        def forward_pre_hook(m, input):
            # Replaces the module input with relu(input).
            return torch.nn.functional.relu(input[0])
        def forward_hook(m, input, output):
            # Negates the module output.
            return -output
        module.register_forward_pre_hook(forward_pre_hook)
        module.register_forward_hook(forward_hook)
        output = module(input)
        expected_res = -torch.nn.functional.sigmoid(torch.nn.functional.relu(input))
        self.assertEqual(output, expected_res)
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        # relu masks out gradients for non-positive inputs.
        mask = (input > 0).double()
        expected_grad = -sig_x * (1 - sig_x) * 2 * mask
        self.assertEqual(input.grad, expected_grad)
    def test_to(self):
        """Module.to() is a no-op (returns self) when nothing changes, else converts."""
        m = nn.Linear(3, 5)
        # Same device/dtype: to() must return the very same module object.
        self.assertIs(m, m.to('cpu'))
        self.assertIs(m, m.to('cpu', dtype=torch.float32))
        self.assertEqual(m.double(), m.to(torch.float64))
        # Modules do not support the tensor-only `copy` keyword.
        self.assertRaises(RuntimeError, lambda: m.to('cpu', copy=True))
        if torch.cuda.is_available():
            for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                m2 = m.cuda(device=cuda)
                self.assertIs(m2, m2.to(cuda))
                self.assertEqual(m, m2.to('cpu'))
                self.assertEqual(m2, m.to(cuda))
                self.assertIs(m2, m2.to(dtype=torch.float32))
                self.assertEqual(m2.double(), m2.to(dtype=torch.float64))
    def test_zero_grad(self):
        """zero_grad() zeroes grads only of params that require grad; set_to_none clears them."""
        i = torch.randn(2, 5, requires_grad=True)
        module = nn.Linear(5, 5)
        for p in module.parameters():
            p.requires_grad = False
        # No params require grad: zero_grad is a no-op.
        module.zero_grad()
        module.weight.requires_grad = True
        module.zero_grad()
        self.assertIsNone(module.weight.grad)  # uninitialized grad
        module(i).sum().backward()
        self.assertIsNotNone(module.weight.grad)
        self.assertGreater(module.weight.grad.data.abs().sum(), 0)
        module.zero_grad()
        self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
        module.bias.requires_grad = True
        module.zero_grad()
        self.assertIsNotNone(module.weight.grad)
        # bias has not been through backward yet, so it has no grad to zero.
        self.assertIsNone(module.bias.grad)
        module(i).sum().backward()
        self.assertIsNotNone(module.weight.grad)
        self.assertIsNotNone(module.bias.grad)
        self.assertGreater(module.weight.grad.data.abs().sum(), 0)
        self.assertGreater(module.bias.grad.data.abs().sum(), 0)
        module.zero_grad()
        self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
        self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())
        # Force set to None.
        module.zero_grad(set_to_none=True)
        self.assertIsNone(module.weight.grad)
    def test_no_grad(self):
        """Outputs computed under torch.no_grad() do not require grad and cannot backward."""
        for dtype in [torch.bfloat16, torch.float, torch.double]:
            module = nn.Conv2d(2, 5, kernel_size=3, padding=1).to(dtype)
            input = torch.randn(1, 2, 10, 10).to(dtype)
            x = input
            y = input.clone()
            output = module(x)
            self.assertTrue(output.requires_grad)
            output.backward(torch.ones(1, 5, 10, 10))
            with torch.no_grad():
                output2 = module(y)
                self.assertFalse(output2.requires_grad)
                self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))
    def test_invalid_conv1d(self):
        """Conv1d rejects kernels larger than the padded input and non-positive strides."""
        for dtype in [torch.bfloat16, torch.float, torch.double]:
            module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True).to(dtype)
            input = torch.randn(1, 3, 4).to(dtype)
            with self.assertRaisesRegex(RuntimeError,
                                        r'Calculated padded input size per channel: \(4\). ' +
                                        r'Kernel size: \(10\). Kernel size can\'t be greater than actual input size'):
                module(input)
            # Negative stride check
            module = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True).to(dtype)
            input = torch.randn(1, 3, 4).to(dtype)
            with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
                module(input)
def test_mismatch_shape_conv2d(self):
x = torch.randn(1, 10, 1, 28, 28)
w = torch.randn(6, 1, 5, 5)
with self.assertRaisesRegex(RuntimeError,
r'Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d, but got ' +
r'input of size: \[1, 10, 1, 28, 28\]'):
F.conv2d(x, w)
    def test_conv2d_discontiguous_weight(self):
        """conv2d produces correct results with a non-contiguous weight tensor."""
        # Test for https://github.com/pytorch/pytorch/issues/55781
        x = torch.ones(64, 16, 16, 16)
        # Strided slice makes the weight non-contiguous on purpose.
        weight = torch.arange(0, 1.0, 1 / 2.0 ** 10).reshape(32, 16, 1, 2)[:, :, :, ::2]
        self.assertFalse(weight.is_contiguous())
        y = torch.nn.functional.conv2d(x, weight, None)
        if torch.backends.mkldnn.is_available():
            # Disable MKLDNN explicitly, so that either NNPACK or THCNN will be used
            with torch.backends.mkldnn.flags(enabled=False):
                y_ = torch.nn.functional.conv2d(x, weight, None)
                self.assertEqual(y, y_)
        self.assertEqual(y.sum(), 4186112.)
    def test_invalid_conv2d(self):
        """Conv2d rejects oversized kernels and non-positive strides."""
        for dtype in [torch.bfloat16, torch.float, torch.double]:
            module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
            input = torch.empty(1, 1, 4, 4).to(dtype)
            self.assertRaises(RuntimeError, lambda: module(input))
            module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)
            input = torch.randn(1, 3, 1, 1)
            with self.assertRaisesRegex(RuntimeError,
                                        r'Calculated padded input size per channel: \(1 x 1\). ' +
                                        r'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
                module(input)
            # Negative stride check
            module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True).to(dtype)
            input = torch.randn(1, 3, 4, 4).to(dtype)
            with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
                module(input)
            # Zero stride check
            module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True).to(dtype)
            input = torch.randn(1, 3, 4, 4).to(dtype)
            with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
                module(input)
    def test_invalid_conv3d(self):
        """Conv3d rejects oversized kernels and negative strides."""
        for dtype in [torch.bfloat16, torch.float, torch.double]:
            module = torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
            input = torch.empty(1, 1, 4, 4, 4).to(dtype)
            self.assertRaises(RuntimeError, lambda: module(input))
            # Negative stride check
            module = torch.nn.Conv3d(1, 1, kernel_size=3, stride=-2)
            input = torch.empty(1, 1, 4, 4, 4)
            with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
                module(input)
    def test_Conv1d_module_same_padding(self):
        """Conv1d with padding='same' matches the functional form and validates args."""
        # Compare module against functional: without strides/dilation, asymmetric padding
        x = torch.rand(1, 1, 20)
        module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                           padding='same')
        expect = F.conv1d(x, module.weight, module.bias, padding='same')
        self.assertEqual(expect, module(x))
        # Test dilation, symmetric padding
        module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                           padding='same', dilation=2)
        expect = F.conv1d(x, module.weight, module.bias, padding='same', dilation=2)
        self.assertEqual(expect, module(x))
        # Test non-zero padding_mode, requiring explicit padding
        module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                           padding='same', padding_mode='replicate')
        x_padded = F.pad(x, [4, 5], mode='replicate')
        expect = F.conv1d(x_padded, module.weight, module.bias, padding='valid')
        self.assertEqual(expect, module(x))
        self.assertEqual(x.size(), expect.size())
        # Test construction with invalid padding string raises
        with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
            module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
        # Test construction with same padding and strides raises
        with self.assertRaisesRegex(ValueError, "padding='same'"):
            module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
    def test_Conv2d_module_same_padding(self):
        """Conv2d with padding='same' matches the functional form and validates args."""
        # Compare module against functional:
        # without strides/dilation, both symmetric and asymmetric padding
        x = torch.rand(1, 1, 9, 20)
        module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 10),
                           padding='same')
        expect = F.conv2d(x, module.weight, module.bias, padding='same')
        self.assertEqual(expect, module(x))
        # with dilation, symmetric padding
        module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
                           padding='same', dilation=(1, 2))
        expect = F.conv2d(x, module.weight, module.bias, padding='same', dilation=(1, 2))
        self.assertEqual(expect, module(x))
        # Test non-zero padding_mode, requiring explicit padding
        module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
                           padding='same', padding_mode='reflect')
        x_padded = F.pad(x, [1, 2, 1, 1], mode='reflect')
        expect = F.conv2d(x_padded, module.weight, module.bias, padding='valid')
        self.assertEqual(expect, module(x))
        self.assertEqual(x.size(), expect.size())
        # Test construction with invalid padding string raises
        with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
            module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
        # Test construction with same padding and strides raises
        with self.assertRaisesRegex(ValueError, "padding='same'"):
            module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
        with self.assertRaisesRegex(ValueError, "padding='same'"):
            module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 3))
        with self.assertRaisesRegex(ValueError, "padding='same'"):
            module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(4, 1))
def test_Conv3d_module_same_padding(self):
# Compare module against functional:
x = torch.rand(1, 1, 4, 4, 4)
# without dilation, both symmetric and asymmetric padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same')
expect = F.conv3d(x, module.weight, module.bias, padding='same')
self.assertEqual(expect, module(x))
# with dilation, both symmetric and asymmetric padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same', dilation=(3, 2, 1))
expect = F.conv3d(x, module.weight, module.bias, padding='same', dilation=(3, 2, 1))
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same', padding_mode='circular')
x_padded = F.pad(x, [1, 2, 1, 1, 0, 1], mode='circular')
expect = F.conv3d(x_padded, module.weight, module.bias, padding='valid')
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test connstruction with invalid padding string raises
with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
# Test connstruction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 1, 3))
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 4, 1))
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(5, 1, 1))
    def _test_alpha_dropout(self, cls, input):
        """Alpha dropout should approximately preserve the input's mean and std."""
        mean = input.mean()
        std = input.std()
        for p in [0.2, 0.5, 0.8]:
            module = cls(p)
            input_var = input.detach().clone().requires_grad_()
            output = module(input_var)
            # output mean should be close to input mean
            self.assertLess(abs(output.data.mean() - mean), 0.1)
            # output std should be close to input std
            self.assertLess(abs(output.data.std() - std), 0.1)
            output.backward(input)
    def test_parameters_and_named_parameters(self):
        """named_parameters() yields dotted names; recurse=False limits to direct params."""
        def names(named_parameters):
            return [k for k, _ in named_parameters]
        l, n, s = self._create_basic_net()
        self.assertEqual(len(list(l.parameters())), 1)
        self.assertEqual(
            names(l.named_parameters()),
            ['layer_dummy_param'])
        self.assertEqual(len(list(n.parameters())), 2)
        self.assertEqual(
            names(n.named_parameters()),
            ['dummy_param', 'l1.layer_dummy_param'])
        self.assertEqual(len(list(n.parameters(recurse=False))), 1)
        self.assertEqual(
            names(n.named_parameters(recurse=False)),
            ['dummy_param'])
        # Sequential contains the same Net twice; duplicates are deduplicated.
        self.assertEqual(len(list(s.parameters())), 2)
        self.assertEqual(
            names(s.named_parameters()),
            ['0.dummy_param', '0.l1.layer_dummy_param'])
    def test_buffers_and_named_buffers(self):
        """named_buffers() yields dotted names; recurse=False limits to direct buffers."""
        def names(named_buffers):
            return [k for k, _ in named_buffers]
        l, n, s = self._create_basic_net()
        self.assertEqual(len(list(l.buffers())), 1)
        self.assertEqual(
            names(l.named_buffers()),
            ['layer_dummy_buf'])
        self.assertEqual(len(list(n.buffers())), 2)
        self.assertEqual(
            names(n.named_buffers()),
            ['dummy_buf', 'l1.layer_dummy_buf'])
        self.assertEqual(len(list(n.buffers(recurse=False))), 1)
        self.assertEqual(
            names(n.named_buffers(recurse=False)),
            ['dummy_buf'])
        # Sequential contains the same Net twice; duplicates are deduplicated.
        self.assertEqual(len(list(s.buffers())), 2)
        self.assertEqual(
            names(s.named_buffers()),
            ['0.dummy_buf', '0.l1.layer_dummy_buf'])
def test_call_supports_python_dict_output(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = nn.Linear(10, 20)
self.register_backward_hook(self.hook)
self.check_backward_hook_flag = False
def hook(self, module, grad_out, grad_in):
self.check_backward_hook_flag = True
def forward(self, inputs):
return {"output": self.l1(inputs).sum()}
net = Net()
model_output = net(torch.randn([5, 10]))
model_output["output"].backward()
self.assertTrue(net.check_backward_hook_flag)
def test_children(self):
l1 = nn.Linear(2, 2)
l2 = nn.Linear(2, 2)
l3 = nn.Linear(2, 2)
l4 = nn.Linear(2, 2)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential(l1, l2, l1, l2, subnet)
self.assertEqual(list(s.children()), [l1, l2, subnet])
def test_train_errors_for_invalid_mode(self):
class SubclassNet(nn.Module):
def __init__(self):
super(SubclassNet, self).__init__()
self.l1 = nn.Linear(2, 2)
def forward(self, inputs):
return self.l1(inputs)
subclass_net = SubclassNet()
sequential_net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
error_modes = ["invalid_str", torch.device('cpu')]
modules_to_check = [subclass_net, sequential_net]
for error_mode, module in itertools.product(error_modes, modules_to_check):
with self.assertRaises(ValueError):
module.train(error_mode)
def test_dir(self):
linear = nn.Linear(2, 2)
linear._test_submodule = nn.Linear(2, 2)
linear._test_parameter = Parameter(torch.empty(2, 2))
linear.register_buffer('_test_buffer', torch.empty(2, 2))
keys = dir(linear)
self.assertIn('_test_submodule', keys)
self.assertIn('_test_parameter', keys)
self.assertIn('_test_buffer', keys)
for key in keys:
self.assertTrue(hasattr(linear, key))
    def test_repr(self):
        """repr() shows extra info on one line and nests submodules with indentation."""
        # no extra information or sub-modules
        empty_sequential = nn.Sequential()
        expected_repr_empty = 'Sequential()'
        self.assertEqual(repr(empty_sequential), expected_repr_empty)
        # one liner extra information
        linear = nn.Linear(1, 1)
        expected_repr_linear = 'Linear(in_features=1, out_features=1, bias=True)'
        self.assertEqual(repr(linear), expected_repr_linear)
        # sub-modules repr
        sequential = nn.Sequential(linear)
        expected_repr_sequential = 'Sequential(\n' \
            '  (0): Linear(in_features=1, out_features=1, bias=True)\n' \
            ')'
        self.assertEqual(repr(sequential), expected_repr_sequential)
def test_dir_digit(self):
model = nn.Sequential(nn.Linear(2, 2))
keys = dir(model)
self.assertNotIn('0', keys)
    def test_named_children(self):
        """add_module validates names and named_children() deduplicates instances."""
        l1 = nn.Linear(2, 2)
        l2 = nn.Linear(2, 2)
        l3 = nn.Linear(2, 2)
        l4 = nn.Linear(2, 2)
        subnet = nn.Sequential(l3, l4)
        s = nn.Sequential()
        # Empty names and names containing '.' are rejected.
        with self.assertRaises(KeyError):
            s.add_module('', l1)
        with self.assertRaises(KeyError):
            s.add_module('name.with.dot', l1)
        s.add_module('layer1', l1)
        s.add_module('layer2', l2)
        s.add_module('layer3', l1)
        s.add_module('layer4', l2)
        s.add_module('subnet', subnet)
        # l1/l2 are registered twice; only their first name is yielded.
        self.assertEqual(list(s.named_children()), [('layer1', l1), ('layer2', l2), ('subnet', subnet)])
    def test_modules(self):
        """modules() walks the tree depth-first, yielding each instance only once."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.l1 = l
                self.l2 = l
                self.param = torch.empty(3, 5)
        l = nn.Linear(10, 20)
        n = Net()
        # s contains n four times and n contains l twice; all duplicates collapse.
        s = nn.Sequential(n, n, n, n)
        self.assertEqual(list(s.modules()), [s, n, l])
    def test_named_modules(self):
        """named_modules() yields dotted names; remove_duplicate=False keeps repeats."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.l1 = l
                self.l2 = l
                self.param = torch.empty(3, 5)
                self.block = block
        l = nn.Linear(10, 20)
        l1 = nn.Linear(10, 20)
        l2 = nn.Linear(10, 20)
        block = nn.Sequential()
        block.add_module('linear1', l1)
        block.add_module('linear2', l2)
        n = Net()
        s = nn.Sequential(n, n)
        self.assertEqual(list(s.named_modules()), [('', s), ('0', n), ('0.l1', l),
                                                   ('0.block', block), ('0.block.linear1', l1),
                                                   ('0.block.linear2', l2)])
        # test the option to not remove duplicate module instances
        self.assertEqual(list(s.named_modules(remove_duplicate=False)), [
            ('', s), ('0', n), ('0.l1', l), ('0.l2', l),
            ('0.block', block), ('0.block.linear1', l1),
            ('0.block.linear2', l2),
            ('1', n), ('1.l1', l), ('1.l2', l),
            ('1.block', block), ('1.block.linear1', l1),
            ('1.block.linear2', l2)])
def test_register_buffer_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'buffer name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_buffer(1, torch.rand(5))
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_buffer(None, torch.rand(5))
    def test_register_buffer_raises_error_if_attr_exists(self):
        """register_buffer must refuse a name already used by a plain
        attribute, a parameter, or a submodule."""
        m = nn.Module()
        # Name taken by a plain Python attribute.
        m.attribute_name = 5
        with self.assertRaises(KeyError):
            m.register_buffer('attribute_name', torch.rand(5))
        del m.attribute_name
        # Name taken by a registered parameter.
        m.register_parameter('attribute_name', nn.Parameter())
        with self.assertRaises(KeyError):
            m.register_buffer('attribute_name', torch.rand(5))
        del m.attribute_name
        # Name taken by a submodule.
        m.add_module('attribute_name', nn.Module())
        with self.assertRaises(KeyError):
            m.register_buffer('attribute_name', torch.rand(5))
def test_register_buffer_raises_error_if_not_tensor(self):
m = nn.Module()
with self.assertRaises(TypeError):
m.register_buffer('attribute_name', 5)
    def test_register_buffer_allows_overwriting_with_same_name(self):
        """Re-registering under an existing buffer name replaces the buffer,
        including replacement with None."""
        m = nn.Module()
        buffer1 = torch.rand(5)
        buffer2 = buffer1 + 5
        buffer3 = None
        m.register_buffer('buffer_name', buffer1)
        self.assertEqual(m.buffer_name, buffer1)
        m.register_buffer('buffer_name', buffer2)
        self.assertEqual(m.buffer_name, buffer2)
        # None is a legal buffer value and also overwrites.
        m.register_buffer('buffer_name', buffer3)
        self.assertEqual(m.buffer_name, buffer3)
def test_get_buffer(self):
m = nn.Module()
buffer1 = torch.randn(2, 3)
buffer2 = torch.randn(4, 5)
m.register_buffer('foo', buffer1)
m.register_buffer('bar', buffer2)
self.assertEqual(buffer1, m.get_buffer('foo'))
self.assertEqual(buffer2, m.get_buffer('bar'))
    def test_get_buffer_from_submodules(self):
        """get_buffer resolves dotted paths through nested submodules."""
        class MyModule(nn.Module):
            def __init__(self, foo, bar):
                super().__init__()
                self.sub = Sub(foo, bar)
        class Sub(nn.Module):
            def __init__(self, foo, bar):
                super().__init__()
                self.register_buffer('foo', foo)
                self.subsub = SubSub(bar)
        class SubSub(nn.Module):
            def __init__(self, bar):
                super().__init__()
                self.register_buffer('bar', bar)
        foo = torch.randn(2, 3)
        bar = torch.randn(4, 5)
        m = MyModule(foo, bar)
        # One level deep and two levels deep, respectively.
        self.assertEqual(foo, m.get_buffer('sub.foo'))
        self.assertEqual(bar, m.get_buffer('sub.subsub.bar'))
def test_buffer_not_persistent(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
def test_buffer_not_persistent_del(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
del m.buf
self.assertTrue(len(list(m.buffers())) == 0)
    def test_buffer_not_persistent_overwrite(self):
        """Re-registering a buffer can toggle its persistence in either direction."""
        m = nn.Module()
        m.register_buffer('buf', torch.rand(5), persistent=False)
        m.register_buffer('buf', torch.rand(5))
        # can we overwrite a non-persistent buffer with a persistent one?
        self.assertTrue(len(list(m.buffers())) == 1)
        self.assertTrue(len(m.state_dict()) == 1)
        # can we overwrite a persistent buffer with a non-persistent one?
        m.register_buffer('buf', torch.rand(5), persistent=False)
        self.assertTrue(len(list(m.buffers())) == 1)
        self.assertTrue(len(m.state_dict()) == 0)
    def test_buffer_not_persistent_assign(self):
        """Attribute assignment interacts with a non-persistent buffer slot:
        None clears it, a new Tensor re-occupies it (still non-persistent),
        and a Parameter replaces it entirely."""
        m = nn.Module()
        m.register_buffer('buf', torch.rand(5), persistent=False)
        # Assigning None removes the buffer but if we then assign a new Tensor
        # to the same property, it should still be marked as a buffer.
        m.buf = None
        self.assertTrue(len(list(m.buffers())) == 0)
        self.assertTrue(len(m.state_dict()) == 0)
        m.buf = torch.rand(5)
        self.assertTrue(len(list(m.buffers())) == 1)
        self.assertTrue(len(m.state_dict()) == 0)
        # Assigning a Parameter removes the buffer.
        m.buf = nn.Parameter(torch.rand(5))
        self.assertTrue(len(list(m.buffers())) == 0)
        self.assertTrue(len(m.state_dict()) == 1)
    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    def test_load_state_dict_invalid(self):
        """load_state_dict must reject checkpoint values that are not
        torch.Tensor / Tensor-like (here: a numpy array and a nested tuple)."""
        m = torch.nn.Linear(2, 2, bias=False)
        state_dict = {'weight': np.random.randn(2, 2)}
        with self.assertRaisesRegex(RuntimeError,
                                    "expected torch.Tensor or Tensor-like object from checkpoint but received"):
            m.load_state_dict(state_dict)
        state_dict = {'weight': ((1., 1.), (2., 2.))}
        with self.assertRaisesRegex(RuntimeError,
                                    "expected torch.Tensor or Tensor-like object from checkpoint but received"):
            m.load_state_dict(state_dict)
def test_buffer_not_persistent_load(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
m.load_state_dict({})
def test_register_parameter_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'parameter name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_parameter(1, nn.Parameter())
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_parameter(None, nn.Parameter())
    def test_register_parameter_raises_error_if_attr_exists(self):
        """register_parameter must refuse a name already used by a plain
        attribute, a buffer, or a submodule."""
        m = nn.Module()
        # Name taken by a plain Python attribute.
        m.attribute_name = 5
        with self.assertRaises(KeyError):
            m.register_parameter('attribute_name', nn.Parameter())
        del m.attribute_name
        # Name taken by a registered buffer.
        m.register_buffer('attribute_name', torch.rand(5))
        with self.assertRaises(KeyError):
            m.register_parameter('attribute_name', nn.Parameter())
        del m.attribute_name
        # Name taken by a submodule.
        m.add_module('attribute_name', nn.Module())
        with self.assertRaises(KeyError):
            m.register_parameter('attribute_name', nn.Parameter())
    def test_register_parameter_allows_overwriting_with_same_name(self):
        """Re-registering under an existing parameter name replaces the
        parameter, including replacement with None."""
        m = nn.Module()
        param1 = nn.Parameter(torch.rand(5))
        param2 = nn.Parameter(param1.data + 5)
        param3 = None
        m.register_parameter('param_name', param1)
        self.assertEqual(m.param_name, param1)
        m.register_parameter('param_name', param2)
        self.assertEqual(m.param_name, param2)
        # None is a legal parameter value and also overwrites.
        m.register_parameter('param_name', param3)
        self.assertEqual(m.param_name, param3)
    def test_add_module_raises_error_if_attr_exists(self):
        """add_module and its alias register_module must refuse a name already
        used by a plain attribute, a buffer, or a parameter."""
        methods_to_test = ['add_module', 'register_module']
        for fn in methods_to_test:
            m = nn.Module()
            # Name taken by a plain Python attribute.
            m.attribute_name = 5
            with self.assertRaises(KeyError):
                getattr(m, fn)('attribute_name', nn.Module())
            del m.attribute_name
            # Name taken by a registered buffer.
            m.register_buffer('attribute_name', torch.rand(5))
            with self.assertRaises(KeyError):
                getattr(m, fn)('attribute_name', nn.Module())
            del m.attribute_name
            # Name taken by a registered parameter.
            m.register_parameter('attribute_name', nn.Parameter())
            with self.assertRaises(KeyError):
                getattr(m, fn)('attribute_name', nn.Module())
    @unittest.expectedFailure
    def test_getattr_with_property(self):
        """A property whose body raises AttributeError should surface the
        *inner* missing attribute name, not the property's own name.
        Marked expectedFailure: Module.__getattr__ currently masks the
        original error."""
        class Model(nn.Module):
            @property
            def some_property(self):
                return self.something_that_doesnt_exist
        model = Model()
        with self.assertRaisesRegex(
                AttributeError,
                r"'Model' object has no attribute 'something_that_doesnt_exist'"):
            model.some_property
    def test_Sequential_getitem(self):
        """Sequential indexing supports ints, 0-dim integer tensors, and
        slices (which return a new Sequential)."""
        l1 = nn.Linear(10, 20)
        l2 = nn.Linear(20, 30)
        l3 = nn.Linear(30, 40)
        l4 = nn.Linear(40, 50)
        n = nn.Sequential(l1, l2, l3, l4)
        self.assertIs(n[0], l1)
        self.assertIs(n[1], l2)
        self.assertIs(n[2], l3)
        self.assertIs(n[3], l4)
        # A 0-dim integer tensor works like a plain int index.
        self.assertIs(n[torch.tensor(3, dtype=torch.int64)], l4)
        self.assertEqual(n[1:], nn.Sequential(l2, l3, l4))
        self.assertEqual(n[3:], nn.Sequential(l4))
        self.assertEqual(n[:-1], nn.Sequential(l1, l2, l3))
        self.assertEqual(n[:-3], nn.Sequential(l1))
        # Negative step reverses the container.
        self.assertEqual(n[::-1], nn.Sequential(l4, l3, l2, l1))
    def test_Sequential_setitem(self):
        """Sequential item assignment accepts positive, negative, and
        0-dim integer-tensor indices."""
        l1 = nn.Linear(10, 20)
        l2 = nn.Linear(20, 30)
        l3 = nn.Linear(30, 40)
        l4 = nn.Linear(40, 50)
        n = nn.Sequential(l1, l2, l3)
        n[0] = l4
        n[-1] = l4
        # int16 tensor index should also be accepted.
        n[torch.tensor(1, dtype=torch.int16)] = l1
        self.assertIs(n[0], l4)
        self.assertIs(n[1], l1)
        self.assertIs(n[2], l4)
    def test_Sequential_setitem_named(self):
        """Positional assignment on an OrderedDict-built Sequential keeps the
        original child names."""
        l1 = nn.Linear(10, 20)
        l2 = nn.Linear(20, 30)
        l3 = nn.Linear(30, 40)
        l4 = nn.Linear(40, 50)
        n = nn.Sequential(OrderedDict([
            ('linear1', l1),
            ('linear2', l2),
            ('linear3', l3),
        ]))
        n[0] = l4
        n[-1] = l4
        # The new modules are reachable under the pre-existing names.
        self.assertEqual(n.linear1, l4)
        self.assertEqual(n.linear3, l4)
    def test_Sequential_delitem(self):
        """Sequential supports deletion by int index and by slice."""
        l1 = nn.Linear(10, 20)
        l2 = nn.Linear(20, 30)
        l3 = nn.Linear(30, 40)
        l4 = nn.Linear(40, 50)
        n = nn.Sequential(l1, l2, l3, l4)
        del n[-1]
        self.assertEqual(n, nn.Sequential(l1, l2, l3))
        # Slice deletion: drop every other module starting at index 1.
        del n[1::2]
        self.assertEqual(n, nn.Sequential(l1, l3))
    def test_Sequential_append(self):
        """Sequential.append mutates in place and returns self (chainable)."""
        l1 = nn.Linear(10, 20)
        l2 = nn.Linear(20, 30)
        l3 = nn.Linear(30, 40)
        l4 = nn.Linear(40, 50)
        n = nn.Sequential(l1, l2, l3)
        n2 = n.append(l4)
        self.assertEqual(n, nn.Sequential(l1, l2, l3, l4))
        # append returns the same container, so both references see the change.
        self.assertEqual(n2, nn.Sequential(l1, l2, l3, l4))
        self.assertEqual(nn.Sequential(l1).append(l2).append(l4), nn.Sequential(l1, l2, l4))
    def test_ModuleList(self):
        """ModuleList mirrors a plain Python list through +=, +, insert,
        append, extend, indexed get/set (including tensor indices), slicing,
        and deletion; it must not be callable like a Module."""
        modules = [nn.ReLU(), nn.Linear(5, 5)]
        module_list = nn.ModuleList(modules)
        # Invariant: module_list and the reference `modules` list stay in sync.
        def check():
            self.assertEqual(len(module_list), len(modules))
            for m1, m2 in zip(modules, module_list):
                self.assertIs(m1, m2)
            for m1, m2 in zip(modules, module_list.children()):
                self.assertIs(m1, m2)
            for i in range(len(modules)):
                self.assertIs(module_list[i], modules[i])
        check()
        modules += [nn.Conv2d(3, 4, 3)]
        module_list += [modules[-1]]
        check()
        modules = modules + [nn.Conv2d(3, 4, 3, bias=False), nn.GELU()]
        module_list = module_list + nn.ModuleList(modules[-2:])
        check()
        modules.insert(1, nn.Linear(3, 2))
        module_list.insert(1, modules[1])
        check()
        modules.append(nn.Tanh())
        module_list.append(modules[-1])
        check()
        next_modules = [nn.Linear(5, 5), nn.Sigmoid()]
        modules.extend(next_modules)
        module_list.extend(next_modules)
        check()
        modules[2] = nn.Conv2d(5, 3, 2)
        module_list[2] = modules[2]
        check()
        modules[-1] = nn.Conv2d(5, 2, 1)
        module_list[-1] = modules[-1]
        check()
        # 0-dim integer tensors are valid indices for __setitem__/__getitem__.
        idx = torch.tensor(2, dtype=torch.int32)
        modules[2] = nn.Conv2d(5, 3, 2)
        module_list[idx] = modules[2]
        self.assertIs(module_list[idx], modules[2])
        check()
        self.assertEqual(module_list[1:], nn.ModuleList(modules[1:]))
        self.assertEqual(module_list[3:], nn.ModuleList(modules[3:]))
        self.assertEqual(module_list[:-1], nn.ModuleList(modules[:-1]))
        self.assertEqual(module_list[:-3], nn.ModuleList(modules[:-3]))
        self.assertEqual(module_list[::-1], nn.ModuleList(modules[::-1]))
        del module_list[-1]
        self.assertEqual(module_list, nn.ModuleList(modules[:-1]))
        del module_list[1::2]
        self.assertEqual(module_list, nn.ModuleList(modules[:-1][0::2]))
        # += / extend require an iterable of modules, not a bare module.
        with self.assertRaises(TypeError):
            module_list += nn.ReLU()
        with self.assertRaises(TypeError):
            module_list.extend(nn.ReLU())
        l1 = nn.Linear(1, 2)
        l2 = nn.Linear(2, 3)
        l3 = nn.Linear(3, 2)
        l4 = nn.Linear(2, 3)
        subnet = nn.Sequential(l3, l4)
        s = nn.Sequential(
            OrderedDict([
                ("layer1", l1),
                ("layer2", l2),
                ("layer3", l3),
                ("layer4", l4),
                ("subnet_layer", subnet)
            ])
        )
        modules = list(s.modules())
        module_list = nn.ModuleList()
        module_list.extend(s.modules())
        check()
        # verify the right exception is thrown when trying to "forward" through a ModuleList
        self.assertRaises(NotImplementedError, module_list)
        self.assertRaises(NotImplementedError, module_list, torch.rand(1, 3))
    def test_ModuleDict(self):
        """ModuleDict mirrors a plain dict through item get/set/del, update
        (from pairs, OrderedDict, dict, and another ModuleDict), pop, and
        clear; it must not be callable like a Module."""
        modules = OrderedDict([
            ('act', nn.ReLU()),
            ('conv', nn.Conv2d(10, 10, 5)),
            ('fc', nn.Linear(5, 5)),
        ])
        module_dict = nn.ModuleDict(modules)
        # Invariant: module_dict and the reference `modules` dict stay in sync.
        def check():
            self.assertEqual(len(module_dict), len(modules))
            for k1, m2 in zip(modules, module_dict.children()):
                self.assertIs(modules[k1], m2)
            for k1, k2 in zip(modules, module_dict):
                self.assertIs(modules[k1], module_dict[k2])
            for k in module_dict:
                self.assertIs(module_dict[k], modules[k])
            for k in module_dict.keys():
                self.assertIs(module_dict[k], modules[k])
            for k, v in module_dict.items():
                self.assertIs(modules[k], v)
            for k1, m2 in zip(modules, module_dict.values()):
                self.assertIs(modules[k1], m2)
            for k in modules.keys():
                self.assertTrue(k in module_dict)
        check()
        modules['conv'] = nn.Conv2d(3, 4, 3)
        module_dict['conv'] = modules['conv']
        check()
        # update() from a list of (key, module) pairs, overwriting 'act'.
        next_modules = [
            ('fc2', nn.Linear(5, 5)),
            ('act', nn.Sigmoid()),
        ]
        modules.update(next_modules)
        module_dict.update(next_modules)
        check()
        # update() from an OrderedDict.
        next_modules = OrderedDict([
            ('fc3', nn.Linear(5, 5)),
            ('act2', nn.Sigmoid()),
        ])
        modules.update(next_modules)
        module_dict.update(next_modules)
        check()
        # update() from a plain dict.
        next_modules = {
            'fc4': nn.Linear(5, 5),
            'act3': nn.Sigmoid()
        }
        modules.update(next_modules.items())
        module_dict.update(next_modules)
        check()
        # update() from another ModuleDict.
        next_modules = nn.ModuleDict([
            ('fc5', nn.Linear(5, 5)),
            ('act4', nn.Sigmoid()),
        ])
        modules.update(next_modules)
        module_dict.update(next_modules)
        check()
        del module_dict['fc']
        del modules['fc']
        check()
        # update() requires a mapping or an iterable of key/value pairs.
        with self.assertRaises(TypeError):
            module_dict.update(nn.ReLU())
        with self.assertRaises(TypeError):
            module_dict.update([nn.ReLU()])
        with self.assertRaises(ValueError):
            module_dict.update([[nn.ReLU()]])
        # Keys must be strings.
        with self.assertRaises(TypeError):
            module_dict[1] = nn.ReLU()
        s = nn.Sequential(modules)
        module_dict = nn.ModuleDict(s.named_children())
        check()
        c = module_dict.pop('conv')
        self.assertIs(c, modules['conv'])
        modules.pop('conv')
        check()
        module_dict.clear()
        self.assertEqual(len(module_dict), 0)
        modules.clear()
        check()
        # verify the right exception is thrown when trying to "forward" through a ModuleDict
        self.assertRaises(NotImplementedError, module_dict)
        self.assertRaises(NotImplementedError, module_dict, torch.rand(1, 3))
    def test_ParameterList(self):
        """ParameterList mirrors a plain Python list through +=, append,
        extend, indexed get/set (including tensor indices), and slicing."""
        def make_param():
            return Parameter(torch.randn(10, 10))
        parameters = [make_param(), make_param()]
        param_list = nn.ParameterList(parameters)
        # Invariant: param_list and the reference `parameters` list stay in sync.
        def check():
            self.assertEqual(len(parameters), len(param_list))
            for p1, p2 in zip(parameters, param_list):
                self.assertIs(p1, p2)
            for p1, p2 in zip(parameters, param_list.parameters()):
                self.assertIs(p1, p2)
            for i in range(len(parameters)):
                self.assertIs(parameters[i], param_list[i])
        check()
        parameters += [make_param()]
        param_list += [parameters[-1]]
        check()
        parameters.append(make_param())
        param_list.append(parameters[-1])
        check()
        next_params = [make_param(), make_param()]
        parameters.extend(next_params)
        param_list.extend(next_params)
        check()
        parameters[2] = make_param()
        param_list[2] = parameters[2]
        check()
        parameters[-1] = make_param()
        param_list[-1] = parameters[-1]
        check()
        # 0-dim integer tensors are valid indices.
        idx = torch.tensor(2, dtype=torch.int32)
        parameters[2] = make_param()
        param_list[idx] = parameters[2]
        self.assertIs(param_list[idx], parameters[2])
        check()
        self.assertEqual(param_list[1:], nn.ParameterList(parameters[1:]))
        self.assertEqual(param_list[3:], nn.ParameterList(parameters[3:]))
        self.assertEqual(param_list[:-1], nn.ParameterList(parameters[:-1]))
        self.assertEqual(param_list[:-3], nn.ParameterList(parameters[:-3]))
        self.assertEqual(param_list[::-1], nn.ParameterList(parameters[::-1]))
        # += / extend require an iterable of parameters, not a bare parameter.
        with self.assertRaises(TypeError):
            param_list += make_param()
        with self.assertRaises(TypeError):
            param_list.extend(make_param())
        l1 = nn.Linear(1, 2)
        l2 = nn.Linear(2, 3)
        l3 = nn.Linear(3, 2)
        l4 = nn.Linear(2, 3)
        subnet = nn.Sequential(l3, l4)
        s = nn.Sequential(
            OrderedDict([
                ("layer1", l1),
                ("layer2", l2),
                ("layer3", l3),
                ("layer4", l4),
                ("subnet_layer", subnet)
            ])
        )
        parameters = list(s.parameters())
        param_list = nn.ParameterList()
        param_list.extend(s.parameters())
        check()
    def test_ParameterDict(self):
        """ParameterDict mirrors a plain dict through item get/set/del,
        update (from several source types), pop, popitem, setdefault, get,
        copy, fromkeys, clear, reversed iteration, and the | / |= operators."""
        parameters = OrderedDict([
            ('p1', Parameter(torch.randn(10, 10))),
            ('p2', Parameter(torch.randn(10, 10))),
            ('p3', Parameter(torch.randn(10, 10))),
        ])
        parameter_dict = nn.ParameterDict(parameters)
        # Invariant: parameter_dict and the reference `parameters` dict stay in sync.
        def check():
            self.assertEqual(len(parameter_dict), len(parameters))
            for k1, m2 in zip(parameters, parameter_dict.parameters()):
                self.assertIs(parameters[k1], m2)
            for k1, k2 in zip(parameters, parameter_dict):
                self.assertIs(parameters[k1], parameter_dict[k2])
            for k in parameter_dict:
                self.assertIs(parameter_dict[k], parameters[k])
            for k in parameter_dict.keys():
                self.assertIs(parameter_dict[k], parameters[k])
            for k, v in parameter_dict.items():
                self.assertIs(v, parameters[k])
            for k1, m2 in zip(parameters, parameter_dict.values()):
                self.assertIs(parameters[k1], m2)
            for k in parameters.keys():
                self.assertTrue(k in parameter_dict)
        check()
        parameters['p4'] = Parameter(torch.randn(10, 10))
        parameter_dict['p4'] = parameters['p4']
        check()
        # update() from a list of pairs, overwriting 'p2'.
        next_parameters = [
            ('p5', Parameter(torch.randn(10, 10))),
            ('p2', Parameter(torch.randn(10, 10))),
        ]
        parameters.update(next_parameters)
        parameter_dict.update(next_parameters)
        check()
        # update() from an OrderedDict, overwriting 'p5'.
        next_parameters = OrderedDict([
            ('p6', Parameter(torch.randn(10, 10))),
            ('p5', Parameter(torch.randn(10, 10))),
        ])
        parameters.update(next_parameters)
        parameter_dict.update(next_parameters)
        check()
        # update() from a plain dict: ParameterDict sorts the keys, hence
        # the sorted() on the reference side.
        next_parameters = {
            'p8': Parameter(torch.randn(10, 10)),
            'p7': Parameter(torch.randn(10, 10))
        }
        parameters.update(sorted(next_parameters.items()))
        parameter_dict.update(next_parameters)
        check()
        # update() from another ParameterDict keeps its insertion order.
        next_parameters = nn.ParameterDict([
            ('p10', Parameter(torch.randn(10, 10))),
            ('p9', Parameter(torch.randn(10, 10))),
        ])
        parameters.update(next_parameters)
        parameter_dict.update(next_parameters)
        check()
        del parameter_dict['p3']
        del parameters['p3']
        check()
        # update() requires a mapping or an iterable of key/value pairs.
        with self.assertRaises(TypeError):
            parameter_dict.update(1)
        with self.assertRaises(TypeError):
            parameter_dict.update([1])
        with self.assertRaises(ValueError):
            parameter_dict.update(Parameter(torch.randn(10, 10)))
        # Keys must be strings.
        with self.assertRaises(TypeError):
            parameter_dict[1] = Parameter(torch.randn(10, 10))
        p_pop = parameter_dict.pop('p4')
        self.assertIs(p_pop, parameters['p4'])
        parameters.pop('p4')
        check()
        # Check reverse works
        forward = list(iter(parameter_dict))
        backward = list(reversed(parameter_dict))
        self.assertEqual(len(forward), len(backward))
        n = len(forward)
        for i in range(n):
            self.assertIs(forward[i], backward[n - i - 1])
        check()
        # Check copy works
        copy = parameter_dict.copy()
        # Check all keys are present and have shallow copied values
        for key in parameter_dict:
            self.assertTrue(key in copy)
            self.assertEqual(parameter_dict[key], copy[key])
            self.assertIs(parameter_dict[key], copy[key])
        check()
        # The copy is independent: inserts on one side are invisible on the other.
        parameter_dict["p20"] = Parameter(torch.randn(10, 10))
        copy["p21"] = Parameter(torch.randn(9, 10))
        self.assertTrue("p20" in parameter_dict)
        self.assertFalse("p20" in copy)
        self.assertFalse("p21" in parameter_dict)
        self.assertTrue("p21" in copy)
        parameter_dict.pop("p20")
        check()
        p = Parameter(torch.randn(10, 10))
        parameter_dict['p12'] = p
        p_popitem = parameter_dict.popitem()
        self.assertEqual(p_popitem[0], 'p12')
        self.assertIs(p_popitem[1], p)
        # Unit test for set_default
        # 1. Ensure parameter is correctly inserted when
        #    the key is not present in `ParameterDict`
        assert 'p11' not in parameter_dict
        parameters['p11'] = Parameter(torch.randn(10, 10))
        p_setdefault = parameter_dict.setdefault('p11', parameters['p11'])
        self.assertIs(p_setdefault, parameters['p11'])
        # 2. Ensure parameter is NOT inserted when the
        #    key is already present in `ParameterDict`
        p = Parameter(torch.randn(10, 10))
        self.assertFalse(parameter_dict.setdefault('p11', p) is p)
        # 3. Ensure `None` is inserted when the key is not
        #    present in `Parameter` and parameter is not specified
        self.assertIs(parameter_dict.setdefault('p26'), None)
        del parameter_dict['p26']
        check()
        parameters2 = OrderedDict([
            ('p13', Parameter(torch.randn(10, 10))),
            ('p2', Parameter(torch.randn(10, 10))),
            ('p3', Parameter(torch.randn(10, 10))),
        ])
        parameter_dict2 = nn.ParameterDict(parameters2)
        parameters.update(parameters2)
        parameter_dict |= parameter_dict2
        check()
        # |= with an empty dict is a no-op.
        parameters2 = OrderedDict()
        parameter_dict2 = nn.ParameterDict(parameters2)
        parameters.update(parameters2)
        parameter_dict |= parameter_dict2
        check()
        parameters2 = OrderedDict([
            ('p14', Parameter(torch.randn(10, 10))),
            ('p15', Parameter(torch.randn(10, 10))),
            ('p13', Parameter(torch.randn(10, 10))),
        ])
        parameter_dict2 = nn.ParameterDict(parameters2)
        parameters.update(parameters2)
        parameter_dict |= parameter_dict2
        check()
        # Check __or__ and __ror__ works
        parameters2 = OrderedDict([
            ('p20', Parameter(torch.randn(10, 10))),
            ('p21', Parameter(torch.randn(10, 10))),
            ('p22', Parameter(torch.randn(10, 10))),
        ])
        parameter_dict2 = nn.ParameterDict(parameters2)
        parameters.update(parameters2)
        parameter_dict = parameter_dict | parameter_dict2
        check()
        parameters2 = OrderedDict([
            ('p23', Parameter(torch.randn(10, 10))),
            ('p24', Parameter(torch.randn(10, 10))),
            ('p25', Parameter(torch.randn(10, 10))),
        ])
        parameter_dict2 = nn.ParameterDict(parameters2)
        parameters2.update(parameters)
        parameters = parameters2
        parameter_dict = parameter_dict2 | parameter_dict
        check()
        parameters['p17'] = Parameter(torch.randn(10, 10))
        parameter_dict['p17'] = parameters['p17']
        self.assertIs(parameters['p17'], parameter_dict.get('p17'))
        temp_param = Parameter(torch.randn(10, 10))
        self.assertIs(parameters['p17'], parameter_dict.get('p17', temp_param))
        self.assertIs(None, parameter_dict.get('p18'))
        self.assertIs(temp_param, parameter_dict.get('p18', temp_param))
        check()
        parameter_dict.clear()
        self.assertEqual(len(parameter_dict), 0)
        parameters.clear()
        check()
        # fromkeys with no default fills with None; with a default, shares it.
        parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'])
        self.assertEqual({'p19': None, 'p20': None}, parameter_dict2)
        check()
        parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'], temp_param)
        self.assertEqual({'p19': temp_param, 'p20': temp_param}, parameter_dict2)
        check()
    def test_add_module(self):
        """add_module and its alias register_module accept None, register new
        names, overwrite existing submodules, and reject non-module values
        and non-string names."""
        methods_to_test = ['add_module', 'register_module']
        for fn in methods_to_test:
            l = nn.Linear(10, 20)
            net = nn.Module()
            net.l = l
            net.l2 = l
            # None is a legal placeholder submodule.
            getattr(net, fn)('empty', None)
            self.assertEqual(net.l, l)
            self.assertEqual(net.l2, l)
            self.assertEqual(net.empty, None)
            getattr(net, fn)('l3', l)
            self.assertEqual(net.l3, l)
            # Re-registering an existing name overwrites the submodule.
            l3 = nn.Linear(20, 10)
            getattr(net, fn)('l', l3)
            self.assertEqual(net.l, l3)
            self.assertRaises(TypeError, lambda: getattr(net, fn)('x', 'non-module'))
            self.assertRaisesRegex(TypeError, 'module name should be a string. Got int',
                                   lambda: getattr(net, fn)(1, l))
            self.assertRaisesRegex(TypeError, 'module name should be a string. Got NoneType',
                                   lambda: getattr(net, fn)(None, l))
    def test_module_to_argparse(self):
        """Module.to() argument parsing must reject invalid combinations:
        positional non_blocking, non-floating dtypes, and unsupported types."""
        net = nn.Sequential(nn.Linear(3, 3))
        cpu = torch.device('cpu')
        # non_blocking must be passed by keyword, not positionally.
        with self.assertRaises(TypeError):
            net.to(cpu, True)
        # Modules can only be cast to floating-point dtypes.
        with self.assertRaises(TypeError):
            net.to(torch.long)
        with self.assertRaises(TypeError):
            net.to(None, True)
        with self.assertRaises(TypeError):
            net.to(cpu, torch.long, True)
        with self.assertRaises(TypeError):
            net.to(cpu, dtype=torch.long, non_blocking=True)
        # Unsupported first-argument types.
        with self.assertRaises(TypeError):
            net.to([])
        with self.assertRaises(TypeError):
            net.to({}, non_blocking=True)
        with self.assertRaises(TypeError):
            net.to(torch.tensor(3, dtype=torch.long), non_blocking=True)
        with self.assertRaises(TypeError):
            net.to(cpu, torch.tensor(3, dtype=torch.long), non_blocking=True)
def test_RNN_nonlinearity(self):
rnn = torch.nn.RNN(1, 10)
self.assertEqual(rnn.nonlinearity, 'tanh')
rnn = torch.nn.RNN(1, 10, nonlinearity='relu')
self.assertEqual(rnn.nonlinearity, 'relu')
with self.assertRaisesRegex(ValueError, 'Unknown nonlinearity'):
rnn = torch.nn.RNN(1, 10, nonlinearity='garbage')
    def test_module_apply_inplace_op(self):
        """_apply with an in-place function must bump the version counters of
        parameters and of parameter gradients, so stale autograd graphs fail."""
        def add_one_inplace(t):
            return t.add_(1.0)
        # Test that applying an in-place operation to a module would bump
        # the module's parameters' version counter.
        m = nn.Linear(20, 10)
        pvm = m.weight.mul(m.weight)
        m_weight_version_saved = m.weight._version
        m = m._apply(add_one_inplace)
        self.assertGreater(m.weight._version, m_weight_version_saved)
        with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
            pvm.backward(torch.randn(10, 20))
        # Test that applying an in-place operation to a module would bump
        # the module's parameters' gradients' version counter.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20).requires_grad_()
        pgm = m.weight.grad.mul(m.weight.grad)
        m_weight_grad_version_saved = m.weight.grad._version
        m = m._apply(add_one_inplace)
        self.assertGreater(m.weight.grad._version, m_weight_grad_version_saved)
        with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
            pgm.backward(torch.randn(10, 20))
    def test_overwrite_module_params_on_conversion(self):
        """Exercises torch.__future__.set_overwrite_module_params_on_conversion:
        TensorImpl-type changes always overwrite; with the flag on, dtype
        conversions overwrite parameters/grads and version-counter semantics
        are preserved for in-place vs out-of-place _apply functions."""
        # Test that if the conversion function passed to `module._apply()`
        # changes the TensorImpl type of `module`'s parameters, the `module`'s
        # parameters are always overwritten, regardless of the value of
        # `torch.__future__.get_overwrite_module_params_on_conversion()`.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20)
        weight_ref = m.weight
        weight_grad_ref = m.weight.grad
        m = m._apply(lambda t: torch.sparse_coo_tensor(torch.zeros([2, 1]), torch.ones([1]), torch.Size([10, 20])))
        self.assertNotEqual(weight_ref.layout, m.weight.layout)
        self.assertNotEqual(weight_grad_ref.layout, m.weight.grad.layout)
        # Test that under the current default settings
        # (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),
        # a view to a module's parameters is not pointing to the same storage as
        # its base variable after converting the module to a different dtype.
        m = nn.Linear(20, 10).float()
        mw = m.weight[:]
        m.double()
        with torch.no_grad():
            mw[0][0] = 5
        self.assertTrue(mw[0][0].dtype == torch.float)
        self.assertTrue(mw._base[0][0].dtype == torch.double)
        try:
            torch.__future__.set_overwrite_module_params_on_conversion(True)
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # a view to a module's parameters is still pointing to the same storage as
            # its base variable after converting the module to a different dtype.
            m = nn.Linear(20, 10).float()
            mw = m.weight[:]
            m.double()
            with torch.no_grad():
                mw[0][0] = 5
            self.assertTrue(mw[0][0] == mw._base[0][0])
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # `float_module.double()` doesn't preserve previous references to
            # `float_module`'s parameters or gradients.
            m = nn.Linear(20, 10).float()
            m.weight.grad = torch.randn(10, 20).float()
            weight_ref = m.weight
            weight_grad_ref = m.weight.grad
            m.double()
            self.assertNotEqual(weight_ref.dtype, m.weight.dtype)
            self.assertNotEqual(weight_grad_ref.dtype, m.weight.grad.dtype)
            def add_one_inplace(t):
                return t.add_(1.0)
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # applying an in-place operation to a module would bump the module's
            # original parameters' version counter.
            m = nn.Linear(20, 10)
            pvm = m.weight.mul(m.weight)
            weight_ref = m.weight
            m_weight_version_saved = weight_ref._version
            m = m._apply(add_one_inplace)
            # Test that the in-place operation bumps the original parameter's version counter
            self.assertGreater(weight_ref._version, m_weight_version_saved)
            with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
                pvm.backward(torch.randn(10, 20))
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # applying an in-place operation to a module would bump the module's
            # original parameters' gradients' version counter.
            m = nn.Linear(20, 10)
            m.weight.grad = torch.randn(10, 20).requires_grad_()
            pgm = m.weight.grad.mul(m.weight.grad)
            weight_grad_ref = m.weight.grad
            m_weight_grad_version_saved = weight_grad_ref._version
            m = m._apply(add_one_inplace)
            self.assertGreater(weight_grad_ref._version, m_weight_grad_version_saved)
            with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
                pgm.backward(torch.randn(10, 20))
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # applying an out-of-place operation to a module doesn't bump
            # the module's original parameters' version counter.
            m = nn.Linear(20, 10)
            weight_ref = m.weight
            m_weight_version_saved = weight_ref._version
            m = m._apply(lambda t: torch.randn(t.shape))
            self.assertEqual(weight_ref._version, m_weight_version_saved)
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # applying an out-of-place operation to a module doesn't bump
            # the module's original parameters' gradients' version counter.
            m = nn.Linear(20, 10)
            m.weight.grad = torch.randn(10, 20).requires_grad_()
            weight_grad_ref = m.weight.grad
            m_weight_grad_version_saved = weight_grad_ref._version
            m = m._apply(lambda t: torch.randn(t.shape))
            self.assertEqual(weight_grad_ref._version, m_weight_grad_version_saved)
        finally:
            # Always restore the global flag so later tests see the default.
            torch.__future__.set_overwrite_module_params_on_conversion(False)
    def test_type(self):
        """float()/double()/to() convert floating-point parameters but leave
        integer buffers (indices) alone; CUDA paths are gated on TEST_CUDA."""
        l = nn.Linear(10, 20)
        net = nn.Module()
        net.l = l
        net.l2 = l
        net.add_module('empty', None)
        # Integer buffer: must keep its dtype through float/double/half casts.
        net.register_buffer('indices', torch.LongTensor(1))
        net.float()
        self.assertIsInstance(l.weight.data, torch.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.FloatTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        net.double()
        self.assertIsInstance(l.weight.data, torch.DoubleTensor)
        self.assertIsInstance(l.bias.data, torch.DoubleTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        net.to(torch.half)
        self.assertIsInstance(l.weight.data, torch.HalfTensor)
        self.assertIsInstance(l.bias.data, torch.HalfTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        if TEST_CUDA:
            # Device moves affect all tensors, including integer buffers.
            net.float().cuda()
            self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
            self.assertIsInstance(net.indices, torch.cuda.LongTensor)
            net.cpu()
            self.assertIsInstance(l.weight.data, torch.FloatTensor)
            self.assertIsInstance(l.bias.data, torch.FloatTensor)
            self.assertIsInstance(net.indices, torch.LongTensor)
            net.to("cuda", torch.double, True)
            self.assertIsInstance(l.weight.data, torch.cuda.DoubleTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.DoubleTensor)
            self.assertIsInstance(net.indices, torch.cuda.LongTensor)
            # to(tensor) adopts the tensor's device and dtype.
            net.to(torch.empty(1, device="cuda:0", dtype=torch.half))
            self.assertIsInstance(l.weight.data, torch.cuda.HalfTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.HalfTensor)
            self.assertIsInstance(net.indices, torch.cuda.LongTensor)
        net.to(torch.device("cpu"), non_blocking=True)
        self.assertIsInstance(l.weight.data, torch.HalfTensor)
        self.assertIsInstance(l.bias.data, torch.HalfTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        net.to(torch.float)
        self.assertIsInstance(l.weight.data, torch.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.FloatTensor)
        net.to(torch.DoubleTensor(1))
        self.assertIsInstance(l.weight.data, torch.DoubleTensor)
        self.assertIsInstance(l.bias.data, torch.DoubleTensor)
        if TEST_CUDA:
            net.to(device='cuda', dtype=torch.float)
            self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
def test_non_leaf_parameters(self):
l1 = nn.Linear(10, 10)
l2 = nn.Linear(10, 10)
def assign_weight():
l2.weight = l1.weight + 2
self.assertRaises(TypeError, assign_weight)
# This should work though
l2.weight = Parameter(torch.randn(10, 10))
    def test_clip_grad_norm(self):
        """clip_grad_norm_ scales large gradients down to max_norm, leaves
        small gradients untouched, returns the pre-clip norm, and accepts a
        single Tensor as well as an iterable."""
        l = nn.Linear(10, 10)
        max_norm = 2
        # Reference total-norm computation over all parameter grads.
        def compute_norm(norm_type):
            norm_type = float(norm_type)
            if norm_type != inf:
                total_norm = 0
                for p in l.parameters():
                    total_norm += p.grad.data.abs().pow(norm_type).sum()
                return pow(total_norm, 1. / norm_type)
            else:
                return max(p.grad.data.abs().max() for p in l.parameters())
        # Clipping must scale every grad by the same factor; returns that factor.
        def compare_scaling(grads):
            p_scale = [p.grad.data.div(g).view(-1) for p, g in zip(l.parameters(), grads)]
            scale = torch.cat(p_scale)
            self.assertEqual(scale.std(), 0)
            return scale[0]
        grads = torch.arange(1., 101).view(10, 10), torch.ones(10).div(1000)
        for norm_type in [0.5, 1.5, 2, 4, 'inf']:
            for p, g in zip(l.parameters(), grads):
                p._grad = g.clone().view_as(p.data)
            norm_before = compute_norm(norm_type)
            norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
            norm_after = compute_norm(norm_type)
            self.assertEqual(norm, norm_before)
            self.assertEqual(norm_after, max_norm)
            self.assertLessEqual(norm_after, norm_before)
            compare_scaling(grads)
        # Small gradients should be left unchanged
        grads = torch.rand(10, 10).div(10000), torch.ones(10).div(500)
        for norm_type in [0.5, 1.5, 2, 4, 'inf']:
            for p, g in zip(l.parameters(), grads):
                p.grad.data.copy_(g)
            norm_before = compute_norm(norm_type)
            norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
            norm_after = compute_norm(norm_type)
            self.assertEqual(norm, norm_before)
            self.assertEqual(norm_before, norm_after)
            self.assertLessEqual(norm_after, max_norm)
            scale = compare_scaling(grads)
            self.assertEqual(scale, 1)
        # Should accept a single Tensor as input
        p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
        g = torch.arange(1., 101).view(10, 10)
        p1._grad = g.clone()
        p2._grad = g.clone()
        for norm_type in [0.5, 1.5, 2, 4, 'inf']:
            clip_grad_norm_(p1, max_norm, norm_type=norm_type)
            clip_grad_norm_([p2], max_norm, norm_type=norm_type)
            self.assertEqual(p1.grad, p2.grad)
    def test_clip_grad_value(self):
        """clip_grad_value_ clamps every gradient element into
        [-clip_value, clip_value], skips params with grad=None, and accepts a
        single Tensor as well as an iterable."""
        l = nn.Linear(10, 10)
        clip_value = 2.5
        grad_w, grad_b = torch.arange(-50., 50).view(10, 10).div_(5), torch.ones(10).mul_(2)
        # Second pass leaves the bias grad as None to check it is skipped.
        for grad_list in [[grad_w, grad_b], [grad_w, None]]:
            for p, g in zip(l.parameters(), grad_list):
                p._grad = g.clone().view_as(p.data) if g is not None else g
            clip_grad_value_(l.parameters(), clip_value)
            for p in filter(lambda p: p.grad is not None, l.parameters()):
                self.assertLessEqual(p.grad.data.max(), clip_value)
                self.assertGreaterEqual(p.grad.data.min(), -clip_value)
        # Should accept a single Tensor as input
        p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
        g = torch.arange(-50., 50).view(10, 10).div_(5)
        p1._grad = g.clone()
        p2._grad = g.clone()
        clip_grad_value_(p1, clip_value)
        clip_grad_value_([p2], clip_value)
        self.assertEqual(p1.grad, p2.grad)
def test_parameters_to_vector(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = parameters_to_vector(model.parameters())
self.assertEqual(vec.size(0), 980)
def test_vector_to_parameters(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = torch.arange(0., 980)
vector_to_parameters(vec, model.parameters())
sample = next(model.parameters())[0, 0, 0]
self.assertTrue(torch.equal(sample.data, vec.data[:5]))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
# torch/nn/utils/parametrize
    @skipIfNoLapack
    def test_register_and_remove_parametrization(self):
        r"""Test that it is possible to add a few parametrizations
        on a parameter or a buffer, and that removing them restores the initial
        state. It also tests that backpropagating through them works as expected.
        """
        # Define a couple matrix parametrizations
        class Skew(nn.Module):
            def forward(self, X):
                X = X.tril(-1)
                return X - X.T
        class Orthogonal(nn.Module):
            def forward(self, X):
                # Cayley map
                # If X is skew-symmetric it returns an orthogonal matrix
                Id = torch.eye(X.size(0), device=X.device)
                # We call contiguous because solve returns a tensor with strides that are Fortran-contiguous
                # and autograd raises a performance warning.
                # This happens when we remove the parametrization with leave_parametrized=True,
                # which does a set_ with a non-contiguous tensor while the gradient is contiguous
                return torch.linalg.solve(Id + X, Id - X).contiguous()
        # Shape-changing parametrization: only registrable with unsafe=True
        class Resize(nn.Module):
            def forward(self, X):
                return X[[0]]
        class NoResize(nn.Module):
            def forward(self, X):
                return X
        # Define a couple vector parametrizations
        class FirstZero(nn.Module):
            def forward(self, x):
                return torch.cat([x.new_zeros(1), x[1:]])
        class LastZero(nn.Module):
            def forward(self, x):
                return torch.cat([x[:-1], x.new_zeros(1)])
        model = nn.Linear(8, 8)
        initial_weight_id = id(model.weight)
        initial_bias_id = id(model.bias)
        initial_model = deepcopy(model)
        # Test unsafe flag
        with self.assertRaisesRegex(ValueError, "Registering a parametrization may not change the shape of the tensor"):
            parametrize.register_parametrization(model, "weight", Resize())  # default unsafe = False
            model(torch.ones(8, 8))
        # One parametrization with unsafe=True
        parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        A = model.weight
        self.assertTrue(A.shape[0] == 1)
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)
        # Two parametrizations with unsafe=True
        parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
        parametrize.register_parametrization(model, "weight", NoResize(), unsafe=False)
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        A = model.weight
        self.assertTrue(A.shape[0] == 1)
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)
        # Test unsafe flag doesn't change expected behavior
        parametrize.register_parametrization(model, "weight", Skew(), unsafe=True)
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be skew-symmetric
        A = model.weight
        self.assertEqual(A, -A.T)
        # Remove and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)
        # Test one parametrization
        parametrize.register_parametrization(model, "weight", Skew())
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be skew-symmetric
        A = model.weight
        self.assertEqual(A, -A.T)
        # Remove and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)
        # Test two parametrizations at the same time and removing them
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())
        # Result should be orthogonal
        X = model.weight
        Id = torch.eye(X.size(0), device=X.device)
        self.assertEqual(X.T @ X, Id)
        # Structure tests
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertIn("weight", model.parametrizations)
        self.assertNotIn("weight", model._parameters)
        # Remove
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
        # Add everything
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())
        parametrize.register_parametrization(model, "bias", FirstZero())
        parametrize.register_parametrization(model, "bias", LastZero())
        # Basic tests
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertTrue(parametrize.is_parametrized(model, "bias"))
        self.assertEqual(model.bias[0].item(), 0.)
        self.assertEqual(model.bias[-1].item(), 0.)
        self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened
        # Should not throw
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)
        # Remove first parametrization.
        # Check that the model is still parametrized and so is the second parameter
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertTrue(parametrize.is_parametrized(model))  # Still parametrized
        self.assertFalse(parametrize.is_parametrized(model, "weight"))  # Parametrization removed
        self.assertTrue(parametrize.is_parametrized(model, "bias"))  # Still parametrized
        self.assertEqual(model.bias[0].item(), 0.)  # Still parametrized
        self.assertEqual(model.bias[-1].item(), 0.)  # Still parametrized
        self.assertNotEqual(model.weight, initial_model.weight)  # Has been updated
        self.assertEqual(id(model.weight), initial_weight_id)  # Keeps the same id
        self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened
        # Should not throw
        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)
        # Remove the second parametrization.
        # Check that the module is not parametrized
        parametrize.remove_parametrizations(model, "bias", leave_parametrized=False)
        self.assertFalse(parametrize.is_parametrized(model))  # Not parametrized
        self.assertNotEqual(model.bias, initial_model.bias)  # Has been updated
        self.assertNotEqual(model.bias[0].item(), 0.)  # Not parametrized
        self.assertNotEqual(model.bias[-1].item(), 0.)  # Not parametrized
        self.assertEqual(id(model.bias), initial_bias_id)  # Keeps the same id
        self.assertFalse(hasattr(model, "parametrizations"))  # Not parametrized the module
        self.assertEqual(model.__class__, nn.Linear)  # Restores the previous class
        self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened
        # Should not throw. Things are updated
        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)
        # Test leave_parametrized=True
        for _ in range(2):
            parametrize.register_parametrization(model, "weight", Skew())
            parametrize.register_parametrization(model, "weight", Orthogonal())
            parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
            # We didn't change the dtype nor had multiple inputs, so the id should be the same
            self.assertEqual(id(model.weight), initial_weight_id)
            self.assertEqual(id(model.bias), initial_bias_id)
            # Should not throw. Things are updated
            weight_copy = model.weight.clone()
            bias_copy = model.bias.clone()
            sgd.zero_grad()
            (model.weight.T @ model.bias).sum().backward()
            sgd.step()
            self.assertNotEqual(model.weight, weight_copy)
            self.assertNotEqual(model.bias, bias_copy)
    def test_register_and_remove_nested_parametrization(self):
        r"""Test that it is possible to nest the parametrizations,
        meaning that the original parameter is parametrized again.
        """
        class Skew(nn.Module):
            def forward(self, X):
                X = X.tril(-1)
                return X - X.T
        model = nn.Linear(8, 8)
        # Add top level parametrization
        parametrize.register_parametrization(model, "weight", Skew())
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be skew-symmetric
        A = model.weight
        self.assertEqual(A, -A.T)
        # Add nested parametrization on the "original" tensor held by the
        # ParametrizationList itself
        param_mod = model.parametrizations.weight
        self.assertFalse(hasattr(param_mod, "parametrizations"))
        self.assertFalse(parametrize.is_parametrized(param_mod))
        self.assertFalse(parametrize.is_parametrized(param_mod, "original"))
        parametrize.register_parametrization(param_mod, "original", Skew())
        self.assertTrue(hasattr(param_mod, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(param_mod))
        self.assertTrue(parametrize.is_parametrized(param_mod, "original"))
        self.assertNotIn("original", param_mod._parameters)
        # Result should be skew-symmetric
        A = param_mod.original
        self.assertEqual(A, -A.T)
        # Remove nested param and check consistency
        parametrize.remove_parametrizations(param_mod, "original", leave_parametrized=False)
        self.assertFalse(hasattr(param_mod, "parametrizations"))
        self.assertEqual(param_mod.__class__, parametrize.ParametrizationList)
        # Remove top level and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
    def test_register_and_remove_buffer_parametrization(self):
        r"""Test that it is possible to add and remove parametrizations on buffers"""
        # Define a couple vector parametrizations
        class FirstZero(nn.Module):
            def forward(self, x):
                return torch.cat([x.new_zeros(1), x[1:]])
        class LastZero(nn.Module):
            def forward(self, x):
                return torch.cat([x[:-1], x.new_zeros(1)])
        model = nn.Linear(8, 8)
        # Instantiate parametrizations on buffers. It should work as expected
        delattr(model, "bias")
        model.register_buffer("bias", torch.ones(8))
        parametrize.register_parametrization(model, "bias", FirstZero())
        parametrize.register_parametrization(model, "bias", LastZero())
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "bias"))
        self.assertEqual(model.bias[0].item(), 0.)
        self.assertEqual(model.bias[-1].item(), 0.)
        self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
        # The bias is now a buffer, so only the weight counts as a parameter
        self.assertEqual(len(list(model.parameters())), 1)
        # Remove parametrizations on buffers. It should work as expected
        parametrize.remove_parametrizations(model, "bias", leave_parametrized=True)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertEqual(model.bias[0].item(), 0.)
        self.assertEqual(model.bias[-1].item(), 0.)
        self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
        self.assertEqual(len(list(model.parameters())), 1)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_serialization_parametrization(self):
        r"""Test that it is possible to serialize a parametrized model via state_dict"""
        # A stateful parametrization
        class Orthogonal(nn.Module):
            def __init__(self, n):
                super().__init__()
                self.register_buffer("id", torch.eye(n))
                self.register_buffer("B", torch.empty(n, n))
                init.orthogonal_(self.B)
            def forward(self, X):
                # Cayley map of the skew-symmetric part of X, premultiplied
                # by the fixed orthogonal buffer B
                A = X.triu(1)
                A = A - A.T
                return self.B @ torch.linalg.solve(self.id + A, self.id - A)
        def get_model():
            model = torch.nn.Sequential(
                torch.nn.Linear(5, 5),
                torch.nn.ReLU(),
                torch.nn.Linear(5, 1),
            )
            parametrize.register_parametrization(model[0], "weight", Orthogonal(5))
            return model
        model = get_model()
        prev_weight = model[0].weight
        prev_B = model[0].parametrizations.weight[0].B
        new_model = get_model()
        with TemporaryFileName() as fname:
            torch.save(model.state_dict(), fname)
            new_model.load_state_dict(torch.load(fname))
        # Integrity tests: the parametrization and its buffers survive a
        # state_dict round-trip
        self.assertTrue(parametrize.is_parametrized(new_model[0], "weight"))
        self.assertEqual(prev_weight, new_model[0].weight)
        self.assertEqual(prev_B, new_model[0].parametrizations.weight[0].B)
        # Trying to save the whole parametrized model raises
        with self.assertRaisesRegex(RuntimeError, "state_dict"):
            with TemporaryFileName() as fname:
                torch.save(model, fname)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_initialization_parametrization(self):
        r"""Test that it is possible to initialize a parametrization when it
        implements a `right_inverse` method.
        """
        class Skew(nn.Module):
            def forward(self, X):
                A = X.triu(1)
                return A - A.T
            def is_skew(self, A):
                return torch.allclose(A, -A.T, atol=1e-6)
            def right_inverse(self, X):
                if not self.is_skew(X):
                    raise ValueError("The matrix is not skew-symmetric.")
                return X.triu(1)
        # Implements a Cayley map where right_inverse is not quite the inverse of forward
        class Orthogonal(nn.Module):
            def __init__(self, n):
                super().__init__()
                self.register_buffer("B", torch.eye(n))
            def forward(self, X):
                Id = torch.eye(X.size(0))
                return self.B @ torch.linalg.solve(Id + X, Id - X)
            def is_orthogonal(self, X):
                Id = torch.eye(X.size(0))
                return torch.allclose(X.T @ X, Id, atol=1e-4)
            def right_inverse(self, X):
                if not self.is_orthogonal(X):
                    raise ValueError("The input is not orthogonal.")
                # cayley(0) == Id, so B @ cayley(0) == B
                self.B = X
                return torch.zeros_like(X)
        N = 5
        model = nn.Linear(N, N)
        # Register the skew-symmetric constraint. The result is now skew-symmetric
        skew = Skew()
        # Make the weight skew-symmetric before registering the parametrization
        with torch.no_grad():
            model.weight.set_(skew(model.weight))
        parametrize.register_parametrization(model, "weight", skew)
        X = torch.rand(N, N)
        # X is not skew-symmetric, so it throws an error
        with self.assertRaises(ValueError):
            model.weight = X
        # Make X skew-symmetric
        X = X - X.T
        model.weight = X
        self.assertEqual(model.parametrizations.weight.original, X.triu(1))
        self.assertEqual(model.weight, X)
        # Having several parametrizations registered should work in the same way
        # Register now the Cayley map. The result is now orthogonal
        parametrize.register_parametrization(model, "weight", Orthogonal(N))
        X = torch.rand(N, N)
        # X is not orthogonal, so it throws an error
        with self.assertRaises(ValueError):
            model.weight = X
        init.orthogonal_(X)
        model.weight = X
        self.assertEqual(model.weight, X)
        self.assertEqual(model.parametrizations.weight.original, torch.zeros_like(X))
    def test_errors_unparametrized_tensor_parametrization(self):
        r"""Test errors when registering a parametrization on an unparametrized tensor."""
        module = nn.Linear(3, 4)
        weight_init = module.weight.clone()
        class Identity(nn.Module):
            def forward(self, x):
                return x
        # Register a parametrization on a non-existing parameter throws
        with self.assertRaisesRegex(ValueError, "does not have a parameter"):
            parametrize.register_parametrization(module, "foo", Identity())
        self.assertFalse(parametrize.is_parametrized(module))
        # Removing parametrizations from an unparametrized tensor throws
        with self.assertRaisesRegex(ValueError, "does not have a parametrization"):
            parametrize.remove_parametrizations(module, "bias")
        self.assertFalse(parametrize.is_parametrized(module))
        # A correct parametrization with several outputs
        class Sum(nn.Module):
            def forward(self, x, y):
                return x + y
            def right_inverse(self, z):
                return z, torch.zeros_like(z)
        parametrize.register_parametrization(module, "weight", Sum())
        # Cannot remove a parametrization with several outputs with `leave_parametrized=False`
        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            parametrize.remove_parametrizations(module, "weight", leave_parametrized=False)
        parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
        # A parametrization with an incorrect number of outputs
        class WrongNumberParams(nn.Module):
            def forward(self, x, y, z):
                return x + y + z
            def right_inverse(self, w):
                return w, torch.zeros_like(w)
        # Makes param(*param.right_inverse(X)) fail
        with self.assertRaisesRegex(TypeError, "positional argument"):
            parametrize.register_parametrization(module, "weight", WrongNumberParams())
        self.assertFalse(parametrize.is_parametrized(module))
        # A parametrization with a right_inverse that does not return a Tensor or Sequence[Tensor]
        class WrongRightInverse(Identity):
            def right_inverse(self, z):
                return None
        # right_inverse should return a Tensor or a Sequence[Tensor]
        with self.assertRaisesRegex(ValueError, "Tensor or a Sequence of"):
            parametrize.register_parametrization(module, "weight", WrongRightInverse())
        self.assertFalse(parametrize.is_parametrized(module))
        # If it's a sequence, it must be a sequence of tensors
        class WrongRightInverseSequence(nn.Module):
            def forward(self, x, y):
                return x
            def right_inverse(self, z):
                return None, z
        with self.assertRaisesRegex(ValueError, "of the sequence with type"):
            parametrize.register_parametrization(module, "weight", WrongRightInverseSequence())
        self.assertFalse(parametrize.is_parametrized(module))
        # A parametrization from one tensor to one tensor that changes the dtype
        class ChangeDtypeInverse(nn.Module):
            def forward(self, x):
                return x.float()
            def right_inverse(self, w):
                return w.bool()
        # For parametrizations that return one tensor, right_inverse may not change the dtype
        with self.assertRaisesRegex(ValueError, "outputs one tensor, it may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
        self.assertFalse(parametrize.is_parametrized(module))
        # Doesn't return a tensor
        class NotTensor(nn.Module):
            def forward(self, x):
                return 2
        # Forward must return a tensor
        with self.assertRaisesRegex(ValueError, "must return a tensor"):
            parametrize.register_parametrization(module, "weight", NotTensor())
        self.assertFalse(parametrize.is_parametrized(module))
        # A parametrization from one tensor to one tensor that changes the dtype
        class ChangeDtype(nn.Module):
            def forward(self, x):
                return x.bool()
        # forward should not change the initial dtype
        with self.assertRaisesRegex(ValueError, "may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtype())
        self.assertFalse(parametrize.is_parametrized(module))
        # Change shape
        class ChangeShape(nn.Module):
            def forward(self, x):
                return x[:-1]
        # forward should not change the original shape
        with self.assertRaisesRegex(ValueError, "may not change the shape"):
            parametrize.register_parametrization(module, "weight", ChangeShape())
        self.assertFalse(parametrize.is_parametrized(module))
        # Many to one that changes dtype
        class ChangeDtypeMulti(nn.Module):
            def forward(self, x, y):
                return (x + y).bool()
            def right_inverse(self, w):
                return w, w + 1
        # forward should not change the original dtype even for parametrizations with many inputs
        with self.assertRaisesRegex(ValueError, "may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtypeMulti())
        self.assertFalse(parametrize.is_parametrized(module))
        # Returning a sequence of size one, although weird, it's correct
        class SequenceLen1(nn.Module):
            def forward(self, x):
                return x
            def right_inverse(self, w):
                return (w,)
        parametrize.register_parametrization(module, "weight", SequenceLen1())
        self.assertTrue(hasattr(module.parametrizations.weight, "original0"))
        self.assertFalse(hasattr(module.parametrizations.weight, "original1"))
        _ = module.weight  # Does not throw
        self.assertTrue(parametrize.is_parametrized(module))
        parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
        # None of the operations above should have altered the weight
        self.assertFalse(parametrize.is_parametrized(module))
        self.assertEqual(module.weight, weight_init)
    def test_errors_parametrized_tensor_parametrization(self):
        r"""Test errors when registering a parametrization on an already parametrized tensor."""
        class Identity(nn.Module):
            def forward(self, x):
                return x
        module = nn.Linear(3, 4)
        parametrize.register_parametrization(module, "weight", Identity())
        # Has to return a tensor
        class WrongReturn(nn.Module):
            def forward(self, x):
                return x, x
        with self.assertRaisesRegex(ValueError, "must return a tensor"):
            parametrize.register_parametrization(module, "weight", WrongReturn())
        # The failed registration must leave the original Identity parametrization intact
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
        # Cannot change dtype
        class ChangeDtype(nn.Module):
            def forward(self, x):
                return x.bool()
        with self.assertRaisesRegex(ValueError, "may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtype())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
        # Cannot change shape
        class ChangeShape(nn.Module):
            def forward(self, x):
                return x[:-1]
        with self.assertRaisesRegex(ValueError, "may not change the shape"):
            parametrize.register_parametrization(module, "weight", ChangeShape())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
        # The following checks are mostly due to bugs in the code of the parametrization
        # right_inverse has to return a tensor
        class WrongReturnInverse(Identity):
            def right_inverse(self, x):
                return x, x
        with self.assertRaisesRegex(ValueError, "right_inverse must return a tensor"):
            parametrize.register_parametrization(module, "weight", WrongReturnInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
        # Cannot change dtype
        class ChangeDtypeInverse(Identity):
            def right_inverse(self, x):
                return x.bool()
        with self.assertRaisesRegex(ValueError, "must have the same dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
        # Cannot change shape
        class ChangeShapeInverse(Identity):
            def right_inverse(self, x):
                return x[:-1]
        with self.assertRaisesRegex(ValueError, "must have the same shape"):
            parametrize.register_parametrization(module, "weight", ChangeShapeInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_multiple_inputs_parametrization(self):
        r"""Test parametrizations whose `right_inverse` returns several tensors."""
        # A parametrization with several outputs
        class RankOne(nn.Module):
            def forward(self, x, y):
                # Form a rank-1 matrix from a pair of vectors
                return x.unsqueeze(-1) @ y.unsqueeze(-2)
            def right_inverse(self, Y):
                # We project the given matrix onto the rank 1 matrices
                U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
                # S is ordered in a decreasing way.
                s0_sqrt = S[0].sqrt().unsqueeze(-1)
                return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt
        # Simple parametrization
        class Double(nn.Module):
            def forward(self, x):
                return 2.0 * x
            def right_inverse(self, w):
                return 0.5 * w
        model = nn.Linear(3, 3)
        # Test one parametrization
        parametrize.register_parametrization(model, "weight", RankOne())
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        # Each tensor returned by right_inverse is stored as an "originalN" parameter
        self.assertTrue(hasattr(model.parametrizations.weight, "original0"))
        self.assertIn("original0", model.parametrizations.weight._parameters)
        self.assertTrue(hasattr(model.parametrizations.weight, "original1"))
        self.assertIn("original1", model.parametrizations.weight._parameters)
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be rank 1
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            # Cannot remove a parametrization with multiple inputs and not leave it parametrized
            parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        # Remove parametrization and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        self.assertIn("weight", model._parameters)
        # Registering parametrizations with one input on top of one with multiple inputs should work
        init_weight = model.weight.clone()
        parametrize.register_parametrization(model, "weight", RankOne())
        # Projecting a rank 1 matrix onto the matrices of rank one does not change the matrix
        self.assertEqual(init_weight, model.weight)
        parametrize.register_parametrization(model, "weight", Double())
        # The matrix now is twice the initial matrix
        self.assertEqual(2.0 * init_weight, model.weight)
        # Multiplying by a scalar does not change the rank
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        # The model has now three parameters
        self.assertEqual(len(list(model.parameters())), 3)
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        # Test backward. Should not throw
        for _ in range(2):
            sgd.zero_grad()
            loss = (model.weight.T @ model.bias).sum()
            loss.backward()
            sgd.step()
        # Same drill as before, removing should work as expected
        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            # Cannot remove a parametrization with multiple inputs and not leave it parametrized
            parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        # Remove parametrization and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        self.assertIn("weight", model._parameters)
        # The model has now two parameters
        self.assertEqual(len(list(model.parameters())), 2)
        # Test backward. Should not throw
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        for _ in range(2):
            sgd.zero_grad()
            loss = (model.weight.T @ model.bias).sum()
            loss.backward()
            sgd.step()
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_caching_parametrization(self):
r"""Test the caching system of a parametrization"""
# Define a couple matrix parametrizations
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
Id = torch.eye(X.size(0), device=X.device)
return torch.linalg.solve(Id + X, Id - X)
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
# Test that the caching system works
with parametrize.cached():
X = model.weight
Y = model.weight
self.assertEqual(id(X), id(Y))
def test_parametrization_same_training_mode(self):
r"""Test training mode updated on parametrization registration"""
class Identity(nn.Module):
def forward(self, X):
return X
module = nn.Linear(4, 4)
module.eval()
parametrize.register_parametrization(module, "weight", Identity())
self.assertFalse(module.parametrizations.weight[0].training)
module.train()
parametrize.register_parametrization(module, "weight", Identity().eval())
self.assertTrue(module.parametrizations.weight[0].training)
self.assertTrue(module.parametrizations.weight[1].training)
# torch/nn/utils/prune.py
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount_init(self):
r"""Test the first util function that validates the pruning
amount requested by the user the moment the pruning method
is initialized. This test checks that the expected errors are
raised whenever the amount is invalid.
The original function runs basic type checking + value range checks.
It doesn't check the validity of the pruning amount with
respect to the size of the tensor to prune. That's left to
`_validate_pruning_amount`, tested below.
"""
# neither float not int should raise TypeError
with self.assertRaises(TypeError):
prune._validate_pruning_amount_init(amount="I'm a string")
# float not in [0, 1] should raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=1.1)
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=20.)
# negative int should raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=-10)
# all these should pass without errors because they're valid amounts
prune._validate_pruning_amount_init(amount=0.34)
prune._validate_pruning_amount_init(amount=1500)
prune._validate_pruning_amount_init(amount=0)
prune._validate_pruning_amount_init(amount=0.)
prune._validate_pruning_amount_init(amount=1)
prune._validate_pruning_amount_init(amount=1.)
self.assertTrue(True)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount(self):
r"""Tests the second util function that validates the pruning
amount requested by the user, this time with respect to the size
of the tensor to prune. The rationale is that if the pruning amount,
converted to absolute value of units to prune, is larger than
the number of units in the tensor, then we expect the util function
to raise a value error.
"""
# if amount is int and amount > tensor_size, raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount(amount=20, tensor_size=19)
# amount is a float so this should not raise an error
prune._validate_pruning_amount(amount=0.3, tensor_size=0)
# this is okay
prune._validate_pruning_amount(amount=19, tensor_size=20)
prune._validate_pruning_amount(amount=0, tensor_size=0)
prune._validate_pruning_amount(amount=1, tensor_size=1)
self.assertTrue(True)
    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    def test_compute_nparams_to_prune(self):
        r"""Test that requested pruning `amount` gets translated into the
        correct absolute number of units to prune.
        """
        self.assertEqual(
            prune._compute_nparams_toprune(amount=0, tensor_size=15),
            0
        )
        self.assertEqual(
            prune._compute_nparams_toprune(amount=10, tensor_size=15),
            10
        )
        # if 1 is int, means 1 unit
        self.assertEqual(
            prune._compute_nparams_toprune(amount=1, tensor_size=15),
            1
        )
        # if 1. is float, means 100% of units
        self.assertEqual(
            prune._compute_nparams_toprune(amount=1., tensor_size=15),
            15
        )
        # fractional amount: 0.4 of 17 units -> 7 units
        self.assertEqual(
            prune._compute_nparams_toprune(amount=0.4, tensor_size=17),
            7
        )
    def test_random_pruning_sizes(self):
        r"""Test that the new parameters and buffers created by the pruning
        method have the same size as the input tensor to prune. These, in
        fact, correspond to the pruned version of the tensor itself, its
        mask, and its original copy, so the size must match.
        """
        # fixturize test
        # TODO: add other modules
        # (covers a 2D Linear weight and a 5D Conv3d weight, plus 1D biases)
        modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
        names = ['weight', 'bias']
        for m in modules:
            for name in names:
                with self.subTest(m=m, name=name):
                    original_tensor = getattr(m, name)
                    prune.random_unstructured(m, name=name, amount=0.1)
                    # mask has the same size as tensor being pruned
                    self.assertEqual(
                        original_tensor.size(),
                        getattr(m, name + '_mask').size()
                    )
                    # 'orig' tensor has the same size as the original tensor
                    self.assertEqual(
                        original_tensor.size(),
                        getattr(m, name + '_orig').size()
                    )
                    # new tensor has the same size as the original tensor
                    self.assertEqual(
                        original_tensor.size(),
                        getattr(m, name).size()
                    )
    def test_random_pruning_orig(self):
        r"""Test that original tensor is correctly stored in 'orig'
        after pruning is applied. Important to make sure we don't
        lose info about the original unpruned parameter.
        """
        # fixturize test
        # TODO: add other modules
        modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
        names = ['weight', 'bias']
        for m in modules:
            for name in names:
                with self.subTest(m=m, name=name):
                    # tensor prior to pruning
                    original_tensor = getattr(m, name)
                    prune.random_unstructured(m, name=name, amount=0.1)
                    # `<name>_orig` must hold the unpruned values verbatim
                    self.assertEqual(
                        original_tensor,
                        getattr(m, name + '_orig')
                    )
    def test_random_pruning_new_weight(self):
        r"""Test that module.name now contains a pruned version of
        the original tensor obtained from multiplying it by the mask.
        """
        # fixturize test
        # TODO: add other modules
        modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
        names = ['weight', 'bias']
        for m in modules:
            for name in names:
                with self.subTest(m=m, name=name):
                    # tensor prior to pruning
                    original_tensor = getattr(m, name)
                    prune.random_unstructured(m, name=name, amount=0.1)
                    # weight = weight_orig * weight_mask
                    # (the mask is cast to the weight dtype before multiplying)
                    self.assertEqual(
                        getattr(m, name),
                        getattr(m, name + '_orig')
                        * getattr(m, name + '_mask').to(
                            dtype=original_tensor.dtype
                        ),
                    )
    def test_identity_pruning(self):
        r"""Test that a mask of 1s does not change forward or backward.
        """
        input_ = torch.ones(1, 5)
        m = nn.Linear(5, 2)
        y_prepruning = m(input_)  # output prior to pruning
        # compute grad pre-pruning and check it's equal to all ones
        y_prepruning.sum().backward()
        old_grad_weight = m.weight.grad.clone()  # don't grab pointer!
        self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
        old_grad_bias = m.bias.grad.clone()
        self.assertEqual(old_grad_bias, torch.ones_like(m.bias))
        # remove grads
        m.zero_grad()
        # force the mask to be made of all 1s
        prune.identity(m, name="weight")
        # with mask of 1s, output should be identical to no mask
        y_postpruning = m(input_)
        self.assertEqual(y_prepruning, y_postpruning)
        # with mask of 1s, grad should be identical to no mask
        # (after pruning, `weight_orig` is the trainable Parameter, so the
        # grad accumulates there rather than on `weight`)
        y_postpruning.sum().backward()
        self.assertEqual(old_grad_weight, m.weight_orig.grad)
        self.assertEqual(old_grad_bias, m.bias.grad)
        # calling forward twice in a row shouldn't change output
        y1 = m(input_)
        y2 = m(input_)
        self.assertEqual(y1, y2)
    def test_random_pruning_0perc(self):
        r"""Test that a mask of 1s does not change forward or backward.
        Here the all-ones mask is forced by mocking `compute_mask`, so the
        requested pruning `amount` is irrelevant.
        """
        input_ = torch.ones(1, 5)
        m = nn.Linear(5, 2)
        y_prepruning = m(input_)  # output prior to pruning
        # compute grad pre-pruning and check it's equal to all ones
        y_prepruning.sum().backward()
        old_grad_weight = m.weight.grad.clone()  # don't grab pointer!
        self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
        old_grad_bias = m.bias.grad.clone()
        self.assertEqual(old_grad_bias, torch.ones_like(m.bias))
        # remove grads
        m.zero_grad()
        # force the mask to be made of all 1s
        with mock.patch(
            "torch.nn.utils.prune.RandomUnstructured.compute_mask"
        ) as compute_mask:
            compute_mask.return_value = torch.ones_like(m.weight)
            prune.random_unstructured(m, name='weight', amount=0.9)  # amount won't count
        # with mask of 1s, output should be identical to no mask
        y_postpruning = m(input_)
        self.assertEqual(y_prepruning, y_postpruning)
        # with mask of 1s, grad should be identical to no mask
        y_postpruning.sum().backward()
        self.assertEqual(old_grad_weight, m.weight_orig.grad)
        self.assertEqual(old_grad_bias, m.bias.grad)
        # calling forward twice in a row shouldn't change output
        y1 = m(input_)
        y2 = m(input_)
        self.assertEqual(y1, y2)
    def test_random_pruning(self):
        r"""Gradients of masked-out units are zero, so a manual SGD step
        leaves the corresponding entries of `weight_orig` untouched.
        """
        input_ = torch.ones(1, 5)
        m = nn.Linear(5, 2)
        # define custom mask to assign with mock
        mask = torch.ones_like(m.weight)
        mask[1, 0] = 0
        mask[0, 3] = 0
        # check grad is zero for masked weights
        with mock.patch(
            "torch.nn.utils.prune.RandomUnstructured.compute_mask"
        ) as compute_mask:
            compute_mask.return_value = mask
            prune.random_unstructured(m, name='weight', amount=0.9)
            y_postpruning = m(input_)
            y_postpruning.sum().backward()
            # weight_orig is the parameter, so it's the tensor that will accumulate the grad
            self.assertEqual(m.weight_orig.grad, mask)  # all 1s, except for masked units
            self.assertEqual(m.bias.grad, torch.ones_like(m.bias))
            # make sure that weight_orig update doesn't modify [1, 0] and [0, 3]
            old_weight_orig = m.weight_orig.clone()
            # update weights
            learning_rate = 1.
            for p in m.parameters():
                p.data.sub_(p.grad.data * learning_rate)
            # since these are pruned, they should not be updated
            self.assertEqual(old_weight_orig[1, 0], m.weight_orig[1, 0])
            self.assertEqual(old_weight_orig[0, 3], m.weight_orig[0, 3])
    def test_random_pruning_forward(self):
        r"""check forward with mask (by hand).
        """
        input_ = torch.ones(1, 5)
        m = nn.Linear(5, 2)
        # define custom mask to assign with mock
        mask = torch.zeros_like(m.weight)
        mask[1, 0] = 1
        mask[0, 3] = 1
        with mock.patch(
            "torch.nn.utils.prune.RandomUnstructured.compute_mask"
        ) as compute_mask:
            compute_mask.return_value = mask
            prune.random_unstructured(m, name='weight', amount=0.9)
            yhat = m(input_)
            # with all-ones input and one surviving weight per row, each
            # output unit reduces to that single weight plus its bias
            self.assertEqual(yhat[0, 0], m.weight_orig[0, 3] + m.bias[0])
            self.assertEqual(yhat[0, 1], m.weight_orig[1, 0] + m.bias[1])
    def test_remove_pruning_forward(self):
        r"""Remove pruning and check forward is unchanged from previous
        pruned state.
        """
        input_ = torch.ones(1, 5)
        m = nn.Linear(5, 2)
        # define custom mask to assign with mock
        mask = torch.ones_like(m.weight)
        mask[1, 0] = 0
        mask[0, 3] = 0
        # check grad is zero for masked weights
        with mock.patch(
            "torch.nn.utils.prune.RandomUnstructured.compute_mask"
        ) as compute_mask:
            compute_mask.return_value = mask
            prune.random_unstructured(m, name='weight', amount=0.9)
            y_postpruning = m(input_)
        # after removal the mask is baked into the plain `weight`, so the
        # output must be unchanged
        prune.remove(m, 'weight')
        y_postremoval = m(input_)
        self.assertEqual(y_postpruning, y_postremoval)
def test_pruning_id_consistency(self):
r"""Test that pruning doesn't change the id of the parameters, which
would otherwise introduce issues with pre-existing optimizers that
point to old parameters.
"""
m = nn.Linear(5, 2, bias=False)
tensor_id = id(list(m.parameters())[0])
prune.random_unstructured(m, name="weight", amount=0.9)
self.assertEqual(tensor_id, id(list(m.parameters())[0]))
prune.remove(m, "weight")
self.assertEqual(tensor_id, id(list(m.parameters())[0]))
def test_random_pruning_pickle(self):
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
prune.random_unstructured(m, name=name, amount=0.1)
m_new = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m_new, type(m))
    def test_multiple_pruning_calls(self):
        r"""Pruning the same parameter a second time wraps the hook in a
        `PruningContainer` that composes the successive masks.
        """
        # if you call pruning twice, the hook becomes a PruningContainer
        m = nn.Conv3d(2, 2, 2)
        prune.l1_unstructured(m, name='weight', amount=0.1)
        weight_mask0 = m.weight_mask  # save it for later sanity check
        # prune again
        prune.ln_structured(m, name='weight', amount=0.3, n=2, dim=0)
        hook = next(iter(m._forward_pre_hooks.values()))
        self.assertIsInstance(
            hook,
            torch.nn.utils.prune.PruningContainer
        )
        # check that container._tensor_name is correctly set no matter how
        # many pruning methods are in the container
        self.assertEqual(hook._tensor_name, 'weight')
        # check that the pruning container has the right length
        # equal to the number of pruning iters
        self.assertEqual(len(hook), 2)  # m.weight has been pruned twice
        # check that the entries of the pruning container are of the expected
        # type and in the expected order
        self.assertIsInstance(hook[0], torch.nn.utils.prune.L1Unstructured)
        self.assertIsInstance(hook[1], torch.nn.utils.prune.LnStructured)
        # check that all entries that are 0 in the 1st mask are 0 in the
        # 2nd mask too
        self.assertTrue(torch.all(m.weight_mask[weight_mask0 == 0] == 0))
        # prune again
        prune.ln_structured(m, name='weight', amount=0.1, n=float('inf'), dim=1)
        # check that container._tensor_name is correctly set no matter how
        # many pruning methods are in the container
        hook = next(iter(m._forward_pre_hooks.values()))
        self.assertEqual(hook._tensor_name, 'weight')
    def test_pruning_container(self):
        r"""`PruningContainer` only accepts pruning methods acting on the
        same tensor name, and rejects objects that aren't pruning methods.
        """
        # create an empty container
        container = prune.PruningContainer()
        container._tensor_name = 'test'
        self.assertEqual(len(container), 0)
        p = prune.L1Unstructured(amount=2)
        p._tensor_name = 'test'
        # test adding a pruning method to a container
        container.add_pruning_method(p)
        # test error raised if tensor name is different
        q = prune.L1Unstructured(amount=2)
        q._tensor_name = 'another_test'
        with self.assertRaises(ValueError):
            container.add_pruning_method(q)
        # test that adding a non-pruning method object to a pruning container
        # raises a TypeError
        with self.assertRaises(TypeError):
            container.add_pruning_method(10)
        with self.assertRaises(TypeError):
            container.add_pruning_method('ugh')
    def test_pruning_container_compute_mask(self):
        r"""Test `compute_mask` of pruning container with a known `t` and
        `default_mask`. Indirectly checks that Ln structured pruning is
        acting on the right axis.
        """
        # create an empty container
        container = prune.PruningContainer()
        container._tensor_name = 'test'
        # 1) test unstructured pruning
        # create a new pruning method
        p = prune.L1Unstructured(amount=2)
        p._tensor_name = 'test'
        # add the pruning method to the container
        container.add_pruning_method(p)
        # create tensor to be pruned
        t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
        # create prior mask by hand
        default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
        # since we are pruning the two lowest magnitude units, the outcome of
        # the calculation should be this:
        expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
        computed_mask = container.compute_mask(t, default_mask)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(expected_mask, computed_mask)
        # 2) test structured pruning
        q = prune.LnStructured(amount=1, n=2, dim=0)
        q._tensor_name = 'test'
        container.add_pruning_method(q)
        # since we are pruning the lowest magnitude one of the two rows, the
        # outcome of the calculation should be this:
        expected_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 0, 1]])
        computed_mask = container.compute_mask(t, default_mask)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(expected_mask, computed_mask)
        # 3) test structured pruning, along another axis
        r = prune.LnStructured(amount=1, n=2, dim=1)
        r._tensor_name = 'test'
        container.add_pruning_method(r)
        # since we are pruning the lowest magnitude of the four columns, the
        # outcome of the calculation should be this:
        expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
        computed_mask = container.compute_mask(t, default_mask)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(expected_mask, computed_mask)
def test_l1_unstructured_pruning(self):
r"""Test that l1 unstructured pruning actually removes the lowest
entries by l1 norm (by hand). It also checks that applying l1
unstructured pruning more than once respects the previous mask.
"""
m = nn.Linear(4, 2)
# modify its weight matrix by hand
m.weight = torch.nn.Parameter(
torch.tensor(
[[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
)
)
prune.l1_unstructured(m, 'weight', amount=2)
expected_weight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
# check that pruning again removes the next two smallest entries
prune.l1_unstructured(m, 'weight', amount=2)
expected_weight = torch.tensor([[0, 0, 3, 4], [-4, -3, 0, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
    def test_l1_unstructured_pruning_with_importance_scores(self):
        r"""Test that l1 unstructured pruning actually removes the lowest
        entries of importance scores and not the parameter by l1 norm (by hand).
        It also checks that applying l1 unstructured pruning more than once
        respects the previous mask.
        """
        m = nn.Linear(4, 2)
        # modify its weight matrix by hand
        m.weight = torch.nn.Parameter(
            torch.tensor(
                [[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
            )
        )
        # scores deliberately rank the units differently from the weights
        importance_scores = torch.tensor(
            [[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
        )
        prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
        expected_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
                                       dtype=m.weight.dtype)
        self.assertEqual(expected_weight, m.weight)
        # check that pruning again removes two entries of m.weight that are colocated with
        # the next two smallest absolute values of importance scores.
        prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
        expected_weight = torch.tensor([[1, 0, 0, 4], [-4, 0, 0, -1]],
                                       dtype=m.weight.dtype)
        self.assertEqual(expected_weight, m.weight)
def test_unstructured_pruning_same_magnitude(self):
r"""Since it may happen that the tensor to prune has entries with the
same exact magnitude, it is important to check that pruning happens
consistenly based on the bottom % of weights, and not by threshold,
which would instead kill off *all* units with magnitude = threshold.
"""
AMOUNT = 0.2
p = prune.L1Unstructured(amount=AMOUNT)
# create a random tensors with entries in {-2, 0, 2}
t = 2 * torch.randint(low=-1, high=2, size=(10, 7))
nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.nelement())
computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
nparams_pruned = torch.sum(computed_mask == 0)
self.assertEqual(nparams_toprune, nparams_pruned)
def test_random_structured_pruning_amount(self):
AMOUNT = 0.6
AXIS = 2
p = prune.RandomStructured(amount=AMOUNT, dim=AXIS)
t = 2 * torch.randint(low=-1, high=2, size=(5, 4, 2)).to(
dtype=torch.float32
)
nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.shape[AXIS])
computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
# check that 1 column is fully prune, the others are left untouched
remaining_axes = [_ for _ in range(len(t.shape)) if _ != AXIS]
per_column_sums = sorted(
torch.sum(computed_mask == 0, axis=remaining_axes)
)
assert per_column_sums == [0, 20]
    def test_ln_structured_pruning(self):
        r"""Check Ln structured pruning by hand.
        """
        m = nn.Conv2d(3, 1, 2)
        m.weight.data = torch.tensor(
            [[[[1., 2.], [1., 2.5]],
            [[0.5, 1.], [0.1, 0.1]],
            [[-3., -5.], [0.1, -1.]]]]
        )
        # expected effect of pruning 1 of the 3 channels by L2-norm
        expected_mask_axis1 = torch.ones_like(m.weight)
        expected_mask_axis1[:, 1] = 0.
        prune.ln_structured(m, 'weight', amount=1, n=2, dim=1)
        self.assertEqual(expected_mask_axis1, m.weight_mask)
        # expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm
        # NOTE: this aliases `expected_mask_axis1` (no clone), so the in-place
        # edit below composes the column mask with the channel mask above
        expected_mask_axis3 = expected_mask_axis1
        expected_mask_axis3[:, :, :, 0] = 0.
        prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1)
        self.assertEqual(expected_mask_axis3, m.weight_mask)
    def test_ln_structured_pruning_importance_scores(self):
        r"""Check Ln structured pruning by hand, ranking the slices by the
        provided importance scores instead of the weights themselves.
        """
        m = nn.Conv2d(3, 1, 2)
        m.weight.data = torch.tensor(
            [[[[1., 2.], [1., 2.5]],
            [[0.5, 1.], [0.1, 0.1]],
            [[-3., -5.], [0.1, -1.]]]]
        )
        importance_scores = torch.tensor(
            [[[[10., 1.], [10., 1.]],
            [[30., 3.], [30., 3.]],
            [[-20., -2.], [-20., -2.]]]]
        )
        # expected effect of pruning 1 of the 3 channels by L2-norm
        # (of the *scores*: channel 0 has the smallest score norm)
        expected_mask_axis1 = torch.ones_like(m.weight)
        expected_mask_axis1[:, 0] = 0.
        prune.ln_structured(m, 'weight', amount=1, n=2, dim=1, importance_scores=importance_scores)
        self.assertEqual(expected_mask_axis1, m.weight_mask)
        # expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm
        # NOTE: aliases `expected_mask_axis1` (no clone), so the in-place edit
        # composes the column mask with the channel mask above
        expected_mask_axis3 = expected_mask_axis1
        expected_mask_axis3[:, :, :, 1] = 0.
        prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1, importance_scores=importance_scores)
        self.assertEqual(expected_mask_axis3, m.weight_mask)
    def test_remove_pruning(self):
        r"""`prune.remove` removes the hook and the reparametrization
        and makes the pruning final in the original parameter.
        """
        modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
        names = ['weight', 'bias']
        for m in modules:
            for name in names:
                with self.subTest(m=m, name=name):
                    # first prune
                    prune.random_unstructured(m, name, amount=0.5)
                    self.assertIn(name + "_orig", dict(m.named_parameters()))
                    self.assertIn(name + "_mask", dict(m.named_buffers()))
                    self.assertNotIn(name, dict(m.named_parameters()))
                    self.assertTrue(hasattr(m, name))
                    pruned_t = getattr(m, name)
                    # then remove pruning
                    prune.remove(m, name)
                    self.assertIn(name, dict(m.named_parameters()))
                    self.assertNotIn(name + "_orig", dict(m.named_parameters()))
                    self.assertNotIn(name + "_mask", dict(m.named_buffers()))
                    final_t = getattr(m, name)
                    # the pruned values are frozen into the plain parameter
                    self.assertEqual(pruned_t, final_t)
def test_remove_pruning_exception(self):
r"""Removing from an unpruned tensor throws an assertion error
"""
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# check that the module isn't pruned
self.assertFalse(prune.is_pruned(m))
# since it isn't pruned, pruning can't be removed from it
with self.assertRaises(ValueError):
prune.remove(m, name)
    def test_global_pruning(self):
        r"""Test that global l1 unstructured pruning over 2 parameters removes
        the `amount=4` smallest global weights across the 2 parameters.
        """
        m = nn.Linear(4, 2)
        n = nn.Linear(3, 1)
        # modify the weight matrices by hand
        m.weight = torch.nn.Parameter(
            torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
                dtype=torch.float32)
        )
        n.weight = torch.nn.Parameter(
            torch.tensor([[0, 0.1, -2]]).to(
                dtype=torch.float32)
        )
        params_to_prune = (
            (m, 'weight'),
            (n, 'weight'),
        )
        # prune the 4 smallest weights globally by L1 magnitude
        # (the four smallest magnitudes are 0 and 0.1 in `n`, 1 and -1 in `m`)
        prune.global_unstructured(
            params_to_prune,
            pruning_method=prune.L1Unstructured,
            amount=4
        )
        expected_mweight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
                                        dtype=m.weight.dtype)
        self.assertEqual(expected_mweight, m.weight)
        expected_nweight = torch.tensor([[0, 0, -2]]).to(dtype=n.weight.dtype)
        self.assertEqual(expected_nweight, n.weight)
    def test_global_pruning_importance_scores(self):
        r"""Test that global l1 unstructured pruning over 2 parameters removes
        the 4 units whose *importance scores* (not the weights themselves)
        have the smallest global magnitude across the 2 parameters.
        """
        m = nn.Linear(4, 2)
        n = nn.Linear(3, 1)
        # modify the weight matrices by hand
        m.weight = torch.nn.Parameter(
            torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
                dtype=torch.float32)
        )
        m_importance_scores = torch.tensor(
            [[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
        )
        n.weight = torch.nn.Parameter(
            torch.tensor([[0, 0.1, -2]]).to(
                dtype=torch.float32)
        )
        n_importance_scores = torch.tensor([[0, 10., -0.2]]).to(dtype=torch.float32)
        params_to_prune = (
            (m, 'weight'),
            (n, 'weight'),
        )
        importance_scores = {
            (m, 'weight'): m_importance_scores,
            (n, 'weight'): n_importance_scores,
        }
        # prune the 4 units with the smallest importance-score magnitudes,
        # selected globally across both parameters
        prune.global_unstructured(
            params_to_prune,
            pruning_method=prune.L1Unstructured,
            amount=4,
            importance_scores=importance_scores,
        )
        expected_m_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
                                         dtype=m.weight.dtype)
        self.assertEqual(expected_m_weight, m.weight)
        expected_n_weight = torch.tensor([[0, 0.1, 0]]).to(dtype=n.weight.dtype)
        self.assertEqual(expected_n_weight, n.weight)
    def test_custom_from_mask_pruning(self):
        r"""Test that the CustomFromMask is capable of receiving
        as input at instantiation time a custom mask, and combining it with
        the previous default mask to generate the correct final mask.
        """
        # new mask
        mask = torch.tensor([[0, 1, 1, 0], [0, 0, 1, 1]])
        # old mask
        default_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 1, 1]])
        # some tensor (not actually used)
        t = torch.rand_like(mask.to(dtype=torch.float32))
        p = prune.CustomFromMask(mask=mask)
        computed_mask = p.compute_mask(t, default_mask)
        # final mask = new mask AND old mask (elementwise)
        expected_mask = torch.tensor([[0, 0, 0, 0], [0, 0, 1, 1]]).to(
            dtype=t.dtype
        )
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(computed_mask, expected_mask)
    def test_pruning_rollback(self):
        r"""Test that if something fails when we try to compute the mask,
        then the model isn't left in some intermediate half-pruned state.
        The try/except statement in `apply` should handle rolling back
        to the previous state before pruning began.
        """
        modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
        names = ['weight', 'bias']
        for m in modules:
            for name in names:
                with self.subTest(m=m, name=name):
                    # make the mask computation blow up mid-way through apply
                    with mock.patch(
                        "torch.nn.utils.prune.L1Unstructured.compute_mask"
                    ) as compute_mask:
                        compute_mask.side_effect = Exception('HA!')
                        with self.assertRaises(Exception):
                            prune.l1_unstructured(m, name=name, amount=0.9)
                        # the module must look unpruned again afterwards
                        self.assertTrue(
                            name in dict(m.named_parameters())
                        )
                        self.assertFalse(
                            name + '_mask' in dict(m.named_buffers())
                        )
                        self.assertFalse(
                            name + '_orig' in dict(m.named_parameters())
                        )
    def test_pruning_serialization_model(self):
        r"""Saving and loading the whole pruned model via `torch.save` keeps
        the reparametrized state: `weight_orig` and `weight_mask` live in the
        state dict, while `weight` stays a plain (recomputed) attribute.
        """
        # create a model
        model = torch.nn.Sequential(
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 1),
        )
        # check that everything looks normal before pruning
        self.assertNotIn('0.weight_orig', model.state_dict())
        self.assertNotIn('0.weight_mask', model.state_dict())
        self.assertIn('0.weight', model.state_dict())
        # prune one of its parameters
        prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
        # check that the original weight and the new mask are present
        self.assertIn('0.weight_orig', model.state_dict())
        self.assertIn('0.weight_mask', model.state_dict())
        self.assertNotIn('0.weight', model.state_dict())
        self.assertTrue(hasattr(model[0], 'weight'))
        pruned_weight = model[0].weight
        with TemporaryFileName() as fname:
            torch.save(model, fname)
            new_model = torch.load(fname)
        # check that the original weight and the new mask are present
        self.assertIn('0.weight_orig', new_model.state_dict())
        self.assertIn('0.weight_mask', new_model.state_dict())
        self.assertNotIn('0.weight', new_model.state_dict())
        self.assertTrue(hasattr(new_model[0], 'weight'))
        self.assertEqual(pruned_weight, new_model[0].weight)
    def test_pruning_serialization_state_dict(self):
        r"""After `prune.remove`, the state dict uses the vanilla parameter
        names again, so it can be loaded into an unpruned copy of the same
        architecture.
        """
        # create a model
        model = torch.nn.Sequential(
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 1),
        )
        # check that everything looks normal before pruning
        self.assertNotIn('0.weight_orig', model.state_dict())
        self.assertNotIn('0.weight_mask', model.state_dict())
        self.assertIn('0.weight', model.state_dict())
        # prune one of its parameters
        prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
        # check that the original weight and the new mask are present
        self.assertIn('0.weight_orig', model.state_dict())
        self.assertIn('0.weight_mask', model.state_dict())
        self.assertNotIn('0.weight', model.state_dict())
        self.assertTrue(hasattr(model[0], 'weight'))
        pruned_weight = model[0].weight
        # make pruning permanent and restore parameter names as in base
        # architecture
        prune.remove(module=model[0], name='weight')
        # check that the original weight and the new mask are no longer present
        self.assertNotIn('0.weight_orig', model.state_dict())
        self.assertNotIn('0.weight_mask', model.state_dict())
        self.assertIn('0.weight', model.state_dict())
        # save the state dict of model and reload it into new_model
        new_model = torch.nn.Sequential(
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 1),
        )
        with TemporaryFileName() as fname:
            torch.save(model.state_dict(), fname)
            new_model.load_state_dict(torch.load(fname))
        # check that the original weight and the new mask are not present in
        # new_model either.
        self.assertNotIn('0.weight_orig', new_model.state_dict())
        self.assertNotIn('0.weight_mask', new_model.state_dict())
        self.assertIn('0.weight', new_model.state_dict())
        self.assertEqual(pruned_weight, new_model[0].weight)
def test_prune(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
importance_scores = torch.tensor(
[[1, 2, 3, 4], [1.5, 1.6, 1.7, 1.8]]
).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask, importance_scores=importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores_mimic_default(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor_without_importance_scores = p.prune(t, default_mask)
pruned_tensor_with_importance_scores = p.prune(t, default_mask, importance_scores=t)
self.assertEqual(pruned_tensor_without_importance_scores, pruned_tensor_with_importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor_without_importance_scores)
def test_rnn_pruning(self):
l = torch.nn.LSTM(32, 32)
# This Module has 4 parameters called:
# 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0'
# Pruning one of them causes one of the weights to become a tensor
prune.l1_unstructured(l, 'weight_ih_l0', 0.5)
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 3
)
# Removing the pruning reparametrization restores the Parameter
prune.remove(l, 'weight_ih_l0')
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 4
)
# Make sure that, upon removal of the reparametrization, the
# `._parameters` and `.named_parameters` contain the right params.
# Specifically, the original weight ('weight_ih_l0') should be placed
# back in the parameters, while the reparametrization component
# ('weight_ih_l0_orig') should be removed.
assert 'weight_ih_l0' in l._parameters
assert l._parameters['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in l._parameters
assert 'weight_ih_l0' in dict(l.named_parameters())
assert dict(l.named_parameters())['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in dict(l.named_parameters())
    def test_rnn_weight_norm(self):
        r"""Weight norm on an RNN weight turns the target into a plain tensor
        in `_flat_weights`; removing it restores the Parameter and the
        original parameter names (checked for LSTM with and without
        projections).
        """
        def check_weight_norm(l, name, num_params):
            # This Module has 4 or 5 parameters called:
            # 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0', weight_hr_l0
            # Applying weight norm on one of them causes it to become a tensor
            l = torch.nn.utils.weight_norm(l, name=name)
            self.assertEqual(
                sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
                num_params - 1,
            )
            # Removing the weight norm reparametrization restores the Parameter
            l = torch.nn.utils.remove_weight_norm(l, name=name)
            self.assertEqual(
                sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
                num_params,
            )
            # Make sure that, upon removal of the reparametrization, the
            # `._parameters` and `.named_parameters` contain the right params.
            # Specifically, the original weight ('weight_ih_l0') should be placed
            # back in the parameters, while the reparametrization components
            # ('weight_ih_l0_v' and 'weight_ih_l0_g') should be removed.
            self.assertTrue(name in l._parameters)
            self.assertIsNotNone(l._parameters[name])
            self.assertTrue(name + '_v' not in l._parameters)
            self.assertTrue(name + '_g' not in l._parameters)
            self.assertTrue(name in dict(l.named_parameters()))
            self.assertIsNotNone(dict(l.named_parameters())[name])
            self.assertTrue(name + '_v' not in dict(l.named_parameters()))
            self.assertTrue(name + '_g' not in dict(l.named_parameters()))
        check_weight_norm(torch.nn.LSTM(32, 32), 'weight_ih_l0', 4)
        check_weight_norm(torch.nn.LSTM(32, 32, proj_size=16), 'weight_hr_l0', 5)
    def test_weight_norm(self):
        r"""Weight norm reparametrizes `weight` into `weight_g`/`weight_v`
        without changing the module's output, for dim=0 (default), dim=1 and
        dim=None; registering it twice on the same parameter raises.
        """
        input = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        expected_output = m(input)
        # add weight normalization
        m = torch.nn.utils.weight_norm(m)
        self.assertEqual(m.weight_v.size(), m.weight.size())
        self.assertEqual(m.weight_g.size(), (7, 1))
        self.assertEqual(m(input), expected_output)
        # remove weight norm
        m = torch.nn.utils.remove_weight_norm(m)
        self.assertFalse(hasattr(m, 'weight_g'))
        self.assertFalse(hasattr(m, 'weight_v'))
        self.assertEqual(m(input), expected_output)
        # test with dim=1
        m = torch.nn.utils.weight_norm(m, dim=1)
        self.assertEqual(m.weight_v.size(), m.weight.size())
        self.assertEqual(m.weight_g.size(), (1, 5))
        self.assertEqual(m(input), expected_output)
        # test with dim=None
        m = nn.Linear(5, 7)
        expected_output = m(input)
        m = torch.nn.utils.weight_norm(m, dim=None)
        self.assertEqual(m(input), expected_output)
        with self.assertRaisesRegex(RuntimeError, 'register two weight_norm hooks'):
            m = torch.nn.utils.weight_norm(m)
            m = torch.nn.utils.weight_norm(m)
    def test_parameterlistdict_setting_attributes(self):
        r"""ParameterList/ParameterDict tolerate train()/eval() without
        warnings, but warn when weight norm tries to set attributes on them.
        """
        with warnings.catch_warnings(record=True) as w:
            mod = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
        self.assertTrue(len(w) == 0)
        with warnings.catch_warnings(record=True) as w:
            mod.train()
            mod.eval()
        self.assertTrue(len(w) == 0)
        with self.assertWarnsRegex(UserWarning,
                                   r"Setting attributes on ParameterList is not supported"):
            torch.nn.utils.weight_norm(mod, "0")
        with warnings.catch_warnings(record=True) as w:
            mod = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
        self.assertTrue(len(w) == 0)
        with warnings.catch_warnings(record=True) as w:
            mod.train()
            mod.eval()
        self.assertTrue(len(w) == 0)
        with self.assertWarnsRegex(UserWarning,
                                   r"Setting attributes on ParameterDict is not supported"):
            torch.nn.utils.weight_norm(mod, "b")
def test_parameterlistdict_pickle(self):
    """Pickle round-trips of ParameterList/ParameterDict must not warn,
    including containers missing newer bookkeeping attributes (old checkpoints)."""
    def check_roundtrip(container):
        # Serialize and restore, asserting that no warnings were emitted.
        with warnings.catch_warnings(record=True) as caught:
            restored = pickle.loads(pickle.dumps(container))
        self.assertTrue(len(caught) == 0)
        return restored

    def fresh_list():
        return nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))

    def fresh_dict():
        return nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})

    for fresh in (fresh_list, fresh_dict):
        # Plain round-trip.
        check_roundtrip(fresh())

        # Round-trip of a container whose _initialized flag is gone.
        container = fresh()
        del container._initialized
        check_roundtrip(container)

        # Simulate an older checkpoint lacking newer bookkeeping attributes.
        container = fresh()
        del container._forward_pre_hooks, container._state_dict_hooks, \
            container._load_state_dict_pre_hooks, container._non_persistent_buffers_set
        check_roundtrip(container)
def test_weight_norm_pickle(self):
    """A weight-normed Linear survives a pickle round-trip as a Linear."""
    restored = pickle.loads(pickle.dumps(torch.nn.utils.weight_norm(nn.Linear(5, 7))))
    self.assertIsInstance(restored, nn.Linear)
def test_spectral_norm(self):
    """Legacy torch.nn.utils.spectral_norm: parameter/buffer bookkeeping,
    removal, double-registration error, and train/eval correctness
    (optionally under DataParallel when multiple GPUs are available)."""
    input = torch.randn(3, 5)
    m = nn.Linear(5, 7)
    m = torch.nn.utils.spectral_norm(m)

    # u has one entry per output row of the weight (dim=0 by default).
    self.assertEqual(m.weight_u.size(), torch.Size([m.weight.size(0)]))
    # weight_orig should be trainable
    self.assertTrue(hasattr(m, 'weight_orig'))
    self.assertTrue('weight_orig' in m._parameters)
    # weight_u should be just a reused buffer
    self.assertTrue(hasattr(m, 'weight_u'))
    self.assertTrue('weight_u' in m._buffers)
    self.assertTrue('weight_v' in m._buffers)
    # weight should be a plain attribute, not counted as a buffer or a param
    self.assertFalse('weight' in m._buffers)
    self.assertFalse('weight' in m._parameters)
    # it should also be sharing storage as `weight_orig`
    self.assertEqual(m.weight_orig.storage(), m.weight.storage())
    self.assertEqual(m.weight_orig.size(), m.weight.size())
    self.assertEqual(m.weight_orig.stride(), m.weight.stride())

    m = torch.nn.utils.remove_spectral_norm(m)
    self.assertFalse(hasattr(m, 'weight_orig'))
    self.assertFalse(hasattr(m, 'weight_u'))
    # weight should be converted back as a parameter
    self.assertTrue(hasattr(m, 'weight'))
    self.assertTrue('weight' in m._parameters)

    # Registering spectral norm twice on the same parameter must fail.
    with self.assertRaisesRegex(RuntimeError, 'register two spectral_norm hooks'):
        m = torch.nn.utils.spectral_norm(m)
        m = torch.nn.utils.spectral_norm(m)

    # test correctness in training/eval modes and cpu/multi-gpu settings
    for apply_dp in (True, False):
        if apply_dp:
            if not TEST_MULTIGPU:
                continue
            device = torch.device('cuda:0')

            def maybe_wrap(m):
                return torch.nn.DataParallel(m, [0, 1])
        else:
            device = torch.device('cpu')

            def maybe_wrap(m):
                return m

        for requires_grad in (True, False):
            m = nn.Linear(3, 4).to(device)
            m.weight.requires_grad_(requires_grad)
            m = torch.nn.utils.spectral_norm(m)
            wrapped_m = maybe_wrap(m)
            self.assertTrue(hasattr(m, 'weight_u'))
            u0 = m.weight_u.clone()
            v0 = m.weight_v.clone()

            # TEST TRAINING BEHAVIOR

            # assert that u and v are updated (a training-mode forward runs
            # one step of power iteration on the buffers)
            input = torch.randn(2, 3, device=device)
            out = wrapped_m(input)
            self.assertNotEqual(u0, m.weight_u)
            self.assertNotEqual(v0, m.weight_v)

            # assert that backprop reaches weight_orig
            # can't use gradcheck because the function changes as we
            # activate through it in training mode
            if requires_grad:
                torch.autograd.grad(out.sum(), m.weight_orig)

            # test backward works with multiple forwards
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = m.weight_u.clone()
            saved_v = m.weight_v.clone()

            def fn(input):
                m.weight_u.data.copy_(saved_u)
                m.weight_v.data.copy_(saved_v)
                out0 = wrapped_m(input)
                out1 = wrapped_m(input)
                return out0 + out1

            gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)

            # test removing: output right before removal must match output after
            pre_remove_out = wrapped_m(input)
            m = torch.nn.utils.remove_spectral_norm(m)
            self.assertEqual(wrapped_m(input), pre_remove_out)

            m = torch.nn.utils.spectral_norm(m)
            for _ in range(3):
                pre_remove_out = wrapped_m(input)
            m = torch.nn.utils.remove_spectral_norm(m)
            self.assertEqual(wrapped_m(input), pre_remove_out)

            # TEST EVAL BEHAVIOR
            m = torch.nn.utils.spectral_norm(m)
            wrapped_m(input)
            last_train_out = wrapped_m(input)
            last_train_u = m.weight_u.clone()
            last_train_v = m.weight_v.clone()
            wrapped_m.zero_grad()
            wrapped_m.eval()

            eval_out0 = wrapped_m(input)
            # assert eval gives same result as last training iteration
            self.assertEqual(eval_out0, last_train_out)
            # assert doing more iterations in eval doesn't change things
            self.assertEqual(eval_out0, wrapped_m(input))
            self.assertEqual(last_train_u, m.weight_u)
            self.assertEqual(last_train_v, m.weight_v)

            # FIXME: the code below is flaky when executed with DataParallel
            # see https://github.com/pytorch/pytorch/issues/13818
            if apply_dp:
                continue

            # test backward works with multiple forwards in mixed training
            # and eval modes
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = m.weight_u.clone()
            saved_v = m.weight_v.clone()

            def fn(input):
                m.weight_u.data.copy_(saved_u)
                m.weight_v.data.copy_(saved_v)
                wrapped_m.train()
                out0 = wrapped_m(input)
                wrapped_m.eval()
                out1 = wrapped_m(input)
                wrapped_m.train()
                out2 = wrapped_m(input)
                wrapped_m.eval()
                out3 = wrapped_m(input)
                return out0 + out1 + out2 + out3

            gradcheck(fn, (input.clone().requires_grad_(),))

            # assert that backprop reaches weight_orig in eval
            if requires_grad:
                def fn(weight):
                    return wrapped_m(input)

                gradcheck(fn, (m.weight_orig,))
def test_new_spectral_norm(self):
    """Parametrization-based torch.nn.utils.parametrizations.spectral_norm:
    bookkeeping of the `original` parameter and `_u`/`_v` buffers, stacking
    and removal of parametrizations, and train/eval correctness (optionally
    under DataParallel when multiple GPUs are available)."""
    input = torch.randn(3, 5)
    m = nn.Linear(5, 7)
    m = torch.nn.utils.parametrizations.spectral_norm(m)
    spectral_norm_m = m.parametrizations.weight[0]

    self.assertEqual(spectral_norm_m._u.size(), torch.Size([m.weight.size(0)]))

    # .parametrizations.weight.original should be trainable
    self.assertTrue(hasattr(m.parametrizations.weight, 'original'))
    self.assertTrue('original' in m.parametrizations.weight._parameters)

    # u should be just a reused buffer
    self.assertTrue(hasattr(spectral_norm_m, '_u'))
    self.assertTrue('_u' in spectral_norm_m._buffers)
    self.assertTrue('_v' in spectral_norm_m._buffers)

    # weight should be a plain attribute, not counted as a buffer or a param
    self.assertIsNotNone(m.weight)
    self.assertFalse('weight' in m._buffers)
    self.assertFalse('weight' in m._parameters)

    # it should also be sharing storage as `weight_orig`
    # self.assertEqual(m.parametrizations.weight.original.storage(), m.weight.storage())
    self.assertEqual(m.parametrizations.weight.original.size(), m.weight.size())
    self.assertEqual(m.parametrizations.weight.original.stride(), m.weight.stride())

    m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')

    # spectral_norm is the only parametrization
    self.assertFalse(hasattr(m, 'parametrizations'))
    self.assertTrue('weight' in m._parameters)

    # We can register spectral_norm multiple times on the same parameter
    # and on multiple parameters in the same module
    m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
    m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
    m = torch.nn.utils.parametrizations.spectral_norm(m, 'bias')

    # If we remove the parametrization on bias, weight is still parametrized
    # Removing a parametrization runs forward in eval mode if leave_parametrized=True
    m = torch.nn.utils.parametrize.remove_parametrizations(m, 'bias')
    self.assertTrue('bias' in m._parameters)
    self.assertTrue(hasattr(m, 'parametrizations'))
    self.assertFalse('weight' in m._parameters)

    m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
    # Neither weight and bias are parametrized
    self.assertFalse(hasattr(m, 'parametrizations'))
    self.assertTrue('weight' in m._parameters)
    self.assertFalse(torch.nn.utils.parametrize.is_parametrized(m))

    # test correctness in training/eval modes and cpu/multi-gpu settings
    for apply_dp in (True, False):
        if apply_dp:
            if not TEST_MULTIGPU:
                continue
            device = torch.device('cuda:0')

            def maybe_wrap(m):
                return torch.nn.DataParallel(m, [0, 1])
        else:
            device = torch.device('cpu')

            def maybe_wrap(m):
                return m

        for requires_grad in (True, False):
            def get_modules():
                # Fresh module per phase so buffers start from initialization.
                m = nn.Linear(3, 4).to(device)
                m.weight.requires_grad_(requires_grad)
                m = torch.nn.utils.parametrizations.spectral_norm(m)
                wrapped_m = maybe_wrap(m)
                spectral_norm_m = m.parametrizations.weight[0]
                return m, wrapped_m, spectral_norm_m

            input = torch.randn(2, 3, device=device)

            m, wrapped_m, spectral_norm_m = get_modules()

            self.assertTrue(hasattr(spectral_norm_m, '_u'))
            u0 = spectral_norm_m._u.clone()
            v0 = spectral_norm_m._v.clone()

            # TEST TRAINING BEHAVIOR

            # We perform GD first to modify the initial matrix
            opt = torch.optim.SGD(wrapped_m.parameters(), lr=0.1)

            opt.zero_grad()
            wrapped_m(input).sum().backward()
            opt.step()

            out = wrapped_m(input)
            if requires_grad:
                # run forward again and assert that u and v are updated
                self.assertNotEqual(u0, spectral_norm_m._u)
                self.assertNotEqual(v0, spectral_norm_m._v)

            # assert that backprop reaches original weight
            # can't use gradcheck because the function changes as we
            # activate through it in training mode
            if requires_grad:
                torch.autograd.grad(out.sum(), m.parametrizations.weight.original)

            # test backward works with multiple forwards
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = spectral_norm_m._u.clone()
            saved_v = spectral_norm_m._v.clone()

            def fn(input):
                spectral_norm_m._u.data.copy_(saved_u)
                spectral_norm_m._v.data.copy_(saved_v)
                out0 = wrapped_m(input)
                out1 = wrapped_m(input)
                return out0 + out1

            # Make sure we can compute gradients wrt to all the parameters in the case
            # of double forward
            fn(input.clone().requires_grad_()).sum().backward()
            gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)

            # test removing
            # spectral norm module needs to be in eval mode if we'd like to
            # avoid doing another power iteration
            m, wrapped_m, _ = get_modules()
            pre_remove_out = wrapped_m(input)
            m.eval()
            m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
            self.assertEqual(wrapped_m(input), pre_remove_out)

            torch.nn.utils.parametrizations.spectral_norm(m)
            for _ in range(3):
                pre_remove_out = wrapped_m(input)
            m.eval()
            m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
            self.assertEqual(wrapped_m(input), pre_remove_out)

            # TEST EVAL BEHAVIOR
            m, wrapped_m, spectral_norm_m = get_modules()
            wrapped_m(input)
            last_train_out = wrapped_m(input)
            last_train_u = spectral_norm_m._u.clone()
            last_train_v = spectral_norm_m._v.clone()
            wrapped_m.zero_grad()
            wrapped_m.eval()

            eval_out0 = wrapped_m(input)
            # assert eval gives same result as last training iteration
            self.assertEqual(eval_out0, last_train_out)
            # assert doing more iterations in eval doesn't change things
            self.assertEqual(eval_out0, wrapped_m(input))

            self.assertEqual(last_train_u, spectral_norm_m._u)
            self.assertEqual(last_train_v, spectral_norm_m._v)

            # FIXME: the code below is flaky when executed with DataParallel
            # see https://github.com/pytorch/pytorch/issues/13818
            if apply_dp:
                continue

            # test backward works with multiple forwards in mixed training
            # and eval modes
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = spectral_norm_m._u.clone()
            saved_v = spectral_norm_m._v.clone()

            def fn(input):
                spectral_norm_m._u.data.copy_(saved_u)
                spectral_norm_m._v.data.copy_(saved_v)
                wrapped_m.train()
                out0 = wrapped_m(input)
                wrapped_m.eval()
                out1 = wrapped_m(input)
                wrapped_m.train()
                out2 = wrapped_m(input)
                wrapped_m.eval()
                out3 = wrapped_m(input)
                return out0 + out1 + out2 + out3

            gradcheck(fn, (input.clone().requires_grad_(),))

            # assert that backprop reaches weight_orig in eval
            if requires_grad:
                def fn(weight):
                    return wrapped_m(input)

                gradcheck(fn, (m.parametrizations.weight.original,))
def test_new_spectral_norm_load_state_dict(self):
    """State-dict round trips for the parametrization-based spectral norm:
    expected key set, non-strict loading with extra/missing keys, and that
    re-wrapping + loading reproduces the saved train/eval outputs."""
    for activate_times in (0, 3):
        inp = torch.randn(2, 3)
        m = nn.Linear(3, 5)
        snm = torch.nn.utils.parametrizations.spectral_norm(m)
        snm.train()

        # Optionally run some training-mode forwards so _u/_v differ from init.
        for _ in range(activate_times):
            snm(inp)

        state_dict = deepcopy(snm.state_dict())
        self.assertEqual({
            'parametrizations.weight.original',
            'bias',
            'parametrizations.weight.0._v',
            'parametrizations.weight.0._u'
        }, set(state_dict.keys()))

        # test that non-strict loading works
        non_strict_state_dict = deepcopy(state_dict)
        non_strict_state_dict['nonsense'] = 'nonsense'
        with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
            snm.load_state_dict(non_strict_state_dict, strict=True)
        snm.load_state_dict(non_strict_state_dict, strict=False)
        # Progressively drop keys; strict=False must tolerate each deletion.
        del non_strict_state_dict['parametrizations.weight.original']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['parametrizations.weight.0._u']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['parametrizations.weight.0._v']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        non_strict_state_dict['weight'] = snm.weight.detach().clone()  # set W as a buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict._metadata['parametrizations.weight.0']  # remove metadata info
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight']  # remove W buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['bias']
        snm.load_state_dict(non_strict_state_dict, strict=False)

        # normal state_dict

        # test that re-wrapping does not matter
        m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
        snm = torch.nn.utils.parametrizations.spectral_norm(m)

        snm.load_state_dict(state_dict)
        with torch.no_grad():
            snm.eval()
            out0_eval = snm(inp)
            snm.train()
            out1_train = snm(inp)
            out2_train = snm(inp)
            snm.eval()
            out3_eval = snm(inp)

        # test that re-wrapping does not matter
        m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
        snm = torch.nn.utils.parametrizations.spectral_norm(m)

        # Test normal loading
        snm.load_state_dict(state_dict)
        with torch.no_grad():
            snm.eval()
            self.assertEqual(out0_eval, snm(inp))
            snm.train()
            self.assertEqual(out1_train, snm(inp))
            self.assertEqual(out2_train, snm(inp))
            snm.eval()
            self.assertEqual(out3_eval, snm(inp))
@skipIfNoLapack
def test_spectral_norm_load_state_dict(self):
    """Loading legacy spectral-norm state dicts, including a crafted
    version-None checkpoint (pre-v1: W stored as a buffer, no v vector)."""
    inp = torch.randn(2, 3)
    for activate_times in (0, 3):
        # Test backward compatibility
        # At version None -> 1: weight becomes not a buffer and v vector becomes a buffer
        m = nn.Linear(3, 5)
        snm = torch.nn.utils.spectral_norm(m)
        snm.train()
        for _ in range(activate_times):
            snm(inp)

        version_latest_ref_state_dict = deepcopy(snm.state_dict())
        self.assertEqual({'weight_orig', 'bias', 'weight_u', 'weight_v'}, set(version_latest_ref_state_dict.keys()))

        # test that non-strict loading works
        non_strict_state_dict = deepcopy(version_latest_ref_state_dict)
        non_strict_state_dict['nonsense'] = 'nonsense'
        with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
            snm.load_state_dict(non_strict_state_dict, strict=True)
        snm.load_state_dict(non_strict_state_dict, strict=False)
        # Progressively drop keys; strict=False must tolerate each deletion.
        del non_strict_state_dict['weight_orig']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight_u']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight_v']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        non_strict_state_dict['weight'] = snm.weight.detach().clone()  # set W as a buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict._metadata['']['spectral_norm']  # remove metadata info
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight']  # remove W buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['bias']
        snm.load_state_dict(non_strict_state_dict, strict=False)

        # craft a version None state_dict
        version_none_state_dict = deepcopy(version_latest_ref_state_dict)
        self.assertIn('spectral_norm', version_none_state_dict._metadata[''])
        del version_none_state_dict._metadata['']['spectral_norm']  # remove metadata info
        del version_none_state_dict['weight_v']  # remove v vector
        version_none_state_dict['weight'] = snm.weight.detach().clone()  # set W as a buffer

        # normal state_dict
        for version_latest_with_metadata in [True, False]:
            version_latest_state_dict = deepcopy(version_latest_ref_state_dict)

            if not version_latest_with_metadata:
                # We want to still load a user-crafted state_dict, one without metadata
                del version_latest_state_dict._metadata['']['spectral_norm']

            # test that re-wrapping does not matter
            m = torch.nn.utils.remove_spectral_norm(snm)
            snm = torch.nn.utils.spectral_norm(m)

            snm.load_state_dict(version_latest_ref_state_dict)
            with torch.no_grad():
                snm.eval()
                out0_eval = snm(inp)
                snm.train()
                out1_train = snm(inp)
                out2_train = snm(inp)
                snm.eval()
                out3_eval = snm(inp)

            # test that re-wrapping does not matter
            m = torch.nn.utils.remove_spectral_norm(snm)
            snm = torch.nn.utils.spectral_norm(m)

            snm.load_state_dict(version_none_state_dict)
            if activate_times > 0:
                # since in loading version None state dict, we assume that the
                # values in the state dict have gone through at least one
                # forward, we only test for equivalence when activate_times > 0.
                with torch.no_grad():
                    snm.eval()
                    self.assertEqual(out0_eval, snm(inp))
                    snm.train()
                    self.assertEqual(out1_train, snm(inp))
                    self.assertEqual(out2_train, snm(inp))
                    snm.eval()
                    self.assertEqual(out3_eval, snm(inp))

            # test that re-wrapping does not matter
            m = torch.nn.utils.remove_spectral_norm(snm)
            snm = torch.nn.utils.spectral_norm(m)

            # Test normal loading
            snm.load_state_dict(version_latest_state_dict)
            with torch.no_grad():
                snm.eval()
                self.assertEqual(out0_eval, snm(inp))
                snm.train()
                self.assertEqual(out1_train, snm(inp))
                self.assertEqual(out2_train, snm(inp))
                snm.eval()
                self.assertEqual(out3_eval, snm(inp))
def test_spectral_norm_dim(self):
    """spectral_norm on ConvTranspose2d must pick the right dim for u."""
    x = torch.randn(2, 3, 10, 12)
    conv_t = torch.nn.utils.spectral_norm(nn.ConvTranspose2d(3, 4, (5, 6)))
    # Forward must not run into u/v vs. weight shape mismatches.
    out = conv_t(x)
    # u lines up with the dim-1 slice of the transposed conv's weight.
    self.assertEqual(conv_t.weight_u.shape, conv_t.weight_orig[0, :, 0, 0].shape)
def test_new_spectral_norm_dim(self):
    """Parametrization-based spectral_norm must pick the right dim for _u."""
    x = torch.randn(2, 3, 10, 12)
    conv_t = torch.nn.utils.parametrizations.spectral_norm(nn.ConvTranspose2d(3, 4, (5, 6)))
    parametrization = conv_t.parametrizations.weight[0]
    # Forward must not run into _u/_v vs. weight shape mismatches.
    out = conv_t(x)
    # _u lines up with the dim-1 slice of the original weight.
    self.assertEqual(parametrization._u.shape,
                     conv_t.parametrizations.weight.original[0, :, 0, 0].shape)
def test_spectral_norm_forward(self):
    """One hand-rolled power-iteration step reproduces the module's forward."""
    x = torch.randn(3, 5)
    module = torch.nn.utils.spectral_norm(nn.Linear(5, 7))
    # Manual reference: v = normalize(W^T u); u = normalize(W v); W /= u^T W v.
    weight, bias, u = module.weight_orig, module.bias, module.weight_u
    weight_mat = weight.view(weight.size(0), -1)
    v = F.normalize(torch.mv(weight_mat.t(), u), dim=0, eps=1e-12)
    u = F.normalize(torch.mv(weight_mat, v), dim=0, eps=1e-12)
    # Divide the shared weight storage by the estimated spectral norm.
    weight.data /= torch.dot(u, torch.matmul(weight_mat, v))
    reference = torch.nn.functional.linear(x, weight, bias)
    expected = module(x)
    self.assertEqual(expected, reference)
def test_new_spectral_norm_forward(self):
    """One hand-rolled power-iteration step reproduces the parametrized forward."""
    x = torch.randn(3, 5)
    module = torch.nn.utils.parametrizations.spectral_norm(nn.Linear(5, 7))
    parametrization = module.parametrizations.weight[0]
    # Manual reference: u = normalize(W v); v = normalize(W^T u); W /= u^T W v.
    weight = module.parametrizations.weight.original
    bias, v = module.bias, parametrization._v
    weight_mat = weight.view(weight.size(0), -1)
    u = F.normalize(torch.mv(weight_mat, v), dim=0, eps=1e-12)
    v = F.normalize(torch.mv(weight_mat.t(), u), dim=0, eps=1e-12)
    # Divide the original weight storage by the estimated spectral norm.
    weight.data /= torch.dot(u, torch.matmul(weight_mat, v))
    reference = torch.nn.functional.linear(x, weight, bias)
    expected = module(x)
    self.assertEqual(expected, reference)
def test_spectral_norm_pickle(self):
    """A spectral-normed Linear survives a pickle round-trip as a Linear."""
    restored = pickle.loads(pickle.dumps(torch.nn.utils.spectral_norm(nn.Linear(5, 7))))
    self.assertIsInstance(restored, nn.Linear)
@skipIfNoLapack
def test_orthogonal_parametrization(self):
    """Exercise torch.nn.utils.parametrizations.orthogonal over square/tall/wide
    weights, real/complex dtypes, Linear/Conv2d, and all 6 algorithm variants."""
    # Orthogonal implements 6 algorithms (3x parametrizations times 2 options of use_trivialization)

    def assert_is_orthogonal(X):
        # Check X^H X == I (transposing first if the matrix is wide).
        n, k = X.size(-2), X.size(-1)
        if n < k:
            X = X.mT
            n, k = k, n
        Id = torch.eye(k, dtype=X.dtype, device=X.device).expand(*(X.size()[:-2]), k, k)
        eps = 10 * n * torch.finfo(X.dtype).eps
        torch.testing.assert_allclose(X.mH @ X, Id, atol=eps, rtol=0.)

    def assert_weight_allclose_Q(weight, W):
        # Test that weight is equal to the Q part of the QR decomposition of W
        # (or of its transpose if the matrix is wide)
        wide_matrix = W.size(-2) < W.size(-1)
        if wide_matrix:
            W = W.mT
        Q, R = torch.linalg.qr(W)
        # Fix QR's sign ambiguity so the comparison is well defined.
        Q *= R.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
        if wide_matrix:
            Q = Q.mT
        torch.testing.assert_allclose(Q, weight, atol=1e-5, rtol=0.)

    for shape, dtype, use_linear in product(((4, 4), (5, 3), (3, 5)),  # square/ tall / wide
                                            (torch.float32, torch.complex64),
                                            (True, False)):
        # Conv2d does not support complex yet
        if not use_linear and dtype.is_complex:
            continue

        if use_linear:
            input = torch.randn(3, shape[0], dtype=dtype)
        else:
            input = torch.randn(2, 2, shape[0] + 2, shape[1] + 1, dtype=dtype)

        for parametrization, use_trivialization in product(("matrix_exp", "cayley", "householder"),
                                                           (False, True)):
            # right_inverse for Cayley and matrix_exp not implemented for use_trivialization=False
            # See Note [right_inverse expm cayley]
            can_initialize = use_trivialization or parametrization == "householder"

            # We generate them every time to always start with fresh weights
            if use_linear:
                m = nn.Linear(*shape, dtype=dtype)
            else:
                m = nn.Conv2d(2, 3, shape, dtype=dtype)

            # We do not support householder for complex inputs
            # See Note [Householder complex]
            w_init = m.weight.clone()
            if parametrization == "householder" and m.weight.is_complex():
                msg = "householder parametrization does not support complex tensors"
                with self.assertRaisesRegex(ValueError, msg):
                    torch.nn.utils.parametrizations.orthogonal(m,
                                                               "weight",
                                                               parametrization,
                                                               use_trivialization=use_trivialization)
                continue

            wide_matrix = w_init.size(-2) < w_init.size(-1)
            torch.nn.utils.parametrizations.orthogonal(m,
                                                       "weight",
                                                       parametrization,
                                                       use_trivialization=use_trivialization)
            # Forwards works as expected
            self.assertEqual(w_init.shape, m.weight.shape)
            assert_is_orthogonal(m.weight)
            if can_initialize:
                assert_weight_allclose_Q(m.weight, w_init)

            # Initializing with a given orthogonal matrix works
            X = torch.randn_like(m.weight)
            if wide_matrix:
                X = X.mT
            w_new = torch.linalg.qr(X).Q
            if wide_matrix:
                w_new = w_new.mT
            if can_initialize:
                m.weight = w_new
                torch.testing.assert_allclose(w_new, m.weight, atol=1e-5, rtol=0.)
            else:
                msg = "assign to the matrix exponential or the Cayley parametrization"
                with self.assertRaisesRegex(NotImplementedError, msg):
                    m.weight = w_new

            # Initializing with a non-orthogonal matrix makes m.weight be the Q part of the given matrix
            w_new = torch.randn_like(m.weight)
            if can_initialize:
                m.weight = w_new
                assert_weight_allclose_Q(m.weight, w_new)
            else:
                msg = "assign to the matrix exponential or the Cayley parametrization"
                with self.assertRaisesRegex(NotImplementedError, msg):
                    m.weight = w_new

            opt = torch.optim.SGD(m.parameters(), lr=0.1)
            for _ in range(2):
                opt.zero_grad()
                m(input).norm().backward()
                grad = m.parametrizations.weight.original.grad
                self.assertIsNotNone(grad)
                # We do not update the upper triangular part of the matrix if tall tril if wide
                if grad.size(-2) >= grad.size(-1):
                    zeros_grad = grad.triu(1)
                else:
                    zeros_grad = grad.tril(-1)
                self.assertEqual(zeros_grad, torch.zeros_like(zeros_grad))
                # The gradient in the diagonal can only be imaginary because a skew-Hermitian
                # matrix has imaginary diagonal
                diag_grad = grad.diagonal(dim1=-2, dim2=-1)
                if grad.is_complex():
                    diag_grad = diag_grad.real
                self.assertEqual(diag_grad, torch.zeros_like(diag_grad))
                opt.step()

            assert_is_orthogonal(m.weight)
@skipIfNoLapack
def test_orthogonal_errors(self):
    """Error paths of torch.nn.utils.parametrizations.orthogonal."""
    linear = nn.Linear(3, 4)
    # Unknown parametrization name is rejected.
    with self.assertRaisesRegex(ValueError, "has to be one of"):
        torch.nn.utils.parametrizations.orthogonal(linear, "weight", "foo")
    # Only matrices can be orthogonalized; bias is a vector.
    with self.assertRaisesRegex(ValueError, "Expected a matrix"):
        torch.nn.utils.parametrizations.orthogonal(linear, "bias")
    torch.nn.utils.parametrizations.orthogonal(linear, "weight")
    # Assigning a wrongly-shaped matrix to the parametrized weight fails.
    with self.assertRaisesRegex(ValueError, "matrices of shape"):
        linear.weight = torch.randn(5, 5)
    torch.nn.utils.parametrize.remove_parametrizations(linear, "weight")
def test_threshold_int(self):
    """F.threshold works on integer tensors."""
    values = torch.tensor([-3, -2, -1, 0, 1, 2, 3])
    # Entries <= 0 are replaced by 99; the rest pass through unchanged.
    self.assertEqual(F.threshold(values, 0, 99),
                     torch.tensor([99, 99, 99, 99, 1, 2, 3]))
def test_threshold_bfloat16(self):
    """bfloat16 thresholding matches float thresholding rounded to bfloat16."""
    x = torch.randn(100)
    # Include infinities and NaN as thresholds to cover the edge cases.
    for cutoff in [0, -0.5, 0.5, float('inf'), float('-inf'), float('nan')]:
        reference = F.threshold(x, cutoff, 0).bfloat16().float()
        actual = F.threshold(x.bfloat16(), cutoff, 0).float()
        self.assertEqual(actual, reference)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_embedding_max_norm_unsorted_repeating_indices(self):
    """max_norm renormalization must agree between CPU and CUDA even when
    the index tensor is unsorted and heavily repeats entries."""
    def make_embedding(device):
        # Seed RNG so we get the same Embedding each time
        torch.manual_seed(0)
        return torch.nn.Embedding(
            num_embeddings=20,
            embedding_dim=64,
            max_norm=1.0).to(device)

    indices = torch.arange(2, device='cpu', dtype=torch.long).repeat(2000)
    expected = make_embedding('cpu')(indices)
    actual = make_embedding('cuda')(indices.to('cuda'))
    self.assertEqual(actual.cpu(), expected)
def test_embedding_sparse_basic(self):
    """Backward through a sparse Embedding yields a sparse grad of weight's shape."""
    emb = nn.Embedding(10, 20, sparse=True)
    indices = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long)
    emb(indices).sum().backward()
    self.assertTrue(emb.weight.grad.is_sparse)
    self.assertEqual(emb.weight.grad.shape, emb.weight.shape)
def test_embedding_sparse_empty_tensor(self):
    """Sparse embeddings handle empty index tensors and zero-width embeddings."""
    # Degenerate case: zero embeddings of zero width, empty index tensor.
    emb = nn.Embedding(0, 0, sparse=True)
    emb(torch.tensor([], dtype=torch.int64)).sum().backward()
    self.assertTrue(emb.weight.grad.is_sparse)
    self.assertEqual(emb.weight.grad.shape, emb.weight.shape)

    # Zero-width embedding vectors with a normal index batch.
    emb = nn.Embedding(10, 0, sparse=True)
    emb(torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]])).sum().backward()
    self.assertTrue(emb.weight.grad.is_sparse)
    self.assertEqual(emb.weight.grad.shape, emb.weight.shape)
def test_move_sparse_half_embedding(self):
    """Moving a sparse Embedding across dtypes/devices preserves its metadata."""
    embedding = nn.Embedding(10, 3, sparse=True)
    self.assertEqual(embedding.weight.device.type, 'cpu')
    # NOTE(review): expecting float64 here presumably relies on the test file
    # setting the default dtype to double at import time -- confirm against
    # the file/module setup outside this view.
    self.assertEqual(embedding.weight.dtype, torch.float64)
    embedding.to(torch.float16)
    self.assertEqual(embedding.weight.dtype, torch.float16)
    # Dimensions are unaffected by the dtype move.
    self.assertEqual(embedding.embedding_dim, 3)
    self.assertEqual(embedding.num_embeddings, 10)

    # Round-trip to CUDA and back, when available.
    if torch.cuda.is_available():
        embedding.to('cuda')
        self.assertEqual(embedding.weight.device.type, 'cuda')
        embedding.to('cpu')
        self.assertEqual(embedding.weight.device.type, 'cpu')
def test_embedding_max_norm(self):
    """Rows fetched through max_norm are renormalized to norm <= max_norm."""
    emb = nn.Embedding(22, 5, max_norm=1.0)
    out = emb(torch.tensor([2, 8, 8, 6], dtype=torch.long))
    # The repeated index 8 must yield identical (renormalized) rows.
    self.assertEqual(out[1], out[2])
    self.assertTrue(out.data.norm(p=2, dim=1).le(1).all())
def test_embedding_from_pretrained(self):
    """Embedding.from_pretrained adopts the matrix and looks up its rows."""
    weights = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
    emb = nn.Embedding.from_pretrained(weights)
    self.assertEqual(weights, emb.weight.data)
    out = emb(torch.LongTensor([0, 1]))
    self.assertEqual(weights, out)
def test_embedding_bag_from_pretrained(self):
    """EmbeddingBag.from_pretrained adopts the matrix; one-index bags return rows."""
    weights = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
    bag = nn.EmbeddingBag.from_pretrained(weights)
    self.assertEqual(weights, bag.weight)
    indices = torch.tensor([0, 1], dtype=torch.long)
    # offsets = arange makes every bag a single index -> output == raw rows.
    out = bag(indices, torch.arange(indices.size(0)))
    self.assertEqual(weights, out)
def test_embedding_from_pretrained_padding_idx(self):
    """from_pretrained keeps the padding row's values intact."""
    pad = 2
    pad_row = torch.ones(3) * 7
    weights = torch.rand(4, 3, requires_grad=True)
    with torch.no_grad():
        weights[pad] = pad_row
    emb = nn.Embedding.from_pretrained(weights, padding_idx=pad)
    self.assertEqual(emb.weight[pad], pad_row)
def test_embedding_bag_from_pretrained_padding_idx(self):
    """EmbeddingBag.from_pretrained with padding_idx keeps the weights as given."""
    pad = 2
    weights = torch.rand(4, 3, requires_grad=True)
    bag = nn.EmbeddingBag.from_pretrained(weights, padding_idx=pad)
    self.assertEqual(bag.weight, weights)
def test_embedding_from_pretrained_options(self):
    """from_pretrained forwards max_norm/norm_type/scale_grad_by_freq/sparse."""
    weights = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
    options = {
        "max_norm": 2.,
        "norm_type": .5,
        "scale_grad_by_freq": False,
        "sparse": True
    }
    emb = nn.Embedding.from_pretrained(weights, **options)
    out = emb(torch.LongTensor([0, 1]))
    # The lookup renormalizes the touched rows through the shared storage...
    self.assertEqual(weights, out)
    self.assertTrue(weights.ne(torch.arange(1, 7, dtype=weights.dtype).view(2, 3)).all())
    # ...and the resulting rows respect the requested norm bound.
    self.assertTrue(out.data.norm(p=options["norm_type"], dim=1).le(options["max_norm"]).all())
def test_embedding_functional(self):
    """F.embedding matches nn.Embedding, with and without padding_idx."""
    indices = torch.tensor([
        [1, 3, 2],
        [0, 2, 1]
    ], dtype=torch.long)
    table = torch.rand(4, 3, requires_grad=True)

    module = torch.nn.Embedding(4, 3)
    module.weight.data = table.data
    self.assertEqual(module(indices), F.embedding(indices, table))

    # Same comparison with a padding index in play.
    module = torch.nn.Embedding(4, 3).from_pretrained(table, padding_idx=2)
    self.assertEqual(module(indices), F.embedding(indices, table, padding_idx=2))
def test_embedding_bag_functional(self):
    """F.embedding_bag matches nn.EmbeddingBag, with and without padding_idx."""
    indices = torch.tensor([
        [1, 3, 2],
        [0, 2, 1]
    ], dtype=torch.long)
    table = torch.rand(4, 3, requires_grad=True)

    module = torch.nn.EmbeddingBag(4, 3)
    module.weight = torch.nn.Parameter(table)
    self.assertEqual(module(indices), F.embedding_bag(indices, table))

    # Same comparison with a padding index in play.
    module = torch.nn.EmbeddingBag(4, 3).from_pretrained(table, padding_idx=2)
    self.assertEqual(module(indices), F.embedding_bag(indices, table, padding_idx=2))
# Make sure that error is thrown if padding_idx is out of bounds
def test_embedding_bag_padding_idx_error(self):
    """Out-of-range padding_idx must raise; in-range values are accepted."""
    indices = torch.tensor([
        [1, 3, 2],
        [0, 2, 1]
    ], dtype=torch.long)
    num_embeddings = 4
    num_features = 3
    table = torch.rand(num_embeddings, num_features, requires_grad=True)

    functional_msg = r'padding_idx must be within the number of embeddings'
    module_msg = r'padding_idx must be within num_embeddings'
    # Sweep two positions beyond each end of the valid range.
    for padding_idx in range(-(num_embeddings + 2), (num_embeddings + 2)):
        in_bounds = -num_embeddings <= padding_idx < num_embeddings
        if in_bounds:
            F.embedding_bag(indices, table, padding_idx=padding_idx)
            torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
        else:
            with self.assertRaisesRegex(RuntimeError, functional_msg):
                F.embedding_bag(indices, table, padding_idx=padding_idx)
            with self.assertRaisesRegex(AssertionError, module_msg):
                torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
@unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                     'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
                     ' with instruction set support avx2 or newer.')
def test_fb_fc_packed(self):
    """fbgemm fp16 packed linear matches a NumPy reference within fp16 tolerance."""
    X = np.random.rand(16, 16).astype(np.float32) - 0.5
    W = np.random.rand(16, 16).astype(np.float32) - 0.5
    b = np.random.rand(16).astype(np.float32) - 0.5

    # Plain-NumPy reference for a fully connected layer: y = X W^T + b.
    expected = np.dot(X, W.T) + b

    packed_weight = torch.fbgemm_pack_gemm_matrix_fp16(torch.tensor(W))
    actual = torch.fbgemm_linear_fp16_weight(torch.tensor(X), packed_weight, torch.tensor(b))
    torch.testing.assert_close(torch.from_numpy(expected), actual.cpu(), atol=1e-3, rtol=1e-3)
def test_embeddingbag_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embeddingbag = nn.EmbeddingBag.from_pretrained(a)
self.assertEqual(a, embeddingbag.weight.data)
input = torch.LongTensor([[0, 1]])
output = embeddingbag(input)
self.assertEqual(a.mean(0, keepdim=True), output)
def test_embeddingbag_from_pretrained_options(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
opts = {
"max_norm": 2.,
"norm_type": .5,
"scale_grad_by_freq": False,
"mode": "max",
"sparse": False
}
embeddingbag = nn.EmbeddingBag.from_pretrained(a, **opts)
input = torch.LongTensor([[0, 1]])
output = embeddingbag(input)
self.assertEqual(a.max(0, keepdim=True)[0], output)
self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
self.assertTrue(a.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())
def test_AlphaDropout(self):
# generate random tensor with zero mean and unit std
input = torch.randn(5000)
self._test_alpha_dropout(nn.AlphaDropout, input)
def test_FeatureAlphaDropout(self):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
d = random.randint(1, 2)
num_features = 1000
input = torch.randn(num_features, b, d, w, h)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
# no batch dims
input = torch.randn(50, 20, 64, 64)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
    def test_pad_scalar_error(self):
        # F.pad needs at least a 1-D input: a 0-dim (scalar) tensor must be
        # rejected no matter how much padding is requested.
        # NOTE(review): this pins AssertionError, i.e. the python-side assert in
        # F.pad — coupled to torch internals; confirm against the installed version.
        inputs = torch.tensor(0., requires_grad=True)
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (1, 1)))
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (1,)))
    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    @parametrize_test("average_attn_weights", [True, False])
    def test_multihead_attention(self, average_attn_weights):
        """Compare F.multi_head_attention_forward against a NumPy reference
        implementation over 100 random shapes per option set, covering
        bias_kv, zero_attn, key padding masks, static (saved) kv, separate
        kdim/vdim, and byte masks; also checks that static kv + bias_kv raises.
        """
        def _scaled_dot_attn_ref(Q, K, V, dims, unseen_mask=None, key_padding_mask=None,
                                 average_attn_weights=average_attn_weights):
            """ Numpy-based reference implementation of scaled dot attention
            for testing"""
            # softmax(Q @ K^T / sqrt(d_head)) @ V, masking with -inf before softmax.
            QKT = _batchmatmul(
                Q,
                np.transpose(K, axes=[0, 1, 3, 2])
                / np.sqrt(dims[3], dtype=np.float32),  # divide by sqrt(d_head)
            )
            b1, b2, s1, s2 = QKT.shape
            if unseen_mask is not None or key_padding_mask is not None:
                # assert s1 == s2
                for i in range(b1):
                    for j in range(b2):
                        for m in range(s1):
                            for n in range(s2):
                                if unseen_mask is not None and unseen_mask[m][n] == 0:
                                    QKT[i, j, m, n] = -np.inf
                                if key_padding_mask is not None and key_padding_mask[i][n]:
                                    QKT[i, j, m, n] = -np.inf
            reference = _softmax(QKT)
            ref_attn_weight = reference
            if average_attn_weights:
                # Average the attention weights over the head axis (b2 == nheads).
                ref_attn_weight = np.sum(ref_attn_weight, axis=1) / b2
            reference = _batchmatmul(reference, V)
            return reference, ref_attn_weight
        def _batchmatmul(a, b):  # batchmatmul over 4 dim matrix
            """ Numpy-based batch matrix multiply over 4 dim matrix"""
            assert a.shape[0] == b.shape[0]
            assert a.shape[1] == b.shape[1]
            retval = np.zeros(
                (a.shape[0], a.shape[1], a.shape[2], b.shape[3]), dtype=np.float32
            )
            for i in range(a.shape[0]):
                for j in range(a.shape[1]):
                    retval[i, j, :, :] = np.matmul(a[i, j, :, :], b[i, j, :, :])
            return retval
        def _softmax(x):  # softmax over 4 dim matrix
            """ Numpy-based reference softmax over 4 dim matrix"""
            np.seterr(invalid='ignore')
            output = np.zeros(x.shape, dtype=np.float64)
            for i in range(x.shape[0]):
                for j in range(x.shape[1]):
                    for k in range(x.shape[2]):
                        x_curr = x[i, j, k, :]
                        # Shift by the row max for numerical stability.
                        e_x = np.exp(x_curr - np.amax(x_curr))
                        output[i, j, k, :] = e_x / np.sum(e_x)
            return output
        def _split_heads_ref(X, dims, nheads, d_head):
            # [batch, seq, nheads * d_head] -> [batch, nheads, seq, d_head]
            X_split = np.reshape(X, dims[:2] + [nheads, d_head])
            X_split_transposed = np.transpose(X_split, [0, 2, 1, 3])
            reference = np.reshape(X_split_transposed, [dims[0], nheads, dims[1], d_head])
            return reference
        def _combine_heads_ref(X, dims, nheads, d_head):
            # Inverse of _split_heads_ref:
            # [batch, nheads, seq, d_head] -> [batch, seq, nheads * d_head]
            X_transposed = np.transpose(X, [0, 2, 1, 3])
            reference = np.reshape(X_transposed, dims[:2] + [nheads * d_head])
            return reference
        def _fc(X, X_weight, X_bias):
            # Reference fully-connected layer: X @ W^T + b (numpy).
            X_fc_b = X_bias.detach().numpy()
            X_fc_w = X_weight.detach().numpy()
            return np.matmul(X, np.transpose(X_fc_w)) + X_fc_b
        def _create_src_lengths_mask(batch_size, src_lengths):
            """
            Generate boolean mask to prevent attention beyond the end of source
            Inputs:
              batch_size : int
              src_lengths : [batch_size] of sentence lengths
            Outputs:
              [batch_size, max_src_len]
            """
            max_srclen = src_lengths.max()
            src_indices = torch.arange(0, max_srclen).unsqueeze(0).to(src_lengths)
            src_indices = src_indices.expand(batch_size, max_srclen)
            src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
            # returns [batch_size, max_seq_len]
            return (src_indices < src_lengths).int().detach()
        def _multihead_attn_test_helper(add_key_padding_mask=False, add_bias_kv=False, add_zero_attn=False,
                                        saved_kv=False, same_embed_dim=False, byte_mask=False,
                                        average_attn_weights=average_attn_weights):
            # Core driver: build random inputs for the requested option set, run
            # the real multi_head_attention_forward, then replay the same math
            # through the numpy reference helpers above and compare.
            for _ in range(100):
                batch_sz, seq_len = [random.randint(2, 10) for r in range(2)]
                d_head = random.randint(3, 10)
                nheads = random.randint(3, 10)
                d_model = d_head * nheads
                if same_embed_dim:
                    kv_dim = d_model
                else:
                    kv_dim = random.randint(5, 20)
                dims = [batch_sz, seq_len, kv_dim]
                saved_k = None
                saved_k_tensor = None
                saved_v = None
                saved_v_tensor = None
                if saved_kv:
                    # Static (pre-projected) key/value in head-split layout.
                    saved_k = np.random.rand(batch_sz * nheads, seq_len, d_head)
                    saved_k_tensor = torch.from_numpy(saved_k).to(torch.get_default_dtype())
                    saved_v = np.random.rand(batch_sz * nheads, seq_len, d_head)
                    saved_v_tensor = torch.from_numpy(saved_v).to(torch.get_default_dtype())
                key_padding_mask = None
                key_padding_mask_tensor = None
                if add_key_padding_mask:
                    # Same random padding pattern repeated across the batch.
                    seq_mask = np.random.randint(0, 2, (1, seq_len))
                    key_padding_mask = (np.repeat(seq_mask, batch_sz, axis=0) == 1)
                    key_padding_mask_tensor = torch.from_numpy(key_padding_mask)
                    if byte_mask:
                        key_padding_mask_tensor = key_padding_mask_tensor.byte()
                decoder_state = np.random.rand(batch_sz, d_model)
                K = np.random.rand(*dims)
                V = K
                Q = np.expand_dims(decoder_state, 1)
                attn_mask = np.random.randint(0 , 2, size=(1, seq_len))
                attn_mask_tensor = torch.from_numpy(attn_mask).float()
                if byte_mask:
                    attn_mask_tensor = (attn_mask_tensor == 0).byte()
                else:
                    # Float masks are additive: 0 -> -inf (blocked), >0 -> 0.0 (kept).
                    attn_mask_tensor.masked_fill_(attn_mask_tensor == 0, float('-inf'))
                    attn_mask_tensor.masked_fill_(attn_mask_tensor > 0, float('0.0'))
                    attn_mask_tensor = attn_mask_tensor.double()
                decoder_state_tensor = torch.from_numpy(decoder_state).to(torch.get_default_dtype())
                source_hid_tensor = torch.from_numpy(K).to(torch.get_default_dtype()).transpose(0, 1)
                multihead_attn_module = MultiheadAttention(d_model, nheads,
                                                           add_bias_kv=add_bias_kv,
                                                           add_zero_attn=add_zero_attn,
                                                           kdim=kv_dim, vdim=kv_dim)
                if add_bias_kv:
                    bias_k = multihead_attn_module.bias_k.detach().numpy()
                    bias_v = multihead_attn_module.bias_v.detach().numpy()
                else:
                    bias_k = None
                    bias_v = None
                _Q = decoder_state_tensor.unsqueeze(1).transpose(0, 1)
                _V = source_hid_tensor
                _K = source_hid_tensor
                # The functional entry point differs depending on whether q/k/v
                # share one packed projection weight or have separate weights.
                if multihead_attn_module._qkv_same_embed_dim:
                    result, result_weight = torch.nn.functional.multi_head_attention_forward(
                        _Q, _K, _V,
                        d_model, nheads,
                        multihead_attn_module.in_proj_weight, multihead_attn_module.in_proj_bias,
                        multihead_attn_module.bias_k, multihead_attn_module.bias_v,
                        multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
                        multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
                        multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
                        static_k=saved_k_tensor, static_v=saved_v_tensor,
                        average_attn_weights=average_attn_weights)
                else:
                    result, result_weight = torch.nn.functional.multi_head_attention_forward(
                        _Q, _K, _V,
                        d_model, nheads,
                        None, multihead_attn_module.in_proj_bias,
                        multihead_attn_module.bias_k, multihead_attn_module.bias_v,
                        multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
                        multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
                        multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
                        True, multihead_attn_module.q_proj_weight,
                        multihead_attn_module.k_proj_weight, multihead_attn_module.v_proj_weight,
                        static_k=saved_k_tensor, static_v=saved_v_tensor,
                        average_attn_weights=average_attn_weights)
                result = result.squeeze(0).detach().numpy()
                # Recover per-projection weights for the numpy replay.
                if multihead_attn_module._qkv_same_embed_dim:
                    q_proj_weight = multihead_attn_module.in_proj_weight[:d_model]
                    k_proj_weight = multihead_attn_module.in_proj_weight[d_model:(d_model * 2)]
                    v_proj_weight = multihead_attn_module.in_proj_weight[(d_model * 2):]
                else:
                    q_proj_weight = multihead_attn_module.q_proj_weight
                    k_proj_weight = multihead_attn_module.k_proj_weight
                    v_proj_weight = multihead_attn_module.v_proj_weight
                Q_fc = _fc(Q, q_proj_weight, multihead_attn_module.in_proj_bias[:d_model])
                K_fc = _fc(K, k_proj_weight, multihead_attn_module.in_proj_bias[d_model:(d_model * 2)])
                V_fc = _fc(V, v_proj_weight, multihead_attn_module.in_proj_bias[(d_model * 2):])
                if add_bias_kv:
                    # bias_k/bias_v extend the source sequence by one position;
                    # masks must be extended accordingly (always attendable).
                    K_fc = np.concatenate((K_fc, np.repeat(bias_k, K_fc.shape[0], axis=0)), axis=1)
                    V_fc = np.concatenate((V_fc, np.repeat(bias_v, V_fc.shape[0], axis=0)), axis=1)
                    if attn_mask is not None:
                        attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
                    if key_padding_mask is not None:
                        key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
                    dims[1] += 1
                Q_split = _split_heads_ref(
                    Q_fc, [batch_sz, 1, d_model], nheads, d_head
                )
                if saved_k is not None:
                    K_split = np.reshape(saved_k, [dims[0], nheads, dims[1], d_head])
                else:
                    K_split = _split_heads_ref(K_fc, dims, nheads, d_head)
                if saved_v is not None:
                    V_split = np.reshape(saved_v, [dims[0], nheads, dims[1], d_head])
                else:
                    V_split = _split_heads_ref(V_fc, dims, nheads, d_head)
                if add_zero_attn:
                    # zero_attn appends an all-zeros source position per head.
                    dims[1] += 1
                    K_split = np.concatenate((K_split, np.zeros([K_split.shape[0], K_split.shape[1], 1, K_split.shape[3]])), axis=2)
                    V_split = np.concatenate((V_split, np.zeros([V_split.shape[0], V_split.shape[1], 1, V_split.shape[3]])), axis=2)
                    if attn_mask is not None:
                        attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
                    if key_padding_mask is not None:
                        key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
                attn_heads, ref_attn_weight = _scaled_dot_attn_ref(
                    Q=Q_split,
                    K=K_split,
                    V=V_split,
                    dims=Q_split.shape,
                    unseen_mask=attn_mask,
                    key_padding_mask=key_padding_mask
                )
                combined_attn_heads = _combine_heads_ref(
                    X=attn_heads, dims=[batch_sz, 1], nheads=nheads, d_head=d_head
                )
                reference = _fc(combined_attn_heads, multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias)
                reference = np.squeeze(reference, axis=1)
                # result = reference
                self.assertEqual(tuple(result.shape), (batch_sz, d_model))
                np.testing.assert_allclose(result, reference, atol=1e-5)
                # result_weight = ref_attn_weight
                result_weight = result_weight.detach().numpy()
                self.assertEqual(tuple(result_weight.shape), tuple(ref_attn_weight.shape))
                np.testing.assert_allclose(result_weight, ref_attn_weight, atol=1e-5)
        # One thin wrapper per option combination, then drive them all below.
        def test_multihead_attn_add_bias_kv():
            _multihead_attn_test_helper(add_bias_kv=True)
        def test_multihead_attn_add_zero_attn():
            _multihead_attn_test_helper(add_zero_attn=True)
        def test_multihead_attn_no_masking():
            _multihead_attn_test_helper()
        def test_multihead_attn_key_padding_mask():
            _multihead_attn_test_helper(add_key_padding_mask=True)
        def test_multihead_attn_saved_kv():
            _multihead_attn_test_helper(saved_kv=True)
        def test_multihead_attn_add_bias_kv_zero_attn():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
                                        add_zero_attn=True)
        def test_multihead_attn_all_arguments1():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True, saved_kv=True)
        def test_multihead_attn_all_arguments2():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
                                        add_zero_attn=True, saved_kv=True)
        def test_multihead_attn_all_arguments3():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
                                        saved_kv=True, same_embed_dim=True)
        def test_multihead_attn_all_arguments4():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
                                        saved_kv=True, same_embed_dim=True, byte_mask=True)
        test_multihead_attn_add_zero_attn()  # Test MultiheadAttention with add_zero_attn
        test_multihead_attn_add_bias_kv()  # Test MultiheadAttention with add_bias_kv
        test_multihead_attn_no_masking()   # Test MultiheadAttention without masking
        test_multihead_attn_key_padding_mask()  # Test MultiheadAttention with src lengths
        test_multihead_attn_saved_kv()  # Test MultiheadAttention with static kv.
        test_multihead_attn_add_bias_kv_zero_attn()  # Test MultiheadAttention with bias_kv and zero_attn.
        test_multihead_attn_all_arguments1()  # Test MultiheadAttention with all the argument.
        # bias_kv together with a static key is an invalid combination and must raise.
        with self.assertRaisesRegex(AssertionError, "bias cannot be added to static key."):
            test_multihead_attn_all_arguments2()  # Test MultiheadAttention with all the argument.
        test_multihead_attn_all_arguments3()  # Test MultiheadAttention with all the argument.
        test_multihead_attn_all_arguments4()  # Test MultiheadAttention with all the argument.
def test_multihead_attn_3d_attn_mask(self):
embed_dim = 8
num_heads = 4
batch_size = 8
src_len = 3
tgt_len = 2
query = torch.rand(batch_size, tgt_len, embed_dim) # [N, T, D]
key = torch.rand(batch_size, src_len, embed_dim) # [N, S, D]
value = key # [N, S, D]
attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float() # [N, T, S]
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)
# Generate 3D results
attn_mask_3d = torch.repeat_interleave(attn_mask, num_heads, dim=0) # [N * H, T, S]
output_3d = mta_model(query.transpose(0, 1), key.transpose(0, 1), value.transpose(0, 1), attn_mask=attn_mask_3d)[0]
output_3d = output_3d.transpose(0, 1) # [N, T, D]
for i in range(0, batch_size):
output_2d = mta_model(query[i].unsqueeze(0).transpose(0, 1),
key[i].unsqueeze(0).transpose(0, 1),
value[i].unsqueeze(0).transpose(0, 1),
attn_mask=attn_mask[i])[0]
# output_2d in shape of [T, 1, D]
self.assertEqual(output_3d[i].unsqueeze(0).transpose(0, 1), output_2d)
def test_multihead_attn_no_bias(self):
embed_dim = 8
num_heads = 4
mha = torch.nn.MultiheadAttention(embed_dim, num_heads, bias=False)
# Verify that bias=False applies to both in and out projection layers.
self.assertIsNone(mha.in_proj_bias)
self.assertIsNone(mha.out_proj.bias)
    def test_multihead_attn_invalid_shape(self):
        """MultiheadAttention must reject rank mismatches between query, key,
        value, key_padding_mask and attn_mask, in both batched (3-D) and
        unbatched (2-D) modes, with the documented error messages."""
        mha = torch.nn.MultiheadAttention(3, 3)
        # Batched (3D) query cases
        query = torch.randn(3, 3, 3)
        key = torch.randn(3, 3, 3)
        value = torch.randn(3, 3, 3)
        msg = "expected `key` and `value` to be 3-D but found 2-D and 3-D tensors respectively"
        # 3D query, 2D key and 3D value
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, torch.randn(3, 3), value)
        msg = "expected `key` and `value` to be 3-D but found 3-D and 2-D tensors respectively"
        # 3D query, 3D key and 2D value
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, torch.randn(3, 3))
        msg = "expected `key_padding_mask` to be `None` or 2-D but found 1-D tensor instead"
        # 3D query, 3D key, 3D value and 1D key_padding_mask
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, key_padding_mask=torch.tensor([False, True, True], dtype=torch.bool))
        msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
        # 3D query, 3D key, 3D value and 1D attn_mask
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))
        # Unbatched (2D) query cases
        query = torch.randn(3, 3)
        key = torch.randn(3, 3)
        value = torch.randn(3, 3)
        msg = "expected `key` and `value` to be 2-D but found 3-D and 2-D tensors respectively"
        # 2D query, 3D key and 2D value
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, torch.randn(3, 3, 3), value)
        msg = "expected `key` and `value` to be 2-D but found 2-D and 3-D tensors respectively"
        # 2D query, 2D key and 3D value
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, torch.randn(3, 3, 3))
        msg = "expected `key_padding_mask` to be `None` or 1-D but found 2-D tensor instead"
        # 2D query, 2D key, 2D value and 2D key_padding_mask (shape [1, 6])
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, key_padding_mask=torch.tensor([[False, True, True] * 2], dtype=torch.bool))
        msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
        # 2D query, 2D key, 2D value and 1D attn_mask
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))
        msg = r"Expected `attn_mask` shape to be \(3, 3, 3\)"
        # 2D query, 2D key, 2D value and a 3D attn_mask with a wrong leading dim
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, attn_mask=torch.randn(4, 3, 3).bernoulli_().to(torch.bool))
def test_normalize(self):
inputs = torch.randn(1, 3, 4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))
inputs = torch.randn((), requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
def test_adaptive_pooling_input_size(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * numel
module = module_cls(output_size)
input = torch.randn(output_size)
self.assertRaises(ValueError, lambda: module(input))
def test_adaptive_pooling_size_none(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * (numel - 1) + (None,)
module = module_cls(output_size)
input = torch.randn((4,) * (numel + 1))
output = module(input)
self.assertEqual(output.size(), (4,) + (2,) * (numel - 1) + (4,))
@unittest.skipIf(TEST_WITH_UBSAN, "signed integer overflow error with UBSAN")
def test_adaptive_pooling_size_overflow(self):
# 0x0x3fffffffffffffff * 2 * 2 = 0xfffffffffffffffc = -4 as int64_t
# Tensor::numel() return int64_t, so following check that negative allocs are correctly handled
self.assertRaises(
RuntimeError,
lambda: torch.nn.AdaptiveMaxPool1d(0x3fffffffffffffff)(torch.empty([2, 2, 2])))
def test_adaptive_pooling_avg_nhwc(self):
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for device in device_list:
input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
def test_adaptive_pooling_avg_nhwc_non_contiguous(self):
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for device in device_list:
input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
input = input.contiguous(memory_format=torch.channels_last)
input = input[:, ::2, :, :].requires_grad_()
grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
grad = grad[:, ::2, :, :]
pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
def test_adaptive_pooling_bfloat16(self):
def _test_adaptive_pooling_bfloat16(self, device, mod, memory_format):
input = torch.randint(1, 10, (3, 19, 8, 8), dtype=torch.float32)
input = input.to(device).to(memory_format=memory_format).requires_grad_()
pool = mod((7, 7)).to(device)
input2 = input.detach().clone().bfloat16().requires_grad_(True)
out = pool(input)
out.sum().backward()
out2 = pool(input2)
out2.sum().backward()
self.assertTrue(out2.is_contiguous(memory_format=memory_format))
self.assertEqual(out2.dtype, torch.bfloat16)
self.assertEqual(input2.grad.dtype, torch.bfloat16)
self.assertEqual(out, out2.float(), atol=0.1, rtol=0)
self.assertEqual(input.grad, input2.grad.float(), atol=0.1, rtol=0)
device_list = ['cpu']
for device in device_list:
_test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.contiguous_format)
_test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.channels_last)
_test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.contiguous_format)
_test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.channels_last)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@largeTensorTest('12GB', device='cuda')
def test_adaptive_pooling_avg_nhwc_launch_config_backward(self):
input = torch.randint(1, 10, (1, 32, 2 ** 17 + 1, 32), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
grad = torch.randint(1, 10, (1, 32, 10, 32), dtype=torch.float32, device="cuda")
pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@largeTensorTest('12GB', device='cuda')
def test_adaptive_pooling_avg_nhwc_launch_config_forward(self):
input = torch.randint(1, 10, (1, 32, 16, 16), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
out = pool(input)
ref_out = ref_pool(ref_input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_broadcast_double_backwards_gpu(self):
tensors = (torch.randn(4, 4, device='cuda', requires_grad=True),
torch.randn(4, 4, device='cuda', requires_grad=True),
torch.randn(4, 4, device='cuda', requires_grad=True))
# TODO(#50743): the following segfaults with check_batched_grad=True
_assertGradAndGradgradChecks(self, lambda *i: Broadcast.apply((0, 1), *i), tensors,
check_batched_grad=False)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_not_requiring_grad(self):
variables = [
torch.randn(1, 2, device='cuda', requires_grad=True),
torch.randn(1, 2, device='cuda', requires_grad=False),
torch.randn(1, 2, device='cuda', requires_grad=False),
torch.randn(1, 2, device='cuda', requires_grad=True),
torch.randn(1, 2, device='cuda', requires_grad=True),
]
broadcasted_variables = Broadcast.apply((0, 1), *variables)
for output_idx, broadcasted_var in enumerate(broadcasted_variables):
input_var = variables[output_idx % len(variables)]
self.assertEqual(input_var.requires_grad, broadcasted_var.requires_grad)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_no_grad(self):
x = torch.randn(1, 2, dtype=torch.float32, requires_grad=True, device='cuda')
with torch.no_grad():
broadcasted = Broadcast.apply((0, 1), x)
self.assertTrue(x.requires_grad)
for output in broadcasted:
self.assertFalse(output.requires_grad)
def test_state_dict(self):
l = nn.Linear(5, 5)
block = nn.Module()
block.conv = nn.Conv2d(3, 3, 3, bias=False)
net = nn.Module()
net.linear1 = l
net.linear2 = l
net.bn = nn.BatchNorm2d(2)
net.block = block
net.add_module('empty', None)
state_dict = net.state_dict()
self.assertEqual(len(state_dict), 10)
self.assertEqual(len(state_dict._metadata), 6)
self.assertIn('', state_dict._metadata)
self.assertIn('linear1', state_dict._metadata)
self.assertIn('linear1.weight', state_dict)
self.assertIn('linear1.bias', state_dict)
self.assertIn('linear2', state_dict._metadata)
self.assertIn('linear2.weight', state_dict)
self.assertIn('linear2.bias', state_dict)
self.assertIn('block', state_dict._metadata)
self.assertIn('block.conv', state_dict._metadata)
self.assertIn('block.conv.weight', state_dict)
self.assertIn('block.conv.weight', state_dict)
self.assertNotIn('block.conv.bias', state_dict)
self.assertIn('bn', state_dict._metadata)
self.assertIn('bn.weight', state_dict)
self.assertIn('bn.bias', state_dict)
self.assertIn('bn.running_var', state_dict)
self.assertIn('bn.running_mean', state_dict)
self.assertIn('bn.num_batches_tracked', state_dict)
self.assertFalse(any(k.startswith('empty') for k in state_dict.keys()))
for k, v in state_dict.items():
param = net
for component in k.split('.'):
param = getattr(param, component)
if isinstance(param, Parameter):
param = param.data
self.assertEqual(v.data_ptr(), param.data_ptr())
l = nn.Linear(5, 5)
state_dict = l.state_dict()
self.assertEqual(len(state_dict), 2)
self.assertEqual(len(state_dict._metadata), 1)
self.assertIn('', state_dict._metadata)
self.assertTrue(state_dict._metadata['']['version'] >= 0)
self.assertEqual(state_dict['weight'].data_ptr(), l.weight.data_ptr())
self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())
    def test_load_state_dict(self):
        """load_state_dict: the happy path, DDP 'module.' prefix stripping, and
        strict vs. non-strict handling of unexpected / missing / wrongly-sized
        keys, including the reported IncompatibleKeys result."""
        l = nn.Linear(5, 5)
        block = nn.Module()
        block.conv1 = nn.Conv2d(3, 3, 3, bias=True)
        block.conv2 = nn.Conv2d(3, 3, 3, bias=False)
        net = nn.Module()
        net.linear1 = l
        net.linear2 = l
        net.bn = nn.BatchNorm2d(2)
        net.block = block
        net.add_module('empty', None)
        conv1_bias_dtype = block.conv1.bias.dtype
        # A fully-compatible dict with a few freshly-chosen values.
        state_dict = net.state_dict()
        state_dict.update({
            'linear1.weight': torch.ones(5, 5),
            'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
            'bn.running_mean': torch.randn(2),
        })
        # Also test if a DDP state_dict can be loaded from a local model.
        ddp_state_dict = net.state_dict()
        ddp_state_dict.update({
            'module.linear1.weight': torch.ones(5, 5),
            'module.block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
            'module.bn.running_mean': torch.randn(2),
        })
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, 'module.')
        for sd in [state_dict, ddp_state_dict]:
            # Strict load must succeed and report no incompatibilities.
            incompatible_keys = net.load_state_dict(sd)
            self.assertEqual(len(incompatible_keys.missing_keys), 0)
            self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
            self.assertNotIn('Incompatible', str(incompatible_keys))
            self.assertEqual(net.linear1.weight, sd['linear1.weight'])
            self.assertEqual(net.block.conv1.bias, sd['block.conv1.bias'])
            self.assertEqual(net.bn.running_mean, sd['bn.running_mean'])
        # Extra top-level key: strict raises, non-strict reports it as unexpected.
        state_dict = net.state_dict()
        state_dict.update({'extra': torch.ones(5)})
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 0)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
        self.assertIn('extra', incompatible_keys.unexpected_keys)
        self.assertIn('Incompatible', str(incompatible_keys))
        # Extra nested key behaves the same way.
        state_dict = net.state_dict()
        state_dict.update({'extra.param': torch.ones(5)})
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 0)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
        self.assertIn('extra.param', incompatible_keys.unexpected_keys)
        # Deleted key: strict raises, non-strict reports it as missing.
        state_dict = net.state_dict()
        del state_dict['linear1.weight']
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 1)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
        self.assertIn('linear1.weight', incompatible_keys.missing_keys)
        # Missing and unexpected keys can be reported together.
        state_dict.update({'extra.param': torch.ones(5)})
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 1)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
        self.assertIn('linear1.weight', incompatible_keys.missing_keys)
        self.assertIn('extra.param', incompatible_keys.unexpected_keys)
        # A size mismatch is an error in BOTH strict and non-strict mode.
        state_dict = net.state_dict()
        state_dict.update({'bn.running_mean': torch.rand(14, 4)})  # wrong size
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict, strict=False))
        # Non-strict partial load: listed keys are applied, unknown keys are
        # ignored, and everything else keeps its previous value.
        state_dict = net.state_dict()
        old_state_dict = deepcopy(state_dict)
        state_dict = {
            'linear1.weight': torch.ones(5, 5),
            'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
            'bn.running_mean': torch.randn(2),
            'nonexistent_key': torch.rand(3)
        }
        net.load_state_dict(state_dict, strict=False)
        self.assertEqual(net.linear1.weight, state_dict['linear1.weight'])
        self.assertEqual(net.block.conv1.bias, state_dict['block.conv1.bias'])
        self.assertEqual(net.bn.running_mean, state_dict['bn.running_mean'])
        new_state_dict = net.state_dict()
        del old_state_dict['linear1.weight']
        del old_state_dict['block.conv1.bias']
        del old_state_dict['bn.running_mean']
        for k, v, in old_state_dict.items():
            self.assertTrue(v.equal(new_state_dict[k]))
def test_load_state_dict_BC(self):
# BatchNormNd
# Added num_batches_tracked buffer at version 2. For state dict with
# earlier versions or no versions, it should provide default value of 0.
bn = nn.BatchNorm2d(3)
state_dict = bn.state_dict()
del state_dict['num_batches_tracked']
state_dict._metadata['']['version'] = 1 # version 1
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
del state_dict._metadata['']['version'] # no version
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
def test_load_state_dict_ref_cycle(self):
# load_state_dict shouldn't cause a reference cycle involving Tensors
import gc
m = torch.nn.LSTM(16, 16, bidirectional=True)
gc.collect()
m.load_state_dict(deepcopy(m).state_dict())
refcycles = gc.collect()
self.assertEqual(refcycles, 0)
def test_load_state_dict_custom(self):
class CustomState(nn.Module):
def __init__(self):
super(CustomState, self).__init__()
self.param = torch.nn.Parameter(torch.ones(1))
self.sub = torch.nn.Linear(5, 5)
def _save_to_state_dict(self, destination, prefix, keep_vars):
destination[prefix + "serialized"] = self.param.data + 1
def _load_from_state_dict(self, state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs):
# skip some of the error handling
self.param.data.copy_(state_dict[prefix + "serialized"] - 1)
# use sequential to verify nesting
m = nn.Sequential(CustomState())
with torch.no_grad():
m[0].param[0] = 10
m[0].sub.weight[0, 0] = 555
state_dict = m.state_dict()
self.assertEqual(state_dict["0.serialized"].item(), 11)
self.assertIn("0.sub.weight", state_dict)
self.assertNotIn("0.param", state_dict)
del m
mm = nn.Sequential(CustomState())
self.assertEqual(mm[0].param[0].item(), 1)
mm.load_state_dict(state_dict)
self.assertEqual(mm[0].param[0].item(), 10)
self.assertEqual(mm[0].sub.weight[0, 0].item(), 555)
    def test_extra_state(self):
        """get_extra_state/set_extra_state round-trip through state_dict, including nesting."""
        class SubModule(torch.nn.Module):
            def __init__(self, foo):
                super().__init__()
                self.foo = foo
            def get_extra_state(self):
                return {
                    'foo': self.foo
                }
            def set_extra_state(self, state):
                self.foo = state['foo']
        class MyModule(torch.nn.Module):
            def __init__(self, foo, bar):
                super().__init__()
                self.sub = SubModule(foo)
                self.bar = bar
            def get_extra_state(self):
                return {
                    'bar': self.bar
                }
            def set_extra_state(self, state):
                self.bar = state['bar']
        # Ensure state_dict contains the extra state by loading it into another module.
        m = MyModule(3, 'something')
        m2 = MyModule(5, 'something else')
        m2.load_state_dict(m.state_dict())
        self.assertEqual(m.state_dict(), m2.state_dict())
        # Extra state is restored at both the top level and the nested submodule.
        self.assertEqual(m2.bar, m.bar)
        self.assertEqual(m2.sub.foo, m.sub.foo)
    def test_extra_state_non_dict(self):
        """Extra state is not restricted to dicts; arbitrary objects round-trip too."""
        class MyModule(torch.nn.Module):
            def __init__(self, foo):
                super().__init__()
                self.foo = foo
            def get_extra_state(self):
                return self.foo
            def set_extra_state(self, state):
                self.foo = state
        # Test various types of extra state.
        for state in ('something', 5, MyModule(3)):
            m = MyModule(state)
            m2 = MyModule('something else')
            m2.load_state_dict(m.state_dict())
            self.assertEqual(m.state_dict(), m2.state_dict())
            self.assertEqual(m.foo, m2.foo)
def test_extra_state_missing_set_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def get_extra_state(self):
return {
'foo': 5
}
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Unexpected key'):
m.load_state_dict(m.state_dict())
def test_extra_state_missing_get_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def set_extra_state(self):
pass
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Missing key'):
m.load_state_dict(m.state_dict())
    def test_parameter_assignment(self):
        """Attribute assignment registers Parameters (and only Parameters) on a module."""
        l = nn.Linear(5, 5)
        def num_params():
            return len(list(l.parameters()))
        # Linear starts with weight + bias.
        self.assertEqual(num_params(), 2)
        new_param = Parameter(torch.randn(5, 5))
        l.param_name = new_param
        self.assertEqual(num_params(), 3)
        self.assertObjectIn(new_param, l.parameters())
        var = torch.randn(5, 5)
        l.var_name = var
        # Plain tensors assigned as attributes are NOT registered as parameters.
        self.assertEqual(num_params(), 3)
        self.assertNotIn(id(var), map(id, l.parameters()))
        # Make sure Variables are not saved as parameters
        l.variable_attr = torch.empty(5, 5)
        self.assertEqual(num_params(), 3)
        l.param_attr = Parameter(torch.empty(5, 5))
        self.assertEqual(num_params(), 4)
        # It shouldn't be possible to replace a parameter with a Variable
        def assign_var():
            l.param_attr = torch.empty(5, 5)
        self.assertRaises(TypeError, assign_var)
        # But replacing it with None should be fine
        l.param_attr = None
        self.assertEqual(num_params(), 3)
    def test_assignment(self):
        """Parameter/child/buffer attribute assignment: shadowing None, ordering, deletion."""
        l = nn.Module()
        a = nn.Parameter(torch.randn(2))
        b = nn.Parameter(torch.randn(3))
        c = nn.Parameter(torch.randn(4))
        q = nn.Linear(4, 4)
        r = nn.Linear(5, 5)
        w = nn.Linear(6, 6)
        # Shared checks run once with Parameters and once with child Modules;
        # get_list returns the module's current registration order.
        def test_assignments(get_list, a, b, c):
            # Check that None can be shadowed
            l.a = None
            self.assertIsNone(l.a)
            self.assertIn('a', l.__dict__)
            l.a = a
            self.assertIs(l.a, a)
            self.assertEqual(get_list(), [a])
            self.assertNotIn('a', l.__dict__)
            # Assign second object
            l.b = None
            self.assertIsNone(l.b)
            self.assertIn('b', l.__dict__)
            l.b = b
            self.assertIs(l.b, b)
            self.assertEqual(get_list(), [a, b])
            self.assertNotIn('b', l.__dict__)
            # Remove and add the object back. Order should be unchanged.
            l.a = None
            self.assertIsNone(l.a)
            self.assertEqual(get_list(), [b])
            l.a = a
            self.assertIs(l.a, a)
            self.assertEqual(get_list(), [a, b])
            # Replace object with another one. Order should be unchanged.
            l.a = c
            self.assertIs(l.a, c)
            self.assertEqual(get_list(), [c, b])
            # Remove and reassign an attribute. It should appear at the end of the list now.
            del l.a
            self.assertFalse(hasattr(l, 'a'))
            l.a = a
            self.assertIs(l.a, a)
            self.assertEqual(get_list(), [b, a])
        test_assignments(lambda: list(l.parameters()), a, b, c)
        del l.a, l.b
        self.assertEqual(list(l.parameters()), [])
        test_assignments(lambda: list(l.children()), q, r, w)
        del l.a, l.b
        self.assertEqual(list(l.children()), [])
        # Buffers behave like parameters for attribute access and state_dict.
        buf = torch.randn(10)
        l.register_buffer('buf', buf)
        self.assertIs(l.buf, buf)
        l.buf = None
        self.assertIs(l.buf, None)
        self.assertNotIn('buf', l.__dict__)  # should be stored in l._buffers
        l.buf = buf
        self.assertIn('buf', l.state_dict())
        self.assertEqual(l.state_dict()['buf'], buf)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_thnn_conv_strided_padded_dilated(self):
        """With cuDNN disabled, CUDA (de)conv must match CPU results and pass gradcheck."""
        for convfn, dims, transposed in (
                (torch.nn.functional.conv2d, 2, False),
                (torch.nn.functional.conv_transpose2d, 2, True),
                (torch.nn.functional.conv3d, 3, False),
                (torch.nn.functional.conv_transpose3d, 3, True)):
            for stride, padding, dilation in (
                    (2, 0, 1), (1, 1, 1), (2, 1, 1), (1, 0, 2)):
                kwargs = {"stride": stride, "padding": padding, "dilation": dilation}
                inp_shape = (1, 2) + dims * (4,)
                weight_shape = (2, 2) + dims * (1,)
                inputs = torch.randn(inp_shape, dtype=torch.double, device="cuda", requires_grad=True)
                weight = torch.randn(weight_shape, dtype=torch.double, device="cuda", requires_grad=True)
                bias = torch.randn(2, dtype=torch.double, device="cuda", requires_grad=True)
                with torch.backends.cudnn.flags(enabled=False):
                    res = convfn(inputs, weight, bias, **kwargs)
                res_cpu = convfn(inputs.cpu(), weight.cpu(), bias.cpu(), **kwargs)
                self.assertEqual(res, res_cpu)
                # Gradcheck both the CUDA (cuDNN-off) and CPU paths.
                with torch.backends.cudnn.flags(enabled=False):
                    torch.autograd.gradcheck(
                        lambda x, w, b: convfn(x, w, b, **kwargs),
                        (inputs, weight, bias)
                    )
                    torch.autograd.gradcheck(
                        lambda x, w, b: convfn(x, w, b, **kwargs),
                        (inputs.cpu(), weight.cpu(), bias.cpu())
                    )
def test_Conv2d_inconsistent_types(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float)
weights = torch.randn(1, 1, 3, 3, dtype=torch.double)
# inconsistent types should raise an exception
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float())
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):
        """With cuDNN disabled, CUDA conv2d must reject mixed dtypes but accept matching ones."""
        inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
        weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
        bias = torch.randn(1, dtype=torch.double, device="cuda")
        with torch.backends.cudnn.flags(enabled=False):
            # inconsistent types should raise an exception
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
            # but it should work with the same type
            nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_1x1(self):
in_channels = 2
out_channels = 2
mod = torch.nn.Conv2d(2, 2, 1, bias=False).to(dtype=torch.double)
input = torch.randn(1, in_channels, 5, 5, requires_grad=True, dtype=torch.double)
for enabled in (False, True):
with torch.backends.mkldnn.flags(enabled=enabled):
gradcheck(F.conv2d, (input, mod.weight))
def test_Conv2d_OneDNN(self):
def run_once(group_val=24, dilation=1):
ifm = torch.ones([1, group_val, 6, 6], dtype=torch.float32)
weights = torch.ones([group_val, 1, 3, 3], dtype=torch.float32)
op = torch.nn.Conv2d(
in_channels=group_val,
out_channels=group_val,
kernel_size=[3, 3],
stride=[2, 2],
padding=[1, 1],
dilation=[dilation, dilation],
groups=group_val,
bias=False,
padding_mode='zeros'
)
op.weight.data = weights
res = op(ifm)
grad_in = torch.ones(res.shape, dtype=torch.float32)
res.backward(grad_in)
return op.weight.grad
for gorup_val in (24, 48, 23, 25):
for dilation in (1, 2):
with torch.backends.mkldnn.flags(enabled=False):
without_onednn = run_once(gorup_val, dilation)
with torch.backends.mkldnn.flags(enabled=True):
with_onednn = run_once(gorup_val, dilation)
self.assertEqual(without_onednn, with_onednn)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    def test_cudnn_non_contiguous(self):
        """cuDNN Conv1d must accept a non-contiguous input without erroring."""
        x = torch.randn(192, 16, 50).cuda()
        # Double permute keeps the shape but leaves x non-contiguous.
        x = x.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        m = torch.nn.Conv1d(
            in_channels=16,
            out_channels=32,
            kernel_size=2,
            bias=True).cuda()
        result = m(x)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):
        """With cuDNN enabled, CUDA conv2d must reject mixed dtypes but accept matching ones."""
        inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
        weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
        bias = torch.randn(1, dtype=torch.double, device="cuda")
        with torch.backends.cudnn.flags(enabled=True):
            # inconsistent types should raise an exception
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
            # but it should work with the same type
            nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_missing_argument(self):
c = nn.Conv2d(3, 3, 3)
self.assertRaises(TypeError, lambda: c(None))
def test_Conv2d_backward_twice(self):
input = torch.randn(2, 3, 5, 5)
c = nn.Conv2d(3, 3, 3)
o1 = c(input)
o1.sum().backward()
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: o1.sum().backward())
    def test_conv_modules_raise_error_on_incorrect_input_size(self):
        """Conv/ConvTranspose modules must reject inputs of invalid rank."""
        for dtype in [torch.bfloat16, torch.double, torch.float]:
            modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype),
                       nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype),
                       nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)]
            # Each pair lists input ranks that must be rejected by the module at
            # the same position in `modules`.
            invalid_input_dims = [(1, 4), (1, 4),
                                  (2, 5), (2, 5),
                                  (3, 6), (3, 6)]
            for invalid_dims, module in zip(invalid_input_dims, modules):
                for dims in invalid_dims:
                    input = torch.empty(torch.Size((3, ) * dims))
                    self.assertRaises(RuntimeError, lambda: module(input))
    def test_conv_shapecheck(self):
        """Spot-check which spatial input sizes convolutions accept vs reject."""
        def test(should_raise, module, input_size, dtype):
            # Batch of 3; `input_size` is (channels, *spatial).
            input = torch.empty(3, *input_size).to(dtype)
            if should_raise:
                self.assertRaises(RuntimeError, lambda: module(input))
            else:
                # just run it to ensure no exception raised.
                module(input)
        for dtype in [torch.bfloat16, torch.float, torch.double]:
            # Conv1d
            test(True, nn.Conv1d(1, 1, 3).to(dtype), (1, 2), dtype)
            test(True, nn.Conv1d(1, 1, 3, stride=2).to(dtype), (1, 2), dtype)
            test(False, nn.Conv1d(1, 1, 2).to(dtype), (1, 2), dtype)
            test(False, nn.Conv1d(1, 1, 2, stride=2).to(dtype), (1, 2), dtype)
            test(False, nn.Conv1d(1, 1, 3, stride=2, padding=1).to(dtype), (1, 2), dtype)
            # Conv2d
            test(True, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 2, 2), dtype)
            test(False, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 3, 3), dtype)
            test(False, nn.Conv2d(1, 1, (3, 3), padding=1).to(dtype), (1, 2, 2), dtype)
            # Conv3D
            test(True, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 2, 2, 2), dtype)
            test(False, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 3, 3, 3), dtype)
            test(False, nn.Conv3d(1, 1, (3, 3, 3), padding=1).to(dtype), (1, 2, 2, 2), dtype)
def test_ConvTranspose2d_output_size(self):
m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
i = torch.randn(2, 3, 6, 6)
for h in range(15, 22):
for w in range(15, 22):
if 18 <= h <= 20 and 18 <= w <= 20:
output = m(i, output_size=(h, w))
self.assertEqual(output.size()[2:], (h, w))
else:
self.assertRaises(ValueError, lambda: m(i, (h, w)))
    def test_ConvTranspose2d_output_size_downsample_upsample(self):
        """Conv2d followed by a matching ConvTranspose2d with output_size restores spatial size."""
        b, c, hid_c = 2, 3, 2
        # Sweep spatial size, kernel, dilation, stride and padding combinations.
        for h in range(13, 24):
            for w in range(13, 17):
                for k in range(2, 5):
                    for d in range(1, 5):
                        for s in range(1, 4):
                            for p in range(3):
                                conv = nn.Conv2d(
                                    in_channels=c,
                                    out_channels=hid_c,
                                    kernel_size=k,
                                    stride=s,
                                    padding=p,
                                    dilation=d,
                                )
                                t_conv = nn.ConvTranspose2d(
                                    in_channels=hid_c,
                                    out_channels=c,
                                    kernel_size=k,
                                    stride=s,
                                    padding=p,
                                    dilation=d,
                                )
                                i = torch.randn(b, c, h, w)
                                # output_size pins the transpose output to the
                                # original input's spatial dimensions.
                                out = t_conv(conv(i), output_size=i.shape)
                                self.assertEqual(out.size()[2:], i.size()[2:])
def test_ConvTranspose3d_correct_output_size(self):
# Check that ConvTranspose3d can take a 5d output_size.
m = nn.ConvTranspose3d(2, 2, 2)
i = torch.rand(1, 2, 1, 1, 1)
out = m(i, output_size=(1, 2, 2, 2, 2))
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_ConvTranspose2d_half_cublas_gemm(self):
        """Half-precision ConvTranspose2d must run forward/backward with cuDNN disabled."""
        with torch.backends.cudnn.flags(enabled=False):
            inputs = torch.randn(1, 1, 16, 16, device='cuda', dtype=torch.half)
            deconv = nn.ConvTranspose2d(
                1, 1, 3, stride=2, padding=1, output_padding=1).cuda().half()
            output = deconv(inputs)
            output.mean().backward()
    @skipIfRocm
    # For https://github.com/pytorch/pytorch/pull/1273
    # Almost identical to the above `test_Conv2d_naive_groups`
    def test_Conv2d_groups_nobias(self):
        """A groups=2 bias-free Conv2d must equal two independent half-channel convs."""
        dev_dtypes = [("cpu", torch.float)]
        if TEST_CUDA:
            dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
        if AMPERE_OR_ROCM:
            dev_dtypes += [("cuda", torch.bfloat16)]
        for device, dtype in dev_dtypes:
            m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False).to(device, dtype)
            i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
            output = m(i)
            grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
            output.backward(grad_output)
            # m1/m2 replicate the two groups as standalone convolutions sharing
            # the grouped module's weights.
            m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
            m1.weight.data.copy_(m.weight.data[:2])
            i1 = i.data[:, :2].contiguous().requires_grad_(True)
            output1 = m1(i1)
            output1.backward(grad_output[:, :2].contiguous())
            m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
            m2.weight.data.copy_(m.weight.data[2:])
            i2 = i.data[:, 2:].contiguous().requires_grad_(True)
            output2 = m2(i2)
            output2.backward(grad_output[:, 2:].contiguous())
            # Outputs and gradients must match the concatenation of the halves.
            self.assertEqual(output, torch.cat([output1, output2], 1))
            self.assertEqual(i.grad.data,
                             torch.cat([i1.grad.data, i2.grad.data], 1),
                             atol=dtype2prec_DONTUSE[dtype], rtol=0)
            self.assertEqual(m.weight.grad.data,
                             torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                             atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
    # Almost identical to the above `test_Conv2d_naive_groups`
    # Covering special case when group > 1, input-channel / group < 16 and output-channel is multiple of 16
    # See also https://github.com/pytorch/pytorch/pull/18463#issuecomment-476563686
    # and https://github.com/pytorch/pytorch/pull/18463#issuecomment-477001024
    @skipIfRocm
    def test_Conv2d_groups_nobias_v2(self):
        """Grouped conv correctness when in-channels/group < 16 and out-channels % 16 == 0."""
        torch.manual_seed(123)
        dev_dtypes = [("cpu", torch.float)]
        if TEST_CUDA:
            dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
        if AMPERE_OR_ROCM:
            dev_dtypes += [("cuda", torch.bfloat16)]
        for device, dtype in dev_dtypes:
            m = nn.Conv2d(4, 16, kernel_size=3, groups=2, bias=False).to(device, dtype)
            i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
            output = m(i)
            grad_output = torch.randn(2, 16, 4, 4, device=device, dtype=dtype)
            output.backward(grad_output)
            # m1/m2 replicate the two groups (2 in -> 8 out each) standalone.
            m1 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
            m1.weight.data.copy_(m.weight.data[:8])
            i1 = i.data[:, :2].contiguous().requires_grad_(True)
            output1 = m1(i1)
            output1.backward(grad_output[:, :8].contiguous())
            m2 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
            m2.weight.data.copy_(m.weight.data[8:])
            i2 = i.data[:, 2:].contiguous().requires_grad_(True)
            output2 = m2(i2)
            output2.backward(grad_output[:, 8:].contiguous())
            self.assertEqual(output, torch.cat([output1, output2], 1))
            self.assertEqual(i.grad.data,
                             torch.cat([i1.grad.data, i2.grad.data], 1),
                             atol=dtype2prec_DONTUSE[dtype], rtol=0)
            self.assertEqual(m.weight.grad.data,
                             torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                             atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
    # CPU-only test for group conv3d fast implementation using bmm
    # See: https://github.com/pytorch/pytorch/pull/36355
    def test_Conv3d_groups_nobias(self):
        """CPU grouped bias-free Conv3d must equal two independent half-channel convs."""
        torch.manual_seed(123)
        m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=False).to("cpu", torch.float)
        i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
        output.backward(grad_output)
        # m1/m2 replicate the two groups as standalone convolutions.
        m1 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
        m1.weight.data.copy_(m.weight.data[:8])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :8].contiguous())
        m2 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
        m2.weight.data.copy_(m.weight.data[8:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 8:].contiguous())
        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
    def test_Conv3d_groups_wbias(self):
        """CPU grouped Conv3d WITH bias must equal two independent half-channel convs."""
        torch.manual_seed(123)
        m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=True).to("cpu", torch.float)
        i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
        output.backward(grad_output)
        # m1/m2 replicate the two groups, copying both weights and bias halves.
        m1 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
        m1.weight.data.copy_(m.weight.data[:8])
        m1.bias.data.copy_(m.bias.data[:8])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :8].contiguous())
        m2 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
        m2.weight.data.copy_(m.weight.data[8:])
        m2.bias.data.copy_(m.bias.data[8:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 8:].contiguous())
        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[torch.float],
                         rtol=dtype2prec_DONTUSE[torch.float])
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float],
                         rtol=dtype2prec_DONTUSE[torch.float])
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
    def test_MaxUnpool2d_output_size(self):
        """MaxUnpool2d validates output_size against what the pooling indices allow."""
        m = nn.MaxPool2d(3, stride=2, return_indices=True)
        mu = nn.MaxUnpool2d(3, stride=2)
        big_t = torch.rand(1, 1, 6, 6)
        # Force the max into the last pooling window so its index is out of range
        # for the default unpool output size.
        big_t[0][0][4][4] = 100
        output_big, indices_big = m(big_t)
        self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))
        small_t = torch.rand(1, 1, 5, 5)
        for i in range(0, 4, 2):
            for j in range(0, 4, 2):
                small_t[:, :, i, j] = 100
        output_small, indices_small = m(small_t)
        for h in range(3, 10):
            for w in range(3, 10):
                if 4 <= h <= 6 and 4 <= w <= 6:
                    size = (h, w)
                    # Also exercise the 4-d (N, C, H, W) form of output_size.
                    if h == 6:
                        size = (1, 1) + size
                    mu(output_small, indices_small, output_size=size)
                else:
                    self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w)))
    def test_max_unpool2d_nhwc_cpu(self):
        """CPU MaxUnpool2d on channels_last input must match the contiguous path."""
        input = torch.randn(2, 10, 9, 9).float().cpu()
        input = input.contiguous(memory_format=torch.channels_last)
        # ref_* mirror the computation in standard contiguous layout.
        ref_input = input.clone().contiguous()
        pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
        ref_pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
        out, ind = pool(input)
        ref_out, ref_ind = ref_pool(ref_input)
        out.requires_grad_()
        ref_out.requires_grad_()
        unpool = nn.MaxUnpool2d(3, stride=2).cpu()
        ref_unpool = nn.MaxUnpool2d(3, stride=2).cpu()
        upout = unpool(out, ind)
        ref_upout = ref_unpool(ref_out, ref_ind)
        grad = torch.randn(upout.size()).float().cpu()
        grad = grad.contiguous(memory_format=torch.channels_last)
        ref_grad = grad.clone().contiguous()
        upout.backward(grad)
        ref_upout.backward(ref_grad)
        # Memory format must be preserved, and values/gradients must agree.
        self.assertTrue(upout.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_upout.is_contiguous())
        self.assertTrue(torch.allclose(upout, ref_upout))
        self.assertTrue(torch.allclose(out.grad, ref_out.grad))
def test_container_copy(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(4, 5)
def forward(self, input):
return self.linear(input)
input = torch.randn(2, 4)
model = Model()
model_cp = deepcopy(model)
self.assertEqual(model(input).data, model_cp(input).data)
model_cp.linear.weight.data[:] = 2
self.assertNotEqual(model(input).data, model_cp(input).data)
def test_RNN_cell(self):
# this is just a smoke test; these modules are implemented through
# autograd so no Jacobian test is needed
for module in (nn.RNNCell, nn.GRUCell):
for bias in (True, False):
input = torch.randn(3, 10)
hx = torch.randn(3, 20)
cell = module(10, 20, bias=bias)
for _ in range(6):
hx = cell(input, hx)
hx.sum().backward()
def test_RNN_cell_forward_input_size(self):
input = torch.randn(3, 11)
hx = torch.randn(3, 20)
for module in (nn.RNNCell, nn.GRUCell):
cell = module(10, 20)
self.assertRaises(Exception, lambda: cell(input, hx))
def test_RNN_cell_forward_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 21)
cell_shared_param = (10, 20)
for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
nn.GRUCell(*cell_shared_param)):
self.assertRaises(Exception, lambda: cell(input, hx))
def test_RNN_cell_forward_zero_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 0)
cell_shared_param = (10, 0)
for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
nn.GRUCell(*cell_shared_param)):
self.assertEqual(cell(input, hx).shape, torch.Size([3, 0]))
def _test_loss_equal_input_target_shape(self, cast):
# Tests losses whose inputs should have the same size.
losses = {
'mse_loss': lambda x, y: F.mse_loss(x, y),
'l1_loss': lambda x, y: F.l1_loss(x, y),
'smooth_l1_loss': lambda x, y: F.smooth_l1_loss(x, y),
'huber_loss': lambda x, y: F.huber_loss(x, y),
'kl_div': lambda x, y: F.kl_div(x, y),
'poisson_nll_loss': lambda x, y: F.poisson_nll_loss(x, y),
}
input = cast(torch.randn(3, 5))
target = cast(torch.randn(5, 3))
for _name, fn in losses.items():
self.assertRaises(Exception, lambda: fn(input, target))
    def test_loss_equal_input_target_shape(self):
        """Run the shared shape-mismatch loss checks with an identity cast (CPU tensors)."""
        self._test_loss_equal_input_target_shape(lambda x: x)
def test_mse_loss_size_warning(self):
i = torch.randn((10, 1), requires_grad=True)
t = torch.randn((10,))
with warnings.catch_warnings(record=True) as w:
# Ensure warnings are being shown
warnings.simplefilter("always")
# Trigger Warning
F.mse_loss(i, t)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertIn('Please ensure they have the same size.', str(w[0]))
def test_poisson_nll_loss_reduction_modes(self):
input = torch.tensor([0.5, 1.5, 2.5])
target = torch.tensor([1., 2., 3.])
component_wise_loss = torch.exp(input) - target * input
self.assertEqual(component_wise_loss,
F.poisson_nll_loss(input, target, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.poisson_nll_loss(input, target, reduction='total')
def test_gaussian_nll_loss_reduction_modes(self):
input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
target = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
var = torch.tensor([[0.5, 1., 1.5], [1., 1.5, 2.]])
component_wise_loss = 0.5 * (torch.log(var) + (input - target)**2 / var)
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target, var, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.gaussian_nll_loss(input, target, var, reduction='total')
    def test_gaussian_nll_loss_broadcasting(self):
        """Target and var may broadcast against input; all forms must give the same loss."""
        input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
        # target_part broadcasts (1, 3) -> (2, 3); var_part1/2 broadcast a single
        # per-row variance across the columns.
        target_full = torch.tensor([[1., 2., 3.], [1., 2., 3.]])
        target_part = torch.tensor([[1., 2., 3.]])
        var_full = torch.tensor([[0.5, 0.5, 0.5], [1.5, 1.5, 1.5]])
        var_part1 = torch.tensor([[0.5], [1.5]])
        var_part2 = torch.tensor([0.5, 1.5])
        component_wise_loss = 0.5 * (torch.log(var_full) + (input - target_full)**2 / var_full)
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_part, var_full, reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_full, var_part1, reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_full, var_part2, reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_part, var_part1, reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_part, var_part2, reduction='none'))
def test_gaussian_nll_loss_args(self):
input = torch.randn(3, 5)
with self.assertRaisesRegex(ValueError, 'var is of incorrect size'):
target = torch.randn(3, 5)
var = torch.ones(3, 3)
torch.nn.functional.gaussian_nll_loss(input, target, var)
with self.assertRaisesRegex(ValueError, 'var has negative entry/entries'):
var = -1 * torch.ones(3, 5)
torch.nn.functional.gaussian_nll_loss(input, target, var)
def test_KLDivLoss_batch_mean(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
prob2 = F.softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean')
l = loss(log_prob1, prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum')(log_prob1, prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
def test_KLDivLoss_batch_mean_log_target(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
log_prob2 = F.log_softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean', log_target=True)
l = loss(log_prob1, log_prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum', log_target=True)(log_prob1, log_prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
def test_CTCLoss_typechecks(self):
target_lengths = torch.tensor([30, 25, 20])
input_lengths = torch.tensor([50, 50, 50])
targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
with self.assertRaises(RuntimeError):
_input_lengths = input_lengths.to(dtype=torch.float)
torch.nn.functional.ctc_loss(log_probs, targets, _input_lengths, target_lengths)
with self.assertRaises(RuntimeError):
target_lengths = target_lengths.to(dtype=torch.float)
torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_lengthchecks_cuda(self):
        """CUDA ctc_loss must reject target lengths exceeding the targets tensor's width."""
        # Targets are (batch=3, max_len=29) but the first length claims 30 entries.
        target_lengths = [30, 25, 20]
        input_lengths = [50, 50, 50]
        targets = torch.randint(1, 15, (3, 29), dtype=torch.long, device='cuda')
        log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2)
        with self.assertRaises(RuntimeError):
            torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
def test_CTCLoss_lengthchecks_cpu(self):
target_lengths = [30, 25, 20]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (3, 29), dtype=torch.int)
log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
with self.assertRaises(RuntimeError):
torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_long_targets(self):
        """CPU and non-cuDNN CUDA CTC loss must agree on very long inputs/targets."""
        input_length = 4000
        vocab_size = 3
        batch_size = 4
        target_length = 1200
        log_probs = torch.randn(input_length, batch_size, vocab_size).log_softmax(2).requires_grad_()
        targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length), dtype=torch.long)
        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]
        res_cpu = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                               reduction='sum', zero_infinity=True)
        grad_out = torch.randn_like(res_cpu)
        grad_cpu, = torch.autograd.grad(res_cpu, log_probs, grad_out)
        # Compare the native CUDA kernel (cuDNN disabled) against the CPU result.
        with torch.backends.cudnn.flags(enabled=False):
            res_gpu = torch.nn.functional.ctc_loss(log_probs.cuda(), targets.cuda(), input_lengths, target_lengths,
                                                   reduction='sum', zero_infinity=True)
            grad_gpu, = torch.autograd.grad(res_gpu, log_probs, grad_out.cuda())
        self.assertEqual(res_cpu, res_gpu, atol=1e-4, rtol=0)
        self.assertEqual(grad_cpu, grad_gpu, atol=1e-4, rtol=0)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_critical_target_len(self):
        # cudnn has an unexpected problem with target length 256, see issue #53505
        N = 1
        S = 256
        C = 10
        T = 500
        target = torch.randint(low=1, high=C, size=(S,), dtype=torch.int)
        input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.int)
        target_lengths = torch.tensor(S, dtype=torch.int)
        inp = torch.randn(T, N, C, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
        # Force the cuDNN path and compare against the CPU reference.
        with cudnn.flags(enabled=True):
            res_gpu = torch.nn.functional.ctc_loss(inp, target, input_lengths, target_lengths, reduction='none')
        res_cpu = torch.nn.functional.ctc_loss(inp.cpu(), target, input_lengths, target_lengths, reduction='none')
        self.assertEqual(res_cpu, res_gpu, atol=1e-3, rtol=0)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_zero_infinity(self):
        """zero_infinity=True must zero out impossible-alignment losses consistently on all paths."""
        # First target length (60) exceeds the input length (50), producing an
        # infinite loss that zero_infinity should clamp to zero.
        target_lengths = [60, 25, 20]
        input_lengths = [50, 50, 50]
        targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int, device='cuda')
        log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
        res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                           reduction='sum', zero_infinity=True)
        with torch.backends.cudnn.flags(enabled=False):
            res2 = torch.nn.functional.ctc_loss(log_probs, targets.cuda().long(), input_lengths, target_lengths,
                                                reduction='sum', zero_infinity=True)
        res_cpu = torch.nn.functional.ctc_loss(log_probs.cpu(), targets.cpu(), input_lengths, target_lengths,
                                               reduction='sum', zero_infinity=True)
        self.assertEqual(res2, res, atol=1e-4, rtol=0)
        self.assertEqual(res_cpu, res.cpu(), atol=1e-4, rtol=0)
        g1, = torch.autograd.grad(res, log_probs)
        g2, = torch.autograd.grad(res2, log_probs)
        g3, = torch.autograd.grad(res_cpu, log_probs)
        self.assertEqual(g2, g3, atol=1e-4, rtol=0)
        self.assertEqual(g1, g2, atol=1e-4, rtol=0)
        self.assertTrue((g1 == g1).all().item())  # check that we don't have NaN
def test_RNN_cell_no_broadcasting(self):
def test(cell_module, input, hx, input_size, hidden_size):
cell = cell_module(input_size, hidden_size)
self.assertRaises(RuntimeError, lambda: cell(input, hx))
def test_all(hidden_size, bad_hx, good_hx, input_size, input):
test(nn.RNNCell, input, bad_hx, input_size, hidden_size)
test(nn.GRUCell, input, bad_hx, input_size, hidden_size)
test(nn.LSTMCell, input, (bad_hx, good_hx), input_size, hidden_size)
test(nn.LSTMCell, input, (good_hx, bad_hx), input_size, hidden_size)
hidden_size = 20
input_size = 10
input = torch.randn(3, input_size)
bad_hx = torch.randn(1, hidden_size)
good_hx = torch.randn(3, hidden_size)
# Test hidden/input batch size broadcasting
test_all(hidden_size, bad_hx, good_hx, input_size, input)
# Test hx's hidden_size vs module's hidden_size broadcasting
bad_hx = torch.randn(3, 1)
test_all(hidden_size, bad_hx, good_hx, input_size, input)
# Test input's input_size vs module's input_size broadcasting
bad_input = torch.randn(3, 1)
test_all(hidden_size, good_hx, good_hx, input_size, bad_input)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_native_dropout_corner_case(self):
for train in [True, False]:
for p in [0.0, 1.0]:
for device in ["cuda", "cpu"]:
x = torch.randn(5).to(device=device).requires_grad_()
x_ref = x.detach().requires_grad_()
o = torch.native_dropout(x, p, train)[0]
o_ref = torch.dropout(x_ref, p, train)
o.sum().backward()
o_ref.sum().backward()
assert(o.equal(o_ref))
assert(x.grad.equal(x_ref.grad))
def test_invalid_dropout_p(self):
v = torch.ones(1)
self.assertRaises(ValueError, lambda: nn.Dropout(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(1.1))
self.assertRaises(ValueError, lambda: F.dropout(v, -0.1))
self.assertRaises(ValueError, lambda: F.dropout(v, 1.1))
def test_pad_sequence(self):
    """pad_sequence pads variable-length tensors to a common length."""
    def pad(tensor, length):
        # Right-pad `tensor` with zeros along dim 0 up to `length`.
        filler = tensor.data.new(length - tensor.size(0), *tensor.size()[1:]).zero_()
        return torch.cat([tensor.data, filler])

    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])

    # batch_first = true
    expected = torch.tensor([[4, 5, 0], [1, 2, 3], [6, 0, 0]])
    self.assertEqual(rnn_utils.pad_sequence([b, a, c], True), expected)
    # batch_first = false swaps the batch and time axes
    self.assertEqual(rnn_utils.pad_sequence([b, a, c]), expected.transpose(0, 1))
    # pad with non-zero value
    expected = torch.tensor([[4, 5, 1], [1, 2, 3], [6, 1, 1]])
    self.assertEqual(rnn_utils.pad_sequence([b, a, c], True, 1), expected)
    # input already sorted by decreasing length
    expected = torch.tensor([[1, 2, 3], [4, 5, 0], [6, 0, 0]])
    self.assertEqual(rnn_utils.pad_sequence([a, b, c], True), expected)

    # sequences with 0-3 trailing dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        trailing_dims = [4] * num_dim
        sequences = [torch.rand(i * i, 5, *trailing_dims)
                     for i in range(1, maxlen + 1)]
        random.shuffle(sequences)
        expected = torch.stack([pad(seq, maxlen * maxlen) for seq in sequences])
        # batch first = true
        self.assertEqual(rnn_utils.pad_sequence(sequences, True), expected)
        # batch first = false
        self.assertEqual(rnn_utils.pad_sequence(sequences), expected.transpose(0, 1))
def test_unpad_sequence(self):
    """unpad_sequence inverts pad_sequence for both batch layouts.

    Pads a batch of variable-length tensors, unpads with the recorded
    lengths, and checks the originals are recovered — for batch_first in
    {True, False} and trailing dims of rank 0-3.
    """
    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    sequences = [a, b, c]
    lengths = torch.as_tensor([v.size(0) for v in sequences])
    for batch_first in [True, False]:
        padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)
        unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)
        self.assertEqual(sequences, unpadded_sequences)

    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        trailing_dims = [4] * num_dim
        for i in range(1, maxlen + 1):
            seq_len = i * i
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        random.shuffle(sequences)
        lengths = torch.as_tensor([v.size(0) for v in sequences])
        # BUG FIX: the original relied on `batch_first` leaking out of the
        # loop above (always False by then), so the multi-dimensional case
        # never exercised batch_first=True. Iterate both layouts explicitly.
        for batch_first in [True, False]:
            padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)
            unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)
            self.assertEqual(sequences, unpadded_sequences)
def test_pack_sequence(self):
    """Deterministic checks for rnn_utils.pack_sequence.

    Verifies packed data / batch_sizes / sort indices for sorted and
    unsorted inputs, the enforce_sorted=True error paths, and round-trip
    consistency with pad_sequence, pack_padded_sequence and
    pad_packed_sequence across trailing dims of rank 0-3.
    """
    def _compatibility_test(sequences, lengths, batch_first, enforce_sorted=False):
        # pack_sequence and pad_sequence -> pack_padded_sequence must agree,
        # and pad_packed_sequence must invert the packing.
        padded = rnn_utils.pad_sequence(sequences, batch_first)
        packed = rnn_utils.pack_sequence(sequences, enforce_sorted)
        unpacked = rnn_utils.pad_packed_sequence(packed, batch_first)
        self.assertEqual(padded, unpacked[0])
        pack_padded = rnn_utils.pack_padded_sequence(
            padded, lengths, batch_first, enforce_sorted)
        self.assertEqual(packed, pack_padded)

    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    packed = rnn_utils.pack_sequence([a, b, c], enforce_sorted=False)
    # data is interleaved time-major: step 0 of every sequence, then step 1, ...
    expected = torch.tensor([1, 4, 6, 2, 5, 3])
    self.assertEqual(packed.batch_sizes, [3, 2, 1])
    self.assertEqual(packed.data.data, expected)
    self.assertEqual(packed.sorted_indices, [0, 1, 2])
    self.assertEqual(packed.unsorted_indices, [0, 1, 2])

    # same data arriving unsorted: packed contents are identical, only the
    # sorting bookkeeping differs
    packed_unsorted = rnn_utils.pack_sequence([b, c, a], enforce_sorted=False)
    self.assertEqual(packed_unsorted.batch_sizes, [3, 2, 1])
    self.assertEqual(packed_unsorted.data.data, expected)
    self.assertEqual(packed_unsorted.sorted_indices, [2, 0, 1])
    self.assertEqual(packed_unsorted.unsorted_indices, [1, 2, 0])

    # single dimensional, enforce_sorted = True: no index tensors are kept
    packed_enforce_sorted = rnn_utils.pack_sequence([a, b, c], enforce_sorted=True)
    self.assertEqual(packed_enforce_sorted.batch_sizes, [3, 2, 1])
    self.assertEqual(packed_enforce_sorted.data.data, expected)
    self.assertTrue(packed_enforce_sorted.sorted_indices is None)
    self.assertTrue(packed_enforce_sorted.unsorted_indices is None)

    # unsorted input with enforce_sorted=True must raise; both phrases of
    # the error message are asserted separately
    with self.assertRaisesRegex(RuntimeError, 'must be sorted in decreasing order'):
        rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)

    with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
        rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)

    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        lengths = []
        trailing_dims = [4] * num_dim
        # lengths i*i, generated in decreasing order so enforce_sorted=True works
        for i in range(maxlen, 0, -1):
            seq_len = i * i
            lengths.append(seq_len)
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        unsorted_sequences = [s.clone() for s in sequences]
        random.shuffle(unsorted_sequences)
        unsorted_sequences_lengths = [t.size(0) for t in unsorted_sequences]

        # compatibility with other utilities
        for batch_first in (True, False):
            for enforce_sorted in (True, False):
                _compatibility_test(sequences, lengths, batch_first, enforce_sorted)
            _compatibility_test(unsorted_sequences, unsorted_sequences_lengths,
                                batch_first)
def test_unpack_sequence(self):
    """unpack_sequence recovers the original tensors from a PackedSequence."""
    def roundtrip(seqs):
        packed = rnn_utils.pack_sequence(seqs, enforce_sorted=False)
        self.assertEqual(seqs, rnn_utils.unpack_sequence(packed))

    # single dimensional
    roundtrip([torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])])

    # sequences with 0-3 trailing dimensions, shuffled so they are unsorted
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        trailing_dims = [4] * num_dim
        seqs = [torch.rand(i * i, 5, *trailing_dims)
                for i in range(1, maxlen + 1)]
        random.shuffle(seqs)
        roundtrip(seqs)
def test_pack_padded_sequence(self):
    """Deterministic checks for rnn_utils.pack_padded_sequence.

    Builds padded batches with known contents, verifies the packed data,
    batch_sizes and unsorted_indices, round-trips through
    pad_packed_sequence, and checks that gradient only flows into the
    non-padded positions. Also asserts the error messages for unsorted
    and empty inputs.
    """
    def generate_test_case(sorted_lengths, should_shuffle):
        def pad(tensor, length):
            # zero-pad along dim 0 up to `length`
            return torch.cat([tensor, tensor.new(length - tensor.size(0), *tensor.size()[1:]).zero_()])

        max_length = sorted_lengths[0]
        # batch_sizes[t] = number of sequences still active at time step t+1
        batch_sizes = [sum(map(bool, filter(lambda x: x >= i, sorted_lengths)))
                       for i in range(1, max_length + 1)]
        offset = 0
        # sequence i (1-based) holds values i*100 + 1..5l, shaped (l, 1, 5)
        padded = torch.cat([pad(i * 100 + torch.arange(1., 5 * l + 1).view(l, 1, 5), max_length)
                            for i, l in enumerate(sorted_lengths, 1)], 1)
        expected_data = [[torch.arange(1., 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]
                         for n, batch_size in enumerate(batch_sizes)]
        expected_data = list(itertools.chain.from_iterable(expected_data))
        expected_data = torch.stack(expected_data, dim=0)

        if should_shuffle:
            # Shuffle the padded sequence to create an unsorted sequence
            permutation = list(range(len(sorted_lengths)))
            random.shuffle(permutation)

            unsorted_indices = torch.tensor(permutation)
            padded = padded.index_select(1, unsorted_indices)
            lengths = torch.tensor(sorted_lengths).index_select(0, unsorted_indices)
        else:
            unsorted_indices = None
            lengths = sorted_lengths

        return padded.requires_grad_(), lengths, expected_data, batch_sizes, unsorted_indices

    test_cases = [
        # sorted_lengths, should_shuffle
        [[10, 8, 4, 2, 2, 2, 1], False],
        [[11, 10, 8, 6, 4, 3, 1], False],
        [[11, 10, 8, 6, 4, 3, 1], True],
    ]

    for test_case, batch_first in itertools.product(test_cases, (True, False)):
        sorted_lengths, should_shuffle = test_case
        padded, lengths, expected_data, batch_sizes, unsorted_indices = generate_test_case(
            sorted_lengths, should_shuffle)

        src = padded
        if batch_first:
            src = src.transpose(0, 1)

        # check output
        packed = rnn_utils.pack_padded_sequence(src, lengths, batch_first=batch_first,
                                                enforce_sorted=not should_shuffle)
        self.assertEqual(packed.data.data, expected_data)
        self.assertEqual(packed.batch_sizes, batch_sizes)
        self.assertEqual(packed.unsorted_indices, unsorted_indices)

        # test inverse
        unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
        self.assertEqual(unpacked, src)
        self.assertEqual(unpacked_len, lengths)

        # check grad
        if padded.grad is not None:
            padded.grad.data.zero_()
        grad_output = unpacked.data.clone().normal_()
        unpacked.backward(grad_output)
        if batch_first:
            grad_output.transpose_(0, 1)
        for i, l in enumerate(lengths):
            # gradient reaches the real time steps of each sequence...
            self.assertEqual(padded.grad.data[:l, i], grad_output[:l, i])
            # ...while the padded tail receives no gradient at all
            if l < 10:
                self.assertEqual(padded.grad.data[l:, i].abs().sum(), 0)

    # test error messages
    with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
        packed = rnn_utils.pack_padded_sequence(torch.randn(3, 3), [1, 3, 2])
    with self.assertRaisesRegex(RuntimeError, 'empty tensor'):
        packed = rnn_utils.pack_padded_sequence(torch.randn(0, 0), [])
def test_LSTM_cell(self):
    """Smoke test: LSTMCell (with and without bias) runs forward and backward."""
    # These modules are implemented through autograd, so no Jacobian
    # check is needed — just exercise a few steps and a backward pass.
    for bias in (True, False):
        x = torch.randn(3, 10)
        state = (torch.randn(3, 20), torch.randn(3, 20))
        cell = nn.LSTMCell(10, 20, bias=bias)
        for _ in range(6):
            state = cell(x, state)
        (state[0] + state[1]).sum().backward()
def test_LSTM_cell_forward_input_size(self):
    """An input whose feature dim disagrees with input_size must fail."""
    cell = nn.LSTMCell(10, 20)
    x = torch.randn(3, 11)  # 11 != the cell's input_size of 10
    state = (torch.randn(3, 20), torch.randn(3, 20))
    self.assertRaises(Exception, lambda: cell(x, state))
def test_LSTM_cell_forward_hidden_size(self):
    """A state tensor with the wrong feature dim must fail in either slot."""
    cell = nn.LSTMCell(10, 20)
    x = torch.randn(3, 10)
    bad = torch.randn(3, 21)   # 21 != the cell's hidden_size of 20
    good = torch.randn(3, 20)
    self.assertRaises(Exception, lambda: cell(x, (bad, good)))
    self.assertRaises(Exception, lambda: cell(x, (good, bad)))
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_pack_sequence_batch_sizes_throw(self):
    """PackedSequence rejects a batch_sizes tensor living on a CUDA device."""
    with self.assertRaisesRegex(ValueError, r"batch_sizes should always be on CPU"):
        lstm = nn.LSTM(3, 4, bidirectional=True, num_layers=2).to('cuda')
        data = torch.rand(5, 3, device='cuda')
        batch_sizes = torch.tensor([1, 1, 1, 1, 1], device='cuda')
        # constructing the PackedSequence itself is what raises
        packed = nn.utils.rnn.PackedSequence(data, batch_sizes)
def test_Transformer_cell(self):
    """Smoke test: full nn.Transformer forward/backward with every mask kind."""
    # Implemented through autograd, so no Jacobian test is needed.
    d_model = 512
    nhead = 16
    num_encoder_layers = 4
    num_decoder_layers = 3
    dim_feedforward = 256
    dropout = 0.3
    bsz = 8
    seq_length = 35
    tgt_length = 15

    layouts = (
        (True, (bsz, seq_length, d_model), (bsz, tgt_length, d_model)),
        (False, (seq_length, bsz, d_model), (tgt_length, bsz, d_model)),
    )
    for batch_first, src_size, tgt_size in layouts:
        transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers,
                                     dim_feedforward, dropout, batch_first=batch_first)
        src = torch.randn(src_size)
        tgt = torch.randn(tgt_size)
        # causal attention masks for source and target
        src_mask = transformer.generate_square_subsequent_mask(seq_length).double()
        tgt_mask = transformer.generate_square_subsequent_mask(tgt_length).double()
        memory_mask = torch.randn(tgt_length, seq_length).double()
        # random boolean key-padding masks over the batch
        src_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5
        tgt_key_padding_mask = torch.rand(bsz, tgt_length) >= 0.5
        memory_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5

        out = transformer(src, tgt,
                          src_mask=src_mask,
                          tgt_mask=tgt_mask,
                          memory_mask=memory_mask,
                          src_key_padding_mask=src_key_padding_mask,
                          tgt_key_padding_mask=tgt_key_padding_mask,
                          memory_key_padding_mask=memory_key_padding_mask)
        out.sum().backward()
def test_transformerencoderlayer(self):
    """Deterministic test for TransformerEncoderLayer (default relu activation).

    Every parameter is overwritten with a fixed cosine ramp so the layer is
    reproducible; outputs are compared against precomputed reference
    values, for both batch_first layouts, with and without
    src_key_padding_mask.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # disabled so results are deterministic
    bsz = 2

    for batch_first in (False, True):
        def perm_fn(x):
            # reference tensors below are written seq-first; permute as needed
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           batch_first=batch_first)

        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)

        # deterministic input
        encoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        result = model(encoder_input)
        ref_output = torch.tensor([[[2.258703, 0.127985, -0.697881, 0.170862]]])
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # 0 values are NOT masked. This shouldn't mask anything.
        mask = torch.tensor([[0]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # 1 values are masked. Since there is only 1 input embedding this
        # will result in nan.
        mask = torch.tensor([[1]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertTrue(np.isnan(result).all())

        # deterministic input, two time steps
        encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.272644, 0.119035, -0.691669, 0.153486]],
                                           [[2.272644, 0.119035, -0.691669, 0.153486]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # all 0 which is no masking
        mask = torch.tensor([[0, 0]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # masking the first position changes the expected output
        mask = torch.tensor([[1, 0]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.301516, 0.092249, -0.679101, 0.103088]],
                                           [[2.301516, 0.092249, -0.679101, 0.103088]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # deterministic input, batch of 2 x 5 time steps
        encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                               [0.5387, 0.1655, 0.3565, 0.0471]],
                                              [[0.8335, 0.2799, 0.5031, 0.2947],
                                               [0.1402, 0.0318, 0.7636, 0.1346]],
                                              [[0.6333, 0.9344, 0.1376, 0.9938],
                                               [0.8924, 0.2872, 0.6692, 0.2944]],
                                              [[0.9897, 0.6915, 0.3154, 0.1733],
                                               [0.8645, 0.3513, 0.3064, 0.0767]],
                                              [[0.8117, 0.2366, 0.4838, 0.7881],
                                               [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
                                            [2.427987, 0.021213, -0.602496, -0.084103]],
                                           [[2.424689, 0.019155, -0.604793, -0.085672],
                                            [2.413863, 0.022211, -0.612486, -0.072490]],
                                           [[2.433774, 0.021598, -0.598343, -0.087548],
                                            [2.425104, 0.019748, -0.604515, -0.084839]],
                                           [[2.436185, 0.022682, -0.596625, -0.087261],
                                            [2.433556, 0.021891, -0.598509, -0.086832]],
                                           [[2.416246, 0.017512, -0.610712, -0.082961],
                                            [2.422901, 0.024187, -0.606178, -0.074929]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # all 0
        mask = torch.zeros([2, 5]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # mask a few positions and compare against the masked reference
        mask[0, 1] = 1
        mask[1, 3] = 1
        mask[1, 4] = 1
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
                                            [2.428811, 0.021445, -0.601912, -0.084252]],
                                           [[2.425009, 0.019155, -0.604566, -0.085899],
                                            [2.415408, 0.02249 , -0.611415, -0.073]],
                                           [[2.434199, 0.021682, -0.598039, -0.087699],
                                            [2.42598, 0.019941, -0.603896, -0.085091]],
                                           [[2.436457, 0.022736, -0.59643 , -0.08736],
                                            [2.434021, 0.022093, -0.598179, -0.08679]],
                                           [[2.416531, 0.017498, -0.610513, -0.083181],
                                            [2.4242, 0.024653, -0.605266, -0.074959]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
def test_transformerencoderlayer_gelu(self):
    """Deterministic test for TransformerEncoderLayer with gelu activation.

    Same fixed-cosine-weight setup as the relu test, crossed over the three
    spellings of gelu ('gelu' string, F.gelu, nn.GELU()) and both
    batch_first layouts.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # disabled so results are deterministic
    bsz = 2

    for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
        def perm_fn(x):
            # reference tensors below are written seq-first; permute as needed
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           activation, batch_first=batch_first)

        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)

        # deterministic input
        encoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        result = model(encoder_input)
        ref_output = torch.tensor([[[2.249815, 0.131006, -0.702199, 0.177868]]])
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

        # deterministic input, two time steps
        encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.264103, 0.121417, -0.696012, 0.159724]],
                                           [[2.264103, 0.121417, -0.696012, 0.159724]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

        # deterministic input, batch of 2 x 5 time steps
        encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                               [0.5387, 0.1655, 0.3565, 0.0471]],
                                              [[0.8335, 0.2799, 0.5031, 0.2947],
                                               [0.1402, 0.0318, 0.7636, 0.1346]],
                                              [[0.6333, 0.9344, 0.1376, 0.9938],
                                               [0.8924, 0.2872, 0.6692, 0.2944]],
                                              [[0.9897, 0.6915, 0.3154, 0.1733],
                                               [0.8645, 0.3513, 0.3064, 0.0767]],
                                              [[0.8117, 0.2366, 0.4838, 0.7881],
                                               [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.42163188, 0.03227153, -0.60714219, -0.05908082],
                                            [2.42151276, 0.03302179, -0.60722523, -0.05762651]],
                                           [[2.41926761, 0.02974034, -0.60879519, -0.0621269],
                                            [2.41626395, 0.03539356, -0.61087842, -0.04978623]],
                                           [[2.42382808, 0.03218872, -0.6055963, -0.06073591],
                                            [2.41983477, 0.03085259, -0.60840145, -0.06046414]],
                                           [[2.42500749, 0.03328855, -0.60476388, -0.0595334],
                                            [2.4237977, 0.03290575, -0.60561789, -0.05940082]],
                                           [[2.41383916, 0.02686345, -0.61256377, -0.06380707],
                                            [2.42000277, 0.03800944, -0.60824798, -0.04754947]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
def test_transformerdecoderlayer(self):
    """Deterministic test for TransformerDecoderLayer (default relu activation).

    All parameters are overwritten with a fixed cosine ramp; decoder outputs
    are compared against precomputed references for both batch_first
    layouts, including tgt_key_padding_mask and memory_key_padding_mask
    variants.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # disabled so results are deterministic
    bsz = 2
    seq_length = 5
    tgt_length = 3

    for batch_first in (False, True):
        def perm_fn(x):
            # reference tensors below are written seq-first; permute as needed
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           batch_first=batch_first)

        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)

        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        memory_input = torch.tensor([[[60., 70., 80., 90.]]])
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor([[[2.314351, 0.094805, -0.671322, 0.101977]]])
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # deterministic input, two target steps against a single memory step
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]]))
        memory_input = torch.tensor([[[1., 2., 3., 4.]]])
        result = model(decoder_input, memory_input)
        result = result.detach().numpy()
        ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
                                           [[2.422245, 0.051716, -0.606338, -0.024756]]]))
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # deterministic input, two steps on both sides
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
                                           [[2.343536, 0.085561, -0.654954, 0.074991]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # deterministic input, batch of 2, tgt_length=3 vs seq_length=5
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]))
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # key_padding_mask: all False is equivalent to no mask at all
        key_padding_mask = torch.zeros(2, 3) == 1
        result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # key_padding_mask: masking target positions changes the output
        key_padding_mask[0, 2] = 1
        key_padding_mask[1, 1] = 1
        key_padding_mask[1, 2] = 1
        result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
                                            [2.4323, 0.029375, -0.599553, -0.071881]],
                                           [[2.428523, 0.026838, -0.602226, -0.07391],
                                            [2.432634, 0.029842, -0.599318, -0.071253]],
                                           [[2.432278, 0.028152, -0.599555, -0.074139],
                                            [2.432659, 0.029244, -0.599294, -0.072382]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # memory_key_padding_mask: all False is equivalent to no mask at all
        key_padding_mask = torch.zeros(2, 5) == 1
        result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)

        # memory_key_padding_mask: masking memory positions changes the output
        key_padding_mask[0, 4] = 1
        key_padding_mask[1, 3] = 1
        key_padding_mask[1, 4] = 1
        result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
                                            [2.432692, 0.028583, -0.599263, -0.073634]],
                                           [[2.428247, 0.02662, -0.602419, -0.074123],
                                            [2.432657, 0.029055, -0.599293, -0.072732]],
                                           [[2.431515, 0.027687, -0.600096, -0.074459],
                                            [2.433075, 0.028543, -0.598987, -0.073985]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
def test_transformerdecoderlayer_gelu(self):
    """Deterministic test for TransformerDecoderLayer with gelu activation.

    Same fixed-cosine-weight setup as the relu decoder test, crossed over
    the three spellings of gelu ('gelu' string, F.gelu, nn.GELU()) and
    both batch_first layouts.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # disabled so results are deterministic
    bsz = 2
    seq_length = 5
    tgt_length = 3

    for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
        def perm_fn(x):
            # reference tensors below are written seq-first; permute as needed
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           activation, batch_first=batch_first)

        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)

        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        memory_input = torch.tensor([[[60., 70., 80., 90.]]])
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]])
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

        # deterministic input, two target steps against a single memory step
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]]))
        memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
                                           [[2.415448, 0.054389, -0.610932, -0.0156613]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

        # deterministic input, two steps on both sides
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
                                           [[2.338531, 0.087709, -0.65776, 0.080646]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

        # deterministic input, batch of 2, tgt_length=3 vs seq_length=5
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]))
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
                                            [2.42210631, 0.03546578, -0.60679895, -0.05357488]],
                                           [[2.41907674, 0.0336104, -0.60892977, -0.05490462],
                                            [2.42216881, 0.03586554, -0.6067524, -0.05289126]],
                                           [[2.42205716, 0.03488046, -0.60683681, -0.05460596],
                                            [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
def test_transformerencoder(self):
    """Deterministic test for TransformerEncoder stacks.

    Builds encoder layers with fixed cosine-ramp weights and checks the
    stacked encoder output against precomputed references for 1, 2 and 6
    layers, with and without a final LayerNorm, for both batch_first
    layouts and several src_key_padding_mask settings. Runs on CUDA when
    available, otherwise CPU.
    """
    def get_a_test_layer(use_cuda, activation, batch_first=False):
        d_model = 4
        nhead = 2
        dim_feedforward = 16
        dropout = 0.0  # disabled so results are deterministic
        device = torch.device("cuda" if use_cuda else "cpu")

        layer = nn.TransformerEncoderLayer(
            d_model,
            nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation=activation,
            batch_first=batch_first).to(device)

        with torch.no_grad():
            # set constant weights of the model
            for idx, p in enumerate(layer.parameters()):
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape
                x = torch.cos(torch.arange(0, sz).float().view(shape))
                p.data.copy_(x)

        return layer

    # this is a deterministic test for TransformerEncoder
    activation = F.relu
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    for batch_first in (True, False):
        def perm_fn(x):
            # reference tensors below are written seq-first; permute as needed
            return x.transpose(1, 0) if batch_first else x

        encoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                         batch_first=batch_first)

        model = nn.TransformerEncoder(encoder_layer, 1).to(device)

        # deterministic input
        encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                               [0.5387, 0.1655, 0.3565, 0.0471]],
                                              [[0.8335, 0.2799, 0.5031, 0.2947],
                                               [0.1402, 0.0318, 0.7636, 0.1346]],
                                              [[0.6333, 0.9344, 0.1376, 0.9938],
                                               [0.8924, 0.2872, 0.6692, 0.2944]],
                                              [[0.9897, 0.6915, 0.3154, 0.1733],
                                               [0.8645, 0.3513, 0.3064, 0.0767]],
                                              [[0.8117, 0.2366, 0.4838, 0.7881],
                                               [0.3718, 0.4945, 0.9511, 0.0864]]]
                                             )).to(device)
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
                                            [2.427987, 0.021213, -0.602496, -0.084103]],
                                           [[2.424689, 0.019155, -0.604793, -0.085672],
                                            [2.413863, 0.022211, -0.612486, -0.072490]],
                                           [[2.433774, 0.021598, -0.598343, -0.087548],
                                            [2.425104, 0.019748, -0.604515, -0.084839]],
                                           [[2.436185, 0.022682, -0.596625, -0.087261],
                                            [2.433556, 0.021891, -0.598509, -0.086832]],
                                           [[2.416246, 0.017512, -0.610712, -0.082961],
                                            [2.422901, 0.024187, -0.606178, -0.074929]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

        # all 0: a fully-False padding mask must not change the output
        mask = torch.zeros([2, 5]).to(device) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

        # mask a few positions and compare against the masked reference
        mask[0, 1] = 1
        mask[1, 3] = 1
        mask[1, 4] = 1
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
                                            [2.428811, 0.021445, -0.601912, -0.084252]],
                                           [[2.425009, 0.019155, -0.604566, -0.085899],
                                            [2.415408, 0.02249, -0.611415, -0.073]],
                                           [[2.434199, 0.021682, -0.598039, -0.087699],
                                            [2.42598, 0.019941, -0.603896, -0.085091]],
                                           [[2.436457, 0.022736, -0.59643, -0.08736],
                                            [2.434021, 0.022093, -0.598179, -0.08679]],
                                           [[2.416531, 0.017498, -0.610513, -0.083181],
                                            [2.4242, 0.024653, -0.605266, -0.074959]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

        # test case 2, multiple layers no norm
        model = nn.TransformerEncoder(encoder_layer, 2).to(device)
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.419051, 0.017446, -0.608738, -0.085003],
                                            [2.419102, 0.017452, -0.608703, -0.085026]],
                                           [[2.419043, 0.017445, -0.608744, -0.084999],
                                            [2.419052, 0.017446, -0.608738, -0.085004]],
                                           [[2.419067, 0.017448, -0.608727, -0.085010],
                                            [2.419098, 0.017452, -0.608706, -0.085024]],
                                           [[2.419072, 0.017449, -0.608724, -0.085012],
                                            [2.419119, 0.017455, -0.608691, -0.085034]],
                                           [[2.419019, 0.017442, -0.608761, -0.084989],
                                            [2.419075, 0.017449, -0.608722, -0.085014]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

        # with 6 layers the output converges to an (almost) constant point
        model = nn.TransformerEncoder(encoder_layer, 6).to(device)
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.419101, 0.017453, -0.608703, -0.085025],
                                            [2.419101, 0.017453, -0.608704, -0.085025]],
                                           [[2.419101, 0.017453, -0.608703, -0.085025],
                                            [2.419101, 0.017453, -0.608704, -0.085025]],
                                           [[2.419101, 0.017453, -0.608703, -0.085025],
                                            [2.419101, 0.017453, -0.608704, -0.085025]],
                                           [[2.419101, 0.017453, -0.608703, -0.085025],
                                            [2.419101, 0.017453, -0.608704, -0.085025]],
                                           [[2.419101, 0.017453, -0.608703, -0.085025],
                                            [2.419101, 0.017453, -0.608704, -0.085025]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

        # test case 3, multiple layers with norm
        # d_model = 4
        norm = nn.LayerNorm(4)
        model = nn.TransformerEncoder(encoder_layer, 2, norm=norm).to(device)
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[1.695949, -0.357635, -0.893077, -0.445238],
                                            [1.695955, -0.357639, -0.893050, -0.445266]],
                                           [[1.695948, -0.357634, -0.893082, -0.445233],
                                            [1.695950, -0.357635, -0.893077, -0.445238]],
                                           [[1.695951, -0.357636, -0.893069, -0.445246],
                                            [1.695955, -0.357639, -0.893052, -0.445264]],
                                           [[1.695952, -0.357636, -0.893066, -0.445249],
                                            [1.695957, -0.357641, -0.893041, -0.445276]],
                                           [[1.695946, -0.357632, -0.893095, -0.445220],
                                            [1.695952, -0.357637, -0.893065, -0.445251]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

        model = nn.TransformerEncoder(encoder_layer, 6, norm=norm).to(device)
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[1.695955, -0.357639, -0.893051, -0.445265],
                                            [1.695955, -0.357639, -0.893051, -0.445265]],
                                           [[1.695955, -0.357639, -0.893051, -0.445265],
                                            [1.695955, -0.357639, -0.893051, -0.445265]],
                                           [[1.695955, -0.357639, -0.893051, -0.445265],
                                            [1.695955, -0.357639, -0.893051, -0.445265]],
                                           [[1.695955, -0.357639, -0.893051, -0.445265],
                                            [1.695955, -0.357639, -0.893051, -0.445265]],
                                           [[1.695955, -0.357639, -0.893051, -0.445265],
                                            [1.695955, -0.357639, -0.893051, -0.445265]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
    def test_transformerdecoder(self):
        """Deterministic regression test for ``nn.TransformerDecoder``.

        Builds decoder layers whose parameters are overwritten with a fixed
        cosine pattern, runs them on hard-coded inputs (relu and gelu
        activations, with and without ``batch_first``, with and without a
        final ``LayerNorm`` and key-padding masks), and compares the results
        against precomputed reference outputs.
        """
        def get_a_test_layer(use_cuda, activation, batch_first=False):
            # Small deterministic TransformerDecoderLayer: d_model=4, nhead=2,
            # dropout disabled, every parameter filled with cos(0..n-1).
            d_model = 4
            nhead = 2
            dim_feedforward = 16
            dropout = 0.0
            device = torch.device("cuda" if use_cuda else "cpu")

            layer = nn.TransformerDecoderLayer(
                d_model,
                nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                activation=activation,
                batch_first=batch_first).to(device)

            with torch.no_grad():
                # set constant weights of the model
                for idx, p in enumerate(layer.parameters()):
                    x = p.data
                    sz = x.view(-1).size(0)
                    shape = x.shape
                    x = torch.cos(torch.arange(0, sz).float().view(shape))
                    p.data.copy_(x)

            return layer

        # this is a deterministic test for TransformerDecoder
        for batch_first in (False, True):
            def perm_fn(x):
                # Reference tensors below are written seq-first; transpose
                # them when the layer under test is batch_first.
                return x.transpose(1, 0) if batch_first else x

            activation = F.relu
            use_cuda = torch.cuda.is_available()
            device = torch.device("cuda" if use_cuda else "cpu")
            decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                             batch_first=batch_first)

            model = nn.TransformerDecoder(decoder_layer, 1).to(device)

            # deterministic input
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor(
                [[[2.314351, 0.094805, -0.671322, 0.101977]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                  [[11., 12., 13., 14.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
                                               [[2.422245, 0.051716, -0.606338, -0.024756]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                                  [[5., 6., 7., 8.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                 [[11., 12., 13., 14.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
                                               [[2.343536, 0.085561, -0.654954, 0.074991]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

            # key_padding_mask
            # an all-False mask must leave the output unchanged
            key_padding_mask = torch.zeros(2, 3).to(device) == 1
            result = model(decoder_input, memory_input,
                           tgt_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

            # key_padding_mask
            # masking some target positions changes the reference values
            key_padding_mask[0, 2] = 1
            key_padding_mask[1, 1] = 1
            key_padding_mask[1, 2] = 1
            result = model(decoder_input, memory_input,
                           tgt_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
                                                [2.4323, 0.029375, -0.599553, -0.071881]],
                                               [[2.428523, 0.026838, -0.602226, -0.07391],
                                                [2.432634, 0.029842, -0.599318, -0.071253]],
                                               [[2.432278, 0.028152, -0.599555, -0.074139],
                                                [2.432659, 0.029244, -0.599294, -0.072382]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

            # memory_key_padding_mask
            # an all-False memory mask must leave the output unchanged
            key_padding_mask = torch.zeros(2, 5).to(device) == 1
            result = model(decoder_input, memory_input,
                           memory_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

            # memory_key_padding_mask
            # masking some memory positions changes the reference values
            key_padding_mask[0, 4] = 1
            key_padding_mask[1, 3] = 1
            key_padding_mask[1, 4] = 1
            result = model(decoder_input,
                           memory_input,
                           memory_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
                                                [2.432692, 0.028583, -0.599263, -0.073634]],
                                               [[2.428247, 0.02662, -0.602419, -0.074123],
                                                [2.432657, 0.029055, -0.599293, -0.072732]],
                                               [[2.431515, 0.027687, -0.600096, -0.074459],
                                                [2.433075, 0.028543, -0.598987, -0.073985]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

            # multiple layers no norm
            model = nn.TransformerDecoder(decoder_layer, 2).to(device)

            # deterministic input
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor(
                [[[2.31316, 0.0950293, -0.671995, 0.102802]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)

            # multiple layers no norm
            model = nn.TransformerDecoder(decoder_layer, 6).to(device)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.42794, 0.026164, -0.60263, -0.0747591],
                                                [2.43113, 0.0279516, -0.600376, -0.0736896]],
                                               [[2.42794, 0.026164, -0.60263, -0.0747591],
                                                [2.43113, 0.0279516, -0.600376, -0.0736896]],
                                               [[2.42794, 0.026164, -0.60263, -0.0747591],
                                                [2.43113, 0.0279516, -0.600376, -0.0736896]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

            # multiple layers with norm
            # d_model = 4
            norm = nn.LayerNorm(4)
            model = nn.TransformerDecoder(decoder_layer, 2, norm=norm).to(device)

            # deterministic input
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor(
                [[[1.66166, -0.326986, -1.01466, -0.320017]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)

            # multiple layers with norm
            model = nn.TransformerDecoder(decoder_layer, 6, norm=norm).to(device)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[1.69559, -0.357291, -0.894741, -0.443553],
                                                [1.69571, -0.357363, -0.894154, -0.444196]],
                                               [[1.69559, -0.357291, -0.894741, -0.443553],
                                                [1.69571, -0.357363, -0.894154, -0.444196]],
                                               [[1.69559, -0.357291, -0.894741, -0.443553],
                                                [1.69571, -0.357363, -0.894154, -0.444196]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)

            # gelu activation test cases
            activation = "gelu"
            use_cuda = torch.cuda.is_available()
            device = torch.device("cuda" if use_cuda else "cpu")
            decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                             batch_first=batch_first)

            model = nn.TransformerDecoder(decoder_layer, 1).to(device)

            # deterministic input
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                  [[11., 12., 13., 14.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
                                               [[2.415448, 0.054389, -0.610932, -0.0156613]]])).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                                  [[5., 6., 7., 8.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                 [[11., 12., 13., 14.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
                                               [[2.338531, 0.087709, -0.65776, 0.080646]]])).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
                                                [2.42210631, 0.03546578, -0.60679895, -0.05357488]],
                                               [[2.41907674, 0.0336104, -0.60892977, -0.05490462],
                                                [2.42216881, 0.03586554, -0.6067524, -0.05289126]],
                                               [[2.42205716, 0.03488046, -0.60683681, -0.05460596],
                                                [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
@unittest.skipIf(not (TEST_CUDNN and TEST_MULTIGPU), 'CUDNN or multi-gpu not available')
def test_cudnn_rnn_dropout_states_device(self):
rnn = nn.RNN(10, 20, num_layers=2, dropout=.5)
device = 1
input = torch.randn(5, 4, 10).cuda(device)
rnn.cuda(device)
hx = torch.randn(2, 4, 20).cuda(device)
output = rnn(input, hx)
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    @skipIfRocm
    def test_cudnn_weight_format(self):
        """Forward/backward must still be correct when an RNN's weights no
        longer view into cuDNN's single flat weight buffer.

        After replacing one weight tensor's storage, the module should emit
        a one-time warning about non-contiguous weights while producing the
        same outputs and gradients as the contiguous run.
        """
        rnns = [
            nn.LSTM(10, 20, batch_first=True),
            nn.LSTM(10, 20, batch_first=True, proj_size=10),
            nn.GRU(10, 20, batch_first=True),
            nn.RNN(10, 20, batch_first=True)
        ]
        # the warning is only expected on the very first non-contiguous
        # forward across all modules; it is consumed and reset below
        first_warn = True
        for rnn in rnns:
            rnn.cuda()
            input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
            hx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
            # all_vars collects every tensor whose gradient we compare:
            # [input, hx, (cx for LSTM), *parameters]
            all_vars = [input, hx] + list(rnn.parameters())
            if isinstance(rnn, nn.LSTM):
                # LSTM with projections has different hx size
                if rnn.proj_size > 0:
                    hx = torch.randn(1, 5, 10, requires_grad=True, device="cuda")
                    all_vars[1] = hx
                cx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
                all_vars[2:2] = [cx]
                hx = (hx, cx)

            # reference run: contiguous weights
            output = rnn(input, hx)
            output[0].sum().backward()
            grads = [v.grad.data.clone() for v in all_vars]
            for v in all_vars:
                v.grad.data.zero_()

            # Weights will no longer view onto the same chunk of memory
            # NOTE(review): all_vars[4] is assumed to be a weight parameter
            # given the fixed [input, hx, cx?, *params] layout above
            weight = all_vars[4]
            weight_data = weight.data.clone()
            with torch.no_grad():
                weight.set_(weight_data)

            for _ in range(2):
                with warnings.catch_warnings(record=True) as w:
                    output_noncontig = rnn(input, hx)
                if first_warn:
                    self.assertEqual(len(w), 1)
                    self.assertIn('weights are not part of single contiguous chunk of memory', w[0].message.args[0])
                    first_warn = False
                    warnings.resetwarnings()
                output_noncontig[0].sum().backward()
                grads_noncontig = [v.grad.data.clone() for v in all_vars]
                for v in all_vars:
                    v.grad.data.zero_()
                # non-contiguous run must match the contiguous reference
                self.assertEqual(output, output_noncontig)
                self.assertEqual(grads_noncontig, grads)

            # Make sure these still share storage
            weight_data[:] = 4
            self.assertEqual(weight_data, all_vars[4].data)
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    def test_cudnn_weight_tying(self):
        """Tied weights must survive a cuDNN forward/backward and an
        optimizer step.

        The reverse-direction input bias is aliased to the forward one; the
        CUDA result after an SGD step must match the CPU result computed
        with the same (tied) parameters.
        """
        rnns = [
            nn.LSTM(10, 20, batch_first=True, bidirectional=True),
            nn.LSTM(10, 20, batch_first=True, bidirectional=True, proj_size=10),
            nn.GRU(10, 20, batch_first=True, bidirectional=True),
            nn.RNN(10, 20, batch_first=True, bidirectional=True)
        ]
        for rnn in rnns:
            # tie the reverse bias to the forward bias (shared Parameter)
            rnn.bias_ih_l0_reverse = rnn.bias_ih_l0
            rnn.cuda()
            input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
            hx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
            all_vars = [input, hx] + list(rnn.parameters())
            opt = torch.optim.SGD(rnn.parameters(), lr=0.1)
            opt.zero_grad()
            if isinstance(rnn, nn.LSTM):
                # LSTM with projections has different hx size
                if rnn.proj_size > 0:
                    hx = torch.randn(2, 5, 10, requires_grad=True, device="cuda")
                    all_vars[1] = hx
                cx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
                all_vars[2:2] = [cx]
                hx = (hx, cx)

            # warnings are captured (and discarded) so tying-related
            # messages do not pollute the test output
            with warnings.catch_warnings(record=True) as w:
                output = rnn(input, hx)
            output[0].sum().backward()

            opt.step()
            with warnings.catch_warnings(record=True) as w:
                output_cuda = rnn(input, hx)
            rnn.cpu()
            hx = (hx[0].cpu(), hx[1].cpu()) if isinstance(rnn, nn.LSTM) else hx.cpu()
            output_cpu = rnn(input.cpu(), hx)
            # CUDA and CPU paths must agree on the tied-weight model
            self.assertEqual(output_cuda, output_cpu)
def test_transformer_args_check(self):
model_name = 'Transformer'
d_model = 128
nhead = 4
num_encoder_layers = 2
num_decoder_layers = 3
dim_feedforward = 65
dropout = 0.3
bsz = 3
seq_len = 35
tgt_len = 15
activations = [F.relu, F.gelu]
wrong_bsz = 7
wrong_d_model = 63
wrong_nhead = 5
wrong_activation = "abc"
def test(encoder_input_shape, decoder_input_shape,
src_mask_len=None, tgt_mask_len=None, memory_mask_size=None,
src_key_padding_mask_size=None, tgt_key_padding_mask_size=None,
memory_key_padding_mask_size=None):
encoder_input = torch.randn(encoder_input_shape)
decoder_input = torch.randn(decoder_input_shape)
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers,
num_decoder_layers, dim_feedforward, dropout)
if src_mask_len is not None:
src_mask = model.generate_square_subsequent_mask(src_mask_len)
else:
src_mask = None
if tgt_mask_len is not None:
tgt_mask = model.generate_square_subsequent_mask(tgt_mask_len)
else:
tgt_mask = None
if memory_mask_size is not None:
memory_task = torch.rand(memory_mask_size)
else:
memory_task = None
if src_key_padding_mask_size is not None:
src_key_padding_mask = torch.rand(src_key_padding_mask_size) >= 0.5
else:
src_key_padding_mask = None
if tgt_key_padding_mask_size is not None:
tgt_key_padding_mask = torch.rand(tgt_key_padding_mask_size) >= 0.5
else:
tgt_key_padding_mask = None
if memory_key_padding_mask_size is not None:
memory_key_padding_mask = torch.rand(memory_key_padding_mask_size) >= 0.5
else:
memory_key_padding_mask = None
with self.assertRaises(RuntimeError):
model(encoder_input, decoder_input,
src_mask=src_mask,
tgt_mask=tgt_mask,
memory_mask=memory_task,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
correct_encoder_input_shape = (seq_len, bsz, d_model)
correct_decoder_input_shape = (tgt_len, bsz, d_model)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
# Incorrect encoder_input batch size
encoder_input_shape = update_shape(correct_encoder_input_shape, 1, wrong_bsz)
decoder_input_shape = correct_decoder_input_shape
test(encoder_input_shape, decoder_input_shape)
# Incorrect decoder_input batch size
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = update_shape(correct_decoder_input_shape, 1, wrong_bsz)
test(encoder_input_shape, decoder_input_shape)
# Incorrect encoder_input input size
encoder_input_shape = update_shape(correct_encoder_input_shape, 2, wrong_d_model)
decoder_input_shape = correct_decoder_input_shape
test(encoder_input_shape, decoder_input_shape)
# Incorrect decoder_input input size
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = update_shape(correct_decoder_input_shape, 2, wrong_d_model)
test(encoder_input_shape, decoder_input_shape)
# Incorrect nhead
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
model = getattr(nn, model_name)(d_model, wrong_nhead, num_encoder_layers,
num_decoder_layers, dim_feedforward, dropout)
# Incorrect src_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_src_mask_size = seq_len + 1
test(encoder_input_shape, decoder_input_shape, src_mask_len=wrong_src_mask_size)
# Incorrect tgt_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_tgt_mask_size = tgt_len + 1
test(encoder_input_shape, decoder_input_shape, tgt_mask_len=wrong_tgt_mask_size)
# Incorrect memory_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_tgt_mask_size = tgt_len + 1
test(encoder_input_shape, decoder_input_shape,
memory_mask_size=(wrong_tgt_mask_size, wrong_src_mask_size))
# Incorrect src_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
src_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
# Incorrect tgt_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
tgt_key_padding_mask_size=(wrong_bsz, wrong_tgt_mask_size))
# Incorrect memory_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
memory_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
# Correct activations
for activation in activations:
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, activation)
# Incorrect activation
with self.assertRaises(RuntimeError):
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, wrong_activation)
def test_transformer_layer_args_check(self):
model_names = ['TransformerEncoderLayer', 'TransformerDecoderLayer']
d_model = 128
nhead = 4
dim_feedforward = 65
dropout = 0.3
bsz = 3
seq_len = 35
tgt_len = 15
activations = [F.relu, F.gelu]
wrong_activation = "abc"
encoder_input_shape = (seq_len, bsz, d_model)
decoder_input_shape = (tgt_len, bsz, d_model)
encoder_input = torch.randn(encoder_input_shape)
decoder_input = torch.randn(decoder_input_shape)
for model_name in model_names:
for activation in activations:
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, activation)
# Incorrect activation
for model_name in model_names:
with self.assertRaises(RuntimeError):
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, wrong_activation)
def test_rnn_args_check(self):
input_size = 3
hidden_size = 5
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
bad_size = 7 # prime number so that no size can divide it.
def test(input_shape, hidden_shape, mode):
for input, hidden in get_inputs(input_shape, hidden_shape, mode):
model = getattr(nn, mode)(input_size, hidden_size, num_layers)
self.assertRaises(RuntimeError, lambda: model(input, hidden))
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
def get_inputs(input_shape, hidden_shape, mode):
'''returns list( tuple(input, hidden) )
where input, hidden are inputs to a model'''
input = torch.randn(input_shape)
hidden = torch.randn(hidden_shape)
if mode != 'LSTM':
return [(input, hidden)]
if hidden_shape == correct_hidden_shape:
return [(input, (hidden, hidden))]
good_hidden = torch.randn(correct_hidden_shape)
return [
(input, (hidden, good_hidden)),
(input, (good_hidden, hidden)),
]
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
# Incorrect input batch size
input_shape = update_shape(correct_input_shape, 1, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
# Incorrect hidden batch size
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 1, bad_size)
test(input_shape, hidden_shape, mode)
# Incorrect input size
input_shape = update_shape(correct_input_shape, 2, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
# Incorrect hidden size
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 2, bad_size)
test(input_shape, hidden_shape, mode)
# Incorrect hidden[0]
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 0, bad_size)
test(input_shape, hidden_shape, mode)
def test_projections_lstm_args_check(self):
input_size = 3
hidden_size = 5
proj_size = 2
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
bad_size = 7 # prime number so that no size can divide it.
def test(input_shape, hidden_h_shape, hidden_c_shape):
for input, hidden in get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
self.assertRaises(RuntimeError, lambda: model(input, hidden))
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
def get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
'''returns list( tuple(input, hidden) )
where input, hidden are inputs to a model'''
input = torch.randn(input_shape)
hidden_h = torch.randn(hidden_h_shape)
hidden_c = torch.randn(hidden_c_shape)
return [(input, (hidden_h, hidden_c))]
# Incorrect input batch size
input_shape = update_shape(correct_input_shape, 1, bad_size)
test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
# Incorrect hidden batch size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 1, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 1, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect input size
input_shape = update_shape(correct_input_shape, 2, bad_size)
test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
# Incorrect hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 2, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 2, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect hidden[0]
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect proj size = hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, hidden_size)
hidden_c_shape = correct_hidden_c_shape
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect proj size != hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
hidden_c_shape = correct_hidden_c_shape
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect cell size != hidden size
input_shape = correct_input_shape
hidden_h_shape = correct_hidden_h_shape
hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_rnn_check_device(self):
input_size = 3
hidden_size = 5
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
model = getattr(nn, mode)(input_size, hidden_size, num_layers)
input = torch.randn(correct_input_shape)
hidden = torch.randn(correct_hidden_shape)
# input and weights are not at the same device
with self.assertRaisesRegex(RuntimeError,
"Input and parameter tensors are not at the same device"):
model(input.to('cuda:0'))
# input and hiddens are not at the same device
with self.assertRaisesRegex(RuntimeError,
r"Input and hidden tensors are not at the same device"):
if mode == 'LSTM':
model(input, (hidden.to('cuda:0'), hidden.to('cuda:0')))
else:
model(input, (hidden.to('cuda:0')))
# hidden tensors are not at the same CUDA device
if mode == 'LSTM':
with self.assertRaisesRegex(RuntimeError,
"Input and hidden tensors are not at the same device"):
model(input.to('cuda:0'), (hidden.to('cuda:0'), hidden.to('cuda:1')))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_projections_lstm_check_device(self):
input_size = 3
hidden_size = 5
proj_size = 2
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)
model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
input = torch.randn(correct_input_shape)
hidden_h = torch.randn(correct_hidden_h_shape)
hidden_c = torch.randn(correct_hidden_c_shape)
# input and weights are not at the same device
with self.assertRaisesRegex(RuntimeError,
"Input and parameter tensors are not at the same device"):
model(input.to('cuda:0'))
# input and hiddens are not at the same device
with self.assertRaisesRegex(RuntimeError,
r"Input and hidden tensors are not at the same device"):
model(input, (hidden_h.to('cuda:0'), hidden_c.to('cuda:0')))
# hidden tensors are not at the same CUDA device
with self.assertRaisesRegex(RuntimeError,
"Input and hidden tensors are not at the same device"):
model(input.to('cuda:0'), (hidden_h.to('cuda:0'), hidden_c.to('cuda:1')))
def test_rnn_initial_hidden_state(self):
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
rnn = getattr(nn, mode)(30, 20, 2)
input = torch.randn(10, 32, 30)
hidden = torch.zeros(2, 32, 20)
if mode == 'LSTM':
hidden = (hidden, hidden)
output1, hidden1 = rnn(input, hidden)
output2, hidden2 = rnn(input)
self.assertEqual(output1, output2)
self.assertEqual(hidden1, hidden2)
def test_projections_lstm_initial_hidden_state(self):
for bidir in [False, True]:
rnn = nn.LSTM(30, 20, 2, bidirectional=bidir, proj_size=10)
num_dirs = 2 if bidir else 1
input = torch.randn(10, 32, 30)
hidden_h = torch.zeros(2 * num_dirs, 32, 10)
hidden_c = torch.zeros(2 * num_dirs, 32, 20)
hidden = (hidden_h, hidden_c)
output1, hidden1 = rnn(input, hidden)
output2, hidden2 = rnn(input)
self.assertEqual(output1, output2)
self.assertEqual(hidden1, hidden2)
def test_projections_errors_on_gru_and_rnn(self):
error_msg = "proj_size argument is only supported for LSTM, not RNN or GRU"
for mode in ['RNN', 'GRU']:
with self.assertRaisesRegex(ValueError, error_msg):
rnn = getattr(nn, mode)(30, 20, 2, proj_size=10)
    def _test_RNN_cpu_vs_cudnn(self, dropout, dtype=torch.double):
        """Compare forward + backward of RNN/LSTM/GRU between CPU and cuDNN.

        Sweeps every module over the cross product of bias / bidirectional /
        batch_first / contiguity / packed-sequence options, then repeats the
        sweep for plain RNN nonlinearities and for LSTM with projections.

        Args:
            dropout: dropout probability for the modules. Callers pass only
                the deterministic extremes (0 or 1), since intermediate
                values are stochastic and not comparable across backends.
            dtype: floating dtype used for all tensors (default double).
        """
        def forward_backward(cuda, rnn, input_val, grad_output, weights_val, hx_val, grad_hy,
                             cx_val=None, grad_cy=None):
            # Seeds the module with `weights_val`, runs one forward/backward
            # pass (optionally on CUDA), and returns every tensor needed for
            # the CPU-vs-GPU comparison.
            is_lstm = isinstance(rnn, nn.LSTM)
            # Copy the reference weights in so both runs start identically.
            for x_layer, y_layer in zip(rnn.all_weights, weights_val):
                for x, y in zip(x_layer, y_layer):
                    x.data.copy_(y.data)
            if isinstance(input_val, rnn_utils.PackedSequence):
                input = rnn_utils.PackedSequence(
                    input_val.data.data.requires_grad_(True), input_val.batch_sizes)
                input_var = input.data
            else:
                input = input_val.clone().requires_grad_(True)
                input_var = input
            if is_lstm:
                # LSTM hidden state is an (h, c) pair.
                if cx_val is None:
                    hx = (hx_val.clone().requires_grad_(True),
                          hx_val.add(1).requires_grad_(True))
                else:
                    hx = (hx_val.clone().requires_grad_(True),
                          cx_val.add(1).requires_grad_(True))
            else:
                hx = hx_val.clone().requires_grad_(True)
            if cuda:
                rnn.cuda()
                input_var.data = input_var.data.cuda()
                if is_lstm:
                    hx[0].data = hx[0].data.cuda()
                    hx[1].data = hx[1].data.cuda()
                else:
                    hx.data = hx.data.cuda()
                grad_hy = grad_hy.cuda()
                if grad_cy is not None:
                    grad_cy = grad_cy.cuda()
                grad_output = grad_output.cuda()
            output, hy = rnn(input, hx)
            if isinstance(output, rnn_utils.PackedSequence):
                output = output.data
            if is_lstm:
                if grad_cy is None:
                    torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_hy + 1])
                else:
                    torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_cy + 1])
            else:
                torch.autograd.backward([output, hy], [grad_output, grad_hy])
            return {'output': output.data,
                    'hy': hy[0].data if is_lstm else hy.data,
                    'weights': rnn.all_weights,
                    'grad_input': input_var.grad.data,
                    'grad_hx': hx[0].grad.data if is_lstm else hx.grad.data,
                    'cy': hy[1].data if is_lstm else None,
                    'grad_cx': hx[1].grad.data if is_lstm else None}

        input_size = 10
        hidden_size = 6
        proj_size = 3
        num_layers = 2
        seq_length = 7
        batch = 6

        def make_noncontig(tensor):
            # Stack a zeroed copy with the tensor and select the second slice,
            # yielding the same values with non-contiguous strides.
            ndim = tensor.dim()
            return torch.stack([tensor.clone().zero_(), tensor], ndim).select(ndim, 1)

        def compare_cpu_gpu(outputs_cpu, outputs_gpu):
            # All non-weight entries of the result dicts must match.
            self.assertEqual(list(outputs_cpu.keys()), list(outputs_gpu.keys()))
            for key in outputs_cpu.keys():
                if key != 'weights':
                    self.assertEqual(outputs_cpu[key], outputs_gpu[key], atol=5e-5, rtol=0, msg=key)

            # check grad weights separately, as nested dict
            for cpu_layer_weight, gpu_layer_weight in zip(outputs_cpu['weights'], outputs_gpu['weights']):
                for (cpu_weight, gpu_weight) in zip(cpu_layer_weight, gpu_layer_weight):
                    self.assertEqual(cpu_weight.grad.data, gpu_weight.grad.data, atol=5e-5, rtol=0)

        for module in (nn.RNN, nn.LSTM, nn.GRU):
            for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                    in product((True, False), repeat=6):
                num_directions = 2 if bidirectional else 1
                if batch_first:
                    input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
                    grad_output = torch.randn(batch, seq_length, hidden_size * num_directions, dtype=dtype)
                else:
                    input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
                    grad_output = torch.randn(seq_length, batch, hidden_size * num_directions, dtype=dtype)

                hx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
                grad_hy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

                if not contig:
                    grad_output = make_noncontig(grad_output)
                    grad_hy = make_noncontig(grad_hy)
                    input_var = make_noncontig(input_val)
                    hx_val = make_noncontig(hx_val)

                if variable_len:
                    lengths = [7, 5, 5, 2, 1, 1]
                    if lens_as_tensor:
                        lengths = torch.tensor(lengths, dtype=torch.long)
                    input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                    grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

                rnn = module(input_size,
                             hidden_size,
                             num_layers,
                             bias=bias,
                             dropout=dropout,
                             bidirectional=bidirectional,
                             batch_first=batch_first).to(dtype)

                outputs_cpu = forward_backward(
                    False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

                rnn_gpu = module(input_size,
                                 hidden_size,
                                 num_layers,
                                 bias=bias,
                                 dropout=dropout,
                                 bidirectional=bidirectional,
                                 batch_first=batch_first).to(dtype)

                outputs_gpu = forward_backward(
                    True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

                compare_cpu_gpu(outputs_cpu, outputs_gpu)

        # NOTE(review): this sweep reuses `bias` and `num_directions` leaked
        # from the loop above — presumably intentional, but worth confirming.
        for nonlinearity in ('tanh', 'relu'):
            hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
            input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
            grad_output = torch.randn(
                seq_length, batch, hidden_size * num_directions, dtype=dtype)
            grad_hy = torch.randn(
                num_layers * num_directions, batch, hidden_size, dtype=dtype)

            rnn = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
            outputs_cpu = forward_backward(False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

            rnn_gpu = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
            outputs_gpu = forward_backward(True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

            compare_cpu_gpu(outputs_cpu, outputs_gpu)

        # checking LSTM with projections
        for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                in product((True, False), repeat=6):
            num_directions = 2 if bidirectional else 1
            if batch_first:
                input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
                grad_output = torch.randn(batch, seq_length, proj_size * num_directions, dtype=dtype)
            else:
                input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
                grad_output = torch.randn(seq_length, batch, proj_size * num_directions, dtype=dtype)

            # With projections, h has proj_size features while c keeps hidden_size.
            hx_val = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
            cx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
            grad_hy = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
            grad_cy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

            if not contig:
                grad_output = make_noncontig(grad_output)
                grad_hy = make_noncontig(grad_hy)
                grad_cy = make_noncontig(grad_cy)
                input_var = make_noncontig(input_val)
                hx_val = make_noncontig(hx_val)
                cx_val = make_noncontig(cx_val)

            if variable_len:
                lengths = [7, 5, 5, 2, 1, 1]
                if lens_as_tensor:
                    lengths = torch.tensor(lengths, dtype=torch.long)
                input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

            rnn = nn.LSTM(input_size,
                          hidden_size,
                          num_layers,
                          bias=bias,
                          dropout=dropout,
                          bidirectional=bidirectional,
                          batch_first=batch_first,
                          proj_size=proj_size).to(dtype)

            outputs_cpu = forward_backward(
                False, rnn, input_val, grad_output, rnn.all_weights,
                hx_val, grad_hy, cx_val, grad_cy)

            rnn_gpu = nn.LSTM(input_size,
                              hidden_size,
                              num_layers,
                              bias=bias,
                              dropout=dropout,
                              bidirectional=bidirectional,
                              batch_first=batch_first,
                              proj_size=proj_size).to(dtype)

            outputs_gpu = forward_backward(
                True, rnn_gpu, input_val, grad_output, rnn.all_weights,
                hx_val, grad_hy, cx_val, grad_cy)
            compare_cpu_gpu(outputs_cpu, outputs_gpu)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cpu_vs_cudnn_no_dropout(self):
dtype = torch.double
self._test_RNN_cpu_vs_cudnn(0, dtype)
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_cpu_vs_cudnn_with_dropout(self):
# Because of dropout randomness, can only compare dropout=0 and dropout=1
self._test_RNN_cpu_vs_cudnn(1)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cudnn_weight_norm(self):
input_size = 10
hidden_size = 6
num_layers = 2
seq_length = 7
batch = 6
# runs on CPU to acquire expected output
def check_weight_norm(m, name):
input = torch.randn(seq_length, batch, input_size)
expected_output = m(input)
# adds weight normalization
m = torch.nn.utils.weight_norm(m, name=name)
# moves to CUDA
m = m.cuda()
input = input.cuda()
# otherwise, subsequent warnings will be hidden, and further tests rely on them
warnings.simplefilter("always")
self.assertEqual(m(input), expected_output)
# remove weight norm
m = torch.nn.utils.remove_weight_norm(m, name=name)
self.assertEqual(m(input), expected_output)
check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers), 'weight_hh_l0')
check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers, proj_size=3), 'weight_hr_l0')
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_partial_flat_weights(self):
input_size = 10
hidden_size = 6
num_layers = 2
m = nn.LSTM(input_size, hidden_size, num_layers)
inp = torch.randn(3, 2, 10)
out_expected = m(inp)
# deletes an attribute of original LSTM
weight_orig = m.weight_hh_l0
del m.weight_hh_l0
self.assertFalse(hasattr(m, "weight_hh_l0"))
# verifies that moving to CUDA with only some attributes defined
# does not throw an error
m.cuda()
# recompute the weight and make sure that module can be used
m.weight_hh_l0 = weight_orig.cuda()
inp = inp.cuda()
# otherwise, subsequent warnings will be hidden, and further tests rely on them
warnings.simplefilter("always")
self.assertEqual(m(inp)[0].cpu(), out_expected[0])
    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_dropout(self):
        # checking the assumption that cuDNN sticks dropout in between
        # RNN layers
        for p in (0, 0.276, 0.731, 1):
            for train in (True, False):
                for cuda in (True, False):
                    # All-ones weights make the output analytically predictable:
                    # with 10 unit inputs and relu, layer 1 emits 10 everywhere,
                    # so layer 2 emits 10 * 1000 = 10000 when no dropout is
                    # applied between the layers.
                    rnn = nn.RNN(10, 1000, 2, bias=False, dropout=p, nonlinearity='relu')
                    if cuda:
                        rnn.cuda()

                    if train:
                        rnn.train()
                    else:
                        rnn.eval()
                    rnn.weight_ih_l0.data.fill_(1)
                    rnn.weight_hh_l0.data.fill_(1)
                    rnn.weight_ih_l1.data.fill_(1)
                    rnn.weight_hh_l1.data.fill_(1)
                    input = torch.ones(1, 1, 10)
                    hx = torch.zeros(2, 1, 1000)
                    if cuda:
                        input = input.cuda()
                        hx = hx.cuda()

                    output, hy = rnn(input, hx)
                    # All output elements must be identical given uniform weights.
                    self.assertEqual(output.data.min(), output.data.max())
                    output_val = output.data[0][0][0]
                    if p == 0 or not train:
                        self.assertEqual(output_val, 10000)
                    elif p == 1:
                        self.assertEqual(output_val, 0)
                    else:
                        # Surviving units are scaled by 1/(1-p); undoing the
                        # scale must land (nearly) on a multiple of 10, i.e. a
                        # whole count of kept units.
                        self.assertGreater(output_val, 8000)
                        self.assertLess(output_val, 12000)
                        denorm_mod = (output_val * (1 - p)) % 10
                        self.assertLess(min(denorm_mod, 10 - denorm_mod), 1e-2)

                    self.assertEqual(hy[0].data.min(), hy[0].data.max())
                    self.assertEqual(hy[1].data.min(), hy[1].data.max())
                    # Layer-1 hidden state sees no dropout; layer-2 matches output.
                    self.assertEqual(hy.data[0][0][0], 10)
                    self.assertEqual(hy.data[1][0][0], output_val)
def test_error_RNN_seq_len_zero(self):
# checking error message when RNN has seq_len = 0
for module in (nn.RNN, nn.LSTM, nn.GRU):
for bidirectional in [True, False]:
for device in get_all_device_types():
input = torch.ones(0, 10, 5)
rnn = module(5, 6, bidirectional=bidirectional)
if device == 'cuda':
rnn.cuda()
input = input.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected sequence length to be larger than 0 in RNN"):
rnn(input)
def test_RNN_input_size_zero(self):
for module in (nn.RNN, nn.LSTM, nn.GRU):
for device in get_all_device_types():
input = torch.zeros((5, 0, 3))
rnn = module(input_size=3, hidden_size=4)
if device == 'cuda':
rnn.cuda()
input = input.cuda()
outs = rnn(input)
self.assertEqual(outs[0].shape, torch.Size([5, 0, 4]))
# Check that backward does not cause a hard error
outs[0].sum().backward()
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout_state(self):
for p in (0, 0.1234):
for train in (True, False):
for cuda in (True, False):
rnn = nn.RNN(100, 100, 2, bias=False, dropout=p, nonlinearity='relu')
if cuda:
rnn.cuda()
if train:
rnn.train()
else:
rnn.eval()
input = torch.rand(1, 1, 100)
hx = torch.rand(2, 1, 100)
if cuda:
input = input.cuda()
hx = hx.cuda()
output1, hy1 = rnn(input, hx)
output2, hy2 = rnn(input, hx)
buf = io.BytesIO()
rnn_pickle = torch.save(rnn, buf)
buf.seek(0)
rnn2 = torch.load(buf)
rnn2.flatten_parameters()
output3, hy3 = rnn2(input, hx)
if p == 0 or not train:
self.assertEqual(output1, output2)
self.assertEqual(output1, output3)
self.assertEqual(hy1, hy2)
self.assertEqual(hy1, hy3)
else:
self.assertNotEqual(output1, output2)
self.assertNotEqual(output1, output3)
self.assertNotEqual(hy1, hy2)
self.assertNotEqual(hy1, hy3)
    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_change_dropout(self):
        # Mutating rnn.dropout after construction must take effect on the
        # next forward pass (cuDNN has to rebuild its dropout descriptor).
        for train, cuda in product((True, False), repeat=2):
            rnn = nn.RNN(100, 100, 2, dropout=0, nonlinearity='relu')
            input = torch.rand(3, 2, 100)
            if cuda:
                input.data = input.data.cuda()
                rnn.cuda()

            if train:
                rnn.train()
            else:
                rnn.eval()

            prev_output = None
            for p in (0, 0.5, 0, 0.7, 0.2, 1, 0.2, 0):
                rnn.dropout = p
                output1, hy1 = rnn(input)
                output2, hy2 = rnn(input)

                if p == 0 or p == 1 or not train:
                    # Deterministic cases: consecutive runs must agree.
                    self.assertEqual(output1, output2)
                    self.assertEqual(hy1, hy2)
                else:
                    # Stochastic dropout: consecutive runs must differ.
                    self.assertNotEqual(output1, output2)
                    self.assertNotEqual(hy1, hy2)

                if prev_output is not None:
                    if not train:
                        # Eval mode ignores dropout, so the output is stable
                        # across different p values.
                        self.assertEqual(output1.data, prev_output)
                        self.assertEqual(output2.data, prev_output)
                    else:
                        self.assertNotEqual(output1.data, prev_output)
                        self.assertNotEqual(output2.data, prev_output)
                prev_output = output1.data
def test_inplace_thnn(self):
modules = [nn.ReLU, nn.ELU, nn.SELU, nn.CELU, nn.RReLU]
for mod in modules:
r = mod(inplace=True)
input = torch.randn(5, 5, requires_grad=True)
output = r(input + 0)
grad_output = torch.randn(5, 5)
grad_output_clone = grad_output.clone()
output.backward(grad_output)
self.assertEqual(grad_output, grad_output_clone)
    def test_pixel_shuffle_unshuffle(self):
        # Validates nn.PixelShuffle / nn.PixelUnshuffle on 1D-5D inputs,
        # covering both the success path (3D-5D, valid factors) and the
        # error paths (bad divisibility, non-positive scale factors).
        def _test_pixel_shuffle_unshuffle_helper(num_input_dims, valid_channels_dim=True,
                                                 upscale_factor=None):
            # Function to imperatively ensure pixels are shuffled to the correct locations.
            # Used to validate the batch operations in pixel_shuffle.
            def _verify_pixel_shuffle(input, output, upscale_factor):
                for c in range(output.size(-3)):
                    for h in range(output.size(-2)):
                        for w in range(output.size(-1)):
                            height_idx = h // upscale_factor
                            weight_idx = w // upscale_factor
                            channel_idx = (upscale_factor * (h % upscale_factor)) + (w % upscale_factor) + \
                                          (c * upscale_factor ** 2)
                            self.assertEqual(output[..., c, h, w], input[..., channel_idx, height_idx, weight_idx])

            upscale_factor = random.randint(2, 5) if upscale_factor is None else upscale_factor
            # If valid_channels_dim=False, add 1 to make channels dim indivisible by upscale_factor ** 2.
            channels = random.randint(1, 4) * upscale_factor ** 2 + (0 if valid_channels_dim else 1)
            height = random.randint(5, 10)
            width = random.randint(5, 10)
            if num_input_dims == 1:
                input = torch.rand(channels, requires_grad=True)
            elif num_input_dims == 2:
                input = torch.rand(height, width, requires_grad=True)
            else:
                # 3D and above: optional leading batch dimensions.
                batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
                input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)
            ps = nn.PixelShuffle(upscale_factor)
            pus = nn.PixelUnshuffle(downscale_factor=upscale_factor)
            if num_input_dims >= 3 and valid_channels_dim and upscale_factor > 0:
                output = ps(input)
                _verify_pixel_shuffle(input, output, upscale_factor)
                # Backward with grad == output makes input.grad == input.
                output.backward(output.data)
                self.assertEqual(input.data, input.grad.data)

                # Ensure unshuffle properly inverts shuffle.
                unshuffle_output = pus(output)
                self.assertEqual(input, unshuffle_output)
            else:
                self.assertRaises(RuntimeError, lambda: ps(input))

        def _test_pixel_unshuffle_error_case_helper(num_input_dims, valid_height_dim=True, valid_width_dim=True,
                                                    downscale_factor=None):
            # Builds an input that must make PixelUnshuffle raise.
            downscale_factor = random.randint(2, 5) if downscale_factor is None else downscale_factor
            channels = random.randint(1, 4)
            # If valid_height_dim=False, add 1 to make height dim indivisible by downscale_factor.
            height = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_height_dim else 1)
            # If valid_width_dim=False, add 1 to make width dim indivisible by downscale_factor.
            width = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_width_dim else 1)
            if num_input_dims == 1:
                input = torch.rand(channels, requires_grad=True)
            elif num_input_dims == 2:
                input = torch.rand(height, width, requires_grad=True)
            else:
                batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
                input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)
            pus = nn.PixelUnshuffle(downscale_factor)
            self.assertRaises(RuntimeError, lambda: pus(input))

        def _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims):
            # For 1D - 2D, this is an error case.
            # For 3D - 5D, this is a success case for pixel_shuffle + pixel_unshuffle.
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims)

            # Error cases for pixel_shuffle.
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, valid_channels_dim=False)
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=0)
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=-2)

            # Error cases for pixel_unshuffle.
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_height_dim=False)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_width_dim=False)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=0)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=-2)

        def test_pixel_shuffle_unshuffle_1D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=1)

        def test_pixel_shuffle_unshuffle_2D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=2)

        def test_pixel_shuffle_unshuffle_3D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=3)

        def test_pixel_shuffle_unshuffle_4D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=4)

        def test_pixel_shuffle_unshuffle_5D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=5)

        test_pixel_shuffle_unshuffle_1D()
        test_pixel_shuffle_unshuffle_2D()
        test_pixel_shuffle_unshuffle_3D()
        test_pixel_shuffle_unshuffle_4D()
        test_pixel_shuffle_unshuffle_5D()
# These tests should be OpInfo'd
def test_elu_inplace_on_view(self):
v = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)
def func(root):
x = root.clone()
view = x.narrow(0, 1, 2)
res = F.elu(view, inplace=True)
self.assertIs(res, view)
return x
gradcheck(func, [v])
gradgradcheck(func, [v])
def test_elu_inplace_gradgrad(self):
v = torch.randn(8, requires_grad=True)
def func(root):
x = root.clone()
return F.elu(x, inplace=True)
gradcheck(func, [v])
gradgradcheck(func, [v])
def test_relu_inplace_on_view(self):
v = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)
def func(root):
x = root.clone()
view = x.narrow(0, 1, 2)
res = F.relu(view, inplace=True)
self.assertIs(res, view)
return x
gradcheck(func, [v])
gradgradcheck(func, [v])
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_PReLU_backward_requires_grad_false(self):
m = nn.PReLU().to('cuda')
x = torch.randn(2, 3, 4, 5, requires_grad=False, device='cuda')
y = m(x)
y.mean().backward()
self.assertEqual(x.grad, None)
    @unittest.skipIf(
        not TEST_NUMPY or not TEST_SCIPY, "Numpy or Scipy not found")
    def test_gelu(self):
        # Checks F.gelu against the analytic reference x * Phi(x) (scipy's
        # normal CDF) across dtypes, layouts, devices, and a 4-thread run.
        def _test_gelu(n, m, dtype, contiguous, atol=None, rtol=None):
            # bfloat16 has no numpy equivalent, so compare it via float32.
            numpy_dtype = {
                torch.bfloat16: torch.float, torch.float: torch.float, torch.double: torch.double
            }[dtype]
            devices = ['cpu']
            devices += ['cuda'] if TEST_CUDA else []

            def _gelu_ref(X):
                return X * stats.norm.cdf(X)

            for d in devices:
                if contiguous:
                    X = torch.rand(n, m, dtype=dtype, requires_grad=True, device=d)
                else:
                    # Every-other-column slice yields a non-contiguous view.
                    X = torch.rand(n, m, dtype=dtype, requires_grad=True, device=d)[:, ::2]
                res = F.gelu(X)
                ref = _gelu_ref(X.to(numpy_dtype).cpu().detach().numpy())
                self.assertEqual(res, ref, rtol=rtol, atol=atol, exact_dtype=False)
                # gradcheck needs double precision to be numerically reliable.
                if dtype == torch.float64:
                    gradcheck(F.gelu, [X], eps=1e-4)

        for n in range(1, 10):
            for m in range(1, 10):
                _test_gelu(n, m, torch.bfloat16, True, 1e-2, 0)
                _test_gelu(n, m, torch.bfloat16, False, 1e-2, 0)
                _test_gelu(n, m, torch.float32, True)
                _test_gelu(n, m, torch.float32, False)
                _test_gelu(n, m, torch.float64, True)
                _test_gelu(n, m, torch.float64, False)

        # Test multi threaded
        num_threads = torch.get_num_threads()
        torch.set_num_threads(4)
        try:
            _test_gelu(32, 32, torch.float32, False)
        finally:
            # Always restore the global thread count for later tests.
            torch.set_num_threads(num_threads)
def test_bce_loss_always_nonnegative(self):
target = torch.ones(5)
input = torch.ones(5)
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
target = torch.zeros(5)
input = torch.zeros(5)
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
def test_bce_with_logits_raises_if_target_and_input_are_different_size(self):
target = torch.rand(5)
input = torch.rand(5, 1)
with self.assertRaises(ValueError):
nn.BCEWithLogitsLoss()(input, target)
target = torch.rand(5, 1)
input = torch.rand(5)
with self.assertRaises(ValueError):
nn.BCEWithLogitsLoss()(input, target)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
sigmoid = nn.Sigmoid()
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))
weight = torch.rand(4)
self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
target = torch.zeros(4, 1, dtype=torch.float)
output = torch.empty(4, 1, dtype=torch.float).fill_(-100)
self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))
self.assertEqual(nn.BCEWithLogitsLoss(reduction='none')(output, target),
nn.BCELoss(reduction='none')(sigmoid(output), target))
weight = torch.rand(1, dtype=torch.float)
self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
def test_bce_loss_input_range(self):
bceloss = nn.BCELoss()
target = torch.rand(25, 25)
output_valid = torch.rand(25, 25)
output_too_negative = output_valid - 1.0
output_too_positive = output_valid + 1.0
loss_valid = bceloss(output_valid, target)
with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
loss_too_negative = bceloss(output_too_negative, target)
with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
loss_too_positive = bceloss(output_too_positive, target)
def test_bce_loss_size_mismatch(self):
bceloss = nn.BCELoss()
a = torch.rand(25)
b = torch.rand(25, 1)
with self.assertRaisesRegex(ValueError, r'Using a target size \('):
bceloss(a, b)
    def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss_large_tensors_with_grad(self):
        # The fused BCEWithLogitsLoss must match sigmoid followed by BCELoss
        # on large inputs, for every reduction, in forward AND backward.
        x_size = 1024
        y_size = 256
        target = torch.rand(x_size, y_size)

        for reduction in ['none', 'mean', 'sum']:
            # Same logits for both paths, as two independent autograd leaves.
            output_sig = torch.rand(x_size, y_size) - 0.5
            output_logits = output_sig.clone().detach()

            output_sig.requires_grad = True
            output_logits.requires_grad = True
            weight = torch.rand(y_size)

            loss_sig = nn.BCELoss(weight, reduction=reduction)(
                torch.sigmoid(output_sig), target
            )
            loss_logits = nn.BCEWithLogitsLoss(weight, reduction=reduction)(
                output_logits, target
            )

            self.assertEqual(loss_logits, loss_sig)

            if reduction == 'none':
                # Unreduced loss is a tensor: backward needs an explicit grad.
                grad = torch.rand(x_size, y_size)
                loss_sig.backward(grad)
                loss_logits.backward(grad)
            else:
                loss_sig.backward()
                loss_logits.backward()

            self.assertEqual(output_sig.grad, output_logits.grad)
def test_bce_with_logits_has_correct_forward_grad(self):
output = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5)
for reduction in ('sum', 'mean', 'none'):
gradcheck(lambda self, target: nn.BCEWithLogitsLoss(reduction=reduction)(self, target),
(output, target), check_forward_ad=True)
def test_bce_with_logits_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True)
target = torch.zeros(3, 1)
nn.BCEWithLogitsLoss(reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1).fill_(0.5)
self.assertEqual(output.grad, expected_grad)
def test_bce_with_logits_broadcasts_weights(self):
target = torch.rand(16, 4)
output = torch.rand(16, 4) - 0.5
weight = torch.rand(4)
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1)
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
pos_weight = torch.ones(64, 4)
self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target))
def test_bce_with_logits_broadcasts_pos_weights(self):
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
pos_weight = torch.rand(4)
out1 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
pos_weight1 = pos_weight.expand(1, 4)
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight1)(output, target)
pos_weight2 = pos_weight.expand(64, 4)
out3 = nn.BCEWithLogitsLoss(pos_weight=pos_weight2)(output, target)
self.assertEqual(out1, out2)
self.assertEqual(out1, out3)
def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True)
target = torch.zeros(3, 1)
pos_weight = torch.ones(3, 1)
nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1).fill_(0.5)
grad = output.grad
self.assertEqual(grad, expected_grad)
def test_bce_with_logits_stability(self):
output = torch.tensor([0., -120.])
target = torch.tensor([0., 1.])
pos_weight = torch.tensor([1., 1.])
out1 = nn.BCEWithLogitsLoss()(output, target)
self.assertTrue(torch.isfinite(out1).all().item())
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
self.assertTrue(torch.isfinite(out2).all().item())
def test_bce_loss_broadcasts_weights(self):
sigmoid = nn.Sigmoid()
target = torch.rand(16, 4)
output = torch.rand(16, 4) - 0.5
weight = torch.rand(4)
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1)
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
def test_hardtanh_inplace_gradgrad(self):
v = torch.randn(8, requires_grad=True)
def func(root):
x = root.clone()
return F.hardtanh(x, inplace=True)
gradcheck(func, [v])
gradgradcheck(func, [v])
# test hardtanh backward froo large tensor
def test_hardtanh_backward(self):
x = torch.randn(128, 10000, requires_grad=True)
grad = torch.randn(128, 10000)
z = torch.zeros(128, 10000)
y = F.hardtanh(x)
y.backward(grad)
# ref backward path for hardtanh
mask = (x > -1) & (x < 1)
x_grad_ref = torch.where(mask, grad, z)
self.assertEqual(x.grad, x_grad_ref)
    def test_batchnorm_nhwc_cpu(self):
        # channels_last BatchNorm2d on CPU must agree with the contiguous
        # path in forward output and in every gradient (input, weight, bias).
        def helper(self, size):
            channels = size[1]
            input = torch.randn(size, dtype=torch.float32, device='cpu', requires_grad=True)
            input = input.contiguous(memory_format=torch.channels_last)
            # `input` is now a non-leaf (product of .contiguous), so its grad
            # must be retained explicitly.
            input.retain_grad()
            grad = torch.randn(size, dtype=torch.float32, device='cpu')
            grad = grad.contiguous(memory_format=torch.channels_last)
            bn = nn.BatchNorm2d(channels).cpu().float()
            bn.weight.data.uniform_()
            bn.bias.data.uniform_()

            # Contiguous reference with identical parameters.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_bn = nn.BatchNorm2d(channels).cpu().float()
            ref_bn.load_state_dict(bn.state_dict())

            out = bn(input)
            out.backward(grad)
            ref_out = ref_bn(ref_input)
            ref_out.backward(ref_grad)

            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
            self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
            self.assertEqual(input.grad, ref_input.grad)

        # Includes degenerate channel and spatial shapes.
        helper(self, (4, 8, 10, 10))
        helper(self, (4, 1, 9, 9))
        helper(self, (4, 9, 1, 1))
def test_batchnorm_non_contig_cpu(self):
input = torch.arange(6, dtype=torch.float).reshape(1, 3, 2, 1).cpu()
input = input.permute(0, 2, 1, 3)
bn = torch.nn.BatchNorm2d(2).cpu().float().eval()
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(2).cpu().float().eval()
ref_bn.load_state_dict(bn.state_dict())
out = bn(input)
ref_out = ref_bn(ref_input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
@skipIfRocm
def test_batchnorm_cudnn_nhwc(self):
def run_test(input, grad_output):
c = input.size(1)
mod = nn.BatchNorm2d(c).cuda().float()
mod.weight.data.uniform_()
mod.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_mod = nn.BatchNorm2d(c).cuda().float()
ref_mod.load_state_dict(mod.state_dict())
out = mod(input)
out.backward(grad_output)
ref_out = ref_mod(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(mod.weight.grad, ref_mod.weight.grad)
self.assertEqual(mod.bias.grad, ref_mod.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
input = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()
grad = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
grad = grad.contiguous(memory_format=torch.channels_last)
run_test(input, grad)
# see #42588, grad is channels_last contiguous, but grad.suggest_memory_format (rightly) return "contiguous"
# not channels_last
input = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()
grad = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
grad = grad.permute(0, 2, 1, 3)
run_test(input, grad)
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_batchnorm_cudnn_half(self):
        # Half-precision BatchNorm2d: the native (THNN) path and the cuDNN
        # path must agree on output dtype, values, and input gradients.
        # THNN
        input = torch.randint(1, 10, (2, 3, 2, 2), dtype=torch.half, device="cuda", requires_grad=True)
        m = nn.BatchNorm2d(3).half().cuda()
        thnn_output = m(input)
        thnn_output.sum().backward()
        thnn_input_grad = input.grad.data.clone()
        self.assertEqualTypeString(thnn_output, input)
        # cuDNN
        if TEST_CUDNN:
            input.grad = None
            # Float parameters with a half input select the cuDNN mixed path.
            m = m.float()
            cudnn_output = m(input)
            cudnn_output.sum().backward()
            cudnn_input_grad = input.grad.data.clone()
            self.assertEqualTypeString(cudnn_output, input)
            self.assertEqual(cudnn_output, thnn_output)
            # Looser tolerance: half precision accumulates rounding error.
            self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_nonaffine_cuda_half_input(self):
input = torch.randn(16, 3, 24, 24, dtype=torch.half, device="cuda")
m = nn.BatchNorm2d(3, affine=False).cuda().float() # keep running stats in FP32
output = m(input)
self.assertEqualTypeString(output, input)
m.eval()
output = m(input)
self.assertEqualTypeString(output, input)
def test_batchnorm_raises_error_if_less_than_one_value_per_channel(self):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.BatchNorm1d(10)(x)
def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, torch.rand(size), running_var)
def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, torch.rand(size))
def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, weight=Parameter(torch.rand(size)))
def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, bias=Parameter(torch.rand(size)))
def test_batchnorm_buffer_update_when_stats_are_not_tracked(self):
input_size = (32, 4)
# Instantiate BN with buffers that are not None
bn = nn.BatchNorm1d(input_size[1], track_running_stats=True)
# Use buffers for normalization but don't update them
bn.track_running_stats = False
# Store initial values
num_batches = bn.num_batches_tracked.clone()
running_mean = bn.running_mean.clone()
running_var = bn.running_var.clone()
# Forward random tensor
_ = bn(torch.rand(input_size))
# Ensure none of the buffers has been updated
self.assertTrue(torch.equal(num_batches, bn.num_batches_tracked))
self.assertTrue(torch.equal(running_mean, bn.running_mean))
self.assertTrue(torch.equal(running_var, bn.running_var))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_batchnorm_nhwc_cuda(self):
for dtype in (torch.half, torch.float):
(N, C, H, W) = 2, 64, 50, 50
model = torch.nn.BatchNorm2d(C, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
model = model.eval().cuda().to(dtype)
inp1 = torch.randn(N, C, H, W, device=torch.device('cuda'), dtype=dtype)
inp2 = inp1.contiguous(memory_format=torch.channels_last)
out1 = model(inp1)
out2 = model(inp2)
self.assertTrue(torch.equal(out1, out2))
def test_pairwise_distance(self):
input1 = torch.randn(4, 4, requires_grad=True)
input2 = torch.randn(4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
    # TODO: Create an OpInfo for pdist
    def test_pdist(self):
        # Gradcheck across several p-norms, on contiguous and transposed
        # (non-contiguous) inputs, for every available device.
        for device, trans in itertools.product(device_(), [False, True]):
            inp = torch.randn(4, 5, dtype=torch.double, device=device, requires_grad=True)
            if trans:
                inp = inp.transpose(0, 1)
            for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
                self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))
    def test_pdist_zeros(self):
        """Test that grad is still valid when dist is 0"""
        for device in device_():
            # Repeating one row makes at least one pairwise distance exactly 0,
            # which is the non-differentiable point of the p-norm.
            inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True).repeat([2, 1])
            for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
                self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))
    def test_pdist_empty_row(self):
        # A single row yields an empty distance vector; backward must not fail.
        for device in device_():
            inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True)
            self.assertTrue(gradcheck(F.pdist, (inp,)))
    def test_pdist_empty_col(self):
        # Zero-width rows: every distance is 0 but shapes must still line up.
        for device in device_():
            inp = torch.randn(4, 0, dtype=torch.double, device=device, requires_grad=True)
            self.assertTrue(gradcheck(F.pdist, (inp,)))
    @unittest.expectedFailure
    def test_pdist_cpu_gradgrad_unimplemented(self):
        # Double backward for pdist is not implemented yet, hence expectedFailure.
        inp = torch.randn(4, 5, requires_grad=True)
        gradgradcheck(F.pdist, (inp,))
    @unittest.expectedFailure
    def test_pdist_cuda_gradgrad_unimplemented(self):
        # Same as the CPU variant above, on CUDA.
        inp = torch.randn(4, 5, device='cuda', requires_grad=True)
        gradgradcheck(F.pdist, (inp,))
# Merge into OpInfo?
# test for backward in https://github.com/pytorch/pytorch/issues/15511
def test_pdist_large(self):
for device in device_():
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
def test_binary_cross_entropy_grads(self):
import torch.nn.functional as F
for device in device_():
input = torch.rand(3, 3, dtype=torch.double, device=device, requires_grad=True)
target = torch.rand(3, 3, dtype=torch.double, device=device)
gradcheck(F.binary_cross_entropy, [input, target])
gradgradcheck(F.binary_cross_entropy, [input, target])
# now with diffentiable target
target.requires_grad_(True)
gradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)
# no double backward for target yet
with self.assertRaisesRegex(RuntimeError, "not implemented"):
gradgradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)
def test_cosine_embedding_loss_with_diff_type(self):
for device in device_():
input1 = torch.tensor([[2, 3, 4], [6, 2, 4]], dtype=torch.double, device=device)
input2 = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
target = torch.tensor([1, -1], dtype=torch.int, device=device)
expected = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
for dt1 in get_all_math_dtypes(device):
for dt2 in get_all_math_dtypes(device):
for dt3 in get_all_math_dtypes(device):
# dt3 is used as dtype for target = [1, -1], so let's skip unsigned type
if dt3 == torch.uint8:
continue
if dt1.is_complex or dt2.is_complex or dt3.is_complex:
continue
input1 = input1.to(dt1)
input2 = input2.to(dt2)
target = target.to(dt3)
result = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_with_diff_type(self):
for device in device_():
input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device)
expected = torch.nn.functional.kl_div(input, target)
for input_dtype in get_all_math_dtypes(device):
if input_dtype.is_complex:
continue
for target_dtype in [torch.float32, torch.float64, torch.float16]:
if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):
continue
input = input.to(input_dtype)
target = target.to(target_dtype)
result = torch.nn.functional.kl_div(input, target)
self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_with_diff_type_log_target(self):
for device in device_():
input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device).log()
expected = torch.nn.functional.kl_div(input, target, log_target=True)
for input_dtype in get_all_math_dtypes(device):
if input_dtype.is_complex:
continue
for target_dtype in [torch.float32, torch.float64, torch.float16]:
if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):
continue
input = input.to(input_dtype)
target = target.to(target_dtype)
result = torch.nn.functional.kl_div(input, target, log_target=True)
self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_log_softmax_target(self):
for device in device_():
a = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
b = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
self.assertEqual(
F.kl_div(F.log_softmax(a, 1), F.log_softmax(b, 1), reduction='none', log_target=True),
torch.zeros_like(a)
)
def test_cosine_embedding_loss_no_reduce(self):
input1 = torch.randn(15, 10, requires_grad=True)
input2 = torch.randn(15, 10, requires_grad=True)
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(
x, y, z, reduction='none'), (input1, input2, target)))
self.assertEqual(F.cosine_embedding_loss(input1, input2, target, reduction='none'),
loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target, reduction='none'))
def test_cosine_embedding_loss_margin_no_reduce(self):
input1 = torch.randn(15, 10, requires_grad=True)
input2 = torch.randn(15, 10, requires_grad=True)
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(
x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))
self.assertEqual(F.cosine_embedding_loss(input1, input2, target, margin=0.5, reduction='none'),
loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target,
margin=0.5, reduction='none'))
def test_cosine_embedding_loss_invalid_shape(self):
input1 = torch.randn(15, 10)
input2 = torch.randn(15, 10)
target = torch.randn(15, 1).sign()
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.cosine_embedding_loss(input1, input2, target)
with self.assertRaisesRegex(RuntimeError, "1D target tensor expects 2D input tensors"):
F.cosine_embedding_loss(torch.randn(10), torch.randn(10), torch.randn(10))
with self.assertRaisesRegex(RuntimeError, "0D target tensor expects 1D input tensors"):
F.cosine_embedding_loss(torch.randn(2, 5), torch.randn(2, 5), torch.randn(()))
def test_margin_ranking_loss_no_reduce(self):
input1 = torch.randn(15).mul_(10).requires_grad_()
input2 = torch.randn(15).mul_(10).requires_grad_()
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
x, y, z, reduction='none'), (input1, input2, target)))
self.assertEqual(F.margin_ranking_loss(input1, input2, target, reduction='none'),
loss_reference_fns['MarginRankingLoss'](input1, input2, target, reduction='none'))
def test_margin_ranking_loss_margin_no_reduce(self):
input1 = torch.randn(15).mul_(10).requires_grad_()
input2 = torch.randn(15).mul_(10).requires_grad_()
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))
self.assertEqual(F.margin_ranking_loss(input1, input2, target, margin=0.5, reduction='none'),
loss_reference_fns['MarginRankingLoss'](input1, input2, target, margin=0.5, reduction='none'))
def test_triplet_margin_loss(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3))
def test_triplet_margin_loss_swap(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, swap=True), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True))
def test_triplet_margin_loss_no_reduce(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, reduction='none'), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, reduction='none'),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, reduction='none'))
def test_triplet_margin_loss_swap_no_reduce(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, swap=True, reduction='none'), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True, reduction='none'),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True, reduction='none'))
def test_triplet_margin_loss_invalid(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
input_1d = torch.randn(10, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input1, input2, input_1d)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input1, input_1d, input3)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input_1d, input2, input3)
def test_pointwise_loss_target_grad_none_reduction(self):
i = torch.randn(5, 10)
t = torch.randn(5, 10, requires_grad=True)
self.assertEqual(F.mse_loss(i, t, reduction='none').size(), t.size())
self.assertEqual(F.l1_loss(i, t, reduction='none').size(), t.size())
def test_pointwise_loss_broadcast(self):
losses = {
'mse_loss': lambda x, y, r: F.mse_loss(x, y, reduction=r),
'l1_loss': lambda x, y, r: F.l1_loss(x, y, reduction=r),
'smooth_l1_loss': lambda x, y, r: F.smooth_l1_loss(x, y, reduction=r),
'huber_loss': lambda x, y, r: F.huber_loss(x, y, reduction=r),
}
input = torch.randn(2, 1, requires_grad=True)
for _name, fn in losses.items():
for requires_grad in [True, False]:
# When target.requires_grad=True, its impl is in Python, while the other is in TH.
target = torch.randn(2, 10, requires_grad=requires_grad)
for reduction in ['none', 'mean', 'sum']:
l = fn(input, target, reduction)
if reduction == 'none':
self.assertEqual(l.size(), target.size())
self.assertTrue(gradcheck(fn, (input, target, reduction)))
# https://github.com/pytorch/pytorch/issues/27692 reports
# that l1_loss get a wrong result for big batch size
def test_l1_loss_correct(self):
for dtype in [torch.float, torch.cfloat]:
for N in range(1, 50, 10):
input = torch.rand(N, 3, 1024, 1024, dtype=dtype)
self.assertEqual(
torch.nn.L1Loss()(input, torch.zeros_like(input)),
input.abs().mean())
def test_smoothl1loss_intergral_target(self):
def _input_grad(input, target, reduction):
output = F.smooth_l1_loss(input, target, reduction=reduction, beta=0.5)
output.sum().backward()
return input.grad
for device, dtype, reduction in product(device_(),
integral_types(),
('none', 'sum', 'mean')):
input = torch.randn(2, 2, device=device, requires_grad=True)
target = torch.randint(0, 9, (2, 2), device=device, dtype=dtype)
input_grad_with_float_target = _input_grad(input, target.float(), reduction)
input_grad = _input_grad(input.detach().clone().requires_grad_(True),
target,
reduction)
self.assertEqual(input_grad, input_grad_with_float_target)
def test_smoothl1loss_negative_beta_not_supported(self):
with self.assertRaises(RuntimeError):
F.smooth_l1_loss(torch.randn(2, 2), torch.randn(2, 2), beta=-1.0)
def test_huber_loss_invalid_delta(self):
def _test_huber_loss_delta_error_helper(delta):
input, target = torch.randn(2, 2), torch.randn(2, 2)
loss = torch.nn.HuberLoss(delta=delta)
with self.assertRaises(RuntimeError):
loss(input, target)
def test_huber_loss_negative_delta():
_test_huber_loss_delta_error_helper(delta=-0.5)
def test_huber_loss_zero_delta():
_test_huber_loss_delta_error_helper(delta=0.0)
test_huber_loss_negative_delta()
test_huber_loss_zero_delta()
def test_cosine_similarity(self):
# Check cosine_similarity input/output shapes
input_size = (1, 3, 2, 1)
expected_size = (1, 2, 1)
input1 = torch.randn(input_size, requires_grad=True)
input2 = torch.randn(input_size, requires_grad=True)
self.assertEqual(F.cosine_similarity(input1, input2, dim=1).size(), expected_size)
# Check numerical precision, issue #18057
vv1 = torch.tensor(list([float(i) for i in range(84)])).unsqueeze(0)
vv2 = torch.tensor(list([float(i) for i in range(84)])).unsqueeze(0)
out = F.cosine_similarity(vv1, vv2)
self.assertLessEqual(out, 1.0)
# Check dividing by 0.
input1 = torch.randn(10).requires_grad_()
input2 = torch.zeros_like(input1).requires_grad_()
torch.cosine_similarity(input1, input2, 0).sum().backward()
self.assertEqual(input1.grad, torch.zeros_like(input1))
self.assertEqual(input2.grad, input1 * 1e8)
# Check type promotion, issue #61454
input = torch.tensor(12.)
out = F.cosine_similarity(input.to(torch.int8), input, dim=-1)
self.assertEqual(out, 1.)
    def test_grid_sample_error_checking(self):
        # Argument validation for F.grid_sample: one valid baseline call,
        # followed by every rejected mode / shape / device combination in turn.
        input = torch.empty(1, 1, 2, 2)
        grid = torch.empty(1, 1, 1, 2)
        # assert no error
        F.grid_sample(input, grid, align_corners=False)
        with self.assertRaisesRegex(ValueError, "but got: 'garbage'"):
            F.grid_sample(input, grid, mode='garbage', align_corners=False)
        with self.assertRaisesRegex(ValueError, "but got: 'garbage'"):
            F.grid_sample(input, grid, padding_mode='garbage', align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected 4D or 5D input"):
            F.grid_sample(input[0], grid, align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "grid with same number of dimensions"):
            F.grid_sample(input, torch.empty(1, 1, 1, 1, 3), align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected grid and input to have same batch size"):
            F.grid_sample(input, torch.empty(2, 1, 1, 2), align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected grid to have size 2 in last dimension"):
            F.grid_sample(input, torch.empty(1, 1, 1, 3), align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected input to have non-empty spatial dimensions"):
            F.grid_sample(torch.empty(1, 1, 0, 2), grid, align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "bicubic interpolation only supports 4D input"):
            F.grid_sample(torch.empty(1, 1, 2, 2, 2), torch.empty(1, 1, 1, 1, 3), mode='bicubic')
        # Cross-device input/grid is only checkable when CUDA is present.
        if TEST_CUDA:
            with self.assertRaisesRegex(RuntimeError, "expected input and grid to be on same device"):
                F.grid_sample(input.cuda(), grid, align_corners=False)
    def test_affine_grid_error_checking(self):
        # Argument validation and warning behavior of F.affine_grid for both
        # 2D (Nx2x3 theta, 4D size) and 3D (Nx3x4 theta, 5D size) transforms.
        # 2D affine
        theta = torch.empty(1, 2, 3, dtype=torch.double)
        size = torch.Size([1, 1, 2, 2])
        # assert no error
        F.affine_grid(theta, size, align_corners=False)
        # check for warning for empty span along dimension
        with warnings.catch_warnings(record=True) as w:
            # Ensure warnings are being shown
            warnings.simplefilter("always")
            # Should not trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=False)
            # Check no warning occurs
            self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
            # Should trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=True)
            # Check warning occurs
            self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
        # Every malformed theta shape must be rejected with the same message.
        with self.assertRaisesRegex(ValueError, "Expected theta to have floating point type"):
            F.affine_grid(theta.int(), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta[0], size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
        # 3D affine
        theta = torch.empty(1, 3, 4, dtype=torch.double)
        size = torch.Size([1, 1, 2, 2, 2])
        # assert no error
        F.affine_grid(theta, size, align_corners=False)
        # check for warning for empty span along dimension
        with warnings.catch_warnings(record=True) as w:
            # Ensure warnings are being shown
            warnings.simplefilter("always")
            # Should not trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=False)
            # Check no warning occurs
            self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
            # Should trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=True)
            # Check warning occurs
            self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta[0], size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
        # Only 4D and 5D target sizes are implemented.
        with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
            F.affine_grid(theta, torch.Size([1, 2, 2]), align_corners=False)
        with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
            F.affine_grid(theta, torch.Size([1, 1, 2, 2, 2, 2]), align_corners=False)
    @skipIfRocm
    def test_grid_sample(self):
        """End-to-end checks for 2D F.grid_sample: shape handling across grid
        contiguity patterns and devices, fixed-value groundtruth for every
        mode/padding_mode/align_corners combination, gradient groundtruth at
        image edges, parity with the unvectorized CPU fallback, and gradcheck.
        """
        # Backward pass of native C++ and CUDA kernels branch depending on whether input requires gradient,
        # so we test both cases.
        def test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad):
            def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners):
                for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]:
                    # grid_dim_contig_order specifies the dimension order that can
                    # make grid to be contiguous.
                    # i.e., grid.permute(grid_dim_contig_order) is contiguous.
                    # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be
                    # initialized with contiguous tensor of shape [N, 2, H, W]
                    # and permuted to [N, H, W, 2] afterwards.
                    grid_shape = [N, H, W, 2]
                    grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order]
                    grid_fwd_permute = [None, None, None, None]
                    for i, d in enumerate(grid_dim_contig_order):
                        grid_fwd_permute[d] = i
                    def get_grid(device='cpu', data=None):
                        if data is not None:
                            assert list(data.shape) == grid_shape
                            data = data.permute(grid_dim_contig_order).to(device)
                        else:
                            data = torch.randn(grid_init_shape, device=device)
                        grid = data.permute(grid_fwd_permute)
                        assert grid.permute(grid_dim_contig_order).is_contiguous()
                        return grid
                    input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad)
                    grid_cpu = get_grid().requires_grad_()
                    out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                            align_corners=align_corners)
                    self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W]))
                    gradients = torch.randn_like(out_cpu)
                    out_cpu.backward(gradients)
                    # Compare against unvectorized CPU fallback
                    # NOTE [ grid_sample CPU fallback ]
                    # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for
                    # 32-bit floats. So we also have a fallback that is used only for float tensors
                    # requiring 64-bit indexing. That requires too much memory to run on CI, so we
                    # also export the fallback and test it here to ensure feature parity with
                    # the vectorized version.
                    input_fallback = input_cpu.float().detach_().requires_grad_()
                    grid_fallback = grid_cpu.float().detach_().requires_grad_()
                    out_fallback = torch._grid_sampler_2d_cpu_fallback(
                        input_fallback, grid_fallback,
                        F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                        F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                        align_corners)
                    self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5)
                    out_fallback.backward(gradients.float())
                    if input_requires_grad:
                        self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5)
                    self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5)
                    if TEST_CUDA:
                        input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_(input_requires_grad)
                        grid_cuda = get_grid('cuda', grid_cpu.detach()).requires_grad_()
                        out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                                 align_corners=align_corners)
                        self.assertEqual(out_cpu, out_cuda)
                        out_cuda.backward(gradients.cuda())
                        if input_requires_grad:
                            self.assertEqual(input_cpu.grad, input_cuda.grad)
                        self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)
                        # check that zero-dimensional input strides don't error out
                        base_input = torch.randn(N, C, 1, IW)
                        input_cpu = base_input.expand_as(input_cuda).requires_grad_(input_requires_grad)
                        out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                                align_corners=align_corners)
                        input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_(input_requires_grad)
                        out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                                 align_corners=align_corners)
                        self.assertEqual(out_cpu, out_cuda)
            # test same size output
            test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners)
            # test larger output
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(IH + 1, 12)
            W = random.randint(IW + 1, 12)
            test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
            # test smaller output
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(2, IH)
            W = random.randint(2, IW)
            test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
            # test 1x1 input
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = 1
            IW = 1
            H = random.randint(2, 5)
            W = random.randint(2, 5)
            test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
            # testing empty grid
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            W = random.randint(3, IW + 2)
            test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners)
            # testing empty channel
            N = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners)
            # testing empty batch
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)
        for mode in ('bilinear', 'nearest', 'bicubic'):
            for padding_mode in ('zeros', 'border', 'reflection'):
                for align_corners in (True, False):
                    # test known input on CPU
                    input = torch.arange(1., 11).view(1, 1, 2, 5)
                    grid = torch.tensor(
                        [[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]],
                         [[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]]).view(1, 2, 5, 2)
                    if mode == 'bilinear':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                     [2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250],
                                     [0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]]).view(1, 1, 2, 5)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                     [2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
                                     [1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]]).view(1, 1, 2, 5)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                     [2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
                                     [1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]]).view(1, 1, 2, 5)
                        else:
                            raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                    elif mode == 'nearest':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[0., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0., 8., 5., 7., 0.],
                                     [1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
                        else:
                            raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                    elif mode == 'bicubic':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000],
                                     [2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264],
                                     [0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]]).view(1, 1, 2, 5)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000],
                                     [2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781],
                                     [0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]]).view(1, 1, 2, 5)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000],
                                     [1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],
                                     [0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]]).view(1, 1, 2, 5)
                        else:
                            raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                    else:
                        raise AssertionError("missing groundtruth test for interpolation mode '{}'".format(mode))
                    output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                           align_corners=align_corners)
                    self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
                                     msg="groundtruth comparison failed for mode={}, "
                                     "padding_mode={}".format(mode, padding_mode))
                    # See NOTE [ grid_sample CPU fallback ]
                    output = torch._grid_sampler_2d_cpu_fallback(
                        input.float(), grid.float(),
                        F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                        F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                        align_corners)
                    self.assertEqual(output, groundtruth.float(), atol=1e-5, rtol=0)
                    # explicit check for gradient edge cases
                    input = torch.arange(0., 5).expand((1, 1, 5, 5))
                    grid = torch.tensor(
                        [[[1.0, 1.0], [1.0, -1.0], [0.8, 0.8], [0.8, -0.8]],
                         [[-1.0, -1.0], [-1.0, 1.0], [-0.8, -0.8], [-0.8, 0.8]]]).view(1, 2, 4, 2).requires_grad_()
                    if mode == 'bilinear':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-8., -8.], [-8., 0.], [2., 0.], [2., 0.]],
                                      [[2., 0.], [2., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-5., -5.], [-5., 5.], [-10., -10.], [-10., 10.]],
                                      [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
                                      [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                                      [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
                                      [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                                      [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                        else:
                            raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
                    elif mode == 'nearest':
                        groundtruth = torch.tensor(
                            [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                              [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                    elif mode == 'bicubic':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-4.5, -6.], [-4.5, 6.], [2.725679, 0.740878], [2.725679, -0.740878]],
                                      [[1.5, 0.], [1.5, 0.], [1.927921, -0.05688], [1.927921, 0.05688]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-5.859375, -5.888672], [-5.859375, 5.888672], [-5.6250, -7.5000], [-5.6250, 7.5000]],
                                      [[-0.234375, -0.263672], [-0.234375, 0.263672], [1.8750, 0.], [1.8750, 0.]]]]
                                ).view(1, 2, 4, 2)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]],
                                      [[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]],
                                      [[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]]]]).view(1, 2, 4, 2)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]],
                                      [[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]],
                                      [[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]]]]).view(1, 2, 4, 2)
                        else:
                            raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
                    else:
                        raise AssertionError("missing gradient groundtruth test for interpolation mode '{}'".format(mode))
                    for input_requires_grad in [False, True]:
                        input = input.requires_grad_(input_requires_grad)
                        F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                      align_corners=align_corners).sum().backward()
                        self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,
                                         msg="gradient groundtruth comparison failed for mode={}, "
                                         "padding_mode={}, input_requires_grad={}".format(mode, padding_mode, input_requires_grad))
                        grid.grad.zero_()
                    # See NOTE [ grid_sample CPU fallback ]
                    torch._grid_sampler_2d_cpu_fallback(
                        input.float(), grid.float(),
                        F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                        F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                        align_corners).sum().backward()
                    self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0)
                    # do gradcheck
                    N = random.randint(2, 8)
                    C = random.randint(2, 6)
                    H = random.randint(2, 8)
                    W = random.randint(2, 8)
                    input = torch.randn(N, C, H, W, requires_grad=True)
                    grid = torch.randn(N, H, W, 2, requires_grad=True)
                    self.assertTrue(gradcheck(
                        lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,
                                                        align_corners=align_corners),
                        (input, grid)))
                    input = input.requires_grad_(False)
                    self.assertTrue(gradcheck(
                        lambda grid: F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                                   align_corners=align_corners),
                        (grid,)))
                    for input_requires_grad in [False, True]:
                        test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)
                        if TEST_CUDNN:
                            with cudnn.flags(enabled=False):
                                test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)
    def test_grid_sample_3d(self):
        """Exercise F.grid_sample on 5-D (volumetric) inputs.

        For every (mode, padding_mode, align_corners) combination this checks:
        output shapes for same/larger/smaller/1x1x1 and empty (grid, channel,
        batch) sizes, CPU-vs-CUDA agreement of outputs and gradients when CUDA
        is available, and gradcheck of the op itself.
        """
        def test(N, C, D, H, W, mode, padding_mode, align_corners):
            def test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners):
                # Inputs are built via transpose so the tensors are
                # non-contiguous, covering strided layouts.
                input_cpu = torch.randn(C, N, ID, IH, IW).transpose(0, 1).requires_grad_()
                grid_cpu = torch.randn(D, N, H, W, 3).transpose(0, 1).requires_grad_()
                out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                        align_corners=align_corners)
                self.assertTrue(out_cpu.size() == torch.Size([N, C, D, H, W]))
                gradients = torch.randn_like(out_cpu)
                out_cpu.backward(gradients)
                if TEST_CUDA:
                    # Same computation on CUDA; outputs and gradients must match CPU.
                    input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                    grid_cuda = grid_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                    out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                             align_corners=align_corners)
                    self.assertEqual(out_cpu, out_cuda)
                    out_cuda.backward(gradients.cuda())
                    self.assertEqual(input_cpu.grad, input_cuda.grad)
                    self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)
                    # check that zero-dimensional input strides don't error out
                    base_input = torch.randn(N, C, 1, IH, IW)
                    input_cpu = base_input.expand_as(input_cuda).requires_grad_()
                    grid_cpu = torch.randn(N, D, H, W, 3, requires_grad=True)
                    out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                            align_corners=align_corners)
                    input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_()
                    grid_cuda = grid_cpu.detach().cuda().requires_grad_()
                    out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                             align_corners=align_corners)
                    self.assertEqual(out_cpu, out_cuda)
            # test same size output
            test_shape(N, C, D, H, W, D, H, W, mode, padding_mode, align_corners)
            # test larger output
            N = random.randint(2, 7)
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(ID + 1, 10)
            H = random.randint(IH + 1, 10)
            W = random.randint(IW + 1, 10)
            test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
            # test smaller output
            N = random.randint(2, 7)
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(2, ID)
            H = random.randint(2, IH)
            W = random.randint(2, IW)
            test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
            # test 1x1 input (NOTE: D is intentionally carried over from the
            # previous section; only the input volume is collapsed to 1x1x1)
            N = random.randint(2, 7)
            C = random.randint(2, 7)
            ID = 1
            IH = 1
            IW = 1
            H = random.randint(2, 5)
            W = random.randint(2, 5)
            test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
            # testing empty grid
            N = random.randint(2, 7)
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(3, ID + 2)
            W = random.randint(3, IW + 2)
            test_shape(N, C, ID, IH, IW, D, 0, W, mode, padding_mode, align_corners)
            # testing empty channel
            N = random.randint(2, 7)
            ID = random.randint(2, 5)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(3, ID + 2)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(N, 0, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
            # testing empty batch
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(3, ID + 2)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(0, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
        for mode in ('bilinear', 'nearest'):
            for padding_mode in ('zeros', 'border', 'reflection'):
                for align_corners in (True, False):
                    # do gradcheck
                    N = random.randint(2, 5)
                    C = random.randint(2, 4)
                    D = random.randint(2, 5)
                    H = random.randint(2, 5)
                    W = random.randint(2, 5)
                    input = torch.randn(N, C, D, H, W, requires_grad=True)
                    grid = torch.randn(N, D, H, W, 3, requires_grad=True)
                    self.assertTrue(gradcheck(
                        lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,
                                                        align_corners=align_corners),
                        (input, grid)))
                    test(N, C, D, H, W, mode, padding_mode, align_corners)
    def test_affine_grid(self):
        """F.affine_grid (2-D case): verify known outputs for both
        align_corners settings, gradcheck, and CPU/CUDA agreement of outputs
        and gradients when CUDA is available."""
        # test known input on CPU
        input = torch.arange(1., 7).view(1, 2, 3)
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=True)
        groundtruth = torch.tensor(
            [[[0., -3.], [2., 5.]], [[4., 7.], [6., 15.]]]).view(1, 2, 2, 2)
        self.assertEqual(output, groundtruth)
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=False)
        groundtruth = torch.tensor(
            [[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]).view(1, 2, 2, 2)
        self.assertEqual(output, groundtruth)
        for align_corners in (True, False):
            # do gradcheck
            N = random.randint(1, 8)
            C = random.randint(1, 8)
            H = random.randint(1, 8)
            W = random.randint(1, 8)
            sz = torch.Size([N, C, H, W])
            inp = torch.randn(N, 2, 3, requires_grad=True)
            with warnings.catch_warnings(record=True):
                warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                self.assertTrue(gradcheck(
                    lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
                    (inp,)))
        # test CPU against CUDA
        if TEST_CUDA:
            N = random.randint(1, 8)
            C = random.randint(1, 8)
            H = random.randint(1, 8)
            W = random.randint(1, 8)
            sz = torch.Size([N, C, H, W])
            for align_corners in (True, False):
                input_cpu = torch.randn(N, 2, 3, requires_grad=True)
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                    out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
                gradients = torch.randn(out_cpu.size())
                out_cpu.backward(gradients)
                input_gpu = input_cpu.detach().cuda().requires_grad_()
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                    out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
                out_cuda.backward(gradients.cuda())
                self.assertEqual(out_cpu, out_cuda)
                self.assertEqual(input_cpu.grad, input_gpu.grad)
    def test_affine_grid_3d(self):
        """F.affine_grid (3-D case, 3x4 theta): verify known outputs for both
        align_corners settings, gradcheck, and CPU/CUDA agreement of outputs
        and gradients when CUDA is available."""
        # test known input on CPU
        input = torch.arange(1., 13).view(1, 3, 4)
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=True)
        groundtruth = torch.tensor(
            [[[[[-2., -10., -18.], [0., 0., 0.]], [[2., 2., 2.], [4., 12., 20.]]],
              [[[4., 4., 4.], [6., 14., 22.]], [[8., 16., 24.], [10., 26., 42.]]]]]).view(1, 2, 2, 2, 3)
        self.assertEqual(output, groundtruth)
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=False)
        groundtruth = torch.tensor(
            [[[[[1., -1., -3.], [2., 4., 6.]], [[3., 5., 7.], [4., 10., 16.]]],
              [[[4., 6., 8.], [5., 11., 17.]], [[6., 12., 18.], [7., 17., 27.]]]]]).view(1, 2, 2, 2, 3)
        self.assertEqual(output, groundtruth)
        for align_corners in (True, False):
            # do gradcheck
            N = random.randint(1, 8)
            C = random.randint(1, 8)
            D = random.randint(1, 8)
            H = random.randint(1, 8)
            W = random.randint(1, 8)
            sz = torch.Size([N, C, D, H, W])
            inp = torch.randn(N, 3, 4, requires_grad=True)
            with warnings.catch_warnings(record=True):
                warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                self.assertTrue(gradcheck(
                    lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
                    (inp,)))
        # test CPU against CUDA
        if TEST_CUDA:
            N = random.randint(1, 8)
            C = random.randint(1, 8)
            D = random.randint(1, 8)
            H = random.randint(1, 8)
            W = random.randint(1, 8)
            sz = torch.Size([N, C, D, H, W])
            for align_corners in (True, False):
                input_cpu = torch.randn(N, 3, 4, requires_grad=True)
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                    out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
                gradients = torch.randn(out_cpu.size())
                out_cpu.backward(gradients)
                input_gpu = input_cpu.detach().cuda().requires_grad_()
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                    out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
                out_cuda.backward(gradients.cuda())
                self.assertEqual(out_cpu, out_cuda)
                self.assertEqual(input_cpu.grad, input_gpu.grad)
    def test_channel_shuffle(self):
        """F.channel_shuffle with groups=2 on 3-D, 4-D and 5-D tensors.

        Each case checks the interleaved-channel reference output and that no
        warnings are emitted; the 4-D and 5-D cases are additionally run on
        channels_last / channels_last_3d memory formats.
        """
        # 3D tensor
        x = torch.tensor(
            [[[1, 2],
              [5, 6],
              [9, 10],
              [13, 14],
              ]]
        )
        y_ref = torch.tensor(
            [[[1, 2],
              [9, 10],
              [5, 6],
              [13, 14],
              ]]
        )
        #  ChannelsFirst
        with warnings.catch_warnings(record=True) as w:
            y = F.channel_shuffle(x, 2)
            self.assertEqual(len(w), 0)
        self.assertEqual(y, y_ref)
        #  ChannelsLast not supported for 3dim
        # 4D tensor
        x = torch.tensor(
            [[[[1, 2],
               [3, 4]],
              [[5, 6],
               [7, 8]],
              [[9, 10],
               [11, 12]],
              [[13, 14],
               [15, 16]],
              ]]
        )
        y_ref = torch.tensor(
            [[[[1, 2],
               [3, 4]],
              [[9, 10],
               [11, 12]],
              [[5, 6],
               [7, 8]],
              [[13, 14],
               [15, 16]],
              ]]
        )
        #  ChannelsFirst NCHW
        with warnings.catch_warnings(record=True) as w:
            y = F.channel_shuffle(x, 2)
            self.assertEqual(len(w), 0)
        self.assertEqual(y, y_ref)
        #  ChannelsLast NHWC
        with warnings.catch_warnings(record=True) as w:
            y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last), 2)
            self.assertEqual(len(w), 0)
        y = y.contiguous(memory_format=torch.contiguous_format)
        self.assertEqual(y, y_ref)
        # 5D tensor
        x = torch.tensor(
            [[[[[1, 2],
                [3, 4]]],
              [[[5, 6],
                [7, 8]]],
              [[[9, 10],
                [11, 12]]],
              [[[13, 14],
                [15, 16]]],
              ]]
        )
        y_ref = torch.tensor(
            [[[[[1, 2],
                [3, 4]]],
              [[[9, 10],
                [11, 12]]],
              [[[5, 6],
                [7, 8]]],
              [[[13, 14],
                [15, 16]]],
              ]]
        )
        #  ChannelsFirst NCHW
        with warnings.catch_warnings(record=True) as w:
            y = F.channel_shuffle(x, 2)
            self.assertEqual(len(w), 0)
        self.assertEqual(y, y_ref)
        #  ChannelsLast NHWC
        with warnings.catch_warnings(record=True) as w:
            y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last_3d), 2)
            self.assertEqual(len(w), 0)
        y = y.contiguous(memory_format=torch.contiguous_format)
        self.assertEqual(y, y_ref)
    def test_upsamplingLinear1d(self):
        """nn.Upsample mode='linear': output size and gradcheck for up- and
        down-sampling scale factors, across all align_corners and
        recompute_scale_factor combinations."""
        for align_corners in [True, False]:
            for recompute_scale_factor in [True, False]:
                kwargs = dict(
                    mode='linear', align_corners=align_corners, recompute_scale_factor=recompute_scale_factor
                )
                # test float scale factor up & downsampling
                for scale_factor in [0.5, 1.5, 2]:
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs)
                    in_t = torch.ones(1, 1, 2)
                    out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                    with warnings.catch_warnings(record=True) as w:
                        out_t = m(in_t)
                    self.assertEqual(torch.ones(1, 1, out_size), out_t.data)
                    input = torch.randn(1, 1, 2, requires_grad=True)
                    # gradcheck against the size-based call when the scale is
                    # not recomputed, otherwise against the scale-based call.
                    if not recompute_scale_factor:
                        gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), (input,))
                    else:
                        gradcheck(lambda x: F.interpolate(x, scale_factor=scale_factor, **kwargs), (input,))
def test_upsamplingLinear1d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='linear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9)
in_t_9[:, :, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5])
self.assertEqual(out_t_9[:, :, :15], out_t_5)
    def test_upsampling_not_recompute_scale_factor(self):
        """Bicubic interpolation with recompute_scale_factor=False must match
        the OpenCV reference output, followed by shape/gradcheck sweeps over
        several scale factors (and on CUDA when available)."""
        # test output against known input: result must match opencv
        in_t = torch.arange(8.).view(1, 2, 2, 2)
        expected_out_t = torch.tensor(
            [[[[-0.32725, -0.08843, 0.37933, 0.79744],
               [0.15039, 0.38921, 0.85697, 1.27508],
               [1.08591, 1.32473, 1.79249, 2.21060],
               [1.92213, 2.16095, 2.62871, 3.04682]],
              [[3.67275, 3.91157, 4.37933, 4.79744],
               [4.15039, 4.38921, 4.85697, 5.27508],
               [5.08591, 5.32473, 5.79249, 6.21060],
               [5.92213, 6.16095, 6.62871, 7.04682]]]])
        if IS_PPC:
            # Both OpenCV and PyTorch give a slightly different result on PPC
            expected_out_t = torch.tensor(
                [[[[-0.32725, -0.08843, 0.37933, 0.79744],
                   [0.15039, 0.38921, 0.85697, 1.27508],
                   [1.08591, 1.32473, 1.79249, 2.21060],
                   [1.92212, 2.16094, 2.62870, 3.04681]],
                  [[3.67275, 3.91157, 4.37933, 4.79743],
                   [4.15039, 4.38921, 4.85697, 5.27508],
                   [5.08591, 5.32473, 5.79249, 6.21059],
                   [5.92212, 6.16094, 6.62870, 7.04680]]]])
        out_t = F.interpolate(in_t, scale_factor=2.3, mode='bicubic', align_corners=False, recompute_scale_factor=False)
        torch.set_printoptions(precision=5)
        self.assertEqual(out_t, expected_out_t, atol=1e-4, rtol=0)
        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')
        for align_corners in [True, False]:
            kwargs = dict(mode='bicubic', align_corners=align_corners)
            # test float scale factor up & downsampling
            for device in device_list:
                for scale_factor in [0.6, 1.6, 2.3]:
                    in_t = torch.ones(2, 2, 2, 2).to(device)
                    out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
                    out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                    self.assertEqual(torch.ones(2, 2, out_size, out_size), out_t.data, atol=1e-5, rtol=0)
                    input = torch.randn(2, 2, 2, 2, requires_grad=True)
                    gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingBilinear2d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='bilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9)
in_t_9[:, :, :4, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15], out_t_5)
    def test_upsamplingTrilinear3d(self):
        """nn.Upsample mode='trilinear': output size, memory-format
        propagation, size-vs-scale equivalence, gradcheck and gradgradcheck
        for several scale factors and both align_corners settings."""
        for align_corners in [True, False]:
            kwargs = dict(mode='trilinear', align_corners=align_corners)
            for memory_format in [torch.contiguous_format, torch.channels_last_3d]:
                # test float scale factor up & downsampling
                for scale_factor in [0.5, 1.5, 2]:
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs)
                    in_t = torch.ones(1, 2, 2, 2, 2).contiguous(memory_format=memory_format)
                    out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                    with warnings.catch_warnings(record=True) as w:
                        out_t = m(in_t)
                    self.assertEqual(torch.ones(1, 2, out_size, out_size, out_size), out_t.data)
                    # Assert that memory format is carried through to the output
                    self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
                    input = torch.randn(1, 2, 2, 2, 2, requires_grad=True)
                    self.assertEqual(
                        F.interpolate(input, (out_size, out_size, out_size), **kwargs),
                        F.interpolate(input, scale_factor=scale_factor, **kwargs))
                    gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
                    gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingTrilinear3d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='trilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9, 9)
in_t_9[:, :, :4, :4, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15, :15], out_t_5)
def test_upsampling_small_scale(self):
m = torch.nn.Upsample(scale_factor=0.5, mode="bilinear")
in_t = torch.arange(1, 5, dtype=torch.float64).reshape(1, 1, 2, 2)
out_t = m(in_t)
expected_out_t = torch.tensor([[[[2.5]]]])
self.assertEqual(expected_out_t, out_t)
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_interpolate_illegal_memory_access(self):
        """Regression test: nearest interpolation backward on CUDA must not
        read/write out of bounds; CPU and CUDA results must agree."""
        in_s = 45
        out_s = 14
        input = torch.ones((1, 1, in_s), device='cuda', requires_grad=True)
        # note we allocated grad_output to be larger so out of bound access
        # would be visible in grad_input
        grad = torch.ones((1, 1, out_s * 2), device='cuda', requires_grad=True)
        grad = grad[:, :, :out_s]
        input_ref = input.detach().cpu().requires_grad_()
        grad_ref = grad.cpu()
        out = F.interpolate(input, size=(out_s,), mode='nearest')
        out.backward(grad)
        out_ref = F.interpolate(input_ref, size=(out_s,), mode='nearest')
        out_ref.backward(grad_ref)
        self.assertEqual(out_ref, out)
        self.assertEqual(input_ref.grad, input.grad)
    def test_interpolate(self):
        """Sweep F.interpolate / nn.Upsample over 1-D/2-D/3-D inputs, all
        modes, several scale factors and devices: check output value, the
        size-vs-scale equivalence, gradcheck and gradgradcheck."""
        def _test_interpolate_helper(in_t, scale_factor, layer):
            out_size = int(math.floor(in_t.shape[-1] * scale_factor))
            dim = len(in_t.shape) - 2
            out_shape = [1, 1] + [out_size] * dim
            with warnings.catch_warnings(record=True) as w:
                out_t = layer(in_t)
            self.assertEqual(torch.ones(out_shape), out_t)
            self.assertEqual(
                F.interpolate(in_t, (out_size,) * dim, **kwargs),
                F.interpolate(in_t, scale_factor=scale_factor, **kwargs))
            gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)
            gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)
        def _make_input(dim, device):
            # All-ones input of shape [1, 1, 2, ..., 2] with `dim` spatial dims.
            size = [1, 1]
            size += [2] * dim
            return torch.ones(size, requires_grad=True, device=device)
        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')
        for device in device_list:
            for scale_factor in [0.5, 1.5, 2]:
                for mode in ['nearest', 'area']:
                    kwargs = dict(mode=mode)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    for input in [_make_input(1, device), _make_input(2, device), _make_input(3, device)]:
                        _test_interpolate_helper(input, scale_factor, m)
                for align_corners in [True, False]:
                    kwargs = dict(mode='linear', align_corners=align_corners)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(1, device), scale_factor, m)
                    kwargs = dict(mode='bilinear', align_corners=align_corners)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(2, device), scale_factor, m)
                    kwargs = dict(mode='bicubic', align_corners=align_corners)
                    def m(t):
                        return F.interpolate(t, scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(2, device), scale_factor, m)
                    kwargs = dict(mode='trilinear', align_corners=align_corners)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(3, device), scale_factor, m)
def test_linear_broadcasting(self):
m = nn.Linear(5, 8)
inp = torch.randn(2, 3, 5)
expected = m(inp.view(6, 5)).view(2, 3, 8)
self.assertEqual(expected, m(inp))
def test_bilinear(self):
module = nn.Bilinear(10, 10, 8)
input1 = torch.randn(4, 10, requires_grad=True)
input2 = torch.randn(4, 10, requires_grad=True)
grad_output = torch.randn(4, 8)
res = module(input1, input2)
expected = (torch.einsum("bi,kij,bj->bk", input1, module.weight, input2) +
module.bias)
self.assertEqual(res, expected)
grads = torch.autograd.grad(res, [module.weight, module.bias, input1, input2], grad_output)
grads_expected = torch.autograd.grad(expected, [module.weight, module.bias, input1, input2], grad_output)
for g, ge in zip(grads, grads_expected):
self.assertEqual(g, ge)
    def test_bilinear_non_contiguous(self):
        """nn.Bilinear must give identical outputs and input gradients for
        non-contiguous (transposed) and contiguous versions of the same
        inputs."""
        module = nn.Bilinear(7, 7, 5)
        input1 = torch.randn(4, 7, 10, requires_grad=True)
        input2 = torch.randn(4, 7, 10, requires_grad=True)
        input1_tp = input1.transpose(1, 2)
        input2_tp = input2.transpose(1, 2)
        grad_output = torch.randn(4, 10, 5)
        def run(input1_tp, input2_tp):
            # Forward + backward; returns output and the leaf-input gradients.
            input1.grad = input2.grad = None
            output = module(input1_tp, input2_tp)
            output.backward(grad_output)
            return output.data, input1.grad.data, input2.grad.data
        out_nc, g1_nc, g2_nc = run(input1_tp, input2_tp)
        input1_tp = input1_tp.contiguous()
        input2_tp = input2_tp.contiguous()
        out, g1, g2 = run(input1_tp, input2_tp)
        self.assertEqual(out, out_nc)
        self.assertEqual(g1, g1_nc)
        self.assertEqual(g2, g2_nc)
    def test_bilinear_no_bias(self):
        """A biased nn.Bilinear with its bias zeroed must match a bias-free
        nn.Bilinear sharing the same weight, in outputs and gradients; also
        grad/gradgrad-checks F.bilinear without bias."""
        module = nn.Bilinear(10, 10, 8)
        module_no_bias = nn.Bilinear(10, 10, 8, False)
        module.bias.data.zero_()
        module.weight.data.copy_(module_no_bias.weight)
        input1 = torch.randn(4, 10, requires_grad=True)
        input2 = torch.randn(4, 10, requires_grad=True)
        grad_output = torch.randn(4, 8)
        def run(net):
            # Forward + backward; returns output and the input gradients.
            input1.grad = input2.grad = None
            output = net(input1, input2)
            output.backward(grad_output)
            return output.data, input1.grad.data, input2.grad.data
        out, g1, g2 = run(module)
        out_nb, g1_nb, g2_nb = run(module_no_bias)
        self.assertEqual(out, out_nb)
        self.assertEqual(g1, g1_nb)
        self.assertEqual(g2, g2_nb)
        _assertGradAndGradgradChecks(self,
                                     lambda x1, x2: F.bilinear(x1, x2, module_no_bias.weight, module_no_bias.bias),
                                     (input1, input2))
def test_bilinear_broadcasting(self):
m = nn.Bilinear(5, 6, 8)
input1 = torch.randn(2, 3, 5)
input2 = torch.randn(2, 3, 6)
expected = m(input1.view(6, 5), input2.view(6, 6)).view(2, 3, 8)
self.assertEqual(expected, m(input1, input2))
def test_conv_tbc(self):
inp = torch.randn(9, 4, 5, requires_grad=True)
weight = torch.randn(3, 5, 6, requires_grad=True)
bias = torch.randn(6, requires_grad=True)
gradcheck(lambda i, w, b, pad: F.conv_tbc(i, w, b, pad), (inp, weight, bias, 3))
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @unittest.skipIf(not TEST_CUDNN, "needs cudnn")
    @skipIfRocmVersionLessThan((4, 3))
    @skipIfNotMiopenSuggestNHWC
    def test_grouped_conv_cudnn_nhwc_support(self):
        """Smoke test: grouped (groups=4) fp16 channels_last convolution and
        transposed convolution must run without error on cuDNN."""
        # in order to catch the holes in grouped convolution in nhwc support for earlier cudnn version
        input = torch.randn((16, 16, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
        weight = torch.randn((8, 4, 3, 3), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
        out = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), False, (0, 0), 4)
        input = torch.randn((16, 8, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
        out_transpose = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), True, (0, 0), 4)
    @unittest.expectedFailure
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @unittest.skipIf(not TEST_CUDNN, "needs cudnn")
    def test_conv_cudnn_memory_layout_dominance(self):
        """Documents the desired (not yet implemented) rule that conv.weight's
        memory format determines the output's memory format."""
        # desired behavior here is to have the memory_layout of conv.weight to
        # dominate the layout of output.
        # which is not the same as current behavior, we'll fix this in
        # following up PRs and remove the `expectedFailure` tag
        input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device="cuda", requires_grad=True)
        conv = nn.Conv2d(8, 4, 3).cuda().float()
        out = conv(input)
        self.assertTrue(out.is_contiguous())
        input = input.contiguous(memory_format=torch.channels_last)
        out = conv(input)
        self.assertTrue(out.is_contiguous())
        conv.weight.data = conv.weight.contiguous(memory_format=torch.channels_last)
        out = conv(input)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        input = input.contiguous()
        out = conv(input)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_cudnn_noncontiguous_weight(self):
        """conv1d with expanded (non-contiguous) weights must equal the same
        call with contiguous weights."""
        # Noncontiguous weights must be contiguous() before being
        # passed to cuDNN
        input = torch.tensor([1, 1, 1], dtype=torch.double, device="cuda").view(1, 1, 3)
        weights1 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2)
        weights2 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2).contiguous()
        self.assertEqual(F.conv1d(input, weights1, bias=None, stride=2, dilation=2),
                         F.conv1d(input, weights2, bias=None, stride=2, dilation=2))
def run_grad_conv_test(self, func_forward, func_backward, dim=1, gradient='input'):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out, dilation in \
product([1, 2], [1, 2], [0, 1, 2], [2], [3], [1]):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(dim):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True)
weight = torch.randn(weight_shape, requires_grad=True)
if has_bias:
bias = torch.randn([chan_out], requires_grad=True)
output = func_forward(input, weight, stride=stride, padding=padding, dilation=dilation, bias=bias)
gradient_o = torch.randn(output.shape)
gradient_w = torch.autograd.grad(output, input if (gradient == 'input') else weight, gradient_o)
self.assertEqual(gradient_w[0],
func_backward(
input_shape if (gradient == 'input') else input,
weight_shape if (gradient == 'weight') else weight,
gradient_o,
stride=stride,
padding=padding,
dilation=dilation))
    def test_grad_conv1d_input(self):
        # Verify F.grad.conv1d_input against autograd's conv1d input gradient.
        self.run_grad_conv_test(F.conv1d, F.grad.conv1d_input, 1, 'input')
    def test_grad_conv1d_weight(self):
        # Verify F.grad.conv1d_weight against autograd's conv1d weight gradient.
        self.run_grad_conv_test(F.conv1d, F.grad.conv1d_weight, 1, 'weight')
    def test_grad_conv2d_input(self):
        # Verify F.grad.conv2d_input against autograd's conv2d input gradient.
        self.run_grad_conv_test(F.conv2d, F.grad.conv2d_input, 2, 'input')
    def test_grad_conv2d_weight(self):
        # Verify F.grad.conv2d_weight against autograd's conv2d weight gradient.
        self.run_grad_conv_test(F.conv2d, F.grad.conv2d_weight, 2, 'weight')
    def test_grad_conv3d_input(self):
        # Verify F.grad.conv3d_input against autograd's conv3d input gradient.
        self.run_grad_conv_test(F.conv3d, F.grad.conv3d_input, 3, 'input')
    def test_grad_conv3d_weight(self):
        # Verify F.grad.conv3d_weight against autograd's conv3d weight gradient.
        self.run_grad_conv_test(F.conv3d, F.grad.conv3d_weight, 3, 'weight')
@unittest.skipIf(not torch._nnpack_available(), "NNPACK unavailable")
def test_nnpack_conv(self):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out in \
product([1, 2, 3, 4], [1, 2], [0, 1, 2], [2], [3]):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(2):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True, dtype=torch.float)
weight = torch.randn(weight_shape, requires_grad=True, dtype=torch.float)
if has_bias:
bias = torch.randn([chan_out], requires_grad=True, dtype=torch.float)
output = torch._nnpack_spatial_convolution(input, weight, stride=stride, padding=padding, bias=bias)
output_expected = torch.nn.functional.conv2d(input, weight, stride=stride, padding=padding, bias=bias)
self.assertEqual(output, output_expected, atol=3e-4, rtol=0)
gradient_o = torch.randn(output.shape, dtype=torch.float)
grads = torch.autograd.grad(output, [input, weight], gradient_o)
grads_expected = torch.autograd.grad(output_expected, [input, weight], gradient_o)
for gr, gr_expected in zip(grads, grads_expected):
self.assertEqual(gr, gr_expected, atol=3e-4, rtol=0)
    def test_fold_invalid_arg(self):
        """nn.Fold must reject inputs whose channel dim is not divisible by
        prod(kernel_size), whose block count does not match the computed
        number of sliding blocks, or that yield a non-positive block shape."""
        # input.size(1) not divisible by \prod(kernel_size)
        fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
        with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
            fold(torch.randn(1, 5, 9))
        with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
            fold(torch.randn(1, 19, 9))
        # input.size(2) not matching the total number of sliding blocks
        with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
            fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
            fold(torch.randn(1, 6, 10))
        with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
            fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2))
            fold(torch.randn(1, 6, 5))
        with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
            fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2), dilation=(1, 2), padding=(2, 0))
            fold(torch.randn(1, 6, 5))  # should be 4 * 1 = 4 sliding blocks
        fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2), stride=1, dilation=8, padding=0)
        with self.assertRaisesRegex(RuntimeError, r"calculated shape of the array of sliding blocks as"):
            fold(torch.randn(1, 12, 12))
    def test_unfold_invalid_arg(self):
        """nn.Unfold must reject non-4D inputs and parameter combinations
        that produce a non-positive output shape."""
        # input wrong dimension
        unfold = nn.Unfold(kernel_size=(2, 3))
        with self.assertRaisesRegex(NotImplementedError, r"Only 4D input Tensors are supported"):
            unfold(torch.randn(1, 5, 2))
        # calculated output shape is too small
        with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
            unfold = nn.Unfold(kernel_size=(2, 3))
            unfold(torch.randn(1, 2, 2, 2))
        with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
            unfold = nn.Unfold(kernel_size=(5, 3), padding=(1, 1))
            unfold(torch.randn(1, 2, 2, 3))
        with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
            unfold = nn.Unfold(kernel_size=(1, 3), padding=(1, 1), dilation=(1, 2))
            unfold(torch.randn(1, 2, 2, 2))
def test_conv_padding_mode(self):
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode="xyz")
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode=3)
with self.assertRaisesRegex(ValueError, "Only \"zeros\" "):
nn.ConvTranspose2d(3, 3, 3, padding_mode="reflect")
def test_softmin(self):
x = torch.randn(2, 16)
self.assertEqual(F.softmin(x, 1), F.softmax(-x, 1))
self.assertEqual(F.softmin(x, 0), F.softmax(-x, 0))
    def test_log_softmax_cpu(self, dtype=torch.bfloat16):
        """log_softmax in bfloat16 on CPU: forward and backward must stay
        within a loose tolerance of the float32 reference."""
        inputf = torch.rand(32, 100, device="cpu", dtype=torch.float, requires_grad=True)
        input = inputf.to(dtype).detach().requires_grad_(True)
        outf = F.log_softmax(inputf, dim=-1)
        out = F.log_softmax(input, dim=-1)
        self.assertEqual(out.dtype, dtype)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(out, outf, atol=0.1, rtol=0)
        out.sum().backward()
        outf.sum().backward()
        self.assertEqual(input.grad.dtype, dtype)
        self.assertEqual(input.grad, inputf.grad.to(dtype), atol=0.1, rtol=0)
    def test_softmax_cpu(self, dtype=torch.bfloat16):
        """softmax in bfloat16 on CPU: forward and backward must stay within
        a loose tolerance of the float32 reference."""
        inputf = torch.rand(32, 100, device="cpu", dtype=torch.float, requires_grad=True)
        input = inputf.to(dtype).detach().requires_grad_(True)
        outf = F.softmax(inputf, dim=-1)
        out = F.softmax(input, dim=-1)
        self.assertEqual(out.dtype, dtype)
        self.assertEqualIgnoreType(out, outf, atol=1e-3, rtol=0)
        out.sum().backward()
        outf.sum().backward()
        self.assertEqual(input.grad.dtype, dtype)
        self.assertEqual(input.grad, inputf.grad.to(dtype), atol=1e-3, rtol=0)
    def test_adaptive_log_softmax(self):
        """nn.AdaptiveLogSoftmaxWithLoss: cutoff validation, input/target
        shape checks, cluster sizes, no-batch-dim support, log_prob
        normalization, forward-vs-log_prob consistency, and predict() with
        the argmax in the shortlist, outside it, and split between both."""
        # args validation
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 15], div_value=2.)
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 10], div_value=2.)
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 25], div_value=2.)
        with self.assertRaisesRegex(ValueError, "cutoffs should be a sequence of unique,"):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 20], div_value=2.)
        # not raise
        _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 19], div_value=2.)
        # input shapes
        with self.assertRaisesRegex(RuntimeError, r"Input and target should have the same size"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 5, 10])
            asfm(x, y)
        # out-of-bound targets
        with self.assertRaisesRegex(RuntimeError, r"Target values should be in"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 20])
            asfm(x, y)
        # cluster sizes
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(2, 16)
        y = torch.tensor([0, 17])
        self.assertEqual(asfm.head.weight.size(), (5 + 3, 16))   # 5 targets in head, 3 clusters, dimensionality 16
        self.assertEqual(asfm.tail[0][1].weight.size(), (5, 8))  # 5 targets in this cluster, dimensionality 8
        self.assertEqual(asfm.tail[1][1].weight.size(), (5, 4))
        self.assertEqual(asfm.tail[2][1].weight.size(), (5, 2))
        self.assertEqual(asfm(x, y).output.size(), (2, ))
        # test no_batch_dim support
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(1, 16)
        y = torch.tensor([17])
        x2 = x.squeeze(0)
        y2 = y.squeeze(0)
        self.assertEqual(asfm(x, y).output.squeeze(0), asfm(x2, y2).output)
        # log_probs actually returns log_proba
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 4, [2], div_value=2.)
        x = torch.randn(4, 8)
        logprob_out = asfm.log_prob(x)
        # exp(log_prob) must sum to 1 over classes for every row
        self.assertEqual(torch.exp(logprob_out).data.sum(1), torch.ones(4))
        # forward returns the same thing as log_probs
        for v in [0, 1, 2, 3]:
            y = torch.full((4,), v, dtype=torch.long)
            out, loss = asfm(x, y)
            self.assertEqual(out, logprob_out.gather(1, y.unsqueeze(1)).squeeze())
            self.assertEqual(loss, F.nll_loss(logprob_out, y))
        # predict
        x = torch.randn(64, 8).abs_()
        # argmax in shortlist
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[asfm.shortlist_size:, :].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
        # argmax outside of shortlist
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[:asfm.shortlist_size, :].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
        # half of the argmax in shortlist, half in clusters
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        x[:32, :asfm.shortlist_size].zero_()
        x[32:, asfm.shortlist_size:].zero_()
        asfm.head.weight.data[:asfm.shortlist_size, asfm.shortlist_size:].zero_()
        asfm.head.weight.data[asfm.shortlist_size:, :asfm.shortlist_size].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
    def test_cross_entropy_loss(self, dtype=torch.bfloat16):
        """CrossEntropyLoss in a reduced-precision dtype should track the fp32 result.

        Runs the same loss on an fp32 input and on its `dtype` (bfloat16 by
        default) copy, then compares forward values and input gradients with a
        loose absolute tolerance appropriate for the reduced precision.
        """
        loss_cpu = nn.CrossEntropyLoss().cpu()
        inputf = torch.randn(15, 10, device="cpu", dtype=torch.float, requires_grad=True)
        # Low-precision copy detached into its own autograd leaf.
        input = inputf.to(dtype).detach().requires_grad_(True)
        target = torch.empty(15, dtype=torch.long).random_(10)
        outf = loss_cpu(inputf, target)
        out = loss_cpu(input, target)
        self.assertEqual(out.dtype, dtype)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(out, outf, atol=1e-1, rtol=0)
        outf.backward()
        out.backward()
        self.assertEqual(input.grad.dtype, dtype)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(input.grad, inputf.grad, atol=1e-1, rtol=0)
    def test_cross_entropy_loss_precision(self):
        """Float and double CrossEntropyLoss must agree on a large spatial input.

        Regression test for #55657: the fp32 path is compared against the fp64
        reference on a big (128, 2, 768, 768) input.
        """
        # Regression test for #55657
        loss_cpu = nn.CrossEntropyLoss().cpu()
        inputf = torch.randn(128, 2, 768, 768, device="cpu", dtype=torch.float)
        inputd = inputf.double()
        target = torch.randint(2, (128, 768, 768), dtype=torch.long)
        outf = loss_cpu(inputf, target)
        outd = loss_cpu(inputd, target)
        # exact_dtype=False: compare values across the float/double dtypes.
        self.assertEqual(outf, outd, exact_dtype=False)
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_convert_sync_batchnorm(self):
        """convert_sync_batchnorm swaps only BatchNorm layers for SyncBatchNorm.

        The InstanceNorm sibling must be left untouched, and all state-dict
        entries (values and device placement) must be carried over.
        """
        module = torch.nn.Sequential(
            torch.nn.BatchNorm1d(100),
            torch.nn.InstanceNorm1d(100)
        ).cuda()
        # necessary to have an anchor point for comparison, in case the
        # convert_sync_batchnorm updates in place
        comp_module = torch.nn.Sequential(
            torch.nn.BatchNorm1d(100),
            torch.nn.InstanceNorm1d(100)
        ).cuda()
        comp_module.load_state_dict(module.state_dict())
        sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
        children = list(sync_bn_module.children())
        self.assertEqual(children[0].__class__, torch.nn.SyncBatchNorm)
        self.assertEqual(children[1].__class__, torch.nn.InstanceNorm1d)
        # Converted layers keep the original buffers/parameters unchanged.
        for layer, converted_layer in zip(comp_module.children(), sync_bn_module.children()):
            for key in layer.state_dict().keys():
                self.assertEqual(layer.state_dict()[key].device, converted_layer.state_dict()[key].device)
                self.assertEqual(layer.state_dict()[key], converted_layer.state_dict()[key])
    @unittest.skipIf(not TEST_CUDA, "CUDA not available")
    def test_sync_batchnorm_backward_elemt(self):
        """batch_norm_backward_elemt must be memory-layout agnostic.

        Computes the reference input gradient from contiguous tensors, then
        checks every contiguous/channels_last combination of grad_output and
        saved_input produces identical values.
        """
        device = 'cuda'
        saved_input = torch.rand(2, 3, 2, 1, device=device)
        grad_output = torch.rand(2, 3, 2, 1, device=device)
        mean = torch.rand(3, device=device)
        invstd = torch.rand(3, device=device)
        weight = torch.rand(3, device=device)
        sum_dy = torch.rand(3, device=device)
        sum_dy_xmu = torch.rand(3, device=device)
        count_tensor = torch.tensor([5, 5, 5], dtype=torch.int32, device=device)
        # Reference result on fully contiguous inputs.
        gI_contiguous = torch.batch_norm_backward_elemt(
            grad_output,
            saved_input,
            mean,
            invstd,
            weight,
            sum_dy,
            sum_dy_xmu,
            count_tensor
        )
        # Test batch_norm_backward_elemt gives the same answer for all
        # combinations of contiguous as channels_last input
        for a, b in [
            (torch.channels_last, torch.contiguous_format),
            (torch.contiguous_format, torch.channels_last),
            (torch.channels_last, torch.channels_last),
        ]:
            gI_actual = torch.batch_norm_backward_elemt(
                grad_output.contiguous(memory_format=a),
                saved_input.contiguous(memory_format=b),
                mean,
                invstd,
                weight,
                sum_dy,
                sum_dy_xmu,
                count_tensor
            )
            self.assertEqual(gI_actual, gI_contiguous)
    @unittest.skipIf(not TEST_CUDA, "CUDA not available")
    def test_sync_batchnorm_accuracy_cuda(self):
        # The target of this test is to test the functionality and accuracy of
        # those single-GPU cuda kernels used in SyncBatchNorm
        # They are:
        # fwd: torch.batch_norm_stats, torch.batch_norm_gather_stats_with_counts, torch.batch_norm_elemt
        # bwd: torch.batch_norm_backward_reduce, torch.batch_norm_backward_elemt
        def _batch_norm_stats(data):
            """Check batch_norm_stats' mean (contiguous and channels_last) against a plain reduction."""
            mean1, _ = torch.batch_norm_stats(data, 1e-5)
            mean2, _ = torch.batch_norm_stats(data.to(memory_format=torch.channels_last), 1e-5)
            # Reference: per-channel mean over batch and spatial dims.
            mean_ref = torch.mean(data, (0, 2, 3), keepdim=False)
            self.assertEqual(mean_ref, mean1)
            self.assertEqual(mean_ref, mean2)
        data = torch.randn(1, 96, 112, 112, dtype=torch.float, device='cuda')
        _batch_norm_stats(data)
    def test_functional_grad_conv(self):
        """torch.nn.grad.convNd_input must match autograd's input gradient.

        Verified for dilated 1D/2D/3D convolutions; also checks that calling
        the private helper `_grad_input_padding` raises exactly one warning.
        """
        # Conv 1D
        input = torch.randn(1, 1, 5, requires_grad=True)
        weight = torch.randn(1, 1, 3, requires_grad=True)
        output = F.conv1d(input, weight, dilation=2)
        grad_output = torch.randn(output.shape)
        grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
        grad_input_functional = torch.nn.grad.conv1d_input(input.shape, weight, grad_output, dilation=2)
        self.assertEqual(grad_input_functional, grad_input_autograd)
        # Conv 2D
        input = torch.randn(1, 1, 5, 5, requires_grad=True)
        weight = torch.randn(1, 1, 3, 3, requires_grad=True)
        output = F.conv2d(input, weight, dilation=2)
        grad_output = torch.randn(output.shape)
        grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
        grad_input_functional = torch.nn.grad.conv2d_input(input.shape, weight, grad_output, dilation=2)
        self.assertEqual(grad_input_functional, grad_input_autograd)
        # Conv 3D
        input = torch.randn(1, 1, 5, 5, 5, requires_grad=True)
        weight = torch.randn(1, 1, 3, 3, 3, requires_grad=True)
        output = F.conv3d(input, weight, dilation=2)
        grad_output = torch.randn(output.shape)
        grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
        grad_input_functional = torch.nn.grad.conv3d_input(input.shape, weight, grad_output, dilation=2)
        self.assertEqual(grad_input_functional, grad_input_autograd)
        # Warning for _grad_input_padding
        with warnings.catch_warnings(record=True) as w:
            torch.nn.grad._grad_input_padding(torch.rand(1, 2, 3), [1, 2, 5], (1,), (0,), (3,))
        self.assertEqual(len(w), 1)
def test_flatten(self):
tensor_input = torch.randn(2, 1, 2, 3)
# Flatten Tensor
flatten = nn.Flatten(start_dim=1, end_dim=-1)
tensor_output = flatten(tensor_input)
self.assertEqual(tensor_output.size(), torch.Size([2, 6]))
def test_unflatten(self):
tensor_input = torch.randn(2, 50)
# Unflatten Tensor (unflattened_size as a tuple of ints and list of ints)
for us in ((2, 5, 5), [2, 5, 5]):
unflatten = nn.Unflatten(dim=1, unflattened_size=us)
tensor_output = unflatten(tensor_input)
self.assertEqual(tensor_output.size(), torch.Size([2, 2, 5, 5]))
# Unflatten NamedTensor
unflatten = nn.Unflatten(dim='features', unflattened_size=(('C', 2), ('H', 5), ('W', 5)))
named_tensor_input = tensor_input.refine_names('N', 'features')
named_tensor_output = unflatten(named_tensor_input)
self.assertEqual(named_tensor_output.size(), torch.Size([2, 2, 5, 5]))
def test_unflatten_invalid_arg(self):
# Wrong type for unflattened_size (tuple of floats)
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be tuple of ints, but found element of type float at pos 2"):
nn.Unflatten(dim=1, unflattened_size=(2, 5, 5.0))
# Wrong type for unflattened_size (list of lists and list of tuples)
for us in ([['C', 2], ['W', 5], ['H', 5]], [('C', 2), ('W', 5), ('H', 5)]):
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be a tuple of tuples, but found type list"):
nn.Unflatten(dim='features', unflattened_size=us)
# Wrong type for unflattened_size (tuple of lists)
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be tuple of tuples, but found element of type list at pos 0"):
nn.Unflatten(dim='features', unflattened_size=(['C', 2], ['W', 5], ['H', 5]))
# Wrong type for unflattened_size (tuple of dicts)
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be tuple of tuples, but found element of type dict at pos 0"):
nn.Unflatten(dim='features', unflattened_size=({'C': 2}, {'W': 5}, {'H': 5}))
    def test_layer_norm_grads_with_create_graph_flag(self):
        """Input gradients of LayerNorm must not depend on create_graph.

        Compares torch.autograd.grad with create_graph=False vs True on CPU,
        and on CUDA when available.
        """
        atol = 1e-5
        rtol = 1e-3
        x = torch.randn((4, 4, 16), requires_grad=True)
        layer_norm = nn.LayerNorm((16,), 1e-5, True)
        with torch.no_grad():
            # Non-unit affine weight so the gradient path through it is exercised.
            layer_norm.weight = torch.nn.Parameter(0.1 * torch.ones_like(layer_norm.weight))
        grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
        grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]
        self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)
        if TEST_CUDA:
            x = x.to('cuda')
            layer_norm = layer_norm.to('cuda')
            grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
            grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]
            self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)
def test_padding_list(self):
# Padding can be a list, or tuple (regression test for gh-54452)
x = torch.randn(4, 8, 32, 32)
net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=[3, 3])
y = net(x)
net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=(3, 3))
y = net(x)
class TestNNInit(TestCase):
def setUp(self):
super(TestNNInit, self).setUp()
random.seed(123)
def _is_normal(self, tensor, mean, std):
samples = tensor.view(-1).tolist()
p_value = stats.kstest(samples, 'norm', args=(mean, std))[1]
return p_value > 0.0001
def _is_trunc_normal(self, tensor, mean, std, a, b):
# scipy's trunc norm is suited for data drawn from N(0, 1),
# so we need to transform our data to test it using scipy.
z_samples = (tensor.view(-1) - mean) / std
z_samples = z_samples.tolist()
a0 = (a - mean) / std
b0 = (b - mean) / std
p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
return p_value > 0.0001
def _is_uniform(self, tensor, a, b):
samples = tensor.view(-1).tolist()
p_value = stats.kstest(samples, 'uniform', args=(a, (b - a)))[1]
return p_value > 0.0001
def _create_random_nd_tensor(self, dims, size_min, size_max):
size = [random.randint(size_min, size_max) for _ in range(dims)]
tensor = torch.zeros(size)
return tensor
def _random_float(self, a, b):
return (b - a) * random.random() + a
def test_calculate_gain_linear(self):
for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose2d', 'conv_transpose2d', 'conv_transpose3d']:
gain = init.calculate_gain(fn)
self.assertEqual(gain, 1)
def test_calculate_gain_nonlinear(self):
for fn in ['sigmoid', 'tanh', 'relu', 'leaky_relu']:
gain = init.calculate_gain(fn)
if fn == 'sigmoid':
self.assertEqual(gain, 1)
elif fn == 'tanh': # 5 / 3
self.assertEqual(gain, 1.6666666666666667)
elif fn == 'relu': # sqrt(2)
self.assertEqual(gain, 1.4142135623730951)
elif fn == 'leaky_relu': # sqrt(2 / 1 + slope^2))
self.assertEqual(gain, 1.4141428569978354)
elif fn == 'selu':
self.assertEqual(gain, 0.75)
def test_calculate_gain_leaky_relu(self):
for param in [None, 0, 0.01, 10]:
gain = init.calculate_gain('leaky_relu', param)
if param is None: # Default slope is 0.01
self.assertEqual(gain, 1.4141428569978354)
elif param == 0: # No slope = same gain as normal ReLU
self.assertEqual(gain, 1.4142135623730951)
elif param == 0.01:
self.assertEqual(gain, 1.4141428569978354)
elif param == 10:
self.assertEqual(gain, 0.14071950894605836)
def test_calculate_gain_leaky_relu_only_accepts_numbers(self):
for param in [True, [1], {'a': 'b'}]:
with self.assertRaises(ValueError):
init.calculate_gain('leaky_relu', param)
def test_calculate_gain_only_accepts_valid_nonlinearities(self):
for n in [2, 5, 25]:
# Generate random strings of lengths that definitely aren't supported
random_string = ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
with self.assertRaises(ValueError):
init.calculate_gain(random_string)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_uniform(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
a = self._random_float(-3, 3)
b = a + self._random_float(1, 5)
init.uniform_(input_tensor, a=a, b=b)
assert self._is_uniform(input_tensor, a, b)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_normal(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
mean = self._random_float(-3, 3)
std = self._random_float(1, 5)
init.normal_(input_tensor, mean=mean, std=std)
assert self._is_normal(input_tensor, mean, std)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_trunc_normal(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
mean = self._random_float(-3, 3)
std = self._random_float(.01, 1)
a = self._random_float(mean - 2 * std, mean)
b = self._random_float(mean, mean + 2 * std)
init.trunc_normal_(input_tensor, mean=mean, std=std, a=a, b=b)
assert self._is_trunc_normal(input_tensor, mean, std, a, b)
def test_constant(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
val = self._random_float(1, 10)
init.constant_(input_tensor, val)
self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
def test_ones_and_zeros(self):
for init_fn_, val in zip([init.ones_, init.zeros_], [1, 0]):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
init_fn_(input_tensor)
self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
def test_eye(self):
input_tensor = self._create_random_nd_tensor(2, size_min=1, size_max=5)
init.eye_(input_tensor)
# Check every single element
for i in range(input_tensor.size(0)):
for j in range(input_tensor.size(1)):
if i == j:
assert input_tensor[i][j] == 1
else:
assert input_tensor[i][j] == 0
def test_eye_only_works_on_2d_inputs(self):
for dims in [1, 3]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.eye_(tensor)
def test_max_unpool(self):
# Test 1D
output, indices = F.max_pool1d(torch.randn([1, 1, 4]), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool1d(output, indices, 2), F.max_unpool1d(output, indices, 2, stride=2))
# Test list / tuple passed as argument to max_unpool1d
input = torch.randn([1, 1, 5], requires_grad=True)
output, indices = F.max_pool1d(input, 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool1d(output, indices, 2, stride=2, output_size=input.shape),
F.max_unpool1d(output, indices, 2, stride=2, output_size=input.size()))
gradcheck(F.max_unpool1d, (output, indices, 2), check_forward_ad=True)
# Test 2D
output, indices = F.max_pool2d(torch.randn([1, 1, 4, 4], requires_grad=True), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool2d(output, indices, 2), F.max_unpool2d(output, indices, 2, stride=2))
gradcheck(F.max_unpool2d, (output, indices, 2), check_forward_ad=True)
# Test 3D
output, indices = F.max_pool3d(torch.randn([4, 4, 4, 4, 4], requires_grad=True), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool3d(output, indices, 2), F.max_unpool3d(output, indices, 2, stride=2))
gradcheck(F.max_unpool3d, (output, indices, 2), check_forward_ad=True)
def test_dirac_properties(self):
for dims in [3, 4, 5]:
for groups in [1, 2, 3]:
# prepare random tensor with random sizes, but fits groups
a, c, d, e = (random.randint(1, 5) for _ in range(4))
b = random.randint(1, 5 * groups) # same range as a*groups but all range allowed
# make sure first dim divides by groups
input_tensor = torch.randn((a * groups, b, c, d, e)[:dims])
init.dirac_(input_tensor, groups)
c_out, c_in = input_tensor.size(0) // groups, input_tensor.size(1)
min_d = min(c_out, c_in)
# Check number of nonzeros is equivalent to smallest dim (for each group)
assert torch.nonzero(input_tensor).size(0) == min_d * groups
# Check sum of values (can have precision issues, hence assertEqual) is also equivalent
self.assertEqual(input_tensor.sum(), min_d * groups)
def test_dirac_identity(self):
for groups in [1, 3]:
batch, in_c, out_c, size, kernel_size = 8, 3, 9, 5, 3 # in_c, out_c must divide by groups
eff_out_c = out_c // groups
# Test 1D
input_var = torch.randn(batch, in_c, size)
filter_var = torch.zeros(eff_out_c, in_c, kernel_size)
filter_var = torch.cat([filter_var] * groups)
init.dirac_(filter_var, groups)
output_var = F.conv1d(input_var, filter_var)
input_tensor, output_tensor = input_var.data, output_var.data # Variables do not support nonzero
for g in range(groups):
# Assert in_c outputs are preserved (per each group)
self.assertEqual(input_tensor[:, :, 1:-1],
output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :])
# Assert extra outputs are 0
assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :]).numel() == 0
# Test 2D
input_var = torch.randn(batch, in_c, size, size)
filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size)
filter_var = torch.cat([filter_var] * groups)
init.dirac_(filter_var, groups)
output_var = F.conv2d(input_var, filter_var)
input_tensor, output_tensor = input_var.data, output_var.data # Variables do not support nonzero
for g in range(groups):
# Assert in_c outputs are preserved (per each group)
self.assertEqual(input_tensor[:, :, 1:-1, 1:-1],
output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :])
# Assert extra outputs are 0
assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :]).numel() == 0
# Test 3D
input_var = torch.randn(batch, in_c, size, size, size)
filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size, kernel_size)
filter_var = torch.cat([filter_var] * groups)
init.dirac_(filter_var, groups)
output_var = F.conv3d(input_var, filter_var)
input_tensor, output_tensor = input_var.data, output_var.data
for g in range(groups):
# Assert in_c outputs are preserved (per each group)
self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1],
output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :, :])
# Assert extra outputs are 0
assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :, :]).numel() == 0
def test_dirac_only_works_on_3_4_5d_inputs(self):
for dims in [1, 2, 6]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.dirac_(tensor)
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
with self.assertRaises(ValueError):
init.xavier_uniform_(tensor)
def test_xavier_normal_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
with self.assertRaises(ValueError):
init.xavier_normal_(tensor)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_uniform(self):
for use_gain in [True, False]:
for dims in [2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
gain = 1
if use_gain:
gain = self._random_float(0.1, 2)
init.xavier_uniform_(input_tensor, gain=gain)
else:
init.xavier_uniform_(input_tensor)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
bounds = expected_std * math.sqrt(3)
assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_normal(self):
for use_gain in [True, False]:
for dims in [2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
gain = 1
if use_gain:
gain = self._random_float(0.1, 2)
init.xavier_normal_(input_tensor, gain=gain)
else:
init.xavier_normal_(input_tensor)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
assert self._is_normal(input_tensor, 0, expected_std)
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
init.kaiming_uniform_(tensor)
def test_kaiming_normal_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
init.kaiming_normal_(tensor)
def test_kaiming_uniform_warning_on_0element_tensor(self):
tensor = torch.empty(0, 1)
with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
_ = init.kaiming_uniform_(tensor)
def test_kaiming_normal_warning_on_0element_tensor(self):
tensor = torch.empty(0, 1)
with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
_ = init.kaiming_normal_(tensor)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_uniform(self):
for use_a in [True, False]:
for dims in [2, 4]:
for mode in ['fan_in', 'fan_out']:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
if use_a:
a = self._random_float(0.1, 2)
init.kaiming_uniform_(input_tensor, a=a, mode=mode)
else:
a = 0
init.kaiming_uniform_(input_tensor, mode=mode)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
if mode == 'fan_in':
n = fan_in
else:
n = fan_out
expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
bounds = expected_std * math.sqrt(3.0)
assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_normal(self):
for use_a in [True, False]:
for dims in [2, 4]:
for mode in ['fan_in', 'fan_out']:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
if use_a:
a = self._random_float(0.1, 2)
init.kaiming_normal_(input_tensor, a=a, mode=mode)
else:
a = 0
init.kaiming_normal_(input_tensor, mode=mode)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
if mode == 'fan_in':
n = fan_in
else:
n = fan_out
expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
assert self._is_normal(input_tensor, 0, expected_std)
def test_sparse_only_works_on_2d_inputs(self):
for dims in [1, 3]:
with self.assertRaises(ValueError):
sparsity = self._random_float(0.1, 0.9)
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.sparse_(tensor, sparsity)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_sparse_default_std(self):
for use_random_std in [True, False]:
input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35)
rows, cols = input_tensor.size(0), input_tensor.size(1)
sparsity = self._random_float(0.1, 0.2)
std = 0.01 # default std
if use_random_std:
std = self._random_float(0.01, 0.2)
init.sparse_(input_tensor, sparsity=sparsity, std=std)
else:
init.sparse_(input_tensor, sparsity=sparsity)
for col_idx in range(input_tensor.size(1)):
column = input_tensor[:, col_idx]
assert column[column == 0].nelement() >= math.ceil(sparsity * rows)
assert self._is_normal(input_tensor[input_tensor != 0], 0, std)
@skipIfNoLapack
def test_orthogonal(self):
for use_gain in [True, False]:
for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
input_tensor = torch.zeros(tensor_size)
gain = 1.0
if use_gain:
gain = self._random_float(0.1, 2)
init.orthogonal_(input_tensor, gain=gain)
else:
init.orthogonal_(input_tensor)
rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
flattened_tensor = input_tensor.view(rows, cols)
if rows > cols:
self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
torch.eye(cols) * gain ** 2, atol=1e-6, rtol=0)
else:
self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
torch.eye(rows) * gain ** 2, atol=1e-6, rtol=0)
def test_deprecation(self):
x = torch.randn(3, 3)
def fn():
init.normal(x)
with self.assertWarnsRegex(UserWarning, 'deprecated', msg='methods not suffixed with underscore should be deprecated'):
fn()
class TestFusionEval(TestCase):
    """Numerical checks for conv+batchnorm fusion in eval mode."""
    @given(X=hu.tensor(shapes=((5, 3, 5, 5),)),
           running_mean=hu.tensor(shapes=(6,)),
           running_var=hu.tensor(shapes=(6,)))
    def test_fuse_module_eval_numerics(self, X, running_mean, running_var):
        """fuse_conv_bn_eval(conv, bn) must match bn(conv(x)) for affine and non-affine BN.

        Inputs are generated by hypothesis; `hu.tensor` appears to yield
        (ndarray, ...) pairs — hence the unpacking / `[0]` indexing below
        (TODO confirm against hu.tensor's contract).
        """
        inputs, _ = X
        iC, oC = inputs.shape[1], len(running_mean[0])
        inputs = torch.from_numpy(inputs).to(torch.double)
        kernel_size = (3, 3)
        conv_ref = torch.nn.Conv2d(iC, oC, bias=True, kernel_size=kernel_size)
        bn_ref = torch.nn.BatchNorm2d(oC)
        # Install the generated running statistics before switching to eval.
        bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
        conv_ref.eval()
        bn_ref.eval()
        Y_ref = bn_ref(conv_ref(inputs))
        conv_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                bn_ref)
        Y_hat = conv_bn_fused(inputs)
        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN fusion results are off")
        # Repeat with a non-affine BatchNorm (no learnable weight/bias to fold).
        na_bn_ref = torch.nn.BatchNorm2d(oC, affine=False)
        na_bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        na_bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
        na_bn_ref.eval()
        Y_ref = na_bn_ref(conv_ref(inputs))
        conv_na_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                   na_bn_ref)
        Y_hat = conv_na_bn_fused(inputs)
        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN(non-affine) fusion results are off")
class TestConstantPadNd(TestCase):
def test_constant_pad_nd(self):
a = torch.tensor([[1, 2], [3, 4]])
res = torch.constant_pad_nd(a, [1, 2, 1, 0], 9)
expected = torch.tensor([
[9, 9, 9, 9, 9],
[9, 1, 2, 9, 9],
[9, 3, 4, 9, 9]
])
self.assertEqual(res, expected)
def test_preserves_memory_format(self):
nchw_tensor = torch.rand((1, 2, 5, 3))
nchw_padded = torch.constant_pad_nd(nchw_tensor, [1, 2], 0.5)
self.assertTrue(nchw_padded.is_contiguous(memory_format=torch.contiguous_format))
nhwc_tensor = nchw_tensor.contiguous(memory_format=torch.channels_last)
nhwc_padded = torch.constant_pad_nd(nhwc_tensor, [1, 2], 0.5)
self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))
class TestAddRelu(TestCase):
def test_add_relu(self):
a = torch.rand((7, 11))
b = torch.rand((7, 11))
a = a.float()
b = b.float()
a = a * -10
a = a + 5
add_res = a + b
relu_res = torch.relu(add_res)
add_relu_res = torch._VF._add_relu(a, b)
self.assertEqual(add_relu_res, relu_res)
def test_add_relu_broadcasting(self):
a = torch.rand((1, 32))
b = 1
b_scalar = torch.ones(1, 32)
res = torch._VF._add_relu(a, b)
broadcasted_res = torch._VF._add_relu(a, b_scalar)
self.assertEqual(broadcasted_res, res)
def add_test(test, decorator=None):
    """Register `test` as one or more methods on TestNN.

    Adds a CPU variant plus CUDA variants; with a `dtype` parameter on
    test_cuda, one variant per floating dtype is registered.  On builds where
    tf32_is_not_fp32() and the test opts in via `with_tf32`, separate
    '_fp32'/'_tf32' variants are registered instead.  NB: the `test=test` /
    `kwargs=kwargs` default arguments below deliberately freeze the current
    values into each generated closure.
    """
    def add(test_name, fn):
        # Refuse to silently overwrite an already-registered test.
        if hasattr(TestNN, test_name):
            raise RuntimeError('Found two tests with the same name: ' + test_name)
        if decorator is not None:
            fn = decorator(fn)
        setattr(TestNN, test_name, fn)
    test_name = test.get_name()
    if not hasattr(test, 'test_cpu') or test.test_cpu:
        add(test_name, lambda self, test=test: test(self))
    cuda_test_name = test_name + '_cuda'
    # With dtype enable, it's good enough to test against three floating types
    kwargs = {}
    if 'extra_args' in get_function_arglist(test.test_cuda):
        kwargs['extra_args'] = test.extra_args
    if 'dtype' in get_function_arglist(test.test_cuda):
        if tf32_is_not_fp32() and test.with_tf32:
            def with_tf32_off(self, test=test, kwargs=kwargs):
                with tf32_off():
                    test.test_cuda(self, dtype=torch.float, **kwargs)
            add(cuda_test_name + '_fp32', with_tf32_off)
            def with_tf32_on(self, test=test, kwargs=kwargs):
                with tf32_on(self, test.tf32_precision):
                    test.test_cuda(self, dtype=torch.float, **kwargs)
            add(cuda_test_name + '_tf32', with_tf32_on)
        else:
            add(cuda_test_name + '_float', lambda self,
                test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs))
            add(cuda_test_name + '_double', lambda self,
                test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs))
            def test_half(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.half, **kwargs)
            if getattr(test, 'check_half', True):
                add(cuda_test_name + '_half', test_half)
            def test_bfloat16(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.bfloat16, **kwargs)
            if getattr(test, 'check_bfloat16', True):
                add(cuda_test_name + '_bfloat16', test_bfloat16)
            def test_cfloat(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.cfloat, **kwargs)
            def test_cdouble(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.cdouble, **kwargs)
            # Complex dtypes are opt-in, off by default.
            if getattr(test, 'check_complex', False):
                add(cuda_test_name + '_cfloat', test_cfloat)
                add(cuda_test_name + '_cdouble', test_cdouble)
    else:
        def with_tf32_off(self, test=test, kwargs=kwargs):
            with tf32_off():
                test.test_cuda(self, **kwargs)
        if tf32_is_not_fp32() and test.with_tf32:
            add(cuda_test_name + '_fp32', with_tf32_off)
            def with_tf32_on(self, test=test, kwargs=kwargs):
                with tf32_on(self, test.tf32_precision):
                    test.test_cuda(self, **kwargs)
            add(cuda_test_name + '_tf32', with_tf32_on)
        else:
            # No tf32 concern: register a single CUDA variant (tf32 forced off).
            add(cuda_test_name, with_tf32_off)
# Instantiate and register every declarative module test.  For tests flagged
# 'check_eval' an eval-mode twin is added; for 'check_with_long_tensor' a
# variant running on integer-valued (Long-equivalent) data is added.
for test_params in module_tests + new_module_tests:
    # TODO: CUDA is not implemented yet
    if 'constructor' not in test_params:
        name = test_params.pop('module_name')
        test_params['constructor'] = getattr(nn, name)
    decorator = test_params.pop('decorator', None)
    test = NewModuleTest(**test_params)
    add_test(test, decorator)
    if 'check_eval' in test_params:
        # create a new test that is identical but that sets module.training to False
        desc = test_params.get('desc', None)
        test_params['desc'] = 'eval' if desc is None else desc + '_eval'
        def gen_eval_constructor(constructor):
            # Wrap the constructor so the built module is put in eval mode.
            def eval_constructor(*args, **kwargs):
                cons = constructor(*args, **kwargs)
                cons.training = False
                return cons
            eval_constructor.__name__ = constructor.__name__
            return eval_constructor
        test_params['constructor'] = gen_eval_constructor(test_params['constructor'])
        test = NewModuleTest(**test_params)
        add_test(test, decorator)
    if 'check_with_long_tensor' in test_params:
        fullname = test_params.get('fullname', None)
        if fullname:
            test_params['fullname'] = fullname + '_with_long_tensor'
        else:
            desc = test_params.get('desc', None)
            test_params['desc'] = 'with_long_tensor' if desc is None else desc + '_with_long_tensor'
        def double_equivalent_of_long_tensor(size):
            # Integer-valued doubles stand in for LongTensors so autograd works.
            return torch.randint(-1000, 1000, size=size).double()
        def apply_to_cons(t):
            if t.is_floating_point():
                if isinstance(t, Parameter):
                    return Parameter(double_equivalent_of_long_tensor(t.size()))
                elif isinstance(t, torch.Tensor):
                    return double_equivalent_of_long_tensor(t.size())
            else:
                return t
        def gen_long_tensor_constructor(constructor):
            # Wrap the constructor so all floating params become integer-valued.
            def long_tensor_constructor(*args, **kwargs):
                cons = constructor(*args, **kwargs)
                cons._apply(apply_to_cons)
                return cons
            long_tensor_constructor.__name__ = constructor.__name__
            return long_tensor_constructor
        def gen_long_tensor_input(input_size):
            def input_func():
                return double_equivalent_of_long_tensor(input_size)
            return input_func
        def reference_fn(i, p, m):
            # For bad reasons this would create LongTensors that requires gradients
            # Remove requires_grad to avoid this
            for p in m.parameters():
                p.requires_grad_(False)
            m._apply(lambda t: t.long())
            input = i.long()
            out = m.forward(input)
            return out
        test_params['constructor'] = gen_long_tensor_constructor(test_params['constructor'])
        test_params['input_fn'] = gen_long_tensor_input(test_params['input_size'])
        test_params['reference_fn'] = reference_fn
        test_params['check_forward_only'] = True
        # Currently we don't support conv2d/conv3d for LongTensor in CUDA
        test_params['test_cuda'] = False
        test = NewModuleTest(**test_params)
        add_test(test, decorator)
# Instantiate and register every declarative criterion (loss) test; tests
# flagged 'check_sum_reduction' get a twin constructed with reduction='sum'.
for test_params in criterion_tests:
    if 'constructor' not in test_params:
        name = test_params.pop('module_name')
        test_params['constructor'] = getattr(nn, name)
    test = CriterionTest(**test_params)
    decorator = test_params.pop('decorator', None)
    add_test(test, decorator)
    if 'check_sum_reduction' in test_params:
        desc = test_params.get('desc', None)
        test_params['desc'] = 'sum_reduction' if desc is None else desc + '_sum_reduction'
        def gen_sum_reduction_constructor(constructor):
            # Wrap the constructor so the loss is built with reduction='sum'.
            def sum_reduction_constructor(*args, **kwargs):
                cons = constructor(*args, reduction='sum', **kwargs)
                return cons
            sum_reduction_constructor.__name__ = constructor.__name__
            return sum_reduction_constructor
        test_params['constructor'] = gen_sum_reduction_constructor(test_params['constructor'])
        test = CriterionTest(**test_params)
        add_test(test, decorator)
class UnpoolingNet(nn.Module):
    """Chains a pooling module (built with return_indices=True) with its matching unpool."""

    def __init__(self, pool, unpool):
        super(UnpoolingNet, self).__init__()
        self.pool = pool
        self.unpool = unpool

    def forward(self, input):
        # The pool returns (output, indices); feed both straight into the unpool.
        pooled, indices = self.pool(input)
        return self.unpool(pooled, indices)
# Pooling -> unpooling round trips through the generic module harness.
# Batched variants first:
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool1d(2, return_indices=True),
        nn.MaxUnpool1d(2)),
    input_size=(1, 1, 4),
    fullname='MaxUnpool1d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool2d(2, return_indices=True),
        nn.MaxUnpool2d(2)),
    input_size=(1, 1, 2, 4),
    fullname='MaxUnpool2d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool3d(2, return_indices=True),
        nn.MaxUnpool3d(2)),
    input_size=(1, 1, 2, 4, 6),
    fullname='MaxUnpool3d_net',
    check_gradgrad=False,))
# No-batch-dim variants, validated against the single-batch reference.
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool1d(2, return_indices=True),
        nn.MaxUnpool1d(2)),
    input_size=(1, 4),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool1d_net_no_batch_dim',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool2d(2, return_indices=True),
        nn.MaxUnpool2d(2)),
    input_size=(1, 2, 4),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool2d_net_no_batch_dim',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool3d(2, return_indices=True),
        nn.MaxUnpool3d(2)),
    input_size=(1, 2, 4, 6),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool3d_net_no_batch_dim',
    check_gradgrad=False))
class _AdaptiveLogSoftmaxWithLoss(nn.AdaptiveLogSoftmaxWithLoss):
def __call__(self, input):
t = torch.tensor([0, 1, 4, 8]).to(input.device)
return nn.AdaptiveLogSoftmaxWithLoss.__call__(self, input, t).output
# Exercise AdaptiveLogSoftmaxWithLoss through the generic module harness;
# tolerance is loosened when TF32 is enabled.
add_test(NewModuleTest(
    constructor=lambda: _AdaptiveLogSoftmaxWithLoss(16, 10, [2, 6]),
    input_size=(4, 16),
    fullname='AdaptiveLogSoftmax',
    with_tf32=True,
    tf32_precision=0.005))
# The following are helpers for TestNN.test_affine_*
# NOTE: CUDA availability is probed once at import time; both definitions
# return a fresh list on every call.
if torch.cuda.is_available():
    def device_():
        return ['cpu', 'cuda']
else:
    def device_():
        return ['cpu']
def angle_rad_():
    """Rotation angles (radians) to test: fixed fractions of a full turn
    plus one random angle."""
    fractions = [0.0, 0.5, 0.25, 0.125, random.random()]
    return [f * math.pi * 2 for f in fractions]
def axis_vector_():
    """Rotation axes to test: the three unit axes plus one random,
    normalized direction."""
    raw = (random.random(), random.random(), random.random())
    norm = sum(component ** 2 for component in raw) ** 0.5
    random_axis = tuple(component / norm for component in raw)
    return [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), random_axis]
def input_size2d_():
    """Assorted NCHW input sizes (batch = channels = 1)."""
    return [[1, 1, h, w] for h, w in ((3, 5), (3, 3), (4, 4), (3, 4))]
def output_size2d_():
    """Assorted NCHW output sizes for the 2d affine tests."""
    return [[1, 1, h, w] for h, w in ((5, 3), (3, 5), (4, 3), (5, 5), (6, 6))]
def input_size2dsq_():
    """Square NCHW input sizes."""
    return [[1, 1, s, s] for s in (2, 3, 4, 6)]
def output_size2dsq_():
    """Square NCHW output sizes."""
    return [[1, 1, s, s] for s in (2, 3, 4, 5, 6)]
def input_size3d_():
    """Assorted NCDHW input sizes."""
    return [[1, 1, d, h, w] for d, h, w in ((2, 2, 2), (2, 3, 4), (3, 3, 3), (4, 4, 4), (3, 4, 5))]
def input_size3dsq_():
    """Cubic NCDHW input sizes."""
    return [[1, 1, s, s, s] for s in (2, 3, 4, 6)]
def output_size3dsq_():
    """Cubic NCDHW output sizes."""
    return [[1, 1, s, s, s] for s in (2, 3, 4, 5, 6)]
def output_size3d_():
    """Assorted NCDHW output sizes."""
    return [[1, 1, d, h, w] for d, h, w in ((2, 2, 2), (3, 3, 3), (3, 4, 5), (4, 3, 2), (5, 5, 5), (6, 6, 6))]
def _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad):
    """Build equivalent representations of a 2d rotation for torch and scipy.

    Returns a tuple of:
      * transform_tensor -- (1, 2, 3) float32 rotation matrix suitable for
        torch.nn.functional.affine_grid,
      * transform_ary    -- homogeneous matrix mapping output pixel
        coordinates to input pixel coordinates, for
        scipy.ndimage.affine_transform,
      * grid_ary         -- homogeneous matrix mapping output pixel
        coordinates to normalized grid coordinates (used to check
        affine_grid's raw output).

    input_size / output_size are NCHW lists; indices [2] and [3] are the
    spatial extents.
    """
    input_center = [(x - 1) / 2.0 for x in input_size]
    output_center = [(x - 1) / 2.0 for x in output_size]

    s = math.sin(angle_rad)
    c = math.cos(angle_rad)

    # Translate by the input center (homogeneous coordinates).
    intrans_ary = np.array([
        [1, 0, input_center[2]],
        [0, 1, input_center[3]],
        [0, 0, 1],
    ], dtype=np.float64)
    # Scale normalized coordinates up to input pixel half-extents.
    inscale_ary = np.array([
        [input_center[2], 0, 0],
        [0, input_center[3], 0],
        [0, 0, 1],
    ], dtype=np.float64)
    # Rotation by angle_rad.
    rotation_ary = np.array([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ], dtype=np.float64)
    # Scale output pixel half-extents down to normalized coordinates.
    outscale_ary = np.array([
        [1.0 / output_center[2], 0, 0],
        [0, 1.0 / output_center[3], 0],
        [0, 0, 1],
    ], dtype=np.float64)
    # Translate by minus the output center.
    outtrans_ary = np.array([
        [1, 0, -output_center[2]],
        [0, 1, -output_center[3]],
        [0, 0, 1],
    ], dtype=np.float64)
    # Swap the first two coordinates (row/col vs x/y ordering — presumably
    # to match affine_grid's coordinate order; confirm against callers).
    reorder_ary = np.array([
        [0, 1, 0],
        [1, 0, 0],
        [0, 0, 1],
    ], dtype=np.float64)

    # Full output-pixel -> input-pixel mapping (rotation transposed, i.e.
    # inverted, since scipy maps output coordinates back to the input).
    transform_ary = np.dot(np.dot(np.dot(np.dot(
        intrans_ary,
        inscale_ary),
        rotation_ary.T),
        outscale_ary),
        outtrans_ary)
    grid_ary = np.dot(np.dot(np.dot(reorder_ary, rotation_ary.T), outscale_ary), outtrans_ary)

    transform_tensor = torch.from_numpy((rotation_ary)).to(device, torch.float32)
    # affine_grid expects an N x 2 x 3 batch of matrices.
    transform_tensor = transform_tensor[:2].unsqueeze(0)
    return transform_tensor, transform_ary, grid_ary
def _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector):
    """3d analogue of _buildEquivalentAffineTransforms2d: a rotation by
    angle_rad around axis_vector.

    Returns (transform_tensor, transform_ary, grid_ary):
      * transform_tensor -- (1, 3, 4) float32 matrix for affine_grid,
      * transform_ary    -- homogeneous output-pixel -> input-pixel mapping
        for scipy.ndimage.affine_transform,
      * grid_ary         -- homogeneous output-pixel -> normalized-grid
        mapping, used to check affine_grid's raw output.

    input_size / output_size are NCDHW lists; indices [2], [3], [4] are the
    spatial extents.
    """
    input_center = [(x - 1) / 2.0 for x in input_size]
    output_center = [(x - 1) / 2.0 for x in output_size]

    s = math.sin(angle_rad)
    c = math.cos(angle_rad)
    c1 = 1 - c  # versine term of the axis-angle (Rodrigues) formula

    # Translate by the input center (homogeneous coordinates).
    intrans_ary = np.array([
        [1, 0, 0, input_center[2]],
        [0, 1, 0, input_center[3]],
        [0, 0, 1, input_center[4]],
        [0, 0, 0, 1],
    ], dtype=np.float64)
    # Scale normalized coordinates up to input pixel half-extents.
    inscale_ary = np.array([
        [input_center[2], 0, 0, 0],
        [0, input_center[3], 0, 0],
        [0, 0, input_center[4], 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    # Axis-angle rotation with the axis components in given (l, m, n) order,
    # consumed by scipy.
    l, m, n = axis_vector
    scipyRotation_ary = np.array([
        [l * l * c1 + c, m * l * c1 - n * s, n * l * c1 + m * s, 0],
        [l * m * c1 + n * s, m * m * c1 + c, n * m * c1 - l * s, 0],
        [l * n * c1 - m * s, m * n * c1 + l * s, n * n * c1 + c, 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)
    # Same rotation with the axis components reversed — presumably because
    # affine_grid's coordinates are ordered (x, y, z) while sizes are
    # (D, H, W); mirrors reorder_ary below. TODO confirm.
    z, y, x = axis_vector
    torchRotation_ary = np.array([
        [x * x * c1 + c, y * x * c1 - z * s, z * x * c1 + y * s, 0],
        [x * y * c1 + z * s, y * y * c1 + c, z * y * c1 - x * s, 0],
        [x * z * c1 - y * s, y * z * c1 + x * s, z * z * c1 + c, 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    # Scale output pixel half-extents down to normalized coordinates.
    outscale_ary = np.array([
        [1.0 / output_center[2], 0, 0, 0],
        [0, 1.0 / output_center[3], 0, 0],
        [0, 0, 1.0 / output_center[4], 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)
    # Translate by minus the output center.
    outtrans_ary = np.array([
        [1, 0, 0, -output_center[2]],
        [0, 1, 0, -output_center[3]],
        [0, 0, 1, -output_center[4]],
        [0, 0, 0, 1],
    ], dtype=np.float64)
    # Reverse the order of the three spatial coordinates.
    reorder_ary = np.array([
        [0, 0, 1, 0],
        [0, 1, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    # Output-pixel -> input-pixel mapping (rotation inverted, since scipy
    # maps output coordinates back into the input).
    transform_ary = np.dot(np.dot(np.dot(np.dot(
        intrans_ary,
        inscale_ary),
        np.linalg.inv(scipyRotation_ary)),
        outscale_ary),
        outtrans_ary)
    grid_ary = np.dot(np.dot(np.dot(reorder_ary, np.linalg.inv(scipyRotation_ary)), outscale_ary), outtrans_ary)

    transform_tensor = torch.from_numpy((torchRotation_ary)).to(device, torch.float32)
    # affine_grid expects an N x 3 x 4 batch of matrices.
    transform_tensor = transform_tensor[:3].unsqueeze(0)
    return transform_tensor, transform_ary, grid_ary
# end TestNN.test_affine_* helpers
class TestNNDeviceType(NNTestCase):
def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out, batch_size,
                              inp_size, dilation, no_weight, groups=1, use_cuda=False,
                              use_bias=True, dtype=torch.double):
    """Double-backward check of a conv expressed through F.conv2d.

    Returns gradgradcheck's result, except for float inputs where only the
    first-order differentiability of grad-of-x is probed (see the mkldnn
    note below). `no_weight` builds the weight without requires_grad so it
    is excluded from the check.
    """
    if use_cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    x = torch.randn(batch_size, chan_in, inp_size, inp_size, device=device,
                    dtype=dtype, requires_grad=True)
    weight = torch.randn(chan_out, chan_in // groups, kern, kern, device=device,
                         dtype=dtype, requires_grad=not no_weight)
    if use_bias:
        bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
    else:
        bias = None

    def func(*inputs):
        # Re-bind the differentiable leaves so gradgradcheck can perturb them.
        if use_bias:
            lx, lweight, lbias = inputs
        else:
            lx, lweight = inputs
            lbias = None
        # We disable cudnn during forward to avoid finite difference imprecision issues
        with cudnn.flags(enabled=False):
            out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)
        return out

    if use_bias:
        inputs = x, weight, bias
    else:
        inputs = x, weight
    dummy_out = func(*inputs)
    grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)

    # Issue #15353: test mkldnn double backward, don't run gradgradcheck due
    # to imprecision issues
    if dtype == torch.float:
        g, = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
        return g.requires_grad

    return gradgradcheck(func, inputs, (grad_y,))
def _test_dropout(self, cls, device, input, memory_format=torch.contiguous_format):
    """Check a dropout module (out-of-place and in-place): the output mean
    stays near 1 - p on an all-(1-p) input, outputs/gradients keep the
    requested memory format, and eval mode is the identity."""
    p = 0.2
    input = input.to(device).fill_(1 - p)
    module = cls(p)
    input_var = input.clone(memory_format=memory_format).requires_grad_()
    output = module(input_var)
    self.assertTrue(output.is_contiguous(memory_format=memory_format))
    self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
    output.backward(input)
    self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
    self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)

    # In-place variant; forward receives `input_var + 0`, a non-leaf copy.
    module = cls(p, True)
    input_var = input.clone(memory_format=memory_format).requires_grad_()
    output = module(input_var + 0)
    self.assertTrue(output.is_contiguous(memory_format=memory_format))
    self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
    output.backward(input)
    self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
    self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)

    # check eval mode doesn't change anything
    for inplace in [True, False]:
        module = cls(p, inplace).eval()
        self.assertEqual(input, module(input))

    # Check that these don't raise errors
    module.__repr__()
    str(module)
def _test_dropout_discontiguous(self, cls, device, memory_format=torch.contiguous_format):
    """Dropout on a discontiguous (strided) input must preserve values
    when p is (near) zero, and produce a contiguous output when p != 0."""
    # In this test, we verify that dropout preserves the layout and data for different memory formats.
    # We check whether, we get same values for the output of dropout, when the probability
    # of dropout is 0 or very close to 0.
    # Reference: https://github.com/pytorch/pytorch/issues/47176
    close_to_zero_p = 1e-10  # Should be almost zero but not zero, as for p=0 different path is taken
    for p in [0, close_to_zero_p]:
        inp = torch.ones(2, 3, 3, 3, device=device)
        # Every-other-element view along the last dim -> discontiguous.
        inp_discontiguous = torch.empty(2, 3, 3, 6, device=device, memory_format=memory_format)[..., ::2]
        inp_discontiguous.copy_(inp)
        mod = cls(p=p)
        out = mod(inp_discontiguous)
        if p != 0:  # Zero will keep strides as is based on input.
            # When prob == 0, input stride (54, 18, 6, 2) -> output stride (54, 18, 6, 2)
            # When prob != 0, input stride (54, 18, 6, 2) -> output stride (27, 9, 3, 1)
            self.assertTrue(out.is_contiguous(memory_format=memory_format))
        self.assertEqual(inp_discontiguous, out)
def _test_dropout_stride_mean_preserve(self, cls, device):
    """For permuted/sliced inputs, the dropout output must stay contiguous
    in the permuted order, roughly preserve the mean, and act as the
    identity for (near-)zero p."""
    def invert_perm(p):
        # Return the permutation that undoes p.
        d = {x: i for i, x in enumerate(p)}
        return (d[0], d[1], d[2], d[3])

    inp = torch.ones(2, 3, 4, 5, device=device)
    shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
    for perm in itertools.permutations((0, 1, 2, 3), r=4):
        for shift in shifts:
            for p in [1e-10, 0.3, 0.5, 0.7]:
                mod = cls(p=p)
                # Physically reorder the memory, then view it back in the
                # original order and slice to make it discontiguous.
                permuted_inp = inp.permute(perm).contiguous().permute(invert_perm(perm))
                permuted_inp = permuted_inp[shift[0]:, shift[1]:, :, :]
                out = mod(permuted_inp)

                self.assertTrue(out.permute(perm).is_contiguous())
                self.assertEqual(inp.mean(), out.mean(), rtol=0.5, atol=0.5)
                if p == 1e-10:
                    self.assertEqual(permuted_inp, out)
                else:
                    self.assertNotEqual(permuted_inp, out)
def _test_InstanceNorm_general(self, cls, input, device, dtype=torch.float):
    """Check an InstanceNorm module: per-instance-per-channel zero mean /
    unit variance, train/eval equivalence when running stats are off, and
    running-stat bookkeeping with track_running_stats=True."""
    # default case track_running_stats=False
    b, c = input.size(0), input.size(1)
    input_var = input.to(device=device, dtype=dtype).requires_grad_()

    IN = cls(c, eps=0).to(device, dtype)

    output = IN(input_var)
    out_reshaped = output.view(b * c, -1)

    mean = out_reshaped.mean(1)
    var = out_reshaped.var(1, unbiased=False)

    self.assertEqual(torch.abs(mean.data).mean(), 0, atol=1e-5, rtol=0)
    self.assertEqual(torch.abs(var.data).mean(), 1, atol=1e-5, rtol=0)

    # check that eval mode doesn't change behavior
    grad_out = torch.randn_like(output)
    res1 = output.data.clone()
    output.backward(grad_out)
    grad1 = input_var.grad.data.clone()

    IN.eval()
    output = IN(input_var)
    input_var.grad = None
    output.backward(grad_out)
    res2 = output.data
    grad2 = input_var.grad.data
    self.assertEqual(res1, res2)
    self.assertEqual(grad1, grad2)

    # If track_running_stats=True and momentum=1, running_mean/var should be
    # equal to mean/var of the input (with unbias correction)
    IN = cls(c, momentum=1, eps=0, track_running_stats=True).to(device, dtype)

    output = IN(input_var)

    input_reshaped = input_var.transpose(1, 0).reshape(c, -1)
    mean = input_reshaped.mean(1)

    input_reshaped = input_var.transpose(1, 0).reshape(c, b, -1)
    var = input_reshaped.var(2, unbiased=True)[:, :]

    self.assertEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, atol=1e-5, rtol=0)
    self.assertEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, atol=1e-5, rtol=0)

    # in eval mode, adding X * std to a channel in input should make the
    # corresponding channel in output have mean X
    IN.eval()
    delta = IN.running_var.sqrt() * torch.arange(c, device=device, dtype=dtype)
    delta = delta.view(-1, *[1 for _ in range(2, input.dim())])
    output = IN(input_var + delta)
    self.assertEqual(output.transpose(0, 1).reshape(c, -1).mean(1), torch.arange(c, dtype=dtype))
def _test_InstanceNorm_cuda_half(self, cls, input, device):
    """Run an InstanceNorm module in fp16 (THNN path) and, when cuDNN is
    available, compare output/gradients against the fp32 cuDNN path with
    loose tolerances."""
    # THNN
    input = input.to(device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
    m = cls(input.size(1), affine=True, track_running_stats=True).to(device, torch.half)
    thnn_output = m(input)
    thnn_output.sum().backward()
    thnn_input_grad = input.grad.data.clone()
    self.assertEqualTypeString(thnn_output, input)
    # cuDNN
    if TEST_CUDNN:
        input.grad = None
        m = m.float()
        cudnn_output = m(input)
        cudnn_output.sum().backward()
        cudnn_input_grad = input.grad.data.clone()
        self.assertEqualTypeString(cudnn_output, input)
        self.assertEqual(cudnn_output, thnn_output, atol=1e-4, rtol=0)
        self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
def _test_LayerNorm_general(self, device, dtype=torch.float):
    """LayerNorm over random shapes: normalized output has ~zero mean and
    unit variance, affine weight/bias shift them as expected, and invalid
    normalized_shape / input combinations raise RuntimeError."""
    for i in range(2, 6):
        shape = torch.randint(3, 6, (i,), dtype=torch.long).tolist()
        x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        normalized_ndim = random.randint(1, i - 1)  # inclusive
        normalized_shape = shape[-normalized_ndim:]
        unnormalized_shape = shape[:-normalized_ndim]

        # test that LN normalizes to mean 0 and stddev 1
        ln = nn.LayerNorm(normalized_shape, eps=0).to(device, dtype)
        ln.weight.data.fill_(1)
        ln.bias.data.fill_(0)
        output = ln(x)
        out_reshaped = output.view(*(unnormalized_shape + [-1]))
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)

        # bfloat16 needs a much looser tolerance.
        delta = 1e-1 if dtype == torch.bfloat16 else 1e-5
        self.assertEqual(torch.abs(mean.data).mean(), 0, atol=delta, rtol=0)
        self.assertEqual(torch.abs(var.data).mean(), 1, atol=delta, rtol=0)

        # test that LN applies weight and bias correctly
        scale, bias = torch.empty(2).uniform_(0.2, 2).tolist()
        ln.weight.data.fill_(scale)
        ln.bias.data.fill_(bias)
        output = ln(x)
        out_reshaped = output.view(*(unnormalized_shape + [-1]))
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)
        self.assertEqual(torch.abs(mean.data).mean(), bias, atol=delta, rtol=0)
        self.assertEqual(torch.abs(var.data).mean(), scale ** 2, atol=delta, rtol=0)

    # Mismatched normalized_shape vs input shape must be rejected.
    bad_norm_shape_input_shape = {
        (): (),
        (2, 3): (3,),
        (2,): (1, 2, 3),
        (10,): (2, 3),
        10: (2, 3),
    }
    for norm_shape, input_shape in bad_norm_shape_input_shape.items():
        ln = nn.LayerNorm(norm_shape)
        input = torch.empty(input_shape, device=device, dtype=dtype).uniform_(0, 10)
        self.assertRaises(RuntimeError, lambda: ln(input))
def _test_LayerNorm_cuda_half(self, device):
    """LayerNorm smoke test in fp16: forward and backward must run, and
    the output keeps the input's type string."""
    x = torch.empty(2, 3, 3, 2, device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
    layer = nn.LayerNorm([3, 2]).to(device, torch.half)
    out = layer(x)
    out.sum().backward()
    self.assertEqualTypeString(out, x)
def _test_GroupNorm_general(self, device, dtype=torch.float):
    """GroupNorm over assorted shapes/group counts: per-group zero mean and
    unit variance (with and without affine scale/bias), and channel counts
    not divisible by the group count must raise."""
    good_shape_g = {
        (1, 2, 3, 4): 2,
        (2, 3, 10): 3,
        (3, 1, 1, 1, 2): 1,
        (2, 6, 4, 2, 2): 3,
        (1, 256, 1, 1): 32,
    }
    for shape_g, grad in product(good_shape_g.items(), [True, False]):
        shape, g = shape_g
        x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        x.requires_grad_(grad)
        b = shape[0]
        c = shape[1]

        # test that GN normalizes to mean 0 and stddev 1
        gn = nn.GroupNorm(g, c, eps=0).to(device, dtype)
        gn.weight.data.fill_(1)
        gn.bias.data.fill_(0)
        output = gn(x)
        out_reshaped = output.view(b, g, -1)
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)
        # TODO: fix numerical issue. See #44863
        self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
        self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)

        output.backward(torch.randn_like(output))
        if output.is_cuda:
            torch.cuda.synchronize()

        # test that GN applies weight and bias correctly
        scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
        bias = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
        gn.weight.data.copy_(scale)
        gn.bias.data.copy_(bias)
        output = gn(x)
        out_reshaped = output.view(b, c, -1)
        # Undo the affine transform, then re-check normalization per group.
        out_normed = (out_reshaped - bias.view(c, 1)) / scale.view(c, 1)
        out_normed_reshaped = out_normed.view(b, g, -1)
        mean = out_normed_reshaped.mean(-1)
        var = out_normed_reshaped.var(-1, unbiased=False)
        # TODO: fix numerical issue. See #44863
        self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
        self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)

    # Channel counts not divisible by the group count must be rejected.
    bad_shape_g = {
        (1, 2, 3, 4): 3,
        (2, 3, 10): 2,
        (3, 1, 1, 1, 2): 10,
        (2, 6, 4, 2, 2): 4,
    }
    for shape, g in bad_shape_g.items():
        gn = nn.GroupNorm(g, shape[1])
        input = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        self.assertRaises(RuntimeError, lambda: gn(input))
def _test_GroupNorm_cuda_half(self):
    """GroupNorm smoke test in fp16 on CUDA: forward/backward must run
    and the output keeps the input's type string."""
    x = torch.zeros(2, 4, 3, 2, requires_grad=True).cuda().half().random_(1, 10)
    gn = nn.GroupNorm(2, 4).to("cuda", torch.half)
    out = gn(x)
    out.sum().backward()
    self.assertEqualTypeString(out, x)
def _test_module_empty_input(self, module, inp, check_size=True):
    """Run `module` on an empty input: backward must yield all-zero
    gradients for the input and for every trainable parameter; when
    check_size is set, the output shape must match the input shape."""
    inp.requires_grad_(True)
    out = module(inp)
    out.backward(torch.rand_like(out))
    if check_size:
        self.assertEqual(out.size(), inp.size())
    for param in module.parameters():
        if not param.requires_grad:
            continue
        self.assertEqual(param.grad, torch.zeros_like(param.grad))
    self.assertEqual(inp.grad, torch.zeros_like(inp))
def _test_module_empty_inputs(self, module, inputs):
    """Multi-input analogue of the empty-input check: every input and
    trainable-parameter gradient must come back as zeros."""
    for tensor in inputs:
        tensor.requires_grad_(True)
    out = module(*inputs)
    out.backward(torch.rand_like(out))
    for param in module.parameters():
        if param.requires_grad:
            self.assertEqual(param.grad, torch.zeros_like(param.grad))
    for tensor in inputs:
        self.assertEqual(tensor.grad, torch.zeros_like(tensor))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off()
def test_affine_2d_rotate0(self, device):
    """Identity (zero-angle) affine_grid + grid_sample must reproduce
    scipy.ndimage.affine_transform on the same data."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    input_size = [1, 1, 3, 3]
    input_ary = np.array(np.random.random(input_size), dtype=np.float32)
    output_size = [1, 1, 5, 5]
    angle_rad = 0.

    # NOTE(review): the helper's third return value (the grid matrix) is
    # passed below as scipy's `offset`; scipy documents that offset is
    # ignored when a homogeneous matrix is supplied — confirm intent.
    transform_tensor, transform_ary, offset = \
        _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

    scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
        input_ary[0, 0],
        transform_ary,
        offset=offset,
        output_shape=output_size[2:],
        order=1,
        mode='nearest',
        prefilter=False))

    affine_tensor = torch.nn.functional.affine_grid(
        transform_tensor,
        torch.Size(output_size),
        align_corners=True
    )

    gridsample_ary = torch.nn.functional.grid_sample(
        torch.tensor(input_ary, device=device).to(device),
        affine_tensor,
        padding_mode='border',
        align_corners=True
    ).to('cpu')

    self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
    self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.001)
def test_affine_2d_rotate90(self, device):
    """Quarter-turn rotation through affine_grid + grid_sample must agree
    with scipy.ndimage.affine_transform across square size combinations."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    for input_size2dsq, output_size2dsq in \
            itertools.product(input_size2dsq_(), output_size2dsq_()):
        input_size = input_size2dsq
        input_ary = np.array(np.random.random(input_size), dtype=np.float32)
        output_size = output_size2dsq
        angle_rad = 0.25 * math.pi * 2

        transform_tensor, transform_ary, offset = \
            _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            offset=offset,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=True))

        if input_size2dsq == output_size2dsq:
            # With equal sizes, a quarter turn is an exact corner permutation.
            self.assertEqual(scipy_ary.mean(), input_ary.mean())
            self.assertEqual(scipy_ary[0, 0], input_ary[0, 0, 0, -1])
            self.assertEqual(scipy_ary[0, -1], input_ary[0, 0, -1, -1])
            self.assertEqual(scipy_ary[-1, -1], input_ary[0, 0, -1, 0])
            self.assertEqual(scipy_ary[-1, 0], input_ary[0, 0, 0, 0])

        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )

        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')

        self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_2d_rotate45(self, device):
    """45-degree rotation of a small hand-built pattern must match
    scipy.ndimage.affine_transform."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    input_size = [1, 1, 3, 3]
    input_ary = np.array(np.zeros(input_size), dtype=np.float32)
    # Asymmetric pattern so a wrong rotation direction would be detected.
    input_ary[0, 0, 0, :] = 0.5
    input_ary[0, 0, 2, 2] = 1.0
    output_size = [1, 1, 3, 3]
    angle_rad = 0.125 * math.pi * 2

    transform_tensor, transform_ary, offset = \
        _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

    scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
        input_ary[0, 0],
        transform_ary,
        offset=offset,
        output_shape=output_size[2:],
        order=1,
        mode='nearest',
        prefilter=False))

    affine_tensor = torch.nn.functional.affine_grid(
        transform_tensor,
        torch.Size(output_size),
        align_corners=True
    )

    gridsample_ary = torch.nn.functional.grid_sample(
        torch.tensor(input_ary, device=device).to(device),
        affine_tensor,
        padding_mode='border',
        align_corners=True
    ).to('cpu')

    self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_2d_rotateRandom(self, device):
    """Random-angle rotations over assorted input/output sizes: the grid
    from affine_grid must match the reference grid matrix pointwise and
    sampling must match scipy."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    for angle_rad, input_size2d, output_size2d in \
            itertools.product(angle_rad_(), input_size2d_(), output_size2d_()):

        input_size = input_size2d
        input_ary = np.array(np.random.random(input_size), dtype=np.float32).round(3)
        output_size = output_size2d

        # Distinct corner markers make any misalignment easy to spot.
        input_ary[0, 0, 0, 0] = 2
        input_ary[0, 0, 0, -1] = 4
        input_ary[0, 0, -1, 0] = 6
        input_ary[0, 0, -1, -1] = 8

        transform_tensor, transform_ary, grid_ary = \
            _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=False))

        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )

        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')

        affine_tensor = affine_tensor.to('cpu')

        # Check every grid point against the reference grid matrix.
        for r in range(affine_tensor.size(1)):
            for c in range(affine_tensor.size(2)):
                grid_out = np.dot(grid_ary, [r, c, 1])
                self.assertEqual(affine_tensor[0, r, c], grid_out[:2], exact_dtype=False)

        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_3d_rotateRandom(self, device):
    """3d analogue of test_affine_2d_rotateRandom: random angle and axis,
    grid checked pointwise, sampling checked against scipy."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    for angle_rad, axis_vector, input_size3d, output_size3d in \
            itertools.product(angle_rad_(), axis_vector_(), input_size3d_(), output_size3d_()):
        input_size = input_size3d
        input_ary = np.array(np.random.random(input_size), dtype=np.float32)
        output_size = output_size3d

        # Distinct corner markers make any misalignment easy to spot.
        input_ary[0, 0, 0, 0, 0] = 2
        input_ary[0, 0, 0, 0, -1] = 3
        input_ary[0, 0, 0, -1, 0] = 4
        input_ary[0, 0, 0, -1, -1] = 5
        input_ary[0, 0, -1, 0, 0] = 6
        input_ary[0, 0, -1, 0, -1] = 7
        input_ary[0, 0, -1, -1, 0] = 8
        input_ary[0, 0, -1, -1, -1] = 9

        transform_tensor, transform_ary, grid_ary = \
            _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector)

        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=False))

        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )

        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')

        affine_tensor = affine_tensor.to('cpu')

        # Check every grid point against the reference grid matrix.
        for i in range(affine_tensor.size(1)):
            for r in range(affine_tensor.size(2)):
                for c in range(affine_tensor.size(3)):
                    grid_out = np.dot(grid_ary, [i, r, c, 1])
                    self.assertEqual(affine_tensor[0, i, r, c], grid_out[:3], exact_dtype=False)

        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@onlyCUDA
@skipCUDAIfNoCudnn
@dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
def test_Conv2d_deterministic_cudnn(self, device, dtype):
    """With cudnn deterministic=True, two identically-initialized convs
    must produce exactly equal outputs and gradients, even with
    benchmark mode enabled."""
    inputs = torch.randn(2, 3, 5, 5, device=device, dtype=dtype, requires_grad=True)
    with cudnn.flags(enabled=True, benchmark=True, deterministic=True):
        conv1 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)
        conv2 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)
        # Make conv2 an exact copy of conv1.
        conv2.bias.data.copy_(conv1.bias.data)
        conv2.weight.data.copy_(conv1.weight.data)
        out1 = conv1(inputs)
        out2 = conv2(inputs)
        self.assertEqual(out1, out2, atol=0.0, rtol=0)
        y = torch.randn(out1.size(), device=device, dtype=dtype)
        out1.backward(y)
        out2.backward(y)
        self.assertEqual(conv1.bias.grad.data, conv2.bias.grad.data, atol=0.0, rtol=0)
        self.assertEqual(conv1.weight.grad.data, conv2.weight.grad.data, atol=0.0, rtol=0)
@onlyCUDA
@dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
def test_Conv2d_large_workspace(self, device, dtype):
    """Conv2d on sizes with huge cuDNN workspace demands must still run
    forward + backward without OOM, in both benchmark modes."""
    # These sizes require huge cuDNN workspaces. Make sure we choose a
    # reasonable algorithm that does not run out of memory
    sizes = [
        (1, 256, 109, 175),
        (1, 256, 80, 128),
        (1, 256, 120, 192),
    ]

    def run_test(benchmark):
        with torch.backends.cudnn.flags(benchmark=benchmark):
            conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype)
            for size in sizes:
                x = torch.randn(size, device=device, dtype=dtype)
                out = conv(x.detach().clone().requires_grad_())
                out.backward(torch.ones_like(out))

    run_test(benchmark=False)
    run_test(benchmark=True)
@onlyCUDA
@dtypes(torch.half, torch.float)
def test_ConvTranspose2d_large_output_padding(self, device, dtype):
    """Smoke test: chain three stride-2 ConvTranspose2d layers with
    output_padding=1 and run forward + backward."""
    channel_plan = [(128, 64), (64, 32), (32, 3)]
    layers = [
        torch.nn.ConvTranspose2d(cin, cout, kernel_size=3, stride=2,
                                 padding=1, output_padding=1).to(device=device, dtype=dtype)
        for cin, cout in channel_plan
    ]
    x = torch.rand(1, 128, 6, 6, device=device, dtype=dtype, requires_grad=True)
    for layer in layers:
        x = layer(x)
    x.backward(torch.randn_like(x))
    torch.cuda.synchronize()
@onlyCUDA
@tf32_on_and_off(0.01)
@dtypes(torch.float, torch.double, torch.half)
# Very similar to test_Conv2d_naive_groups but with special care to handle
# the number of groups == number of input channels
def test_Conv2d_depthwise_naive_groups(self, device, dtype):
    """Depthwise Conv2d (groups == in_channels) must match two independent
    single-channel convolutions, for outputs and all gradients."""
    for depth_multiplier in [1, 2]:
        m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)
        # Use the harness-provided `device` instead of the previous
        # hard-coded "cuda" string so all tensors stay on the same device
        # in multi-GPU runs (@onlyCUDA guarantees a CUDA device).
        i = torch.randn(2, 2, 6, 6, device=device, dtype=dtype).div_(2).requires_grad_()
        output = m(i)
        grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, device=device, dtype=dtype) / 2
        output.backward(grad_output)

        offset = 1 * depth_multiplier

        # First channel through an equivalent single-channel conv.
        m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
        m1.weight.data = m.weight.data[:offset].clone()
        m1.bias.data = m.bias.data[:offset].clone()
        i1 = i.detach()[:, :1].clone().requires_grad_()
        output1 = m1(i1)
        output1.backward(grad_output[:, :offset].contiguous())

        # Second channel.
        m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
        m2.weight.data.copy_(m.weight.data[offset:])
        m2.bias.data.copy_(m.bias.data[offset:])
        i2 = i.detach()[:, 1:].clone().requires_grad_()
        output2 = m2(i2)
        output2.backward(grad_output[:, offset:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data,
                                    m2.bias.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data,
                                    m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(torch.float, torch.double, torch.half)
@tf32_on_and_off(0.005)
def test_Conv3d_depthwise_naive_groups(self, device, dtype):
    """Depthwise Conv3d (groups == in_channels) must match two independent
    single-channel convolutions, for outputs and all gradients."""
    for depth_multiplier in [1, 2]:
        m = nn.Conv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)
        # Use the harness-provided `device` instead of the previous
        # hard-coded "cuda" string so all tensors stay on the same device
        # in multi-GPU runs (@onlyCUDA guarantees a CUDA device).
        i = torch.randn(2, 2, 6, 6, 6, device=device, dtype=dtype).div_(2).requires_grad_()
        output = m(i)
        grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, 4, device=device, dtype=dtype) / 2
        output.backward(grad_output)

        offset = 1 * depth_multiplier

        # First channel through an equivalent single-channel conv.
        m1 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
        m1.weight.data = m.weight.data[:offset].clone()
        m1.bias.data = m.bias.data[:offset].clone()
        i1 = i.detach()[:, :1].clone().requires_grad_()
        output1 = m1(i1)
        output1.backward(grad_output[:, :offset].contiguous())

        # Second channel.
        m2 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
        m2.weight.data.copy_(m.weight.data[offset:])
        m2.bias.data.copy_(m.bias.data[offset:])
        i2 = i.detach()[:, 1:].clone().requires_grad_()
        output2 = m2(i2)
        output2.backward(grad_output[:, offset:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data,
                                    m2.bias.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data,
                                    m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
def test_noncontig_conv_grad(self, device, dtype):
    """Backward through Conv2d with a non-contiguous grad_output must
    match the result obtained with the contiguous grad."""
    # FIXME: remove after adding non-contiguous grad tests for all modules
    module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to(device, dtype)
    input = torch.randn(2, 3, 10, 10, dtype=dtype, device=device, requires_grad=True)
    output = module(input)

    # Slicing the second dim of a 5d tensor yields a non-contiguous grad.
    grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device=device)[:, 1]
    assert not grad.is_contiguous()
    output.backward(grad, retain_graph=True)
    self.assertIsNotNone(input.grad)
    result = input.grad.data.clone()
    input.grad.data.zero_()

    output.backward(grad.contiguous())
    self.assertEqual(result, input.grad.data, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(torch.float, torch.half)
def test_batchnorm_large_batch(self, device, dtype):
    """Smoke test: BatchNorm2d forward + backward must run on a very
    large batch without error."""
    bn = nn.BatchNorm2d(1).to(device, dtype)
    data = torch.rand(880801, 1, 1, 1, device=device, dtype=dtype)
    # backward() returns None; the previous `out = ...` assignment was a
    # no-op and has been dropped.
    bn(data).sum().backward()
@onlyCUDA
@dtypes(torch.double)
def test_conv_double_backward(self, device, dtype):
    """gradgradcheck conv2d over a small grid of kernel sizes and
    dilations on CUDA, with deterministic cudnn."""
    with torch.backends.cudnn.flags(deterministic=True):
        # Double backward only runs with DoubleTensor due to precision reason
        batch_size = 1
        for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]:
            for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
                # stride == 2 would require skipping the weight (ggW cannot
                # be provided then — see test_conv_double_backward_stride).
                no_weight = stride == 2
                result = self.run_conv_double_back_test(kern, stride,
                                                        padding, chan_in, chan_out,
                                                        batch_size, inp_size, dilation,
                                                        no_weight, use_cuda=True, dtype=dtype)
                self.assertTrue(result,
                                "Conv double backward test failed with parameters:" +
                                "\nkern: " + str(kern) +
                                "\nstride: " + str(stride) +
                                "\npadding: " + str(padding) +
                                "\nchan_in: " + str(chan_in) +
                                "\nchan_out: " + str(chan_out) +
                                "\nbatch_size: " + str(batch_size) +
                                "\ninp_size: " + str(inp_size) +
                                "\ndilation: " + str(dilation))
def test_conv_double_backward_no_bias(self):
kern = 3
stride = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 5
padding = 1
dilation = 1
no_weight = False
use_bias = True
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight, use_bias=use_bias)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation))
def test_conv_double_backward_groups(self):
kern = 3
stride = 1
padding = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 6
dilation = 1
no_weight = False
groups = 2
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in * groups, chan_out * groups,
batch_size, inp_size, dilation,
no_weight, groups=groups)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation) +
"\ngroups: " + str(groups))
def test_conv_double_backward_stride(self):
batch_size = 2
# Cannot provide ggW when stride is > 1
for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]:
for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1], [1], [2], dilations):
no_weight = False
self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight)
def test_conv1d_same_padding(self, device):
# Test padding='same' outputs the correct shape
test_args = [
# in_size
range(50, 55),
# kernel_size
[1, 2, 3, 8],
# dilation
range(1, 4),
# stride
[1],
]
for in_size, k_size, dilation, stride in itertools.product(*test_args):
x = torch.rand(1, 1, in_size, device=device)
y = torch.rand(1, 1, k_size, device=device)
z = F.conv1d(x, y, padding='same', dilation=dilation, stride=stride)
self.assertEqual(z.size(2), int(math.ceil(in_size / stride)))
# Compare F.conv1d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 12, device=device)
y = torch.rand(1, 1, 3, device=device)
expect = F.conv1d(x, y, padding=1)
actual = F.conv1d(x, y, padding='same')
self.assertEqual(expect, actual)
# With dilation
x = torch.rand(1, 1, 12, device=device)
y = torch.rand(1, 1, 4, device=device)
expect = F.conv1d(x, y, padding=3, dilation=2)
actual = F.conv1d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
# Dilation with asymmetric padding
expect = F.conv1d(x, y, padding=5, dilation=3)[..., 1:]
actual = F.conv1d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
def test_conv2d_same_padding(self, device):
# Compare F.conv2d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 10, 11, device=device)
y = torch.rand(1, 1, 4, 5, device=device)
expect = F.conv2d(x, y, padding=(2, 2))[..., 1:, :]
actual = F.conv2d(x, y, padding='same')
self.assertEqual(expect, actual)
# With dilation
y = torch.rand(1, 1, 3, 4, device=device)
expect = F.conv2d(x, y, padding=(2, 3), dilation=2)
actual = F.conv2d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
# Dilation with asymmetric padding
y = torch.rand(1, 1, 4, 4, device=device)
expect = F.conv2d(x, y, padding=5, dilation=3)[..., 1:, 1:]
actual = F.conv2d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
def test_conv3d_same_padding(self, device):
# Compare F.conv3d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 10, 11, 12, device=device)
y = torch.rand(1, 1, 1, 2, 5, device=device)
expect = F.conv3d(x, y, padding=(0, 1, 2))[..., :, 1:, :]
actual = F.conv3d(x, y, padding='same')
self.assertEqual(expect, actual)
# With dilation
expect = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
actual = F.conv3d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
# Dilation with asymmetric padding
y = torch.rand(1, 1, 4, 4, 4, device=device)
expect = F.conv3d(x, y, padding=5, dilation=3)[..., 1:, 1:, 1:]
actual = F.conv3d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
def test_conv1d_valid_padding(self, device):
# Test F.conv1d padding='valid' is the same as no padding
x = torch.rand(1, 1, 10, device=device)
y = torch.rand(1, 1, 4, device=device)
expect = F.conv1d(x, y)
actual = F.conv1d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv2d_valid_padding(self, device):
# Test F.conv2d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 10, device=device)
y = torch.rand(1, 1, 1, 4, device=device)
expect = F.conv2d(x, y)
actual = F.conv2d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv3d_valid_padding(self, device):
# Test F.conv3d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 1, 10, device=device)
y = torch.rand(1, 1, 1, 1, 4, device=device)
expect = F.conv3d(x, y)
actual = F.conv3d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv1d_same_padding_backward(self, device):
# Test F.conv1d gradients work with padding='same'
x = torch.rand(1, 1, 12, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, device=device, requires_grad=True)
# Symmetric padding
z = F.conv1d(x, y, padding=3, dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
# Asymmetric padding
z = F.conv1d(x, y, padding=2)[..., 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
def test_conv2d_same_padding_backward(self, device):
# Test F.conv2d gradients work with padding='same'
x = torch.rand(1, 1, 10, 11, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, 5, device=device, requires_grad=True)
# Symmetric padding
z = F.conv2d(x, y, padding=(3, 4), dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv2d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
# Asymmetric padding
y = torch.rand(1, 1, 4, 4, device=device, requires_grad=True)
z = F.conv2d(x, y, padding=2)[..., 1:, 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv2d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
def test_conv3d_same_padding_backward(self, device):
check_forward_ad = torch.device(device).type != 'xla'
# Test F.conv3d gradients work with padding='same'
x = torch.rand(1, 1, 1, 11, 12, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 2, 5, device=device, requires_grad=True)
# Symmetric padding
z = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv3d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
gradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),
check_forward_ad=check_forward_ad, nondet_tol=1e-5)
if torch.device(device).type != 'cuda':
# https://github.com/pytorch/pytorch/issues/70702
gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),
check_fwd_over_rev=True)
# Asymmetric padding
y = torch.rand(1, 1, 1, 4, 4, device=device, requires_grad=True)
z = F.conv3d(x, y, padding=2)[..., 1:, 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv3d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
gradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),
check_forward_ad=check_forward_ad, nondet_tol=1e-5)
if torch.device(device).type != 'cuda':
# https://github.com/pytorch/pytorch/issues/70702
gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),
check_fwd_over_rev=True)
def test_conv1d_valid_padding_backward(self, device):
# Test F.conv1d gradients work with padding='valid'
x = torch.rand(1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, device=device, requires_grad=True)
F.conv1d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv1d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
def test_conv2d_valid_padding_backward(self, device):
# Test F.conv2d gradients work with padding='valid'
x = torch.rand(1, 1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 4, device=device, requires_grad=True)
F.conv2d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv2d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
def test_conv3d_valid_padding_backward(self, device):
check_forward_ad = torch.device(device).type != 'xla'
# Test F.conv3d gradients work with padding='valid'
x = torch.rand(1, 1, 1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 1, 4, device=device, requires_grad=True)
F.conv3d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv3d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
gradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_forward_ad=check_forward_ad)
gradgradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_fwd_over_rev=check_forward_ad)
    @skipMeta
    @parametrize_test("input_shape,transposed,dilated,groups,layout,backend_expected", [
        # === slow ===
        subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d'),
        subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_transposed'),
        subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated'),
        subtest(((2, 6, 7), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated_transposed'),
        subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d'),
        subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_transposed'),
        subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated'),
        subtest(((2, 6, 7, 8), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated_transposed'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Slow3d),
                decorators=[onlyCPU, disableMkldnn], name='slow3d_cpu'),
        # CUDA doesn't have a slow 3D implementation, so it goes to the dilated 3D implementation instead
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),
                decorators=[onlyCUDA, disablecuDNN], name='slow3d_cuda'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_transposed'),
        subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated'),
        subtest(((2, 6, 7, 8, 9), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated_transposed'),
        subtest(((0, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch1d'),
        subtest(((2, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel1d'),
        subtest(((0, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel1d'),
        subtest(((0, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch2d'),
        subtest(((2, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel2d'),
        subtest(((0, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel2d'),
        subtest(((0, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch3d'),
        subtest(((2, 0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel3d'),
        subtest(((0, 0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel3d'),
        # === cuda ===
        # Note that disablecuDNN disables miopen as well.
        subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),
                decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise1d'),
        subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),
                decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise3d),
                decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise3d'),
        # === cudnn ===
        subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d'),
        subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d'),
        subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d_transposed'),
        subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d_transposed'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d_transposed'),
        # === miopen ===
        subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d'),
        subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d'),
        subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d_transposed'),
        subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d_transposed'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d_transposed'),
        subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise1d'),
        subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise3d'),
        # === mkldnn ===
        subtest(((2, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d'),
        subtest(((2, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d'),
        # Transposed convolution is broken for mkldnn. See https://github.com/pytorch/pytorch/issues/68775.
        subtest(((2, 6, 7), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn1d_transposed'),
        subtest(((2, 6, 7, 8), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn2d_transposed'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn3d_transposed'),
        subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d_cpu_input'),
        subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d_cpu_input'),
        subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d_cpu_input'),
        subtest(((0, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch1d'),
        subtest(((2, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel1d'),
        subtest(((0, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel1d'),
        subtest(((0, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch2d'),
        subtest(((2, 0, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel2d'),
        subtest(((0, 0, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel2d'),
        subtest(((0, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch3d'),
        subtest(((2, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel3d'),
        subtest(((0, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel3d'),
        # Note: Tests for mobile backends are not currently supported. This comprises
        # NnpackSpatial, Winograd3x3Depthwise, and Xnnpack2d backends. Testing these
        # requires the ability to gate tests by whether PyTorch is built with USE_MOBILE=1.
    ])
    # Test with both bias and no bias.
    @parametrize_test("has_bias", [False, True])
    # Test with both stride=1 and stride>1 cases.
    @parametrize_test("strided", [False, True])
    # Test with both contiguous and non-contiguous inputs.
    @parametrize_test("contiguous", [False, True])
    def test_conv_backend(
            self, device, input_shape, has_bias, strided, contiguous, transposed, dilated, groups,
            layout, backend_expected):
        """End-to-end check of convolution backend dispatch.

        For each parametrized combination of input shape, layout, and flags,
        asserts that ``torch._C._select_conv_backend`` picks
        ``backend_expected``, then runs forward + backward through
        ``aten::convolution`` and (except for mkldnn, which gradcheck cannot
        handle) verifies first- and second-order gradients in float64.
        """
        # Build up inputs.
        dtype = torch.float32
        C_in, C_out, dim, kernel_size = input_shape[1], 12, len(input_shape) - 2, 3
        x = torch.randn(*input_shape, device=device, dtype=dtype, requires_grad=True)
        weight = torch.randn(C_in if transposed else C_out,
                             C_out // groups if transposed else C_in // groups,
                             *[kernel_size for _ in range(dim)],
                             device=device, dtype=dtype, requires_grad=True)
        bias = torch.randn(C_out, device=device, dtype=dtype, requires_grad=True) if has_bias else None
        def _make_noncontiguous(inp):
            # Doubles then strides the last dim so values are unchanged but the
            # tensor is no longer contiguous; preserves requires_grad.
            if inp is None:
                return None
            old_requires_grad = inp.requires_grad
            inp = torch.repeat_interleave(inp, 2, dim=-1)
            inp = inp[..., ::2].detach().requires_grad_(old_requires_grad)
            return inp
        if not contiguous:
            x = _make_noncontiguous(x)
            weight = _make_noncontiguous(weight)
            bias = _make_noncontiguous(bias)
        if layout is torch._mkldnn:
            x = x.to_mkldnn()
            # Note that weight and bias are not supported as mkldnn tensors during training.
        stride = (2,) * dim if strided else (1,) * dim
        padding = (0,) * dim
        dilation = (2,) * dim if dilated else (1,) * dim
        output_padding = (0,) * dim
        inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]
        # Ensure correct backend is selected.
        backend_actual = torch._C._select_conv_backend(*inputs)
        self.assertEqual(backend_actual, backend_expected)
        # Ensure backward call succeeds.
        convolution = torch.ops.aten.convolution
        output = convolution(*inputs)
        grad_output = torch.randn(output.shape, device=device, dtype=dtype)
        if not contiguous:
            grad_output = _make_noncontiguous(grad_output)
        if layout is torch._mkldnn:
            grad_output = grad_output.to_mkldnn()
        output.backward(grad_output)
        # mkldnn doesn't support gradcheck :(
        if layout is torch._mkldnn:
            return
        # Convert to float64 for gradcheck.
        x = x.to(torch.float64).detach().requires_grad_(True)
        weight = weight.to(torch.float64).detach().requires_grad_(True)
        if bias is not None:
            bias = bias.to(torch.float64).detach().requires_grad_(True)
        inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]
        # Set some backend-specific validation settings.
        gradcheck_nondet_tol = 0.0
        if torch.backends.cudnn.is_available():
            # cuDNN introduces non-determinism
            gradcheck_nondet_tol = GRADCHECK_NONDET_TOL
        self.assertTrue(gradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))
        # double backward doesn't support bias gradients
        if bias is not None:
            bias.requires_grad_(False)
        self.assertTrue(gradgradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))
def test_Dropout(self, device):
input = torch.empty(1000)
self._test_dropout(nn.Dropout, device, input)
self._test_dropout_discontiguous(nn.Dropout, device)
self._test_dropout_discontiguous(nn.Dropout, device, memory_format=torch.channels_last)
self._test_dropout_stride_mean_preserve(nn.Dropout, device)
if self.device_type == 'cuda' or self.device_type == 'cpu':
input = input.bfloat16()
self._test_dropout(nn.Dropout, device, input)
def _test_dropoutNd_no_batch(self, dropout, input):
input_clone = input.clone()
with freeze_rng_state():
res_no_batch = dropout(input)
with freeze_rng_state():
res_batched = dropout(input_clone.unsqueeze(0)).squeeze(0)
self.assertEqual(res_no_batch, res_batched)
def _test_dropoutNd_channel_zero(self, dropout, input):
# Verify the number of zeros in a channel is 0 or the number of elements in the channel
# for a fully positive input tensor
shape = input.shape
B = shape[0]
C = shape[1]
channel_numel = torch.tensor(shape[2:]).prod()
result = dropout(input)
for b, c in product(range(B), range(C)):
self.assertTrue(result[b, c].count_nonzero() in (0, channel_numel))
@expectedFailureXLA # seems like freeze_rng_state is not honoured by XLA
def test_Dropout2d(self, device):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
num_features = 1000
input = torch.empty(num_features, b, w, h)
self._test_dropout(nn.Dropout2d, device, input)
self._test_dropout(nn.Dropout2d, device, input, memory_format=torch.channels_last)
self._test_dropout_discontiguous(nn.Dropout2d, device)
self._test_dropout_discontiguous(nn.Dropout2d, device, memory_format=torch.channels_last)
with self.assertWarnsRegex(UserWarning, "Received a 5-D input to dropout2d"):
nn.Dropout2d(p=0.5)(torch.rand(1, 2, 2, 2, 2, device=device))
with self.assertWarnsRegex(UserWarning, "Received a 2-D input to dropout2d"):
nn.Dropout2d(p=0.5)(torch.rand(1, 2, device=device))
# no batch dims
input = torch.rand(50, 2, 2, device=device)
self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5), input)
self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5, inplace=True), input)
# check that complete channels are dropped
input = torch.ones(10, 4, 2, 2, device=device)
self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5), input)
self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5, inplace=True), input)
@expectedFailureXLA # seems like freeze_rng_state is not honoured by XLA
def test_Dropout3d(self, device):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
d = random.randint(1, 2)
num_features = 1000
input = torch.empty(num_features, b, d, w, h)
self._test_dropout(nn.Dropout3d, device, input)
self._test_dropout_discontiguous(nn.Dropout3d, device)
self._test_dropout_discontiguous(nn.Dropout3d, device, memory_format=torch.channels_last)
with self.assertWarnsRegex(UserWarning, "Received a 6-D input to dropout3d"):
nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, 2, 2, 2, device=device))
with self.assertWarnsRegex(UserWarning, "Received a 3-D input to dropout3d"):
nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, device=device))
# no batch dims
input = torch.rand(50, 2, 2, 2, device=device)
self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5), input)
self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5, inplace=True), input)
# check that complete channels are dropped
input = torch.ones(10, 4, 2, 2, 2, device=device)
self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5), input)
self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5, inplace=True), input)
def test_InstanceNorm1d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
d = random.randint(8, 10)
input = torch.rand(b, c, d)
self._test_InstanceNorm_general(nn.InstanceNorm1d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm1d, input, device)
def test_InstanceNorm2d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
w = random.randint(3, 6)
h = random.randint(6, 8)
input = torch.rand(b, c, h, w)
self._test_InstanceNorm_general(nn.InstanceNorm2d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm2d, input, device)
def test_InstanceNorm3d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
w = random.randint(2, 5)
h = random.randint(2, 5)
d = random.randint(2, 5)
input = torch.rand(b, c, h, w, d)
self._test_InstanceNorm_general(nn.InstanceNorm3d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm3d, input, device)
def test_instancenorm_raises_error_if_less_than_one_value_per_channel(self, device):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.InstanceNorm1d(10)(x).to(device)
def test_instancenorm_raises_error_for_single_spatial_element_during_training(self, device):
BATCH_SIZE = 10
NUM_CHANNELS = 3
norms = [torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d]
for i, norm in enumerate(norms):
m = norm(NUM_CHANNELS, track_running_stats=True)
m.to(device)
# Create an appropriately-sized input with a single spatial element.
input = torch.randn(BATCH_SIZE, NUM_CHANNELS, *[1 for _ in range(i + 1)],
device=device)
with self.assertRaises(ValueError):
m(input)
# Single spatial element should be fine in eval.
m.eval()
m(input)
def test_LayerNorm_general(self, device):
self._test_LayerNorm_general(device)
if self.device_type == 'cuda' or self.device_type == 'cpu':
self._test_LayerNorm_general(device, dtype=torch.bfloat16)
if self.device_type == 'cuda':
self._test_LayerNorm_cuda_half(device)
@onlyNativeDeviceTypes
def test_LayerNorm_numeric(self, device):
def layer_norm_ref(X, gamma, beta, normalized_shape, eps):
feature_size = np.prod(normalized_shape)
X_view = X.view(-1, feature_size)
mean = X_view.mean(dim=-1, keepdim=True)
var = X_view.var(dim=-1, unbiased=False, keepdim=True)
Y = (X_view - mean) / torch.sqrt(var + eps)
Y = Y * gamma.view(-1) + beta.view(-1)
return Y.view(*X.size())
normalized_shape = [256, 256, 144]
layer_norm = nn.LayerNorm(normalized_shape).float().to(device)
X = torch.rand(2, *normalized_shape, dtype=torch.float32,
device=device)
Y = layer_norm(X)
Y_ref = layer_norm_ref(X, layer_norm.weight.data, layer_norm.bias.data,
normalized_shape, layer_norm.eps)
self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)
if self.device_type == 'cuda':
layer_norm.cpu()
Y_cpu = layer_norm(X.cpu())
self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)
@onlyNativeDeviceTypes
def test_GroupNorm_general(self, device):
self._test_GroupNorm_general(device)
if self.device_type == 'cuda':
self._test_GroupNorm_cuda_half()
def test_GroupNorm_raises_error_if_one_value_per_group(self, device):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.GroupNorm(10, 10)(x).to(device)
def test_GroupNorm_empty(self, device):
mod = torch.nn.GroupNorm(2, 4).to(device)
inp = torch.randn(0, 4, 2, 2, device=device)
self._test_module_empty_input(mod, inp)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_groupnorm_nhwc(self, device, dtype):
def helper(self, size, groups):
channels = size[1]
input = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device=device)
grad = grad.contiguous(memory_format=torch.channels_last)
gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
gn.weight.data.uniform_()
gn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
ref_gn.load_state_dict(gn.state_dict())
out = gn(input)
out.backward(grad)
ref_out = ref_gn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(gn.weight.grad, ref_gn.weight.grad)
self.assertEqual(gn.bias.grad, ref_gn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
helper(self, (4, 8, 10, 10), 4)
helper(self, (2, 30, 9, 9), 3)
    @onlyNativeDeviceTypes
    def test_GroupNorm_numeric(self, device):
        """Compare nn.GroupNorm against a straightforward reference implementation,
        and (on CUDA) against the CPU result of the same module."""
        def group_norm_ref(X, gamma, beta, groups, channels, eps):
            # Normalize per (batch, group) with biased variance, then apply the
            # per-channel affine transform -- mirrors the GroupNorm definition.
            batch_size = X.size()[0]
            X_view = X.view(batch_size, groups, -1)
            mean = X_view.mean(dim=-1, keepdim=True)
            var = X_view.var(dim=-1, unbiased=False, keepdim=True)
            Y = ((X_view - mean) / torch.sqrt(var + eps)).view(
                batch_size, channels, -1)
            Y = Y * gamma.view(channels, 1) + beta.view(channels, 1)
            return Y.view(*X.size())
        batch_size = 1
        groups = 2
        channels = 8
        group_norm = nn.GroupNorm(groups, channels).float().to(device)
        X = torch.rand(batch_size, channels, 256, 256, 72,
                       dtype=torch.float32, device=device)
        Y = group_norm(X)
        Y_ref = group_norm_ref(
            X, group_norm.weight.data, group_norm.bias.data, groups,
            channels, group_norm.eps)
        self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)
        if self.device_type == 'cuda':
            # Cross-check the CUDA result against the CPU kernel.
            group_norm.cpu()
            Y_cpu = group_norm(X.cpu())
            self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)
    @onlyNativeDeviceTypes
    @dtypes(torch.float64, torch.complex128)
    def test_pad(self, device, dtype):
        """F.pad error conditions (circular/reflect limits) and that the result
        never aliases the input."""
        # Assert assertion errors are raised for invalid circular padding values
        inputs = torch.randn(1, 1, 4, device=device, dtype=dtype, requires_grad=True)
        # Should raise error when trying to wrap around more than once
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (5, 4), mode='circular'))
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (3, 6), mode='circular'))
        # Should raise error when negative padding results in negative output shape
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (-3, -2), mode='circular'))
        # assert that reflection padding errors when pad >= input size
        expected_err_msg = r"Padding size should be less than the corresponding input dimension"
        inputs = torch.randn(1, 1, 2, 3, device=device, dtype=dtype)
        self.assertRaisesRegex(RuntimeError, expected_err_msg,
                               lambda: F.pad(inputs, (1, 1, 3, 0), mode='reflect'))
        inputs = torch.randn(1, 1, 2, device=device, dtype=dtype)
        self.assertRaisesRegex(RuntimeError, expected_err_msg,
                               lambda: F.pad(inputs, (2, 1), mode='reflect'))
        inputs = torch.rand(1, 3, 4, 4, device=device, dtype=dtype)
        # assert that pad doesn't return a view into the input tensor:
        # writing 4s into the output must leave `inputs` (drawn from [0, 1))
        # untouched, for both zero padding and negative (cropping) padding.
        for mode in 'constant', 'reflect', 'replicate', 'circular':
            out = F.pad(inputs, (0, 0, 0, 0), mode=mode)
            out.fill_(4)
            self.assertTrue(torch.all(torch.abs(inputs) < 2))
            out = F.pad(inputs, (0, 0, -1, -1), mode=mode)
            out.fill_(4)
            self.assertTrue(torch.all(torch.abs(inputs) < 2))
    @onlyNativeDeviceTypes
    @dtypes(torch.float64, torch.complex128)
    def test_ReplicationPad_empty(self, device, dtype):
        """ReplicationPad{1,2,3}d accept a zero-batch input but reject zero
        channels (non-batch empty dims)."""
        for mod, inp in [
                (torch.nn.ReplicationPad1d(3), torch.randn(0, 3, 10, device=device, dtype=dtype)),
                (torch.nn.ReplicationPad2d(3), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
                (torch.nn.ReplicationPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:
            self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, 'Expected 2D or 3D'):
            mod = torch.nn.ReplicationPad1d(2)
            inp = torch.randn(3, 0, 10, device=device, dtype=dtype)
            mod(inp)
        with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
            mod = torch.nn.ReplicationPad2d((2, 2, 2, 2))
            inp = torch.randn(43, 0, 10, 10, device=device, dtype=dtype)
            mod(inp)
        with self.assertRaisesRegex(RuntimeError, 'Expected 4D or 5D'):
            mod = torch.nn.ReplicationPad3d((2, 2, 2, 2, 2, 2))
            inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)
            mod(inp)
    def test_ReplicationPad1d_large(self, device):
        """ReplicationPad1d forward/backward on large inputs (exercises shapes
        with a dim > 2**16): padded regions replicate the edge values, and the
        edge grads accumulate the grads of their replicas."""
        shapes = ([2, 65736, 4], [65736, 2, 4])
        pl, pr = 3, 4
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            model = torch.nn.ReplicationPad1d((pl, pr))
            # forward
            out = model(x)
            self.assertEqual(out[:, :, pl : -pr], x)
            left_padding = out[:, :, : pl]
            self.assertEqual(left_padding, x[:, :, :1].expand_as(left_padding))
            right_padding = out[:, :, -pr :]
            self.assertEqual(right_padding, x[:, :, -1:].expand_as(right_padding))
            # backward: interior grads pass through; each edge element gets the
            # sum over itself plus its pl (resp. pr) replicas.
            g = torch.randn_like(out)
            out.backward(g)
            self.assertEqual(x.grad[:, :, 1 : -1], g[:, :, pl + 1 : -pr - 1])
            self.assertEqual(x.grad[:, :, 0], g[:, :, : pl + 1].sum(-1))
            self.assertEqual(x.grad[:, :, -1], g[:, :, -pr - 1:].sum(-1))
    def test_ReplicationPad2d_large(self, device):
        """ReplicationPad2d forward/backward on large inputs: checks the center,
        all four edges, and all four corners, plus the matching grad sums."""
        shapes = ([2, 65736, 4, 4], [65736, 2, 4, 4])
        pl, pr, pt, pb = 3, 4, 5, 6
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            model = torch.nn.ReplicationPad2d((pl, pr, pt, pb))
            # forward center, edge
            out = model(x)
            self.assertEqual(out[:, :, pt : -pb, pl : -pr], x)
            left_padding = out[:, :, pt : -pb, : pl]
            self.assertEqual(left_padding, x[:, :, :, :1].expand_as(left_padding))
            right_padding = out[:, :, pt : -pb, -pr :]
            self.assertEqual(right_padding, x[:, :, :, -1:].expand_as(right_padding))
            top_padding = out[:, :, : pt, pl : -pr]
            self.assertEqual(top_padding, x[:, :, :1, :].expand_as(top_padding))
            bottom_padding = out[:, :, -pb : , pl : -pr]
            self.assertEqual(bottom_padding, x[:, :, -1:, :].expand_as(bottom_padding))
            # forward corner: each corner block replicates the corner element
            tl_padding = out[:, :, : pt + 1, : pl + 1]
            self.assertEqual(tl_padding, x[:, :, :1, :1].expand_as(tl_padding))
            tr_padding = out[:, :, : pt + 1, -pr - 1:]
            self.assertEqual(tr_padding, x[:, :, :1, -1:].expand_as(tr_padding))
            bl_padding = out[:, :, -pb - 1:, : pl + 1]
            self.assertEqual(bl_padding, x[:, :, -1:, :1].expand_as(bl_padding))
            br_padding = out[:, :, -pb - 1:, -pr - 1:]
            self.assertEqual(br_padding, x[:, :, -1:, -1:].expand_as(br_padding))
            # backward center, edge: edge grads sum over their replicated strip
            g = torch.randn_like(out)
            out.backward(g)
            self.assertEqual(x.grad[:, :, 1:-1, 1:-1], g[:, :, pt + 1 : -pb - 1, pl + 1 : -pr - 1])
            self.assertEqual(x.grad[:, :, 1:-1, 0], g[:, :, pt + 1 : -pb - 1, : pl + 1].sum(-1))
            self.assertEqual(x.grad[:, :, 1:-1, -1], g[:, :, pt + 1 : -pb - 1, -pr - 1 :].sum(-1))
            self.assertEqual(x.grad[:, :, 0, 1:-1], g[:, :, : pt + 1, pl + 1 : -pr - 1].sum(-2))
            self.assertEqual(x.grad[:, :, -1, 1:-1], g[:, :, -pb - 1 :, pl + 1 : -pr - 1].sum(-2))
            # backward corner: corner grads sum over the whole corner block
            self.assertEqual(x.grad[:, :, 0, 0], g[:, :, : pt + 1, : pl + 1].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, 0, -1], g[:, :, : pt + 1, -pr - 1 :].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, -1, 0], g[:, :, -pb - 1 :, : pl + 1].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, -1, -1], g[:, :, -pb - 1 :, -pr - 1 :].sum((-2, -1)))
    @largeTensorTest("6GB")
    def test_ReplicationPad3d_large(self, device):
        """ReplicationPad3d on large inputs: only the center block is checked
        (forward identity and backward pass-through for interior elements)."""
        shapes = ([1, 65736, 2, 2, 2], [65736, 1, 2, 2, 2])
        pl, pr, pt, pbt, pf, pbk = 3, 4, 5, 6, 7, 8
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            model = torch.nn.ReplicationPad3d((pl, pr, pt, pbt, pf, pbk))
            # forward center
            out = model(x)
            self.assertEqual(out[:, :, pf : -pbk, pt : -pbt, pl : -pr], x)
            # backward center
            g = torch.randn_like(out)
            out.backward(g)
            self.assertEqual(x.grad[:, :, 1:-1, 1:-1, 1:-1], g[:, :, pf + 1 : -pbk - 1, pt + 1 : -pbt - 1, pl + 1 : -pr - 1])
@onlyNativeDeviceTypes
def test_Bilinear_empty(self, device):
mod = torch.nn.Bilinear(20, 30, 40).to(device)
inp1 = torch.randn(0, 10, 20, requires_grad=True, device=device)
inp2 = torch.randn(0, 10, 30, requires_grad=True, device=device)
output = mod(inp1, inp2)
output.sum().backward()
self.assertEqual(inp1, torch.zeros_like(inp1))
self.assertEqual(inp2, torch.zeros_like(inp2))
self.assertEqual(inp1.grad, torch.zeros_like(inp1))
self.assertEqual(inp2.grad, torch.zeros_like(inp2))
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerEncoderLayer_empty(self, device):
for batch_first, input_shape in [(True, (0, 10, 512)),
(False, (10, 0, 512))]:
input = torch.rand(*input_shape, device=device)
encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
self._test_module_empty_input(encoder_layer, input, check_size=False)
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerEncoder_empty(self, device):
for batch_first, input_shape in [(True, (0, 10, 512)),
(False, (10, 0, 512))]:
input = torch.rand(*input_shape, device=device)
encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6).to(device)
self._test_module_empty_input(transformer_encoder, input, check_size=False)
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerDecoderLayer_empty(self, device):
for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
(False, (10, 0, 512), (20, 0, 512))]:
memory = torch.rand(*memory_shape, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
self._test_module_empty_inputs(decoder_layer, [tgt, memory])
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerDecoder_empty(self, device):
for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
(False, (10, 0, 512), (20, 0, 512))]:
memory = torch.rand(*memory_shape, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6).to(device)
self._test_module_empty_inputs(transformer_decoder, [tgt, memory])
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_Transformer_empty(self, device):
for batch_first, src_shape, tgt_shape in [(True, (10, 0, 512), (20, 0, 512))]:
transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12).to(device)
src = torch.rand(*src_shape, requires_grad=True, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
self._test_module_empty_inputs(transformer_model, [src, tgt])
    @onlyNativeDeviceTypes
    @dtypes(torch.float32, torch.complex64)
    def test_ReflectionPad_empty(self, device, dtype):
        """ReflectionPad{1,2,3}d accept a zero-batch input but reject zero
        channels (non-batch empty dims)."""
        for mod, inp in [
                (torch.nn.ReflectionPad1d(2), torch.randn(0, 3, 10, device=device, dtype=dtype)),
                (torch.nn.ReflectionPad2d(2), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
                (torch.nn.ReflectionPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:
            self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, '2D or 3D'):
            mod = torch.nn.ReflectionPad1d(2)
            inp = torch.randn(3, 0, 10, device=device, dtype=dtype)
            mod(inp)
        with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
            mod = torch.nn.ReflectionPad2d(2)
            inp = torch.randn(3, 0, 10, 10, device=device, dtype=dtype)
            mod(inp)
        with self.assertRaisesRegex(RuntimeError, '4D or 5D'):
            mod = torch.nn.ReflectionPad3d(3)
            inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)
            mod(inp)
    @onlyCUDA   # Test if CPU and GPU results match
    def test_ReflectionPad2d_large(self, device):
        """ReflectionPad2d on large inputs: CUDA forward/backward must match the
        CPU reference exactly."""
        shapes = ([2, 65736, 6, 6], [65736, 2, 6, 6])
        pad = (1, 2, 3, 4)
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            ref_x = x.detach().cpu().requires_grad_()
            out = F.pad(x, pad, mode='reflect')
            ref_out = F.pad(ref_x, pad, mode='reflect')
            self.assertEqual(out, ref_out)
            g = torch.randn_like(out)
            ref_g = g.cpu()
            out.backward(g)
            ref_out.backward(ref_g)
            self.assertEqual(x.grad, ref_x.grad)
@onlyNativeDeviceTypes
def test_LocalResponseNorm_empty(self, device):
mod = torch.nn.LocalResponseNorm(2).to(device)
inp = torch.ones(0, 5, 24, 24, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
    @onlyCUDA   # Test if CPU and GPU results match
    def test_ReflectionPad3d_large(self, device):
        """ReflectionPad3d on large inputs: CUDA forward/backward must match the
        CPU reference exactly."""
        shapes = ([2, 1000, 7, 7, 7], [1000, 2, 7, 7, 7])
        pad = (1, 2, 3, 4, 5, 6)
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            ref_x = x.detach().cpu().requires_grad_()
            out = F.pad(x, pad, mode='reflect')
            ref_out = F.pad(ref_x, pad, mode='reflect')
            self.assertEqual(out, ref_out)
            g = torch.randn_like(out)
            ref_g = g.cpu()
            out.backward(g)
            ref_out.backward(ref_g)
            self.assertEqual(x.grad, ref_x.grad)
    @onlyNativeDeviceTypes
    @dtypes(torch.float, torch.double)
    def test_MarginLoss_empty(self, device, dtype):
        """Multi(Label)MarginLoss on zero-batch inputs: backward yields zero
        grads; mismatched empty/non-empty shapes must raise."""
        for mod, x, y in [
                (torch.nn.MultiMarginLoss().to(device),
                 torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
                 torch.ones(0, device=device).type(torch.long)),
                (torch.nn.MultiLabelMarginLoss().to(device),
                 torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
                 torch.ones(0, 10, device=device).type(torch.long))]:
            out = mod(x, y)
            out.sum().backward()
            self.assertEqual(x, torch.zeros_like(x))
            self.assertEqual(x.grad, torch.zeros_like(x))
            # Empty input paired with a non-empty target (and vice versa) is
            # a shape error, not a silently-empty result.
            with self.assertRaisesRegex(RuntimeError, 'Expected'):
                x = torch.randn(0, requires_grad=True, device=device, dtype=dtype)
                y = torch.ones(10, device=device).type(torch.long)
                mod(x, y)
            with self.assertRaisesRegex(RuntimeError, 'Expected'):
                x = torch.randn(10, 0, requires_grad=True, device=device, dtype=dtype)
                y = torch.ones(10, 0, device=device).type(torch.long)
                mod(x, y)
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_zero_batch(self, dtype, device):
inp = torch.ones(0, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool1d(5).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool2d((5, 5)).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool3d((5, 5, 5)).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
@onlyNativeDeviceTypes
def test_FractionalMaxPool2d_zero_batch(self, device):
mod = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
inp = torch.ones(0, 16, 50, 32, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_zero_batch(self, device):
mod = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5)).to(device)
inp = torch.ones(0, 16, 50, 32, 32, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, 32, device=device)
mod(inp)
@onlyNativeDeviceTypes
def test_Unfold_empty(self, device):
inp = torch.randn(0, 3, 3, 4, device=device)
unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
self._test_module_empty_input(unfold, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
inp = torch.randn(3, 0, 3, 4, device=device)
unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
unfold(inp)
    @onlyNativeDeviceTypes
    def test_MaxPool_zero_batch_dim(self, device):
        """MaxPool{1,2,3}d accept a zero batch dim; 2d/3d reject zero channels."""
        inp = torch.randn(0, 16, 50, device=device)
        mod = torch.nn.MaxPool1d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        # 1D is supposed to be okay with 0 numel() inputs so don't test
        # error raising for that case.

        inp = torch.randn(0, 16, 50, 32, device=device)
        mod = torch.nn.MaxPool2d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, 32, device=device)
            mod(inp)

        inp = torch.ones(0, 16, 50, 44, 31, device=device)
        mod = torch.nn.MaxPool3d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.ones(1, 0, 50, 44, 31, device=device)
            mod(inp)
    @onlyNativeDeviceTypes
    def test_MaxUnpool_zero_batch_dim(self, device):
        """MaxUnpool{1,2,3}d on zero-batch inputs: forward yields an empty
        output and backward yields zero grads on the original input."""
        # 1D case
        pool = torch.nn.MaxPool1d(2, stride=2, return_indices=True).to(device)
        unpool = torch.nn.MaxUnpool1d(2, stride=2).to(device)
        inp = torch.randn(0, 10, 10, requires_grad=True, device=device)
        output, indices = pool(inp)
        output.requires_grad_(True)
        unpool_out = unpool(output, indices)
        unpool_out.sum().backward()

        self.assertEqual(inp.grad, torch.zeros_like(inp))
        self.assertEqual(unpool_out, torch.zeros_like(unpool_out))

        # 2D case
        # NOTE(review): unlike the 1D/3D branches, no requires_grad_(True) on
        # `output` here -- grad already flows through since `inp` requires grad;
        # presumably the extra call above is redundant. TODO confirm.
        pool = torch.nn.MaxPool2d(2, stride=2, return_indices=True).to(device)
        unpool = torch.nn.MaxUnpool2d(2, stride=2).to(device)
        inp = torch.randn(0, 10, 10, 10, requires_grad=True, device=device)
        output, indices = pool(inp)
        unpool_out = unpool(output, indices)
        unpool_out.sum().backward()

        self.assertEqual(inp.grad, torch.zeros_like(inp))
        self.assertEqual(unpool_out, torch.zeros_like(unpool_out))

        # 3D case
        pool = torch.nn.MaxPool3d(2, stride=2, return_indices=True).to(device)
        unpool = torch.nn.MaxUnpool3d(2, stride=2).to(device)
        inp = torch.randn(0, 10, 10, 10, 10, requires_grad=True, device=device)
        output, indices = pool(inp)
        output.requires_grad_(True)
        unpool_out = unpool(output, indices)
        unpool_out.sum().backward()

        self.assertEqual(inp.grad, torch.zeros_like(inp))
        self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
    @onlyNativeDeviceTypes
    def test_AdaptiveMaxPool_zero_batch_dim(self, device):
        """AdaptiveMaxPool{1,2,3}d accept a zero batch dim but reject zero
        channels."""
        inp = torch.randn(0, 16, 50, device=device)
        mod = torch.nn.AdaptiveMaxPool1d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, device=device)
            mod(inp)

        inp = torch.randn(0, 16, 50, 32, device=device)
        mod = torch.nn.AdaptiveMaxPool2d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, 32, device=device)
            mod(inp)

        inp = torch.ones(0, 16, 50, 44, 31, device=device)
        mod = torch.nn.AdaptiveMaxPool3d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.ones(1, 0, 50, 44, 31, device=device)
            mod(inp)
    @onlyCUDA
    @dtypes(torch.float, torch.double)
    @tf32_on_and_off(0.005)
    def test_rnn_fused(self, device, dtype):
        """Compare fused GRU/LSTM kernels on `device` against the CPU
        implementation: identical weights and inputs must give identical
        outputs, hidden states, and gradients (cuDNN disabled so the fused
        native kernels are exercised)."""

        def copy_rnn(rnn1, rnn2):
            # Copy rnn2's weights into rnn1 so both paths start identical.
            for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
                for x, y in zip(x_layer, y_layer):
                    x.data.copy_(y.data)

        def check_rnn_grads(rnn1, rnn2):
            # Weight grads must match across devices within a loose tolerance.
            for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
                for x, y in zip(x_layer, y_layer):
                    self.assertEqual(x.grad, y.grad, atol=5e-5, rtol=0)

        input_size = 10
        hidden_size = 6
        num_layers = 2
        seq_length = 7
        batch = 6
        input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
        grad_output = torch.randn(seq_length, batch, hidden_size, dtype=dtype)
        hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
        grad_hy = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
        with torch.backends.cudnn.flags(enabled=False, allow_tf32=None):
            for module in (nn.GRU, nn.LSTM):
                for bias in (True, False):
                    rnn = module(input_size, hidden_size, num_layers, bias=bias).to(dtype)
                    rnn_device = module(input_size, hidden_size, num_layers, bias=bias).to(device, dtype)
                    copy_rnn(rnn, rnn_device)

                    is_lstm = isinstance(rnn, nn.LSTM)
                    if is_lstm:
                        # LSTM hidden state is an (h, c) pair; offset c by 1 so
                        # the two components are distinguishable.
                        hx = (hx_val.clone().requires_grad_(True),
                              hx_val.clone().add(1).requires_grad_(True))
                        hx_device = (hx_val.clone().to(device).requires_grad_(True),
                                     hx_val.clone().to(device).add(1).requires_grad_(True))
                    else:
                        hx = hx_val.clone().requires_grad_(True)
                        hx_device = hx_val.clone().to(device).requires_grad_(True)

                    inp = input_val.clone().requires_grad_(True)
                    inp_cu = input_val.clone().to(device).requires_grad_(True)
                    output1, hy1 = rnn(inp, hx)
                    output2, hy2 = rnn_device(inp_cu, hx_device)
                    if is_lstm:
                        torch.autograd.backward(
                            [output1, hy1[0], hy1[1]], [grad_output, grad_hy, grad_hy + 1]
                        )
                        torch.autograd.backward(
                            [output2, hy2[0], hy2[1]],
                            [grad_output.to(device), grad_hy.to(device), (grad_hy + 1).to(device)]
                        )
                    else:
                        torch.autograd.backward([output1, hy1], [grad_output, grad_hy])
                        torch.autograd.backward([output2, hy2], [grad_output.to(device), grad_hy.to(device)])

                    self.assertEqual(output1, output2)
                    self.assertEqual(hy1, hy2)

                    check_rnn_grads(rnn, rnn_device)
                    self.assertEqual(inp.grad, inp_cu.grad)
                    if is_lstm:
                        self.assertEqual(hx[0].grad, hx_device[0].grad)
                        self.assertEqual(hx[1].grad, hx_device[1].grad)
                    else:
                        self.assertEqual(hx.grad, hx_device.grad)
    def test_BatchNorm_empty(self, device):
        """BatchNorm2d on a zero-batch input: running stats stay at their
        defaults (mean 0, var 1) and parameter grads are zero."""
        mod = torch.nn.BatchNorm2d(3).to(device)
        inp = torch.randn(0, 3, 2, 2, device=device)
        self._test_module_empty_input(mod, inp)
        if self.device_type == 'cuda' and self.has_cudnn():
            # Also exercise the non-cuDNN path.
            with torch.backends.cudnn.flags(enabled=False):
                self._test_module_empty_input(mod, inp)

        self.assertEqual(mod.running_mean, torch.tensor([0., 0, 0], device=device))
        self.assertEqual(mod.running_var, torch.tensor([1., 1, 1], device=device))
        self.assertEqual(mod.weight.grad, torch.tensor([0., 0, 0], device=device))
        self.assertEqual(mod.bias.grad, torch.tensor([0., 0, 0], device=device))
    def test_conv_empty_channel(self, device):
        """Conv{1,2,3}d built with in_channels=0 accept zero-channel inputs,
        but a nonzero-channel input (or an empty spatial dim) must raise the
        groups/weight shape mismatch error."""
        in_channels = 0
        mod = torch.nn.Conv1d(in_channels, 8, 2, stride=2).to(device)
        inp = torch.randn(2, 0, 15, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 0, device=device)
            mod(inp)

        mod = torch.nn.Conv2d(in_channels, 33, 3, stride=2).to(device)
        inp = torch.randn(2, 0, 50, 100, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 40, 0, device=device)
            mod(inp)

        mod = torch.nn.Conv3d(in_channels, 33, 3, stride=2).to(device)
        inp = torch.randn(2, 0, 50, 20, 40, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 50, 0, 40, device=device)
            mod(inp)
def test_group_conv_empty(self, device):
mod = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
def test_group_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
def test_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
    @onlyNativeDeviceTypes
    def test_AvgPool2d_empty(self, device):
        """AvgPool2d accepts zero-batch inputs (contiguous and channels-last)
        but rejects an empty non-batch dimension."""
        avgpool = torch.nn.AvgPool2d(3, stride=2).to(device)
        inp = torch.randn(0, 16, 20, 32, device=device)
        self._test_module_empty_input(avgpool, inp, check_size=False)

        clast_inp = torch.randn(0, 16, 20, 32, device=device).contiguous(memory_format=torch.channels_last)
        self._test_module_empty_input(avgpool, clast_inp, check_size=False)

        # test with empty non-batch input
        with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
            inp = torch.randn(16, 0, 20, 32, device=device)
            avgpool(inp)
@onlyCUDA
@largeTensorTest('16GB')
def test_prelu_backward_32bit_indexing(self, device):
m = torch.nn.PReLU().cuda().half()
input_ = torch.ones((1024, 1024, 1024, 2), dtype=torch.half, device=device)
output = m(input_)
output.backward(input_)
def test_linear_empty(self, device):
mod = torch.nn.Linear(7, 7).to(device)
inp = torch.randn(0, 7, device=device)
self._test_module_empty_input(mod, inp)
    def test_one_hot(self, device):
        """F.one_hot: inferred and explicit num_classes, multi-dim and scalar
        inputs, empty inputs, and the error cases (negative values, too-small
        or invalid num_classes)."""
        if self.device_type != 'cuda':  # cuda throws device assert for invalid data
            with self.assertRaises(RuntimeError):
                torch.nn.functional.one_hot(torch.tensor([3, 4, -1, 0], device=device), -1)

            with self.assertRaises(RuntimeError):
                torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 3)

        # num_classes omitted: inferred as max value + 1
        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device))
        expected = torch.tensor([[0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)

        # num_classes=-1 behaves like "infer"
        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -1)
        expected = torch.tensor([[0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)

        # explicit num_classes larger than max value + 1
        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 6)
        expected = torch.tensor([[0, 0, 0, 1, 0, 0],
                                 [0, 0, 0, 0, 1, 0],
                                 [0, 1, 0, 0, 0, 0],
                                 [1, 0, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)

        # 2D input: one-hot dim is appended as the last dim
        t = torch.nn.functional.one_hot(torch.tensor([[3, 4], [1, 0]], device=device))
        expected = torch.tensor([[[0, 0, 0, 1, 0],
                                  [0, 0, 0, 0, 1]],
                                 [[0, 1, 0, 0, 0],
                                  [1, 0, 0, 0, 0]]], device=device)
        self.assertEqual(t, expected)

        # 0-dim (scalar) input
        t = torch.nn.functional.one_hot(torch.tensor(4, device=device))
        expected = torch.tensor([0, 0, 0, 0, 1], device=device)
        self.assertEqual(t, expected)

        # empty input with explicit num_classes works...
        t = torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device), 100)
        expected = torch.empty([4, 0, 100], dtype=torch.long)
        self.assertEqual(t, expected)

        # ...but num_classes cannot be inferred from an empty input
        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device))

        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -2)
    def test_nn_scalars(self, device):
        """Elementwise nn modules must map a 0-dim input to a 0-dim output (and
        non-0-dim to non-0-dim), with grads matching the input shape."""
        # One off tests to ensure scalars from nn.yaml are properly applied
        def verify_scalars(input, output):
            if input.dim() == 0:
                self.assertEqual((), output.shape)
            else:
                self.assertNotEqual((), output.shape)
            output.sum().backward()
            self.assertEqual(input.shape, input.grad.shape)

        for input_shape in [(5, 6), ()]:
            for module in [torch.nn.ELU, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.nn.LogSigmoid,
                           torch.nn.RReLU, torch.nn.Softshrink, torch.nn.Softplus, torch.nn.Sigmoid,
                           torch.nn.Tanh]:
                input = torch.randn(input_shape, device=device, requires_grad=True)
                m = module()
                output = m(input)
                verify_scalars(input, output)
    def test_nn_scalars_reductions(self, device):
        """Loss modules must return a 0-dim tensor for 'mean'/'sum' (and for any
        0-dim input), and a non-0-dim tensor for 'none' on non-0-dim input."""
        # One off tests to ensure scalars from nn.yaml are properly applied
        def verify_reduction_scalars(input, reduction, output):
            if reduction != 'none' or input.dim() == 0:
                self.assertEqual((), output.shape)
            else:
                self.assertNotEqual((), output.shape)
            output.sum().backward()
            self.assertEqual(input.shape, input.grad.shape)

        for input_shape in [(5, 6), ()]:
            for reduction in ['none', 'mean', 'sum']:
                for module in [torch.nn.BCELoss, torch.nn.L1Loss, torch.nn.MSELoss,
                               torch.nn.SmoothL1Loss, torch.nn.SoftMarginLoss]:
                    input = torch.randn(input_shape, device=device, requires_grad=True)
                    target = torch.empty(input_shape, device=device).random_(2)
                    sigmoid = nn.Sigmoid()

                    input = torch.randn(input_shape, device=device, requires_grad=True)
                    m = module(reduction=reduction)
                    output = m(sigmoid(input), target)
                    verify_reduction_scalars(input, reduction, output)
    # verify that bogus reduction strings are errors
    @onlyNativeDeviceTypes
    def test_invalid_reduction_strings(self, device):
        """Every functional loss must raise ValueError for an unknown
        `reduction` string and accept a valid one ('none')."""
        input = torch.randn(3, 5, requires_grad=True, device=device)
        cinput = torch.randn(3, 5, requires_grad=True, device=device, dtype=torch.cfloat)
        target = torch.tensor([1, 0, 4], device=device)
        var = torch.ones(size=input.size(), requires_grad=True, device=device)

        for reduction in ['none', 'invalid']:
            def v(fn):
                # 'invalid' must raise; 'none' must run without error.
                if reduction == 'invalid':
                    self.assertRaises(ValueError, lambda: fn())
                else:
                    fn()

            v(lambda: F.nll_loss(input, target, reduction=reduction))
            v(lambda: F.cross_entropy(input, target, reduction=reduction))
            v(lambda: F.multi_margin_loss(input, target, reduction=reduction))

            v(lambda: F.kl_div(input, input, reduction=reduction))
            v(lambda: F.huber_loss(input, input, reduction=reduction))
            v(lambda: F.smooth_l1_loss(input, input, reduction=reduction))
            v(lambda: F.l1_loss(input, input, reduction=reduction))
            v(lambda: F.l1_loss(cinput, cinput, reduction=reduction))
            v(lambda: F.mse_loss(input, input, reduction=reduction))
            v(lambda: F.hinge_embedding_loss(input, input, reduction=reduction))
            v(lambda: F.poisson_nll_loss(input, input, reduction=reduction))
            v(lambda: F.gaussian_nll_loss(input, input, var, reduction=reduction))
            v(lambda: F.binary_cross_entropy(torch.sigmoid(input), input, reduction=reduction))
            v(lambda: F.binary_cross_entropy_with_logits(input, input, reduction=reduction))

            zeros = torch.zeros_like(input).to(torch.int64)
            v(lambda: F.multilabel_soft_margin_loss(input, zeros, reduction=reduction))
            v(lambda: F.multilabel_margin_loss(input, zeros, reduction=reduction))

            v(lambda: F.triplet_margin_loss(input, input, input, reduction=reduction))
            v(lambda: F.triplet_margin_with_distance_loss(input, input, input, reduction=reduction))
            v(lambda: F.margin_ranking_loss(input, input, input.sign(), reduction=reduction))
            v(lambda: F.cosine_embedding_loss(input, input, input[:, 0].sign(), reduction=reduction))

            log_probs = torch.randn(50, 16, 20, requires_grad=True, device=device).log_softmax(2)
            targets = torch.randint(1, 20, (16, 30), dtype=torch.long, device=device)
            input_lengths = torch.full((16,), 50, dtype=torch.long, device=device)
            target_lengths = torch.randint(10, 30, (16,), dtype=torch.long, device=device)
            v(lambda: F.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction=reduction))

            # FIXME: should we allow derivatives on these?
            v(lambda: F.soft_margin_loss(input, input.sign().detach(), reduction=reduction))
    @onlyNativeDeviceTypes
    def test_smooth_l1_loss_vs_huber_loss(self, device):
        """SmoothL1Loss(beta) equals HuberLoss(delta=beta) exactly when beta
        is 1, and the two differ by a factor of beta otherwise; covers
        non-vectorized, vectorized, and non-contiguous inputs."""
        def _make_test_tensor(shape, contiguous=True):
            if contiguous:
                test_tensor = torch.randn(shape, device=device)
            else:
                # Select every other element in the innermost dimension to
                # make it non-contiguous.
                doubled_shape = list(shape)
                doubled_shape[-1] *= 2
                test_tensor = torch.randn(doubled_shape, device=device)
                test_tensor = test_tensor[..., ::2]
            return test_tensor

        def _test_smooth_l1_loss_vs_huber_loss_helper(input, target, beta, require_equal):
            for reduction in ['mean', 'sum', 'none']:
                smooth_l1 = torch.nn.SmoothL1Loss(beta=beta, reduction=reduction)
                # beta hyper-parameter is called delta for Huber
                huber = torch.nn.HuberLoss(delta=beta, reduction=reduction)
                smooth_l1_loss = smooth_l1(input, target)
                huber_loss = huber(input, target)

                if require_equal:
                    self.assertEqual(smooth_l1_loss, huber_loss)
                else:
                    # Huber loss should be larger than smooth L1 loss by a factor of beta.
                    self.assertEqual(smooth_l1_loss * beta, huber_loss)

        def _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta, require_equal):
            # Test the non-vectorized case.
            shape = (2, 2)
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                      target=_make_test_tensor(shape),
                                                      beta=beta,
                                                      require_equal=require_equal)

            # Test the vectorized case (innermost dim > 32).
            shape = (64, 64)
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                      target=_make_test_tensor(shape),
                                                      beta=beta,
                                                      require_equal=require_equal)

            # Test the non-contiguous case.
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape, contiguous=False),
                                                      target=_make_test_tensor(shape, contiguous=False),
                                                      beta=beta,
                                                      require_equal=require_equal)

        def test_equal_when_beta_is_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.0, require_equal=True)

        def test_unequal_when_beta_is_less_than_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=0.5, require_equal=False)

        def test_unequal_when_beta_is_greater_than_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.5, require_equal=False)

        test_equal_when_beta_is_one()
        test_unequal_when_beta_is_less_than_one()
        test_unequal_when_beta_is_greater_than_one()
    # We don't want to make propagating NaN a hard requirement on ops, but for
    # these easy ones, we should make them do so.
    def test_nonlinearity_propagate_nan(self, device):
        """Each listed activation in torch.nn.functional must map NaN to NaN
        (ops raising 'not implemented' on this device are skipped)."""
        def test(nonlinearity, *args, **kwargs):
            x = torch.tensor([nan], device=device)
            fn = getattr(F, nonlinearity)
            try:
                self.assertTrue(math.isnan(fn(x, *args, **kwargs).item()))
            except Exception as e:
                # Tolerate ops without an implementation for this device/dtype.
                if 'not implemented' not in str(e):
                    raise

        test('relu')
        test('relu', inplace=True)
        test('relu6')
        test('elu')
        test('selu')
        test('celu')
        test('rrelu')
        test('rrelu', inplace=True)
        test('hardtanh')
        test('tanh')
        test('sigmoid')
        test('logsigmoid')
        test('hardshrink')
        test('tanhshrink')
        test('softsign')
        test('softmin', 0)
        test('softmax', 0)
        test('log_softmax', 0)
        test('leaky_relu', 0.2)
        test('threshold', 3, 2)
        test('threshold', 3, 2, inplace=True)
def test_pooling_shape(self, device):
    """Test the output shape calculation for pooling functions in 1D, 2D and 3D."""
    def check(expected_out_shape, sizes, *args, **kwargs):
        # Try every {max,avg}_pool{1,2,3}d variant that F provides, truncating
        # the size tuples to the (batch, channel, *spatial) rank of each case.
        for kernel, ndim in itertools.product(['max', 'avg'], [1, 2, 3]):
            fn_name = f'{kernel}_pool{ndim}d'
            if not hasattr(torch.nn.functional, fn_name):
                continue
            pool_fn = getattr(torch.nn.functional, fn_name)
            inp = torch.randn(sizes[:ndim + 2], device=device)
            self.assertEqual(pool_fn(inp, *args, **kwargs).shape, expected_out_shape[:ndim + 2])

    check((1, 1, 3, 3, 4), (1, 1, 5, 6, 7), kernel_size=1, stride=2, padding=0, ceil_mode=True)
    check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=False)
    check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=True)

    # Regression check from issue https://github.com/pytorch/pytorch/issues/45357
    inp = torch.randn(1, 1, 6, 7, device=device)
    out = torch.nn.functional.max_pool2d(inp, 1, stride=(2, 2), padding=0, ceil_mode=True)
    self.assertEqual(out.size(), (1, 1, 3, 4))
@onlyNativeDeviceTypes  # TODO: fix on XLA
def test_adaptive_avg_pool2d_output_size_one(self, device):
    """AdaptiveAvgPool2d to (1, 1) must equal a plain spatial mean and
    preserve the input's memory layout (contiguous / channels_last)."""
    def helper(size, memory_format):
        x = torch.randint(1, 10, size, dtype=torch.float, device=device, requires_grad=True)
        if memory_format == 'non_contiguous':
            # Strided view: exercises the non-contiguous code path.
            x = x[::2, ::2, ::2, ::2]
        else:
            x = x.to(memory_format=memory_format)

        net = torch.nn.AdaptiveAvgPool2d((1, 1))
        out = net(x)
        # Reference: mean over H and W, reshaped back to (N, C, 1, 1).
        ref_out = x.contiguous().mean((-1, -2)).view((x.size(0), x.size(1), 1, 1))

        out.sum().backward()    # make sure it doesn't crash
        self.assertEqual(out, ref_out)
        if memory_format == torch.channels_last:
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            c = out.size(1)
            # channels_last strides for a (N, C, 1, 1) output.
            self.assertEqual(out.stride(), [c, 1, c, c])
        else:
            self.assertTrue(out.is_contiguous())
            c = out.size(1)
            self.assertEqual(out.stride(), [c, 1, 1, 1])

    for mf in (torch.contiguous_format, torch.channels_last, 'non_contiguous'):
        helper((2, 3, 6, 6), mf)
@onlyNativeDeviceTypes
def test_adaptive_avg_pool3d_output_size_one(self, device):
    """AdaptiveAvgPool3d down to 1x1x1 must equal a plain mean over the spatial dims."""
    inp = torch.randn((2, 3, 6, 6, 6), dtype=torch.float, device=device, requires_grad=True)
    pool = torch.nn.AdaptiveAvgPool3d(1)
    result = pool(inp)
    # Reference: mean over the three spatial dims, reshaped to the pooled shape.
    expected = inp.contiguous().mean((-1, -2, -3)).view(result.shape)

    result.sum().backward()  # backward must not crash
    self.assertEqual(result, expected)
    self.assertTrue(result.is_contiguous())
    channels = result.size(1)
    self.assertEqual(result.stride(), [channels, 1, 1, 1, 1])
@expectedFailureMeta  # Runtime Error not raised for meta
@onlyNativeDeviceTypes
@dtypes(torch.uint8, torch.int8, torch.short, torch.int, torch.long)
def test_adaptive_pooling_no_suppot_input(self, device, dtype):
    """Adaptive max/avg pooling must raise for unsupported integer dtypes.

    NOTE(review): "suppot" in the method name looks like a typo for "support";
    kept as-is so test discovery/reporting names stay stable.
    """
    for numel in (2, 3):
        for pool_type in ('Max', 'Avg'):
            # e.g. AdaptiveMaxPool2d, AdaptiveAvgPool3d, ...
            cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
            module_cls = getattr(nn, cls_name)
            output_size = (2,) * numel
            module = module_cls(output_size)
            input = torch.randn((4,) * (numel + 1), device=device).to(dtype)
            with self.assertRaisesRegex(RuntimeError, "not implemented"):
                output = module(input)
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
def test_avg_pool2d_nhwc(self, device, dtype):
    """channels_last AvgPool2d must agree with the contiguous path (output,
    memory format and input gradients)."""
    def helper(n, c, h, w, kernel_size, stride=None,
               count_include_pad=True, divisor_override=None, padding=0):
        if stride is None:
            stride = kernel_size
        input = torch.randn(n, c, h, w, dtype=dtype, device=device)
        input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
        # Upstream gradient shaped like the pooled output (padding=0 output size).
        grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,
                           dtype=dtype, device=device)
        pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,
                                  divisor_override=divisor_override).to(device)

        # Contiguous reference run on the same values.
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,
                                      divisor_override=divisor_override).to(device)

        out = pool(input)
        out.backward(grad)
        ref_out = ref_pool(ref_input)
        ref_out.backward(ref_grad)

        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(input.grad, ref_input.grad)

    helper(4, 8, 8, 8, 3)
    helper(4, 8, 8, 8, 3, count_include_pad=False, padding=1)
    helper(4, 8, 8, 8, 3, count_include_pad=False, padding=2, stride=2)
    helper(4, 8, 8, 8, 3, divisor_override=42)
    helper(4, 8, 8, 8, 7)
    # ROCm 16GB MI25 hits OOM error. Clear caching allocator prior to running large subtest.
    if TEST_WITH_ROCM and 'cuda' in device:
        torch.cuda.empty_cache()
    helper(200, 512, 28, 28, 2)
    helper(4, 8, 7, 7, 3, stride=1)
    helper(4, 8, 7, 7, 3, padding=2, stride=1)
    helper(10, 512, 31, 31, 3, stride=2)
    helper(1, 129, 8, 8, 3, stride=2)
@onlyCPU
@dtypes(torch.float)
def test_max_pool1d_errors(self, device, dtype):
    """max_pool1d must reject invalid inputs/arguments with descriptive errors."""
    # Each entry: (input data, MaxPool1d ctor args, expected message suffix).
    # Ctor arg order: (kernel_size, stride, padding, dilation, return_indices, ceil_mode).
    cases = [
        (0, (1,), "Expected 2D or 3D input tensor, but got"),
        ([], (1,), "Expected 2D or 3D input tensor, but got"),
        ([[]], (1, 0), "stride must be greater than zero, but got 0"),
        ([[]], (1, 1, -1), "padding must be non-negative, but got -1"),
        ([[]], (1, 1, 2), "padding should be at most half of kernel size, but got padding=2 and kernel_size=1"),
        ([[]], (1, 1, 0, 0), "dilation must be greater than zero, but got 0"),
        ([[]], (5, 1, 0, 1), "Invalid computed output size: -4"),
    ]
    for data, ctor_args, message in cases:
        model = torch.nn.MaxPool1d(*ctor_args)
        with self.assertRaisesRegex(RuntimeError, r'max_pool1d\(\) ' + message):
            model(torch.tensor(data, device=device, dtype=dtype))
@onlyCPU
@dtypes(torch.float, torch.double)
def test_max_pool1d_corner_cases(self, device, dtype):
    """Exercise max_pool1d on empty and single-element inputs."""
    def check(x, args, expected):
        model = torch.nn.MaxPool1d(*args)
        if isinstance(x, list):
            x = torch.tensor(x, device=device, dtype=dtype)
            expected = torch.tensor(expected, device=device, dtype=dtype)
        self.assertEqual(model(x), expected)

    # Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
    check([[]], (1, None, 0, 1, False, False), [[]])
    check([[[]]], (1, None, 0, 1, False, False), [[[]]])
    check([[[]]], (2, 1, 1, 2, False, True), [[[]]])
    check([[1]], (1, None, 0, 1, False, False), [[1]])
    # Padding positions hold -inf, so a window that sees only padding yields -inf.
    check([[1]], (2, None, 1, 2, False, False), [[float('-inf')]])
    check([[1], [1]], (2, None, 1, 2, False, False), [[float('-inf')], [float('-inf')]])
    check([[1, 2]], (2, 1, 1, 2, False, False), [[2, 1]])
    check([[1, 2]], (2, 2, 1, 2, False, True), [[2, 2]])

    # Zero-channel input passes through with the same (empty) shape.
    empty_tensor = torch.empty((2, 0, 1), device=device, dtype=dtype)
    check(empty_tensor, (1, None, 0, 1, False, False), empty_tensor)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_max_pool1d(self, device, dtype):
    """Randomized max_pool1d checks against the return_indices variant."""
    # FIXME For now compare against max_pool1d with indices
    def check(x, *args, **kwargs):
        model = torch.nn.MaxPool1d(*args, **kwargs)
        ref_model = torch.nn.MaxPool1d(*args, **kwargs, return_indices=True)
        self.assertEqual(model(x), ref_model(x)[0])

    # Random sizes/hyper-parameters; padding is sampled per-case below so it
    # stays <= kernel_size // 2 as max_pool1d requires.
    sizes = [random.sample(range(8, 128), 3) for _ in range(3)]
    kernel_sizes = random.sample(range(1, 5), 3)
    strides = random.sample(range(1, 5), 3)
    dilations = random.sample(range(1, 5), 3)
    ceil_modes = [True, False]

    for size, kernel_size, stride, dilation, ceil_mode in \
            itertools.product(sizes, kernel_sizes, strides, dilations, ceil_modes):
        padding = random.sample(range(0, math.floor(kernel_size / 2) + 1), 1)
        check(torch.randn(size, device=device, dtype=dtype),
              kernel_size, stride, padding, dilation, ceil_mode=ceil_mode)

    # Non-contiguous test
    tensor = torch.randn(5, 151, 33, device=device, dtype=dtype)[::2, ::3, ::2]
    check(tensor, 3, 2, 1, 2, ceil_mode=True)
    check(tensor.transpose(1, 2), 3, 2, 1, 2, ceil_mode=True)
@onlyCUDA
def test_max_pool2d(self, device):
    """Compare CUDA max_pool2d forward/backward against the CPU implementation."""
    def helper(n, c, h, w, ks):
        x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
        # CPU reference on the same values.
        ref_x = x.detach().clone().cpu().requires_grad_()

        pool = torch.nn.MaxPool2d(kernel_size=ks)

        y = pool(x)
        ref_y = pool(ref_x)

        y.sum().backward()
        ref_y.sum().backward()

        self.assertEqual(y, ref_y)
        self.assertEqual(x.grad, ref_x.grad)

    helper(2, 8, 4, 4, ks=2)
    helper(1, 100000, 32, 32, ks=4)
    helper(1, 100000, 1, 4, ks=(1, 4))  # test for max_pool1d
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
def test_max_pool2d_nhwc(self, device, dtype):
    """channels_last max_pool2d must agree with the contiguous path
    (output, indices, memory formats and input gradients)."""
    def helper(n, c, h, w, kernel_size, stride=None):
        if stride is None:
            stride = kernel_size
        input = torch.randn(n, c, h, w, dtype=dtype, device=device)
        input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
        # Upstream gradient shaped like the pooled output (padding=0 output size).
        grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,
                           dtype=dtype, device=device)
        pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)

        # Contiguous reference run on the same values.
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)

        out, ind = pool(input)
        out.backward(grad)
        ref_out, ref_ind = ref_pool(ref_input)
        ref_out.backward(ref_grad)

        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_ind.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(ind, ref_ind)
        self.assertEqual(input.grad, ref_input.grad)

    helper(4, 8, 8, 8, 7)
    helper(200, 512, 28, 28, 2)
    helper(4, 8, 7, 7, 3, stride=1)
    helper(10, 512, 31, 31, 3, stride=2)
    helper(1, 129, 8, 8, 3, stride=2)
@onlyCPU
def test_max_pool2d_bfloat16(self, device):
    """bfloat16 max_pool2d forward/backward must match a float32 run on the
    same values after casting the float32 results back to bfloat16."""
    def helper(n, c, h, w, kernel_size, stride, memory_format):
        input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()
        input = input.to(memory_format=memory_format).requires_grad_()
        pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)

        # float32 reference on the exact same (bfloat16-rounded) values.
        input2 = input.detach().clone().float().requires_grad_(True)

        out, ind = pool(input)
        out.sum().backward()
        out2, ind2 = pool(input2)
        out2.sum().backward()

        self.assertTrue(out.is_contiguous(memory_format=memory_format))
        self.assertEqual(out.dtype, torch.bfloat16)
        self.assertEqual(input.grad.dtype, torch.bfloat16)
        self.assertEqual(out, out2.bfloat16())
        self.assertEqual(ind, ind2)
        self.assertEqual(input.grad, input2.grad.bfloat16())

    helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)
    helper(4, 65, 8, 8, 7, 1, torch.channels_last)
    helper(1, 19, 20, 10, 8, 2, torch.contiguous_format)
    helper(1, 19, 20, 10, 8, 2, torch.channels_last)
@onlyCUDA
def test_max_pool2d_indices(self, device):
    """CUDA vs CPU agreement for max_pool2d with return_indices, batched and unbatched."""
    def helper(n, c, h, w, ks):
        if n is None:
            # Unbatched (3D) input.
            x = torch.randn(c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
        else:
            x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
        ref_x = x.detach().clone().cpu().requires_grad_()

        pool = torch.nn.MaxPool2d(kernel_size=ks, return_indices=True)

        y, idx = pool(x)
        ref_y, ref_idx = pool(ref_x)

        y.sum().backward()
        ref_y.sum().backward()

        self.assertEqual(y, ref_y)
        self.assertEqual(idx, ref_idx)  # assertEqual implicitly compares shape for tensors
        self.assertEqual(x.grad, ref_x.grad)

    helper(2, 8, 4, 4, ks=2)
    helper(None, 3, 50, 50, ks=5)
@onlyCPU
def test_avg_pool2d_bfloat16(self, device):
    """bfloat16 avg_pool2d forward/backward must match a float32 run on the
    same values after casting the float32 results back to bfloat16."""
    def helper(n, c, h, w, kernel_size, stride, memory_format):
        input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()
        input = input.to(memory_format=memory_format).requires_grad_()
        pool = torch.nn.AvgPool2d(kernel_size, stride).to(device)

        # float32 reference on the exact same (bfloat16-rounded) values.
        input2 = input.detach().clone().float().requires_grad_(True)

        out = pool(input)
        out.sum().backward()
        out2 = pool(input2)
        out2.sum().backward()

        self.assertTrue(out.is_contiguous(memory_format=memory_format))
        self.assertEqual(out.dtype, torch.bfloat16)
        self.assertEqual(input.grad.dtype, torch.bfloat16)
        self.assertEqual(out, out2.bfloat16())
        self.assertEqual(input.grad, input2.grad.bfloat16())

    helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)
    helper(4, 65, 8, 8, 7, 1, torch.channels_last)
    helper(1, 19, 20, 10, 8, 2, torch.contiguous_format)
    helper(1, 19, 20, 10, 8, 2, torch.channels_last)
def test_upsamplingNearest1d(self, device):
    """1D nearest / nearest-exact upsampling: forward values, gradcheck,
    gradgradcheck, and CPU/CUDA consistency."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'

    def helper(mode):
        m = nn.Upsample(size=4, mode=mode)
        in_t = torch.ones(1, 1, 2, device=device)
        in_uint8_t = torch.ones(1, 1, 2, dtype=torch.uint8, device=device)
        # NOTE(review): warnings are recorded here but `w` is never asserted on
        # (unlike test_upsamplingNearest2d); the context only silences them.
        with warnings.catch_warnings(record=True) as w:
            out_t = m(in_t)
            out_uint8_t = m(in_uint8_t)
        self.assertEqual(torch.ones(1, 1, 4, device=device), out_t.data)
        self.assertEqual(torch.ones(1, 1, 4, dtype=torch.uint8, device=device), out_uint8_t.data)

        # Checks upsampling
        input = torch.randn(1, 1, 2, requires_grad=True, device=device)
        gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # Checks downsampling
        input = torch.randn(1, 1, 20, requires_grad=True, device=device)
        gradcheck(lambda x: F.interpolate(x, 11, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # consistency CUDA/CPU check
        if torch.device(device).type == 'cuda':
            input_cuda = torch.randn(1, 1, 20, device=device)
            input_cpu = input_cuda.cpu()
            output_cuda = F.interpolate(input_cuda, 4, mode=mode)
            output_cpu = F.interpolate(input_cpu, 4, mode=mode)
            self.assertEqual(output_cuda.cpu(), output_cpu)

            output_cuda = F.interpolate(input_cuda, 24, mode=mode)
            output_cpu = F.interpolate(input_cpu, 24, mode=mode)
            self.assertEqual(output_cuda.cpu(), output_cpu)

    helper("nearest")
    helper("nearest-exact")
def test_upsamplingNearest1d_correctness(self, device):
    """1D 'nearest' interpolation must match OpenCV's INTER_NEAREST indexing."""
    def run_case(in_size, out_size):
        src = torch.arange(in_size, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        actual = F.interpolate(
            src, size=(out_size, ), recompute_scale_factor=False, mode="nearest"
        )
        # OpenCV-style source index: floor(dst_index * scale).
        scale = 1.0 * in_size / out_size
        expected = torch.zeros(out_size, dtype=torch.float).unsqueeze(0).unsqueeze(0)
        for dst in range(out_size):
            expected[0, 0, dst] = src[0, 0, int(dst * scale)]
        self.assertEqual(actual, expected.to(device=device))

    run_case(20, 11)  # downsampling
    run_case(10, 15)  # upsampling
def test_upsamplingNearestExact1d_rescale(self, device):
    """'nearest-exact' at scale ~1 must reproduce the input; at scale ~2 it
    must exactly duplicate each element."""
    # Checks https://github.com/pytorch/pytorch/issues/62237
    isize = 20
    in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
    # for s in [1.00001, 0.99999]:  # 0.9999 case is broken
    # See issue: https://github.com/pytorch/pytorch/issues/62396
    for s in [1.00001, ]:
        out_t = F.interpolate(
            in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
        )
        expected_out = in_t
        self.assertEqual(out_t, expected_out, msg=f"scale: {s}")

    # checks data duplication if output_size == 2 * input_size
    # for s in [2.00001, 1.99999]:  # 1.99999 case is broken
    # See issue: https://github.com/pytorch/pytorch/issues/62396
    for s in [2.00001, ]:
        out_t = F.interpolate(
            in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
        )
        # input is [[[0, 1, 2, 3, ..., 9]]]
        # expected out is [[[0, 0, 1, 1, 2, 2, ..., 9, 9]]]
        expected_out = in_t.repeat_interleave(2, dim=-1)
        self.assertEqual(out_t, expected_out)
def test_upsamplingNearestExact1d_correctness(self, device):
    """1D 'nearest-exact' interpolation must match scikit-image/scipy indexing.

    See https://github.com/pytorch/pytorch/issues/34808.
    """
    def run_case(in_size, out_size):
        src = torch.arange(in_size, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        actual = F.interpolate(
            src, size=(out_size, ), recompute_scale_factor=False, mode="nearest-exact"
        )
        # scikit-image/scipy-style source index: floor((dst_index + 0.5) * scale).
        scale = 1.0 * in_size / out_size
        expected = torch.zeros(out_size, dtype=torch.float).unsqueeze(0).unsqueeze(0)
        for dst in range(out_size):
            expected[0, 0, dst] = src[0, 0, int((dst + 0.5) * scale)]
        self.assertEqual(actual, expected.to(device=device))

    run_case(20, 11)  # downsampling
    run_case(10, 15)  # upsampling
def test_upsamplingNearest2d(self, device):
    """2D nearest / nearest-exact upsampling: values, warning-freeness,
    memory-format propagation, grad checks, and CPU/CUDA parity."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'

    def helper(memory_format, mode):
        in_t = torch.ones(1, 2, 2, 2, device=device).contiguous(memory_format=memory_format)
        in_uint8_t = torch.ones(1, 2, 2, 2, dtype=torch.uint8, device=device).contiguous(memory_format=memory_format)
        with warnings.catch_warnings(record=True) as w:
            out_t = F.interpolate(in_t, size=4, mode=mode)
            out_uint8_t = F.interpolate(in_uint8_t, size=4, mode=mode)
            self.assertEqual(len(w), 0)
        self.assertEqual(torch.ones(1, 2, 4, 4, device=device), out_t)
        self.assertEqual(torch.ones(1, 2, 4, 4, dtype=torch.uint8, device=device), out_uint8_t)
        # Assert that memory format is carried through to the output
        self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

        # test forward when input's height is not same as width
        in_t = torch.ones(1, 2, 2, 1, device=device).contiguous(memory_format=memory_format).requires_grad_()
        out_t = F.interpolate(in_t, size=(4, 2), mode=mode)
        self.assertEqual(torch.ones(1, 2, 4, 2, device=device), out_t)
        self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

        out_t.backward(torch.randn_like(out_t))
        self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))

        # test backward when input's height is not same as width
        input = torch.ones(1, 2, 2, 1, requires_grad=True, device=device).contiguous(memory_format=memory_format)
        gradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # size=4 and scale_factor=2 describe the same resize here.
        input = torch.randn(1, 2, 2, 2, requires_grad=True, device=device).contiguous(memory_format=memory_format)
        self.assertEqual(
            F.interpolate(input, 4, mode=mode),
            F.interpolate(input, scale_factor=2, mode=mode))
        gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # Assert that cpu and cuda handle channels_last memory format in the same way
        # https://github.com/pytorch/pytorch/issues/54590
        if torch.device(device).type == 'cuda':
            for shapes, scale_factor in product([
                    (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
            ], [0.5, 1.5, 2]):
                a_cuda = torch.randn(*shapes, device=device).contiguous(memory_format=memory_format).requires_grad_()
                a_cpu = a_cuda.detach().cpu().requires_grad_()

                out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, mode=mode)
                out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, mode=mode)

                self.assertEqual(out_cpu.cuda(), out_cuda)

                g_cuda = torch.randn_like(out_cuda)
                g_cpu = g_cuda.cpu()

                out_cuda.backward(g_cuda)
                out_cpu.backward(g_cpu)

                self.assertEqual(a_cuda.grad, a_cpu.grad)

    helper(torch.contiguous_format, "nearest")
    helper(torch.channels_last, "nearest")
    helper(torch.contiguous_format, "nearest-exact")
    helper(torch.channels_last, "nearest-exact")
def test_upsamplingNearest2d_correctness(self, device):
    """2D 'nearest' interpolation must match OpenCV's INTER_NEAREST indexing."""
    def helper(memory_format, isize, osize):
        in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)
        in_t = in_t.contiguous(memory_format=memory_format)
        out_t = F.interpolate(
            in_t, size=(osize, osize), recompute_scale_factor=False, mode="nearest"
        )
        # compute expected output as OpenCV: src index = floor(dst index * scale)
        expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)
        scale = 1.0 * isize / osize
        for o1 in range(osize):
            i1_f32 = o1 * scale
            i1 = int(i1_f32)
            for o2 in range(osize):
                i2_f32 = o2 * scale
                i2 = int(i2_f32)
                expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]
        expected_out = expected_out.to(device=device)
        self.assertEqual(out_t, expected_out)

    helper(torch.contiguous_format, 20, 11)
    helper(torch.channels_last, 20, 11)
    helper(torch.contiguous_format, 10, 15)
    helper(torch.channels_last, 10, 15)
def test_upsamplingNearestExact2d_correctness(self, device):
    """2D 'nearest-exact' interpolation must match scikit-image/scipy indexing."""
    # Checks https://github.com/pytorch/pytorch/issues/34808
    def helper(memory_format, isize, osize):
        in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)
        in_t = in_t.contiguous(memory_format=memory_format)
        out_t = F.interpolate(
            in_t, size=(osize, osize), recompute_scale_factor=False, mode="nearest-exact"
        )
        # compute expected output as Scikit-Image/Scipy:
        # src index = floor((dst index + 0.5) * scale)
        expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)
        scale = 1.0 * isize / osize
        for o1 in range(osize):
            i1_f32 = (o1 + 0.5) * scale
            i1 = int(i1_f32)
            for o2 in range(osize):
                i2_f32 = (o2 + 0.5) * scale
                i2 = int(i2_f32)
                expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]
        expected_out = expected_out.to(device=device)
        self.assertEqual(out_t, expected_out)

    helper(torch.contiguous_format, 20, 11)
    helper(torch.channels_last, 20, 11)
    helper(torch.contiguous_format, 10, 15)
    helper(torch.channels_last, 10, 15)
def test_upsamplingNearest3d(self, device):
    """3D nearest / nearest-exact upsampling: values, memory-format
    propagation, grad checks, and CPU/CUDA parity."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'

    def helper(memory_format, mode):
        m = nn.Upsample(size=4, mode=mode)
        in_t = torch.ones(1, 2, 2, 2, 2, device=device).contiguous(memory_format=memory_format)
        in_uint8_t = torch.ones(
            1, 2, 2, 2, 2, dtype=torch.uint8, device=device
        ).contiguous(memory_format=memory_format)
        # NOTE(review): warnings are recorded but `w` is never asserted on here.
        with warnings.catch_warnings(record=True) as w:
            out_t = m(in_t)
            out_uint8_t = m(in_uint8_t)
        expected_output = torch.ones(1, 2, 4, 4, 4, device=device)
        self.assertEqual(expected_output, out_t)
        self.assertEqual(expected_output.to(torch.uint8), out_uint8_t)
        # Assert that memory format is carried through to the output
        self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

        input = torch.randn(
            1, 2, 2, 2, 2, requires_grad=True, device=device
        ).contiguous(memory_format=memory_format)
        gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # Assert that cpu and cuda handle channels_last memory format in the same way
        # https://github.com/pytorch/pytorch/issues/54590
        if torch.device(device).type == 'cuda':
            a = torch.ones(
                2, 2, 2, 3, 4, device=device, requires_grad=True
            ).contiguous(memory_format=torch.channels_last_3d)
            # make the data asymmetric; ensure that cuda/cpu handle channels_last appropriately.
            a[1][1][1][2][2] = a[1][1][1][2][3] = 0

            out_cuda = torch.nn.functional.interpolate(a, scale_factor=2, mode=mode)
            out_cpu = torch.nn.functional.interpolate(a.to('cpu'), scale_factor=2, mode=mode)
            self.assertEqual(out_cpu, out_cuda.to('cpu'))

            gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_fwd_over_rev=check_forward_ad)

            gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_fwd_over_rev=check_forward_ad)

    helper(torch.contiguous_format, "nearest")
    helper(torch.channels_last_3d, "nearest")
    helper(torch.contiguous_format, "nearest-exact")
    helper(torch.channels_last_3d, "nearest-exact")
def test_upsamplingNearest3d_correctness(self, device):
    """3D 'nearest' interpolation must match OpenCV's INTER_NEAREST indexing."""
    def helper(memory_format, isize, osize):
        in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)
        in_t = in_t.reshape(1, 1, isize, isize, isize)
        in_t = in_t.contiguous(memory_format=memory_format)
        out_t = F.interpolate(
            in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode="nearest"
        )
        # compute expected output as OpenCV: src index = floor(dst index * scale)
        expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)
        scale = 1.0 * isize / osize
        for o1 in range(osize):
            i1_f32 = o1 * scale
            i1 = int(i1_f32)
            for o2 in range(osize):
                i2_f32 = o2 * scale
                i2 = int(i2_f32)
                for o3 in range(osize):
                    i3_f32 = o3 * scale
                    i3 = int(i3_f32)
                    expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]
        expected_out = expected_out.to(device=device)
        self.assertEqual(out_t, expected_out)

    helper(torch.contiguous_format, 20, 11)
    helper(torch.channels_last_3d, 20, 11)
    helper(torch.contiguous_format, 10, 15)
    helper(torch.channels_last_3d, 10, 15)
def test_upsamplingNearestExact3d_correctness(self, device):
    """3D 'nearest-exact' interpolation must match scikit-image/scipy indexing."""
    # Checks https://github.com/pytorch/pytorch/issues/34808
    def helper(memory_format, isize, osize):
        in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)
        in_t = in_t.reshape(1, 1, isize, isize, isize)
        in_t = in_t.contiguous(memory_format=memory_format)
        out_t = F.interpolate(
            in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode="nearest-exact"
        )
        # compute expected output as Scikit-Image/Scipy:
        # src index = floor((dst index + 0.5) * scale)
        expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)
        scale = 1.0 * isize / osize
        for o1 in range(osize):
            i1_f32 = (o1 + 0.5) * scale
            i1 = int(i1_f32)
            for o2 in range(osize):
                i2_f32 = (o2 + 0.5) * scale
                i2 = int(i2_f32)
                for o3 in range(osize):
                    i3_f32 = (o3 + 0.5) * scale
                    i3 = int(i3_f32)
                    expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]
        expected_out = expected_out.to(device=device)
        self.assertEqual(out_t, expected_out)

    helper(torch.contiguous_format, 20, 11)
    helper(torch.channels_last_3d, 20, 11)
    helper(torch.contiguous_format, 10, 15)
    helper(torch.channels_last_3d, 10, 15)
@parametrize_test("antialias", [True, False])
@parametrize_test("align_corners", [True, False])
def test_upsamplingBilinear2d(self, device, antialias, align_corners):
    """Bilinear resize of a constant tensor stays constant; also checks
    memory-format propagation, grad checks, and CPU/CUDA parity."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'

    kwargs = dict(mode='bilinear', align_corners=align_corners, antialias=antialias)
    for memory_format in [torch.contiguous_format, torch.channels_last]:
        # test float scale factor up & downsampling
        for scale_factor in [0.5, 1.5, 2]:
            in_t = torch.ones(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()
            out_size = int(math.floor(in_t.shape[-1] * scale_factor))
            # catch_warnings only silences resize warnings here; `w` is unused.
            with warnings.catch_warnings(record=True) as w:
                out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
            self.assertEqual(torch.ones(2, 3, out_size, out_size, device=device), out_t.data)
            # Assert that memory format is carried through to the output
            self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
            out_t.backward(torch.randn_like(out_t))
            self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))

            if torch.device(device).type == 'cuda':
                # Bilinear backward is nondeterministic because of atomicAdd usage
                nondet_tol = 1e-5
            else:
                nondet_tol = 0.0

            input = torch.randn(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()
            gradcheck(
                lambda x: F.interpolate(x, out_size, **kwargs),
                [input],
                check_forward_ad=check_forward_ad, nondet_tol=nondet_tol
            )
            gradgradcheck(
                lambda x: F.interpolate(x, out_size, **kwargs),
                [input],
                check_fwd_over_rev=check_forward_ad, nondet_tol=nondet_tol
            )

            # Assert that cpu and cuda give same results
            if torch.device(device).type == 'cuda':
                for shapes in [
                    (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
                ]:
                    a_cuda = torch.randn(
                        *shapes, device=device
                    ).contiguous(memory_format=memory_format).requires_grad_()
                    a_cpu = a_cuda.detach().cpu().requires_grad_()

                    with warnings.catch_warnings(record=True):
                        out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, **kwargs)
                        out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, **kwargs)

                    self.assertEqual(out_cpu, out_cuda.cpu())

                    g_cuda = torch.randn_like(out_cuda)
                    g_cpu = g_cuda.cpu()

                    out_cuda.backward(g_cuda)
                    out_cpu.backward(g_cpu)

                    self.assertEqual(a_cuda.grad, a_cpu.grad)
@parametrize_test("memory_format", [torch.contiguous_format, torch.channels_last])
def test_upsamplingBilinear2d_aa_correctness(self, device, memory_format):
    """Antialiased bilinear downsample to 2x2 must match PIL.Image.resize."""
    t_in = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)
    t_in = t_in.contiguous(memory_format=memory_format)
    # This expected result was obtained using PIL.Image.resize:
    # for c in range(3):
    #   a_in = t_in.numpy()[0, c, ...]
    #   pil_in = Image.fromarray(a_in)
    #   pil_out = pil_in.resize((2, 2), resample=Image.LINEAR)
    expected_out = torch.tensor([
        17.035713, 20.25, 42.75, 45.964287, 81.03572, 84.25,
        106.75, 109.96428, 145.0357, 148.25, 170.75, 173.9643
    ], device=device, dtype=t_in.dtype).reshape(1, 3, 2, 2)
    t_out = F.interpolate(t_in, size=(2, 2), mode="bilinear", align_corners=False, antialias=True)
    self.assertEqual(expected_out, t_out)
@parametrize_test("antialias", [True, False])
@parametrize_test("align_corners", [True, False])
def test_upsamplingBicubic2d(self, device, antialias, align_corners):
    """Bicubic upsampling of a constant tensor stays constant, and gradcheck passes.

    BUGFIX: removed two leftover debug ``print`` calls that spammed stdout on
    every run (one of them dumped the entire interpolated tensor).
    """
    kwargs = dict(mode='bicubic', align_corners=align_corners, antialias=antialias)
    # test float scale factor up & downsampling
    # for scale_factor in [0.5, 1, 1.5, 2]:
    for scale_factor in [2, ]:
        in_t = torch.ones(2, 3, 8, 8, device=device)
        out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
        out_size = int(math.floor(in_t.shape[-1] * scale_factor))
        # A constant input must interpolate to the same constant.
        expected_out = torch.ones(2, 3, out_size, out_size, device=device)
        self.assertEqual(expected_out, out_t, atol=1e-5, rtol=0)

        if torch.device(device).type == 'cuda':
            # Bicubic backward is nondeterministic because of atomicAdd usage
            nondet_tol = 1e-5
        else:
            nondet_tol = 0.0
        inpt = torch.ones(2, 3, 8, 8, requires_grad=True, device=device)
        gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [inpt], nondet_tol=nondet_tol)
def test_upsamplingBicubic2d_correctness(self, device):
    """align_corners=False bicubic interpolation must match OpenCV's reference output.

    BUGFIX: dropped a stray ``torch.set_printoptions(precision=5)`` debug
    leftover — it mutated process-global print settings for every test that
    ran afterwards.
    """
    # test output against known input: align_corners=False result must match opencv
    in_t = torch.arange(8., device=device).view(1, 2, 2, 2)
    expected_out_t = torch.tensor(
        [[[[-0.31641, 0.01562, 0.56250, 0.89453],
           [0.34766, 0.67969, 1.22656, 1.55859],
           [1.44141, 1.77344, 2.32031, 2.65234],
           [2.10547, 2.43750, 2.98438, 3.31641]],

          [[3.68359, 4.01562, 4.56250, 4.89453],
           [4.34766, 4.67969, 5.22656, 5.55859],
           [5.44141, 5.77344, 6.32031, 6.65234],
           [6.10547, 6.43750, 6.98438, 7.31641]]]], device=device)
    out_t = F.interpolate(in_t, scale_factor=2, mode='bicubic', align_corners=False)
    self.assertEqual(out_t, expected_out_t, atol=1e-5, rtol=0)
@parametrize_test("memory_format", [torch.contiguous_format, torch.channels_last])
def test_upsamplingBicubic2d_aa_correctness(self, device, memory_format):
    """Antialiased bicubic downsample to 2x2 must match PIL.Image.resize."""
    t_in = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)
    t_in = t_in.contiguous(memory_format=memory_format)
    # This expected result was obtained using PIL.Image.resize:
    # for c in range(3):
    #   a_in = t_in.numpy()[0, c, ...]
    #   pil_in = Image.fromarray(a_in)
    #   pil_out = pil_in.resize((2, 2), resample=Image.BICUBIC)
    expected_out = torch.tensor([
        15.1205635, 18.760439, 44.23956, 47.879436, 79.12056, 82.76044,
        108.23956, 111.87944, 143.12057, 146.76044, 172.23956, 175.87943
    ], device=device, dtype=t_in.dtype).reshape(1, 3, 2, 2)
    t_out = F.interpolate(t_in, size=(2, 2), mode="bicubic", align_corners=False, antialias=True)
    self.assertEqual(expected_out, t_out)
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_max_nhwc(self, device, dtype):
    """AdaptiveMaxPool2d on channels-last (NHWC) input must match the
    contiguous reference in output, indices, and input gradient, and must
    emit channels-last output/indices.

    Fix: the upstream gradient previously hard-coded its leading dims as
    ``(4, 8, ...)`` instead of using the helper's ``n``/``c`` parameters;
    it only worked because every call site passed ``n=4, c=8``. The helper
    now uses ``(n, c, ...)`` so it is correct for any requested size.
    """
    def helper(n, c, h, w, output_height, output_width, contig):
        # channels-last input and matching upstream gradient
        input = torch.randint(1, 10, (n, c, h, w), device=device, dtype=dtype)
        input = input.contiguous(memory_format=torch.channels_last)
        grad = torch.randint(1, 10, (n, c, output_height, output_width), device=device, dtype=dtype)
        grad = grad.contiguous(memory_format=torch.channels_last)
        if not contig:
            # strided channel slice makes both tensors non-contiguous
            input = input[:, ::2, :, :]
            grad = grad[:, ::2, :, :]
        input.requires_grad_(True)
        pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)

        # contiguous reference path
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)

        out, ind = pool(input)
        out.backward(grad)
        ref_out, ref_ind = ref_pool(ref_input)
        ref_out.backward(ref_grad)

        # memory-format expectations: NHWC path stays NHWC, reference stays contiguous
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_ind.is_contiguous())
        # numerical equivalence of outputs, indices, and input gradients
        self.assertEqual(out, ref_out)
        self.assertEqual(ind, ref_ind)
        self.assertEqual(input.grad, ref_input.grad)

    for contig in [True, False]:
        helper(4, 8, 10, 10, 7, 7, contig)
        helper(4, 8, 9, 14, 5, 8, contig)
        helper(4, 8, 11, 11, 1, 1, contig)
def test_embedding_dense_grad(self, device):
    """Gradcheck and gradgradcheck a dense F.embedding lookup."""
    module = nn.Embedding(20, 20).to(device)

    def lookup(w):
        # fixed index pattern with a repeated index (1) for coverage
        idx = torch.tensor([[0, 1, 1, 2], [3, 5, 7, 11]], dtype=torch.long).to(device)
        return torch.nn.functional.embedding(idx, w)

    _assertGradAndGradgradChecks(self, lookup, (module.weight, ))
def test_embedding_scalar_weight_error(self, device):
    """F.embedding must reject weight tensors that are not 2-D."""
    indices = torch.rand(2, 2, device=device).long()
    bad_weights = (
        torch.tensor(1.0, device=device),                    # 0-D scalar
        torch.tensor(1.0, device=device).reshape(1, 1, 1),   # 3-D tensor
    )
    for bad_weight in bad_weights:
        with self.assertRaisesRegex(RuntimeError, "'weight' must be 2-D"):
            torch.nn.functional.embedding(indices, bad_weight)
@dtypesIfCUDA(torch.float16, torch.float64)
@dtypes(torch.float64)
def test_embedding_backward(self, device, dtype):
    """Sparse nn.Embedding gradients.

    Checks that a single backward yields a sparse grad holding exactly the
    looked-up indices with all-ones values, that two backward passes
    accumulate by concatenating (uncoalesced) indices/values, and that an
    index mutated between passes shows up in the accumulated grad.
    """
    embedding = nn.Embedding(10, 3, sparse=True)
    tensor = torch.tensor([[7, 1, 3]])
    # each looked-up row receives a gradient of ones from .sum().backward()
    ones = torch.tensor(1., dtype=dtype).expand(3, 3)
    # expected results after two (uncoalesced) backward passes
    tensorTwice = tensor.repeat(1, 2)
    onesTwice = torch.cat((ones, ones))

    embedding = embedding.to(dtype=dtype).to(device)
    tensor = tensor.to(device)
    ones = ones.to(device)
    tensorTwice = tensorTwice.to(device)
    onesTwice = onesTwice.to(device)

    # single backward: sparse grad holds exactly the used indices
    embedding.zero_grad()
    embedding(tensor[0]).sum().backward()
    self.assertEqual(embedding.weight.grad._indices(), tensor)
    self.assertEqual(embedding.weight.grad._values(), ones)

    # two backwards: indices/values are concatenated, not coalesced
    embedding.zero_grad()
    embedding(tensor[0]).sum().backward()
    embedding(tensor[0]).sum().backward()
    self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
    self.assertEqual(embedding.weight.grad._values(), onesTwice)

    # mutate an index between passes; the second pass must reflect it
    embedding.zero_grad()
    embedding(tensor[0]).sum().backward()
    tensor[0, 0] = 8
    embedding(tensor[0]).sum().backward()
    tensorTwice[0, 3] = 8
    self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
    self.assertEqual(embedding.weight.grad._values(), onesTwice)
@dtypesIfCUDA(*((torch.float, torch.double, torch.bfloat16, torch.half)
                if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
@dtypes(torch.float32)
def test_embedding_padding_idx(self, device, dtype):
    """nn.Embedding padding_idx behavior.

    Covers: padded positions embed to zero (dense and sparse), negative
    padding_idx wraps around, the padding row's weight can be overwritten
    manually, out-of-range padding_idx raises, and the padding row's weight
    is unchanged by backward and double backward.
    """
    embedding = nn.Embedding(10, 20, padding_idx=0).to(device, dtype)
    input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    # positions holding index 0 (the padding index) must embed to zeros
    self.assertEqual(output[0][0].sum(), 0)
    self.assertEqual(output[1][2].sum(), 0)

    # same check with a sparse embedding
    embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True).to(device, dtype)
    input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    self.assertEqual(output[0][0].sum(), 0)
    self.assertEqual(output[1][2].sum(), 0)

    # negative indexing check for padding_idx
    # padding_idx=-2, num_embeddings=10 ==> index 8 padded
    embedding = nn.Embedding(10, 20, padding_idx=-2).to(device, dtype)
    input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    self.assertEqual(output[0][2].sum(), 0)
    self.assertEqual(output[1][1].sum(), 0)

    embedding = nn.Embedding(10, 20, padding_idx=-2, sparse=True).to(device, dtype)
    input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    self.assertEqual(output[0][2].sum(), 0)
    self.assertEqual(output[1][1].sum(), 0)

    # change padding vector
    padding_vector = torch.ones(20, dtype=dtype, device=device)
    embedding = nn.Embedding(10, 20, padding_idx=2, sparse=True).to(device, dtype)
    with torch.no_grad():
        embedding.weight[2] = padding_vector
    input = torch.tensor([0, 2], dtype=torch.long).to(device)
    output = embedding(input)
    # the padded position returns the manually-set weight row
    self.assertEqual(output[1], padding_vector)

    # out of bounds check for padding_idx
    self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=25)
    self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=-25)

    padding_idx = 0
    embedding = nn.Embedding(5, 2, padding_idx=padding_idx).to(device, dtype)
    for n in (1, 2, 1000):  # Need large N to trigger all the methods we have implemented
        for other_indices in ([], [1, 3], [2]):
            indices = torch.tensor(other_indices + [padding_idx] * n, dtype=torch.long).to(device)
            pre = embedding.weight[padding_idx].clone()
            embedding(indices).sum().backward()
            # applying the (post-backward) grad must leave the padding row unchanged
            after = (embedding.weight + embedding.weight.grad)[padding_idx]
            embedding.zero_grad()
            self.assertEqual(after, pre)

            # test double backward
            emb_sum = embedding(indices).sum()
            emb_grad = torch.autograd.grad(outputs=emb_sum, inputs=list(embedding.parameters()), retain_graph=True)
            scalar = emb_grad[0].sum() + emb_sum
            scalar.backward()
            after = (embedding.weight + embedding.weight.grad)[padding_idx]
            embedding.zero_grad()
            self.assertEqual(after, pre)
# Check correctness of torch.nn.functional.embedding_bag forward and
# backward functions with padding_idx, given a 1D input separated into bags
# with an offset array. Compare against an equivalent 2D input that uses
# padding indices to fill in the gaps indicated by the offset array
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.half, torch.bfloat16)
def test_embedding_bag_1D_padding_idx(self, device, dtype):
    """1-D embedding_bag with padding_idx must match an equivalent 2-D
    input where gaps implied by the offsets are filled with padding_idx,
    in both forward and weight gradient, across mode/sparse/offset-format
    combinations."""
    num_features = 3
    max_indices_per_bag = 10
    num_bags = 10
    num_words = 100

    # Build random 1-D indices plus a bag-offset array. `allpad` makes
    # every index equal to 1 so the whole input can be treated as padding.
    def gen_1D_indices_offsets(include_last_offset, allpad):
        indices = []
        offsets = []
        cur_offset = 0

        # Make one bag full and one bag empty, for extra coverage
        empty_bag = random.randint(0, num_bags - 1)
        full_bag = empty_bag
        while full_bag == empty_bag:
            full_bag = random.randint(0, num_bags - 1)

        for bag in range(num_bags):
            offsets.append(cur_offset)
            if bag == full_bag:
                bag_size = max_indices_per_bag
            elif bag == empty_bag:
                bag_size = 0
            else:
                bag_size = random.randint(1, max_indices_per_bag - 1)
            indices += [1 if allpad else random.randint(0, num_words - 1) for _ in range(bag_size)]
            cur_offset += bag_size

        # embedding_bag requires first entry of offsets to be 0
        assert offsets[0] == 0

        indices = torch.tensor(indices, device=device)

        if include_last_offset:
            offsets.append(indices.size(0))

        offsets = torch.tensor(offsets, device=device)

        return indices, offsets

    # Convert a 1-D indices-offsets representation into 2-D. Fill any empty
    # indices with padding_idx
    def gen_2D_indices_from_1D(indices_1D, offsets, include_last_offset, padding_idx):
        assert offsets[0] == 0
        if include_last_offset:
            offsets = offsets[:-1]
        indices_2D = torch.empty(num_bags, max_indices_per_bag, device=device, dtype=torch.long)
        for bag in range(num_bags):
            # Determine the start and end position of the bag within indices_1D
            start = offsets[bag]
            end = len(indices_1D) if bag + 1 == num_bags else offsets[bag + 1]
            end = min(len(indices_1D), end)

            # Pull out the bag's indices from indices_1D, and fill any
            # remaining space with padding indices
            indices_in_bag = []
            for item_pos in range(0, max_indices_per_bag):
                if (start + item_pos) < end:
                    indices_in_bag.append(indices_1D[start + item_pos])
                else:
                    indices_in_bag.append(padding_idx)
            indices_2D[bag] = torch.tensor(indices_in_bag, device=device)
        return indices_2D

    test_cases = product(['max', 'mean', 'sum'], [False, True], [False, True], [False, True])

    for mode, sparse, include_last_offset, allpad in test_cases:
        # Max sparse and bfloat16 are not supported
        if mode == 'max':
            if sparse or (dtype == torch.bfloat16):
                continue
        indices_1D, offsets = gen_1D_indices_offsets(include_last_offset, allpad)
        # try every index actually present as the padding index, plus no padding
        for padding_idx_1D in list(set(indices_1D.tolist())) + [None]:
            msg = (
                f"mode: '{mode}', sparse: {sparse}, include_last_offset: {include_last_offset}, "
                f"padding_idx_1D: {padding_idx_1D}")

            # If 1D input does not use a padding index, we still need one for the 2D input,
            # so we can add one dummy word to the weights to act as the padded word
            padding_idx_2D = padding_idx_1D if padding_idx_1D is not None else num_words
            num_words_with_padding = num_words if padding_idx_1D is not None else num_words + 1

            indices_2D = gen_2D_indices_from_1D(
                indices_1D,
                offsets,
                include_last_offset,
                padding_idx_2D)

            weights = torch.randn(
                num_words_with_padding,
                num_features,
                dtype=dtype,
                device=device,
                requires_grad=True)
            weights_check = weights.clone().detach().requires_grad_(True)

            bag = torch.nn.functional.embedding_bag(
                indices_1D,
                weights,
                offsets,
                padding_idx=padding_idx_1D,
                mode=mode,
                sparse=sparse,
                include_last_offset=include_last_offset)

            bag_check = torch.nn.functional.embedding_bag(
                indices_2D,
                weights_check,
                padding_idx=padding_idx_2D,
                mode=mode,
                sparse=sparse)
            self.assertEqual(bag, bag_check, msg=msg)

            bag.sum().backward()
            bag_check.sum().backward()

            # Sometimes, half dtype gradients mismatch by a greater amount
            # than other dtypes
            if dtype in [torch.half, torch.bfloat16]:
                atol = 0.01
                rtol = 0.01
            else:
                atol = None
                rtol = None
            self.assertEqual(weights.grad, weights_check.grad, msg=msg, atol=atol, rtol=rtol)
# Check correctness of torch.nn.functional.embedding_bag forward and
# backward functions with padding_idx, given a 2D indices input. Compare
# against torch.nn.functional.embedding followed by a reduction.
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.half, torch.bfloat16)
def test_embedding_bag_2D_padding_idx(self, device, dtype):
    """2-D embedding_bag with padding_idx must match a pure-Python reference
    (F.embedding followed by a padding-aware sum/mean/max reduction) in both
    forward and weight gradient."""
    # Use a Python implementation of embedding_bag with padding_idx support
    # to check torch.nn.functional.embedding_bag correctness
    def embedding_bag_check(indices, weights, mode, sparse, padding_idx):
        assert padding_idx is not None
        embedding = torch.nn.functional.embedding(
            indices,
            weights,
            padding_idx=padding_idx,
            sparse=sparse)

        # reduce over the per-bag (last) dimension of the indices
        reduction_dim = indices.dim() - 1

        if mode == 'sum' or mode == 'mean':
            # We must avoid including elements at padding_idx in the
            # sum/mean, so multiply those elements by 0, and multiply
            # all other elements by 1
            per_sample_weights = indices.ne(padding_idx).to(dtype).unsqueeze(-1)
            res = embedding.mul(per_sample_weights).sum(dim=reduction_dim)

            if mode == 'mean':
                weights_sum = per_sample_weights.sum(dim=reduction_dim)
                res = res.div(weights_sum)

        elif mode == 'max':
            # We must avoid allowing elements at padding_idx to be chosen
            # as the max, so set those elements to negative infinity
            res = embedding.masked_fill(
                indices.unsqueeze(-1) == padding_idx, -float('inf')
            ).amax(dim=reduction_dim)

        else:
            raise RuntimeError(f"mode '{mode}' is not available")

        # If a row is all padding, set its corresponding result row to 0.
        # This is needed because the above mean and max mode
        # implementations set these elements to nan and -inf, respectively
        if mode in ['mean', 'max']:
            res = res.masked_fill(
                indices.eq(padding_idx).all(dim=-1).unsqueeze(-1),
                0)

        return res

    num_features = 3
    num_words = 10
    indices_dim1 = 10

    for mode, sparse, allpad, indices_dim0 in product(['max', 'mean', 'sum'], [False, True], [False, True], [1, 10]):
        # Max sparse and bfloat16 are not supported
        if mode == 'max':
            if sparse or (dtype == torch.bfloat16):
                continue

        if allpad:
            # every index is 1, so the whole input becomes padding below
            indices = torch.empty(indices_dim0, indices_dim1, dtype=torch.long, device=device).fill_(1)
        else:
            indices = torch.randint(0, num_words, (indices_dim0, indices_dim1), device=device)

            if indices_dim0 > 1:
                # Fill one row with duplicate index so we can test with a fully
                # padded row
                duplicate_row = random.randint(0, indices_dim0 - 1)
                indices[duplicate_row] = indices[duplicate_row][0]

        # try every index actually present as the padding index
        for padding_idx in list(set(indices.flatten(0, -1).tolist())):
            weights = torch.randn(num_words, num_features, dtype=dtype, device=device, requires_grad=True)
            weights_check = weights.clone().detach().requires_grad_(True)

            msg = (
                f"mode: '{mode}', sparse: {sparse}, padding_idx: {padding_idx}, "
                f"allpad: {allpad}, indices.size(): {indices.size()}")

            # Check forward with a Python implementation of padding_idx embedding_bag
            bag_check = embedding_bag_check(
                indices,
                weights_check,
                mode,
                sparse,
                padding_idx)
            bag = torch.nn.functional.embedding_bag(
                indices,
                weights,
                padding_idx=padding_idx,
                mode=mode,
                sparse=sparse)

            self.assertEqual(bag, bag_check, msg=msg)

            bag_check.sum().backward()
            grad_check = weights_check.grad

            bag.sum().backward()
            grad = weights.grad

            # Sometimes, half dtype gradients mismatch by a greater amount
            # than other dtypes
            if dtype in [torch.half, torch.bfloat16]:
                atol = 0.01
                rtol = 0.01
            else:
                atol = None
                rtol = None
            self.assertEqual(grad, grad_check, msg=msg, atol=atol, rtol=rtol)
def test_masked_softmax(self, device):
    """Compare torch._masked_softmax against a dense exp-and-normalize
    reference, for several (batch, heads, seq) sizes with a random
    per-(batch, key) 0/1 mask broadcast over heads and query positions."""
    sizes = [(1, 1, 32), (3, 16, 310), (12, 4, 1024), (4, 2, 1200)]
    for (B, num_heads, L) in sizes:
        input = torch.randn((B, num_heads, L, L))
        mask = torch.randint(0, 2, (B, L))
        if (self.device_type == "cuda"):
            input = input.cuda()
            mask = mask.cuda()
        # broadcast the per-row mask over heads and query positions
        mask = mask.reshape(B, 1, 1, L).expand(B, num_heads, L, L).bool()
        native_res = torch._masked_softmax(input, mask)
        mask = mask.float()

        def slow_masked_softmax(input, mask):
            # reference: zero masked entries, then normalize each row.
            # NOTE(review): a fully-zero mask row makes the denominator 0
            # (0/0 -> nan); presumably the random mask makes that unlikely
            # at these sizes — confirm if this test turns flaky.
            exp = torch.exp(input)
            exp = exp * mask
            s = exp.sum(dim=3, keepdim=True).expand(exp.size())
            return exp / s
        pt_res = slow_masked_softmax(input, mask)
        self.assertEqual(pt_res, native_res, exact_dtype=True)
@onlyCUDA
def test_masked_softmax_transformer_layout(self, device):
    """_masked_softmax fed a (B, L) boolean mask (transformer layout) must
    match a dense reference that expands the mask to (B, H, L, L)."""
    batch, heads, seq = 211, 16, 42
    scores = torch.randn((batch, heads, seq, seq))
    keep = torch.randint(0, 2, (batch, seq))
    if (self.device_type == "cuda"):
        scores = scores.cuda()
        keep = keep.cuda()
    keep = keep.bool()

    # native kernel consumes the compact (B, L) mask directly
    fast_result = torch._masked_softmax(scores, keep)

    # dense reference: expand the mask, zero out entries, normalize rows
    dense_mask = keep.reshape(batch, 1, 1, seq).expand(batch, heads, seq, seq).float()
    numer = torch.exp(scores) * dense_mask
    denom = numer.sum(dim=3, keepdim=True).expand(numer.size())
    slow_result = numer / denom

    self.assertEqual(slow_result, fast_result, exact_dtype=True)
# Test fails on Vg20
@skipCUDAIfRocm
@dtypesIfCUDA(torch.half, torch.float)
@dtypes(torch.float)
def test_softmax_results(self, device, dtype):
    """Compare softmax/log_softmax forward, first backward, and double
    backward on ``device`` against a CPU reference, over sizes and slice
    offsets chosen to exercise both the persistent and vectorized kernel
    paths (including empty dims)."""
    # Non-even sizes and non-zero shifts test fallback paths in vectorized kernel
    # Note: dim1 > 1024 is needed to exercise the vectorized (non-persistent) path, (16, 30576) is BERT-esque
    sizes = [(0, 10), (32, 20), (10, 0), (31, 20), (32, 21), (31, 23), (32, 1536), (31, 2048), (33, 2049), (16, 30576)]
    shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
    for fn in [F.softmax, F.log_softmax]:
        for size in sizes:
            for shift in shifts:
                input = torch.rand(size, device=device, dtype=dtype)
                # Note: With the largest tests we can hit upper limit of fp16 when we
                # sum, so scale the input down to stay in a nicer range.
                if dtype == torch.float16:
                    input = input / 100.
                input = input[shift[0]:, shift[1]:]
                # Note; Don't want to bprop back through slice op
                input = input.detach().requires_grad_(True)
                ref_input = input.clone().cpu().detach().requires_grad_(True)
                for dim in [0, 1]:
                    ref_output = fn(ref_input, dtype=torch.float, dim=dim)
                    output = fn(input, dtype=torch.float, dim=dim)
                    grad_output = torch.rand(size, device=device, dtype=dtype)
                    grad_output = grad_output[shift[0]:, shift[1]:]
                    ref_grad_output = grad_output.clone().cpu().detach()
                    # first-order grads with create_graph=True so we can
                    # differentiate through them below (double backward)
                    grad_input, = torch.autograd.grad(output, input, grad_outputs=(grad_output), create_graph=True)
                    ref_grad_input, = torch.autograd.grad(ref_output, ref_input,
                                                          grad_outputs=(ref_grad_output), create_graph=True)
                    grad_input.sum().backward()
                    ref_grad_input.sum().backward()

                    self.assertEqual(output, ref_output)
                    self.assertEqual(grad_input, ref_grad_input)
                    self.assertEqual(input.grad, ref_input.grad)
@onlyCUDA
@dtypes(torch.float, torch.half)
@largeTensorTest("20GB")
@largeTensorTest("90GB", "cpu")
@precisionOverride({torch.half: 0.001})
def test_softmax_64bit_indexing(self, device, dtype):
    """Regression tests for log_softmax on tensors with more than 2**31
    elements: forward and backward on CUDA must match a CPU float
    reference (gh-52715, gh-52716)."""
    def run_test(*shape):
        x = torch.randn(shape, device="cuda", dtype=torch.float16, requires_grad=True)
        y = F.log_softmax(x, dim=-1, dtype=dtype)
        y.backward(y)
        with torch.no_grad():
            xx = x.cpu().requires_grad_()
        # CPU reference computed in float, then cast back to dtype
        yy = F.log_softmax(xx.float(), dim=-1).to(dtype)
        yy.backward(yy)
        self.assertEqual(y, yy)
        self.assertEqual(x.grad, xx.grad)
    run_test(1100000000, 2)  # Illegal memory access https://github.com/pytorch/pytorch/issues/52715
    run_test(2200000000, 1)  # invalid configuration argument https://github.com/pytorch/pytorch/issues/52716
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
def test_log_softmax_big(self, device, dtype):
    """log_softmax is shift-invariant: adding a large offset that is
    exactly representable in ``dtype`` must not change the result."""
    def check(shape):
        base = torch.randint(100, shape, dtype=dtype, device=device)
        # offset chosen so base + offset is still exactly representable
        offset = 1.5e3 if dtype == torch.half else 1e7
        shifted = base + offset
        self.assertEqual(F.log_softmax(base, -1), F.log_softmax(shifted, -1))

    check((16, 4))
    if self.device_type == 'cuda':
        # exercise the non-persistent softmax kernel (large last dim)
        check((4, 1536))
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_large_nosplit(self, device):
    """Smoke test: very large convolutions must route to the fallback
    implementation without crashing; correctness of that fallback is
    covered by other tests."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    # very wide spatial extent
    conv_wide = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype)
    conv_wide(torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device))
    # very many output channels
    conv_deep = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype)
    conv_deep(torch.randn(1, 1, 2048, 1024, dtype=dtype, device=device))
def test_conv_noncontig_weights(self, device):
    """conv{1,2,3}d and conv_transpose{1,2,3}d must accept non-contiguous
    (expanded) weights, grouped and ungrouped, in forward and backward."""
    channels = 3
    for dim, grouped in itertools.product((1, 2, 3), (False, True)):
        groups = channels if grouped else 1
        # expanded weight is non-contiguous by construction
        weight = torch.randn([3] * dim, device=device)
        weight = weight.expand([channels, int(channels / groups)] + list(weight.shape))
        weight = weight.detach().requires_grad_()
        inp = torch.randn([1, channels] + [5] * dim, device=device, requires_grad=True)
        for op_name in ('conv{}d'.format(dim), 'conv_transpose{}d'.format(dim)):
            getattr(F, op_name)(inp, weight, groups=groups).sum().backward()
def test_conv_noncontig_weights_and_bias(self, device):
    """Conv2d with non-contiguous weight and bias tensors must produce the
    same output as the contiguous versions of those same tensors.

    Note the ordering is deliberate: the non-contiguous run happens first,
    then the module's parameters are swapped for their contiguous copies
    before the second run.
    """
    # need floats to exercise https://github.com/pytorch/pytorch/issues/16018
    for bias in [True, False]:
        conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                          bias=bias).to(device, torch.float)

        # slicing the trailing dim of a 5-D tensor gives a non-contiguous 4-D view
        input_nc = torch.randn((1, 3, 224, 224, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
        input_c = input_nc.contiguous()

        weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
        conv1.weight = nn.Parameter(weight_nc)
        weight_c = conv1.weight.contiguous()

        if bias:
            bias_nc = torch.randn((64, 2), device=device, dtype=torch.float)[:, 1]
            conv1.bias = nn.Parameter(bias_nc)
            bias_c = conv1.bias.contiguous()

        out1 = conv1(input_nc)
        # swap in the contiguous parameters and re-run
        conv1.weight = nn.Parameter(weight_c)
        if bias:
            conv1.bias = nn.Parameter(bias_c)
        out2 = conv1(input_c)
        self.assertEqual(out1, out2)
def test_save_lstm_compatibility(self, device):
    """Loading a pre-1.8 LSTM state (no ``proj_size``) via __setstate__
    must still work and reproduce the original outputs."""
    # Test that saving an LSTM in PyTorch 1.7 and older can still be
    # loaded in newer versions of PyTorch.
    model = nn.LSTM(2, 3)
    x = torch.randn(32, 5, 2)
    expected = model(x)

    # Get a state dict for PyTorch 1.7 LSTM. Before PyTorch 1.8, proj_size
    # didn't exist.
    assert model.proj_size == 0
    # NOTE: this aliases the live model's __dict__, so the `del` below also
    # removes proj_size from `model` itself — acceptable here since `model`
    # is not used again except through the pre-computed `expected`.
    state_dict = model.__dict__
    del state_dict['proj_size']

    # load a model
    loaded_model = nn.LSTM(2, 3)
    loaded_model.__setstate__(state_dict)
    result = loaded_model(x)
    self.assertEqual(result, expected)
@onlyCUDA
@tf32_on_and_off(0.005)
def test_grid_sample_large(self, device):
    """Regression tests for grid_sample with extreme coordinate values:
    gh-35202 (huge out-of-range coordinates) and gh-24823 (inf coordinate
    / enormous affine parameters). Forward must produce zeros/padding and
    backward must complete without crashing."""
    def issue_35202():
        # coordinates far outside [-1, 1] must sample to zero
        input_tensor = torch.rand(1, 1, 480, 640, dtype=torch.float, device=device, requires_grad=True)
        coords = torch.tensor([[-10059144, 67680944], [67680944, 67680944]], dtype=torch.float, device=device)
        coords = coords.unsqueeze(0).unsqueeze(0).repeat(1, 1, 1, 1)
        result = torch.nn.functional.grid_sample(input_tensor, coords)
        self.assertEqual(result, torch.tensor([[[[0., 0.]]]], dtype=torch.float, device=device))
        result.backward(torch.ones_like(result))
        torch.cuda.synchronize()
    issue_35202()

    def issue_24823_1(dtype):
        # an inf grid coordinate must yield 0 output and 0 gradient there
        image = torch.arange(27, 0, -1, dtype=dtype, device=device).view(1, 1, 3, 3, 3)
        image.requires_grad_()
        grid = torch.nn.functional.affine_grid(
            torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]], dtype=dtype, device=device),
            (1, 1, 3, 3, 3))
        grid[:, 1, 1, 1, 0] = float('inf')
        result = torch.nn.functional.grid_sample(image, grid, padding_mode='zeros')
        self.assertEqual(result, torch.tensor([[[[[27., 26., 25.], [24., 23., 22.], [21., 20., 19.]],
                                                 [[18., 17., 16.], [15., 0., 13.], [12., 11., 10.]],
                                                 [[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]]]]],
                                              device=device, dtype=dtype))
        result.backward(torch.ones_like(result))
        expected_grad = torch.ones_like(image)
        expected_grad[0, 0, 1, 1, 1] = 0
        self.assertEqual(image.grad, expected_grad, atol=0.005, rtol=0)
    issue_24823_1(torch.half)
    issue_24823_1(torch.float)
    issue_24823_1(torch.double)

    def issue_24823_2():
        # enormous affine translation pushes all samples out of range -> zeros
        param = torch.tensor([[[-1.0e+20, 0.0, 0.0], [0.0, -1.0e+20, 0.0]]], dtype=torch.float, device=device)
        img = torch.zeros((1, 1, 4, 4), dtype=torch.float, device=device, requires_grad=True)
        grid = torch.nn.functional.affine_grid(param, img.size())
        result = torch.nn.functional.grid_sample(img, grid)
        self.assertEqual(result, torch.zeros(1, 1, 4, 4, device=device, dtype=torch.float))
        result.backward(torch.ones_like(result))
        torch.cuda.synchronize()
    issue_24823_2()
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
                 # Compute sum of the large tensor sizes:
                 # (im.numel() + small_image.numel() + small_image.grad.numel() +
                 #  large_view.grad.numel()) * sizeof(dtype)
                 32769 * (65536 + 3 * 65536 / 128) *
                 torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_2d(self, device, dtype):
    """grid_sample with 64-bit indexing (gh-41656).

    First samples the four corners of an image whose numel exceeds 2**31 —
    this must not segfault and must return zeros. Then checks that sampling
    a large-stride view of that image matches the same op on an equivalent
    contiguous tensor, forward and backward, for every mode/padding/align
    combination.

    Fix: the corner coordinates were built but never passed to grid_sample,
    so the "no segfault at the corners" check never actually ran; the call
    and zero-check (mirroring the 3D variant of this test) are restored.
    """
    # Test 64-bit indexing with grid_sample (gh-41656)
    # Try accessing the corners, there should be no segfault
    coords = torch.tensor([[[-1., -1.],
                            [+1., -1.]],
                           [[-1., +1.],
                            [+1., +1.]]], device=device, dtype=dtype)
    coords = coords.expand(1, 2, 2, 2)
    im = torch.zeros([1, 1, 32769, 65536], device=device, dtype=dtype)
    result = F.grid_sample(im, coords, align_corners=False)
    self.assertEqual(result, torch.zeros((1, 1, 2, 2), device=device, dtype=dtype))

    # Compare sampling with large strides to the same op on a contiguous tensor
    coords = torch.rand(1, 4, 4, 2, device=device, dtype=dtype)
    large_view = im[..., 127::128]
    small_image = torch.rand_like(large_view)
    large_view[...] = small_image
    large_view.requires_grad, small_image.requires_grad = True, True
    self.assertTrue(
        sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
        msg="View must use 64-bit indexing")
    for mode, padding_mode, align_corners in itertools.product(
            ('nearest', 'bilinear', 'bicubic'), ('zeros', 'border', 'reflection'), (True, False)):
        a = F.grid_sample(
            small_image, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        a.sum().backward()
        b = F.grid_sample(
            large_view, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        b.sum().backward()
        self.assertEqual(a, b)
        self.assertEqual(small_image.grad, large_view.grad)
        small_image.grad.zero_()
        large_view.grad.zero_()
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
                 # Compute sum of the large tensor sizes:
                 # (im.numel() + small_image.numel() + small_image.grad.numel() +
                 #  large_view.grad.numel()) * sizeof(dtype)
                 2 * 32769 * (32768 + 3 * 32768 / 128) *
                 torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_3d(self, device, dtype):
    """3-D grid_sample with 64-bit indexing (gh-41656): sampling the corner
    of a volume whose numel exceeds 2**31 must return zeros without a
    segfault, and a large-stride view must match an equivalent contiguous
    tensor in forward and backward."""
    # Test 64-bit indexing with grid_sample (gh-41656)
    # Try accessing the corners, there should be no segfault
    coords = torch.full((1, 2, 2, 2, 3), 1., device=device, dtype=dtype)
    im = torch.zeros([1, 1, 2, 32769, 32768], device=device, dtype=dtype)

    result = F.grid_sample(im, coords, align_corners=False)
    self.assertEqual(result, torch.zeros((1, 1, 2, 2, 2), device=device, dtype=dtype))

    # Compare sampling with large strides to the same op on a contiguous tensor
    coords = torch.rand(1, 1, 4, 4, 3, device=device, dtype=dtype)
    large_view = im[..., 127::128]
    small_image = torch.rand_like(large_view)
    large_view[...] = small_image
    small_image.requires_grad, large_view.requires_grad = True, True
    self.assertTrue(
        sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
        msg="View must use 64-bit indexing")
    for mode, padding_mode, align_corners in itertools.product(
            ('nearest', 'bilinear'), ('zeros', 'border', 'reflection'), (True, False)):
        a = F.grid_sample(
            small_image, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        a.sum().backward()
        b = F.grid_sample(
            large_view, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        b.sum().backward()
        self.assertEqual(a, b)
        self.assertEqual(small_image.grad, large_view.grad)
        small_image.grad.zero_()
        large_view.grad.zero_()
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_transposed_large(self, device):
    """A ConvTranspose2d forward on a 4096-sample batch must agree exactly
    with the same op applied to 1024-sample chunks of the batch."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype)
    input_large = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device)
    # forward
    ret = conv(input_large)
    # each quarter of the batch compared against running it separately;
    # a 1x1 stride-1 conv is elementwise, so the match must be exact
    maxdiff0 = (ret.narrow(0, 0, 1024) - conv(input_large.narrow(0, 0, 1024))).abs_().max().item()
    maxdiff1 = (ret.narrow(0, 1024, 1024) - conv(input_large.narrow(0, 1024, 1024))).abs_().max().item()
    maxdiff2 = (ret.narrow(0, 2048, 1024) - conv(input_large.narrow(0, 2048, 1024))).abs_().max().item()
    maxdiff3 = (ret.narrow(0, 3072, 1024) - conv(input_large.narrow(0, 3072, 1024))).abs_().max().item()
    self.assertEqual(maxdiff0, 0)
    self.assertEqual(maxdiff1, 0)
    self.assertEqual(maxdiff2, 0)
    self.assertEqual(maxdiff3, 0)
@onlyCUDA
@skipCUDAIfRocm
@largeTensorTest('12GB')
def test_conv_large(self, device):
    """Conv2d on a 4097-sample batch: the forward must match per-chunk
    results exactly, and the weight gradient must match a chunked backward
    up to rounding tolerance."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype)
    # weights scaled down by 64, presumably to keep fp16 activations in
    # range — confirm against the original change if this is revisited
    conv.weight = torch.nn.Parameter(torch.randn(2, 2, 8, 8, device=device, dtype=dtype) / 64)
    input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device)
    # forward
    ret = conv(input_large)
    self.assertEqual(ret[:2048], conv(input_large[:2048]))
    self.assertEqual(ret[2048:4096], conv(input_large[2048:4096]))
    self.assertEqual(ret[4096:], conv(input_large[4096:]))

    # backward
    conv.zero_grad()
    # When computing the backward, we are using the `max(dim=1)`` to create
    # some sparsity. Without this sparsity, the rounding error would be
    # too large (as large as 1e-5) to satisfy the creterion (1e-6) of `assertEqual`
    ret.view(4097, -1).max(dim=1).values.sum().backward()
    del ret
    grad1 = conv.weight.grad.detach().clone()
    conv.zero_grad()
    # same loss computed chunk-by-chunk, accumulating into the weight grad
    conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward()
    conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward()
    conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward()
    grad2 = conv.weight.grad.detach().clone()
    # gradients are at the order of hundreds, we need to scale it to
    # the order of one so that we can compare
    scale = 1 / grad2.abs().mean()
    grad1 = grad1 * scale
    grad2 = grad2 * scale
    self.assertEqual(grad1, grad2, atol=5e-2, rtol=5e-3)
def _test_gumbel_softmax_st_shapes(self, device, dtype, shape, dim, count_expected):
    """Hard gumbel-softmax: output is non-negative, keeps the input shape,
    and makes exactly one choice per draw along ``dim``."""
    logits = torch.randn(shape, dtype=torch.float, device=device).to(dtype)
    draw = F.gumbel_softmax(logits, hard=True, dim=dim)
    # all entries non-negative
    self.assertGreaterEqual(draw.min(), 0)
    # input shape preserved
    self.assertTrue(draw.shape == logits.shape)
    # exactly one hot entry per draw along `dim`
    eps = torch.finfo(draw.dtype).eps
    self.assertEqual(draw.sum(), count_expected, atol=eps, rtol=0)
def _test_gumbel_softmax_straight_through(self, device, dtype):
    """Statistical check of hard (straight-through) gumbel-softmax sampling:
    every draw is one-hot, and over ``num_draws`` draws the empirical
    category counts pass an approximate 99% z-test against the softmax
    probabilities."""
    num_draws = 100

    logits = torch.tensor([[0.2, 0.8, 0.1]], device=device)
    logits = logits.reshape([1, 3])
    logits = logits.to(dtype).requires_grad_()
    probs = logits.softmax(dim=-1)

    counts = torch.zeros_like(logits)
    for _ in range(num_draws):
        y_draw = F.gumbel_softmax(logits, hard=True)
        counts = counts + y_draw

    # All values positive
    self.assertGreaterEqual(y_draw.min(), 0)
    # Each experiment should result in 1 draw.
    self.assertEqual(counts.sum(), num_draws, atol=torch.finfo(counts.dtype).eps, rtol=0)

    # check results is asymptotically as expected.
    expected = probs * num_draws
    # ~z is approximately N(0,1) for unbiased count
    z = (counts - expected) / (expected * (1 - probs)).sqrt()
    # A (lazy) approximate 99% two-sided test:
    # occurs with prob alpha~>=0.01 if unbiased
    self.assertLess(z.abs().max().item(), 2.58)
def _test_gumbel_softmax_grad(self, device, dtype):
    """hard=True and hard=False gumbel-softmax must backprop the same
    gradient when fed identical RNG draws (the hard path is a
    straight-through estimator over the soft one)."""
    # "hard" and "not hard" should propagate same gradient.
    logits_soft = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)
    logits_hard = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)

    # snapshot and replay RNG state so both calls see the same gumbel noise
    seed = torch.random.get_rng_state()
    y_soft = F.gumbel_softmax(logits_soft, hard=False)
    torch.random.set_rng_state(seed)
    y_hard = F.gumbel_softmax(logits_hard, hard=True)

    y_soft.sum().backward()
    y_hard.sum().backward()

    # 2eps = 1x addition + 1x subtraction.
    tol = 2 * torch.finfo(dtype).eps
    self.assertEqual(logits_soft.grad, logits_hard.grad, atol=tol, rtol=0)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_gumbel_softmax(self, device, dtype):
    """Drive the gumbel-softmax helpers over shapes/dims, then the
    straight-through and gradient checks."""
    # (shape, dim, expected one-hot count = product of non-reduced dims)
    shape_cases = (
        ([5], 0, 1),
        ([5], -1, 1),
        ([5, 4], 1, 5),
        ([5, 4, 3], 1, 5 * 3),
        ([5, 4, 3], -1, 5 * 4),
    )
    for shape, dim, count in shape_cases:
        self._test_gumbel_softmax_st_shapes(device, dtype, shape=shape, dim=dim, count_expected=count)
    self._test_gumbel_softmax_straight_through(device, dtype)
    self._test_gumbel_softmax_grad(device, dtype)
def _test_rnn_retain_variables(self, device, dtype):
    """Backward with retain_graph=True must produce identical input and
    parameter gradients on every repeated call, for LSTM, GRU, and RNN."""
    rnns = [nn.LSTM(10, 20, num_layers=2).to(device, dtype),
            nn.GRU(10, 20, num_layers=2).to(device, dtype),
            nn.RNN(10, 20, num_layers=2).to(device, dtype)]
    for rnn in rnns:
        input = torch.randn(5, 6, 10, device=device, dtype=dtype, requires_grad=True)
        output = rnn(input)
        output[0].sum().backward(retain_graph=True)
        # reference grads from the first backward pass
        grads = [input.grad.data.clone()] + [p.grad.data.clone() for p in rnn.parameters()]
        for _ in range(4):
            # zero everything, backward again through the retained graph
            rnn.zero_grad()
            input.grad.data.zero_()
            output[0].sum().backward(retain_graph=True)
            grads2 = [input.grad.data] + [p.grad.data for p in rnn.parameters()]
            self.assertEqual(grads, grads2)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.double)
def test_rnn_retain_variables(self, device, dtype):
    """Run the retain-graph RNN check; on a cuDNN-capable CUDA device,
    repeat with cuDNN disabled to also cover the native path."""
    self._test_rnn_retain_variables(device, dtype)

    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            self._test_rnn_retain_variables(device, dtype)
@onlyCUDA
@dtypes(torch.double)
def test_lstmcell_backward_only_one_output_grad(self, device, dtype):
    """Backprop through just one of LSTMCell's two outputs (h or c) must
    still yield a nonzero input gradient; regression test for #11872,
    where undefined grads for the unused output broke the backward."""
    cell = torch.nn.LSTMCell(2, 3).to(device).to(dtype=dtype)
    inp = torch.randn(1, 2, device=device, dtype=dtype, requires_grad=True)
    for output_index in range(2):
        cell(inp)[output_index].sum().backward()
        grad = inp.grad
        self.assertFalse(grad is None or grad.abs().sum().item() == 0)
    def _test_rnn_mod(self, mod, inp):
        """gradcheck/gradgradcheck an RNN module, then verify the CuDNN double-backward error message.

        ``mod`` is an nn.LSTM/GRU and ``inp`` its (requires_grad) input tensor.
        """
        def flatten_out(mod, inp):
            # Flatten (output, hidden-state-tuple) into a flat tuple of tensors so
            # gradcheck can treat every element uniformly.
            out = mod(inp)
            return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
        gradcheckfunc = partial(flatten_out, mod)
        # Numerical grad checks only run against the native (non-cuDNN) implementation.
        with torch.backends.cudnn.flags(enabled=False):
            gradcheck(gradcheckfunc, inp, check_batched_grad=False)
            gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
        if inp.is_cuda and not TEST_WITH_ROCM:
            # Assert that we have good error message around unsupported CuDNN double backward
            # NB: we trigger double backward using .backward() instead of autograd.grad due to
            # https://github.com/pytorch/pytorch/issues/37874
            with torch.backends.cudnn.flags(enabled=True):
                result = gradcheckfunc(inp)
                result[0].sum().backward(create_graph=True)
                grad0 = next(mod.parameters()).grad
                with self.assertRaisesRegex(RuntimeError,
                                            "please disable the CuDNN backend temporarily"):
                    grad0.sum().backward()
                # Here we avoid the backward(create_graph=True) memory leak
                # described in https://github.com/pytorch/pytorch/issues/7343
                for param in mod.parameters():
                    param.grad = None
                inp.grad = None
# Merge into OpInfo?
@skipMeta # LSTM cell reuses output which was resized
@dtypes(torch.double)
def test_LSTM_grad_and_gradgrad(self, device, dtype):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=dtype, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(dtype)
self._test_rnn_mod(mod, inp)
@skipMeta # GRU cell reuses output which was resized
@dtypes(torch.double)
def test_GRU_grad_and_gradgrad(self, device, dtype):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=dtype, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(dtype)
self._test_rnn_mod(mod, inp)
@onlyCUDA
def test_upsamplingNearest1d_launch_config(self, device):
m = nn.Upsample(scale_factor=2)
inp = torch.rand(2**25, 1, 1, device=device)
out = m(inp)
inp_ref = inp.cpu()
out_ref = m(inp_ref)
self.assertEqual(out_ref, out)
@onlyCUDA
def test_upsamplingNearest2d_launch_config(self, device):
m = nn.Upsample(scale_factor=2)
inp = torch.rand(2**25, 1, 1, 1, device=device)
out = m(inp)
inp_ref = inp.cpu()
out_ref = m(inp_ref)
self.assertEqual(out_ref, out)
@onlyCUDA
def test_upsamplingNearest3d_launch_config(self, device):
m = nn.Upsample(scale_factor=2)
inp = torch.rand(2**25, 1, 1, 1, 1, device=device)
out = m(inp)
inp_ref = inp.cpu()
out_ref = m(inp_ref)
self.assertEqual(out_ref, out)
@unittest.expectedFailure
@skipIfRocm
@onlyCUDA
def test_upsamplingNearest2d_launch_fail(self, device):
m = nn.Upsample(scale_factor=2)
# launch grid_y == 2**16 (larger than maximum y-dimension limit 65535)
inp = torch.rand(1, 1, 2**15, 2**8, device=device)
out = m(inp)
@onlyCUDA
@skipCUDAIfNotRocm
def test_upsamplingNearest2d_launch_rocm(self, device):
# test_upsamplingNearest2d_launch_fail should run OK on ROCm
m = nn.Upsample(scale_factor=2)
inp = torch.rand(1, 1, 2**15, 2**8, device=device)
out = m(inp)
    @onlyCUDA
    @skipCUDAIfCudnnVersionLessThan(7600)
    def test_CTCLoss_cudnn(self, device):
        """cuDNN CTC loss must agree with the reference implementation and the native kernel.

        Runs forward/backward through the cuDNN path (int targets) and the
        native path (long targets, cuDNN disabled) and compares losses and
        input gradients, for both settings of ``zero_infinity``.
        """
        def _helper(zero_infinity):
            target_lengths = [30, 25, 20]
            input_lengths = [50, 50, 50]
            targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
            log_probs = torch.randn(50, 3, 15, dtype=torch.float, device=device).log_softmax(2).requires_grad_()
            # Independent leaf for the native path so both gradients can be compared.
            log_probs_ref = log_probs.detach().clone().requires_grad_()
            with torch.backends.cudnn.flags(enabled=True):
                res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, zero_infinity=zero_infinity)
                res.backward()
            expected = ctcloss_reference(log_probs, targets.cuda(), input_lengths, target_lengths).float()
            with torch.backends.cudnn.flags(enabled=False):
                # Native kernel path: long targets force the non-cuDNN implementation.
                res2 = torch.nn.functional.ctc_loss(log_probs_ref, targets.cuda().long(), input_lengths, target_lengths,
                                                    zero_infinity=zero_infinity)
                res2.backward()
            self.assertEqual(res, expected)
            self.assertEqual(res2, res)
            self.assertEqual(log_probs.grad, log_probs_ref.grad)
        _helper(zero_infinity=True)
        _helper(zero_infinity=False)
    def _CTCLoss_gen_losses(self, device, input_length, vocab_size, target_length, reduction, use_module_form):
        """Compute CTC losses for batched (N=1) and unbatched inputs and run backward.

        Returns ``(losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs)``
        so the caller can compare values and gradients between the batched and
        no-batch-dim code paths.
        """
        batch_size = 1
        log_probs = torch.randn(input_length, batch_size, vocab_size, dtype=torch.float, device=device) \
                         .log_softmax(2).requires_grad_()
        targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length),
                                dtype=torch.int, device=device)
        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]
        # Unbatched twins of the same inputs (batch dimension squeezed away).
        log_probs_no_bd = log_probs.squeeze(1).detach().clone().requires_grad_()
        targets_no_bd = targets.squeeze(0).detach().clone()
        input_lengths_no_bd = torch.tensor(input_length)
        target_lengths_no_bd = torch.tensor(target_length)
        # currently only length 2 and 1 right now, but left flexible for additional potential cases
        log_probs_refs = [log_probs.detach().clone().requires_grad_() for _ in range(2)]
        log_probs_no_bd_refs = [log_probs_no_bd.detach().clone().requires_grad_() for _ in range(1)]
        losses = []
        losses_no_bd = []
        has_cuda = torch.cuda.is_available()
        has_cudnn = has_cuda and 'cuda' in device and self.has_cudnn()
        # cudnn requires a cpu target
        if has_cuda and has_cudnn:
            targets = targets.cpu()
            targets_no_bd = targets_no_bd.cpu()
        # Either the module form or the functional form, per use_module_form.
        ctc_loss = (
            nn.CTCLoss(reduction=reduction, zero_infinity=True)
            if use_module_form
            else partial(torch.nn.functional.ctc_loss, reduction=reduction, zero_infinity=True)
        )
        with torch.backends.cudnn.flags(enabled=has_cudnn):
            # batched case. log_probs.shape = (T, N, C), targets = (N, S), input_lengths/target_lengths = (N,)
            losses.append(ctc_loss(log_probs_refs[0], targets, input_lengths, target_lengths))
            # batched case. input.shape = (T, N, C), targets = (S,), input_lengths/target_lengths = (N,)
            losses.append(ctc_loss(log_probs_refs[1], targets_no_bd, input_lengths, target_lengths))
            # unbatched case. input.shape = (T, C), targets = (S,), input_lengths/target_lengths = (N,)
            losses_no_bd.append(ctc_loss(log_probs_no_bd_refs[0], targets_no_bd,
                                         input_lengths_no_bd, target_lengths_no_bd))
            for loss in losses + losses_no_bd:
                loss.backward()
            return losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs
def _assertEqual_list(self, expected, list_to_compare, atol=None, rtol=None):
for ele in list_to_compare:
self.assertEqual(expected, ele, atol=atol, rtol=rtol)
    @parametrize_test("reduction", ['none', 'mean', 'sum'])
    @parametrize_test("use_module_form", [True, False])
    def test_CTCLoss_no_batch_dim(self, device, reduction, use_module_form):
        """CTC loss without a batch dim must match the batched (N=1) path in value, grad, and shape."""
        input_length = 40
        vocab_size = 3
        target_length = 12
        args = self._CTCLoss_gen_losses(device, input_length, vocab_size, target_length, reduction, use_module_form)
        losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs = args
        # test output values
        self._assertEqual_list(losses[0], losses[1:], atol=1e-4, rtol=0)
        self._assertEqual_list(losses[0].squeeze(0), losses_no_bd, atol=1e-4, rtol=0)
        # test gradient values
        self._assertEqual_list(log_probs_refs[0].grad, [t.grad for t in log_probs_refs[1:]], atol=1e-4, rtol=0)
        self._assertEqual_list(
            log_probs_refs[0].grad.squeeze(1),
            [t.grad for t in log_probs_no_bd_refs],
            atol=1e-4,
            rtol=0,
        )
        # checking the output's shape
        # batch dim case should be (N,). no batch dim case should be ()
        self._assertEqual_list((1,) if reduction == 'none' else (), [loss.shape for loss in losses])
        self._assertEqual_list((), [loss.shape for loss in losses_no_bd])
        # checking the gradient's shape
        # batch dim case should have shape (T, N, C). no batch dim case should have shape (T, C)
        self._assertEqual_list((input_length, 1, vocab_size), [t.grad.shape for t in log_probs_refs])
        self._assertEqual_list((input_length, vocab_size), [t.grad.shape for t in log_probs_no_bd_refs])
@onlyCUDA
@skipCUDAIfNoCudnn
def test_contig_wrong_stride_cudnn(self, device):
# x has to have batch_size 1 to test contiguous checks
x = torch.randn(1, 16, 5, 5, device=device)
stride = list(x.stride())
stride[0] = 20
# change the stride in dimension 0. the tensor is still contiguous because size[0] is 1
x.set_(x.storage(), 0, x.size(), stride)
self.assertTrue(x.is_contiguous())
F.conv_transpose2d(x, torch.randn(16, 1, 1, 1, device=device))
F.conv2d(x, torch.randn(1, 16, 1, 1, device=device))
@onlyCUDA
def test_Conv2d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 5, 5)
conv_cpu = torch.nn.Conv2d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
with cudnn.flags(enabled=False):
conv_cuda = torch.nn.Conv2d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose2d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 5, 5)
conv_cpu = torch.nn.ConvTranspose2d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
with cudnn.flags(enabled=False):
conv_cuda = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose3d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 3, 5, 5)
conv_cpu = torch.nn.ConvTranspose3d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
with cudnn.flags(enabled=False):
conv_cuda = torch.nn.ConvTranspose3d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
def _ordered_sequence(self, device, dtype):
"""Create ordered list of random sequences"""
seqs = [torch.empty(random.randint(1, 6), device=device, dtype=dtype)
for _ in range(5)]
seqs = [s.random_(-128, 128) for s in seqs]
ordered = sorted(seqs, key=len, reverse=True)
return ordered
def _padded_sequence(self, device, dtype):
"""Create Tensor of random padded sequences"""
ordered = self._ordered_sequence(device, dtype)
lengths = [len(i) for i in ordered]
padded_tensor = rnn_utils.pad_sequence(ordered)
return padded_tensor, lengths
@onlyCUDA
def test_device_mask(self, device):
for enforce_sorted in [True, False]:
padded, lengths = self._padded_sequence('cpu', torch.float)
packed = rnn_utils.pack_padded_sequence(
padded, lengths, enforce_sorted=enforce_sorted)
self.assertFalse(packed.is_cuda)
packed = packed.to(device)
self.assertTrue(packed.is_cuda)
unpacked, _ = rnn_utils.pad_packed_sequence(packed)
self.assertTrue(unpacked.is_cuda)
self.assertEqual(unpacked.dtype, torch.float)
    @onlyCUDA
    def test_overwrite_module_params_on_conversion_cpu_device(self, device):
        """Check how module.to(device) interacts with pre-existing views/references to its
        parameters, under both settings of torch.__future__ overwrite_module_params_on_conversion."""
        # Test that under the current default settings
        # (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),
        # a view to a module's parameters is not pointing to the same storage as
        # its base variable after converting the module to a different device.
        m = nn.Linear(20, 10)
        mw = m.weight[:]
        m.to(device)
        with torch.no_grad():
            # Without using `torch.no_grad()`, this will leak CUDA memory.
            # (Issue is filed at https://github.com/pytorch/pytorch/issues/21875)
            mw[0][0] = 5
            self.assertTrue(mw[0][0].device.type == "cpu")
            self.assertTrue(mw._base[0][0].device.type == "cuda")
        try:
            # Flip the global flag; restored in `finally` so other tests are unaffected.
            torch.__future__.set_overwrite_module_params_on_conversion(True)
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # a view to a module's parameters is still pointing to the same storage as
            # its base variable after converting the module to a different device.
            m = nn.Linear(20, 10)
            mw = m.weight[:]
            m.to(device)
            with torch.no_grad():
                mw[0][0] = 5
            self.assertTrue(mw[0][0] == mw._base[0][0])
            # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
            # `cpu_module.to("cuda")` doesn't preserve previous references to
            # `cpu_module`'s parameters or gradients.
            m = nn.Linear(20, 10)
            m.weight.grad = torch.randn(10, 20)
            weight_ref = m.weight
            weight_grad_ref = m.weight.grad
            m.to(device)
            self.assertNotEqual(weight_ref.device, m.weight.device)
            self.assertNotEqual(weight_grad_ref.device, m.weight.grad.device)
        finally:
            torch.__future__.set_overwrite_module_params_on_conversion(False)
@onlyCUDA
@dtypes(*((torch.float, torch.double, torch.bfloat16, torch.half)
if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
def test_embedding_max_norm_device(self, device, dtype):
embedding = nn.Embedding(22, 5, max_norm=1.0).to(device, dtype=dtype)
# nn.Embedding only takes LongTensor as input
input = torch.tensor([2, 8, 8, 6], device=device, dtype=torch.long)
output = embedding(input)
self.assertEqual(output[1], output[2])
self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())
# Test fails on Vg20
@skipCUDAIfRocm
@onlyCUDA
@dtypes(torch.half, torch.float)
def test_softmax(self, device, dtype):
input = torch.rand(32, 100, device=device, dtype=dtype, requires_grad=True)
inputf = input.to(torch.float).detach().requires_grad_(True)
out = F.softmax(input, dim=-1, dtype=torch.float)
outf = F.softmax(inputf, dim=-1)
# should be bitwise equal
self.assertEqual(out, outf, atol=0, rtol=0)
gO = torch.empty_like(outf).uniform_()
out.backward(gO)
outf.backward(gO)
# should be bitwise equal
self.assertEqual(input.grad, inputf.grad.to(dtype), atol=0, rtol=0)
@onlyCUDA
def test_pool3d_size_one_feature_dim(self, device):
# Tests crazy strides for feature dim of size 1
x = torch.randn(7, 1, 5, 3, 2, device=device)
strange_strides = [30, 1234, 6, 2, 1]
y = x.as_strided(x.size(), strange_strides)
x = x.cpu().as_strided(x.size(), strange_strides)
to_test = {
'max_pool3d': lambda t: F.max_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
'avg_pool3d': lambda t: F.avg_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
}
for test, fn in to_test.items():
# Should not crash
out_y = fn(y)
out_x = fn(x)
self.assertEqual(out_y, out_x.to(device), msg=test)
@onlyCUDA
@largeTensorTest('6GB')
def test_pool3d_large_size_int64(self, device):
# See https://github.com/pytorch/pytorch/issues/52822
x = torch.randn(70, 32, 100, 100, 100, dtype=torch.half, device=device)
y = torch.nn.functional.max_pool3d(x, 5)
torch.cuda.synchronize()
ref_x = x.cpu().float() # max_pool3d_cpu is not implemented for half
ref_y = torch.nn.functional.max_pool3d(ref_x, 5)
self.assertEqual(y, ref_y, exact_dtype=False)
@onlyCUDA
def test_AvgPool3d_backward_after_cat_dim1_device(self, device):
# x has to have batch_size 1 to test contiguous checks
x = torch.randn(1, 3, 4, 4, 4, device=device, requires_grad=True)
y = F.avg_pool3d(x, kernel_size=3, padding=1, stride=2)
grad = torch.randn(y.size(), device=device)
# increase the stride in dimension 0. the tensor is still contiguous because size[0] is 1
stride = list(grad.stride())
stride[0] = stride[0] * 2
grad.set_(grad.storage(), 0, grad.size(), stride)
assert grad.is_contiguous()
y.backward(grad)
def test_pooling_size_empty(self, device):
t = torch.rand([1, 2, 3, 4], device=device)
self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool1d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool2d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool3d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool1d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool2d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool3d(t, []))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_empty_input(self, device, dtypes):
m = 4
n = 3
x = torch.tensor([], device=device, dtype=dtypes[0])
for sparse in [True, False]:
Embed = torch.nn.EmbeddingBag(m, n, sparse=sparse)
Embed.to(device)
output = Embed(input=x, offsets=torch.tensor([0], device=device, dtype=dtypes[1]))
self.assertEqual(output, torch.zeros_like(output))
output = Embed(input=x, offsets=torch.tensor([0, 0], device=device, dtype=dtypes[1]))
self.assertEqual(output, torch.zeros_like(output))
@skipCUDAIf(True, "cuda assert is not recovarable.")
@dtypes(*itertools.product((torch.float, torch.double), (torch.int, torch.long)))
@parametrize_test("padding_idx", [None, 0])
@parametrize_test("mode", ["sum", "mean", "max"])
def test_embedding_bag_out_of_bounds_idx(self, device, dtypes, padding_idx, mode):
padding_idx = 0
w_dtype, idx_dtype = dtypes
# negative out-of-bound
idx1 = torch.tensor([[-1, 1]], device=device, dtype=idx_dtype)
# positive out-of-bound
idx2 = torch.tensor([[11, 8]], device=device, dtype=idx_dtype)
weight = torch.randn(10, 2, device=device, dtype=w_dtype)
if mode == 'sum':
# Only `sum` supports per_sample_weight
per_sample_weights = (None, torch.randn_like(idx1, device=device, dtype=w_dtype))
else:
per_sample_weights = (None,)
for p_s_weights, idx in itertools.product(per_sample_weights, (idx1, idx2)):
msg = "Expected idx >= 0 && idx < num_embeddings"
with self.assertRaisesRegex(RuntimeError, msg):
torch.nn.functional.embedding_bag(idx, weight,
per_sample_weights=p_s_weights, padding_idx=padding_idx,
mode=mode)
    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
    def test_EmbeddingBag_per_sample_weights_failures(self, device, dtypes):
        """per_sample_weights must be rejected on dtype mismatch, shape mismatch, and non-'sum' modes."""
        # Failure 1: mismatched embeddings / per_sample_weights dtype
        es = nn.EmbeddingBag(5, 2, mode='sum').to(dtype=torch.float, device=device)
        input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
        offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
        per_sample_weights = torch.randn_like(input, dtype=torch.double, device=device)
        # CPU and CUDA raise with different wording for the dtype mismatch.
        if device == 'cpu':
            with self.assertRaisesRegex(RuntimeError, 'have the same type as'):
                es(input, offsets, per_sample_weights)
        else:
            with self.assertRaisesRegex(RuntimeError, 'expected scalar type'):
                es(input, offsets, per_sample_weights)
        # Failure 2.1: input/per_sample_weights have different sizes (1d input)
        input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
        offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
        per_sample_weights = torch.randn(5, dtype=torch.float, device=device)
        with self.assertRaisesRegex(ValueError, 'same shape as the input'):
            es(input, offsets, per_sample_weights)
        # Failure 2.2: input/per_sample_weights have different sizes (2d input)
        input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
        offsets = None
        per_sample_weights = torch.randn(7 * 3, dtype=torch.float, device=device)
        with self.assertRaisesRegex(ValueError, 'same shape as the input'):
            es(input, offsets, per_sample_weights)
        # Failure 3: Unsupported per_sample_weights and mode=('max', 'mean')
        for unsupported_mode in ('max', 'mean'):
            es = nn.EmbeddingBag(5, 2, mode=unsupported_mode).to(
                dtype=torch.float, device=device)
            input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
            offsets = None
            per_sample_weights = torch.randn(7, 3, dtype=torch.float, device=device)
            with self.assertRaisesRegex(NotImplementedError,
                                        "only supported for mode='sum'"):
                es(input, offsets, per_sample_weights)
def _embedding_bag_reference_impl(self, input, weight, offsets=None, mode='sum',
per_sample_weights=None, include_last_offset=False):
assert mode == 'sum' or per_sample_weights is None
assert offsets is not None
if per_sample_weights is None:
per_sample_weights = torch.ones(input.size()).to(
dtype=weight.dtype, device=weight.device
)
assert input.numel() == per_sample_weights.numel()
bags = []
long_input = input.to(torch.long)
embeddings = weight.index_select(0, long_input) * per_sample_weights.unsqueeze(1)
if include_last_offset:
for index in range(len(offsets) - 1):
offset = offsets[index]
next_offset = offsets[index + 1]
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
else:
for index, offset in enumerate(offsets):
if index + 1 < len(offsets):
next_offset = offsets[index + 1]
else:
next_offset = len(long_input)
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
return torch.stack(bags)
    @dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
    def test_EmbeddingBag_empty_per_sample_weights_and_offsets(self, device, dtypes):
        """EmbeddingBag with empty input: forward matches the reference and grads are zero."""
        # Test empty input and per sample weight, and backward pass. There was a CUDA
        # invalid configuration bug (more context in #46572)
        def test_per_sample_weights(mode, trainable_scale):
            es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
            es.weight.data.copy_(
                torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
            input = torch.tensor([], device=device, dtype=dtypes[0])
            offsets = torch.tensor([0, 0, 0, 0, 0], device=device, dtype=dtypes[1])
            per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
                .requires_grad_(trainable_scale)
            ref_per_sample_weights = \
                per_sample_weights.detach().requires_grad_(trainable_scale)
            reference_weights = es.weight.detach().requires_grad_()
            expected = self._embedding_bag_reference_impl(
                input, reference_weights, offsets, mode, ref_per_sample_weights)
            result = es(input, offsets, per_sample_weights)
            self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
            grad = torch.randn_like(expected)
            result.backward(grad)
            # the reference impl doesn't have grad fn for empty input; but the grad should
            # simply be a zero tensor
            ref_weights_grad = torch.zeros_like(es.weight)
            self.assertEqual(es.weight.grad, ref_weights_grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
            if trainable_scale:
                # per_sample_weights is empty, so its grad is an empty tensor as well.
                ref_per_sample_weights_grad = torch.empty_like(per_sample_weights)
                self.assertEqual(per_sample_weights.grad, ref_per_sample_weights_grad,
                                 atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        modes = ('sum',)
        trainable_scale = (True, False)
        for mode, trainable in itertools.product(modes, trainable_scale):
            test_per_sample_weights(mode, trainable)
    @dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
    def test_EmbeddingBag_per_sample_weights_and_offsets(self, device, dtypes):
        """EmbeddingBag('sum') with per_sample_weights must match the reference impl,
        forward and backward (weights and, when trainable, the per-sample scales)."""
        def test_per_sample_weights(mode, trainable_scale):
            es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
            es.weight.data.copy_(
                torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
            input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
            offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])
            per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
                .requires_grad_(trainable_scale)
            ref_per_sample_weights = \
                per_sample_weights.detach().requires_grad_(trainable_scale)
            # Independent leaf so reference grads can be compared against es.weight.grad.
            reference_weights = es.weight.detach().requires_grad_()
            expected = self._embedding_bag_reference_impl(
                input, reference_weights, offsets, mode, ref_per_sample_weights)
            result = es(input, offsets, per_sample_weights)
            self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
            grad = torch.randn_like(expected).to(dtype=dtypes[2], device=device)
            result.backward(grad)
            expected.backward(grad)
            self.assertEqual(es.weight.grad, reference_weights.grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
            if trainable_scale:
                self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
                                 atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        modes = ('sum',)
        trainable_scale = (True, False)
        for mode, trainable in itertools.product(modes, trainable_scale):
            test_per_sample_weights(mode, trainable)
    @dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
    def test_EmbeddingBag_per_sample_weights_and_new_offsets(self, device, dtypes):
        """EmbeddingBag vs. the reference impl across modes, trainable scales, and
        the include_last_offset offsets convention."""
        def test_per_sample_weights_new_offsets(mode, trainable_scale, include_last_offset, has_weight=True):
            es = nn.EmbeddingBag(5, 2, mode=mode, include_last_offset=include_last_offset).to(dtype=dtypes[2], device=device)
            es.weight.data.copy_(
                torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
            input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
            offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])
            if include_last_offset:
                # The convention requires a trailing offset equal to input length.
                offsets = torch.cat((offsets, torch.tensor([input.size(0)], device=device, dtype=dtypes[1])), 0)
            if has_weight:
                per_sample_weights = torch.randn_like(input, device=device, dtype=dtypes[2]) \
                    .requires_grad_(trainable_scale)
                ref_per_sample_weights = \
                    per_sample_weights.detach().requires_grad_(trainable_scale)
            else:
                per_sample_weights = None
                ref_per_sample_weights = None
            # Independent leaf so reference grads can be compared against es.weight.grad.
            reference_weights = es.weight.detach().requires_grad_()
            expected = self._embedding_bag_reference_impl(
                input, reference_weights, offsets, mode, ref_per_sample_weights, include_last_offset)
            result = es(input, offsets, per_sample_weights)
            self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
            grad = torch.randn_like(expected)
            result.backward(grad)
            expected.backward(grad)
            self.assertEqual(es.weight.grad, reference_weights.grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
            if has_weight and trainable_scale:
                self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
                                 atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        trainable_scale = (True, False)
        include_last_offset = (True, False)
        # per_sample_weights (has_weight=True) is only supported by 'sum'.
        modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))
        for (mode, has_weight), trainable, include_last_offset in itertools.product(
            modes, trainable_scale, include_last_offset
        ):
            test_per_sample_weights_new_offsets(
                mode, trainable, include_last_offset, has_weight
            )
    def _test_EmbeddingBag_vs_Embedding(self, N, D, B, L, max_norm=None,
                                        mode='mean',
                                        device='cpu',
                                        wdtype=torch.float,
                                        dtype=torch.long,
                                        test_per_sample_weights=False,
                                        trainable_per_sample_weights=False,
                                        sparse=False,
                                        test_backward=True,
                                        backward_prec=None):
        """Cross-check nn.EmbeddingBag against nn.Embedding plus an explicit reduction.

        N: num embeddings, D: embedding dim, B: batch (number of bags), L: bag length.
        Compares forward output and, when test_backward, the weight gradients
        (and per-sample-weight gradients when those are trainable).
        """
        es = nn.EmbeddingBag(N, D, mode=mode, sparse=sparse, max_norm=max_norm).to(device, wdtype)
        e = nn.Embedding(N, D, max_norm=max_norm).to(device, wdtype)
        # Both modules start from identical weights.
        e.weight.data.copy_(es.weight)
        input = torch.randint(N, (B, L), device=device, dtype=dtype)
        # Fixed-size bags: bag i covers the flat index range [i*L, (i+1)*L).
        offsets = torch.arange(0, B, device=device, dtype=dtype).mul_(L)
        grad_output = torch.rand(B, D, device=device, dtype=wdtype)
        if test_per_sample_weights:
            # To prevent large gradients, weights should sum to 1 for each bag
            per_sample_weights = \
                torch.randn(B, L, device=device, dtype=wdtype).softmax(dim=-1)
            per_sample_weights_reference = \
                per_sample_weights.clone().requires_grad_(trainable_per_sample_weights)
            per_sample_weights.requires_grad_(trainable_per_sample_weights)
            output = es(input.view(-1), offsets, per_sample_weights.view(-1))
        else:
            output = es(input.view(-1), offsets)
            per_sample_weights = None
            per_sample_weights_reference = None
        # Reference: plain embedding lookup followed by the matching reduction.
        if mode == 'sum':
            if test_per_sample_weights:
                ref_output = (e(input) * per_sample_weights_reference.unsqueeze(-1)).sum(1)
            else:
                ref_output = e(input).sum(1)
        elif mode == 'mean':
            assert not test_per_sample_weights
            ref_output = e(input).mean(1)
        elif mode == 'max':
            assert not test_per_sample_weights
            ref_output = e(input).max(1)[0]
        self.assertEqual(output, ref_output, atol=dtype2prec_DONTUSE[wdtype], rtol=0)
        if not test_backward:
            return
        output.backward(grad_output)
        ref_output.backward(grad_output)
        es_weight_grad = es.weight.grad.data
        if sparse:
            es_weight_grad = es.weight.grad.data.to_dense()
        # We have more floating point error here because we are dealing with larger numbers
        if backward_prec is None:
            needed_prec = dtype2prec_DONTUSE[wdtype] * 5
        else:
            needed_prec = backward_prec
        self.assertEqual(es_weight_grad, e.weight.grad, atol=needed_prec, rtol=0)
        if test_per_sample_weights and trainable_per_sample_weights:
            self.assertEqual(per_sample_weights.grad, per_sample_weights_reference.grad,
                             atol=dtype2prec_DONTUSE[wdtype], rtol=0)
    @skipCUDAIf(True, "Temporarily disabled. See t54369166")
    @dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.half, torch.float, torch.double)))
    @dtypes(*itertools.product((torch.int, torch.long), (torch.float, torch.double)))
    def test_EmbeddingBag_per_sample_weights_and_no_offsets(self, device, dtypes):
        """Compare EmbeddingBag('sum') with per_sample_weights against the Embedding
        reference over several sizes, for sparse/dense weights and trainable/fixed scales."""
        def run_tests(mode, sparse, trainable_per_sample_weights):
            kwargs = dict(test_per_sample_weights=True, device=device,
                          mode=mode, wdtype=dtypes[1], dtype=dtypes[0], sparse=sparse,
                          trainable_per_sample_weights=trainable_per_sample_weights)
            # Simple case
            self._test_EmbeddingBag_vs_Embedding(2, 3, 5, 7, **kwargs)
            # B * L > 1000
            self._test_EmbeddingBag_vs_Embedding(2, 5, 53, 23, **kwargs)
            # Large num_embedding
            self._test_EmbeddingBag_vs_Embedding(101, 5, 3, 7, **kwargs)
            # Large embedding_dim
            self._test_EmbeddingBag_vs_Embedding(2, 101, 3, 7, **kwargs)
        modes = ('sum',)
        sparsity = (True, False)
        trainable_scale = (True, False)
        for mode, sparse, trainable_per_sample_weights in \
                itertools.product(modes, sparsity, trainable_scale):
            run_tests(mode, sparse, trainable_per_sample_weights)
        # Test CUDA Dense on half precision
        # NOTE(review): this second pass reuses the same dtypes as the loop above, so it
        # re-runs the dense subset rather than specifically exercising half precision —
        # verify the original intent before relying on it.
        if device == 'cuda':
            modes = ('sum',)
            sparsity = (False,)
            trainable_scale = (True, False)
            for mode, sparse, trainable_per_sample_weights in \
                    itertools.product(modes, sparsity, trainable_scale):
                run_tests(mode, sparse, trainable_per_sample_weights)
    def _test_EmbeddingBag(
        self,
        device,
        mode,
        sparse,
        wdtype=torch.double,
        dtype=torch.long,
        odtype=torch.long,
        test_backward=True,
    ):
        """Exercise nn.EmbeddingBag end to end on ``device``.

        Checks a hand-computed example (forward, backward, empty bags),
        compares random shapes against nn.Embedding + reduction, and
        verifies that illegal input/offset combinations raise.

        Args:
            device: target device string.
            mode: 'sum', 'mean' or 'max' reduction.
            sparse: whether the embedding uses sparse gradients.
            wdtype: weight dtype.
            dtype: index dtype.
            odtype: offsets dtype.
            test_backward: forwarded to the Embedding-vs-EmbeddingBag check.
        """
        # check a known test example
        es = nn.EmbeddingBag(5, 2, mode=mode, sparse=sparse).to(device, wdtype)
        es.weight.data.copy_(torch.arange(1, 11, device=device, dtype=wdtype).view_as(es.weight))
        input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtype)
        # Repeated offset values (0, 0) and (3, 3) plus the trailing 6 == len(input)
        # make bags 0, 2 and 4 empty; gradient entries 99 routed to them must be dropped.
        offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=odtype)
        grad_output = torch.tensor(
            [1, 2,
             3, 4], device=device, dtype=wdtype).view(2, 2)
        grad_output_with_empty = torch.tensor(
            [99, 99,
             1, 2,
             99, 99,
             3, 4,
             99, 99], device=device, dtype=wdtype).view(5, 2)
        if mode == "sum" or mode == "mean":
            denominator = 1 if mode == "sum" else 3
            expected_output = torch.tensor(
                [[13, 16],
                 [13, 16]], device=device, dtype=wdtype) / denominator
            expected_output_with_empty = torch.tensor(
                [[0, 0],
                 [13, 16],
                 [0, 0],
                 [13, 16],
                 [0, 0]], device=device, dtype=wdtype) / denominator
            expected_grad_weight = torch.tensor(
                [[3, 4],
                 [5, 8],
                 [0, 0],
                 [1, 2],
                 [3, 4]], device=device, dtype=wdtype) / denominator
        elif mode == "max":
            expected_output = torch.tensor(
                [[7, 8],
                 [9, 10]], device=device, dtype=wdtype)
            expected_output_with_empty = torch.tensor(
                [[0, 0],
                 [7, 8],
                 [0, 0],
                 [9, 10],
                 [0, 0]], device=device, dtype=wdtype)
            expected_grad_weight = torch.tensor(
                [[0, 0],
                 [0, 0],
                 [0, 0],
                 [1, 2],
                 [3, 4]], device=device, dtype=wdtype)
        output = es(input, offsets)
        output.backward(grad_output_with_empty)
        es_weight_grad = es.weight.grad.data
        if sparse:
            es_weight_grad = es.weight.grad.to_dense()
        self.assertEqual(output, expected_output_with_empty)
        self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)
        # check same example except as 2D (2 x 3)
        input = input.view(2, -1)
        es.zero_grad()
        output = es(input)
        output.backward(grad_output)
        es_weight_grad = es.weight.grad
        if sparse:
            es_weight_grad = es.weight.grad.to_dense()
        self.assertEqual(output, expected_output)
        self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)
        # test all empty bags
        es.zero_grad()
        inputs = torch.tensor([], dtype=dtype, device=device)
        offsets = torch.tensor([0, 0, 0, 0], dtype=odtype, device=device)
        es(inputs, offsets).sum().backward()
        dense_grad = es.weight.grad
        if dense_grad.is_sparse:
            dense_grad = dense_grad.to_dense()
        self.assertEqual(dense_grad, torch.zeros_like(es.weight))
        # now compare EmbeddingBag vs Embedding + Sum/Mean, for constant bag length
        N, D, B, L = random.randint(1, 100), random.randint(1, 100), random.randint(1, 50), random.randint(1, 50)
        kwargs = dict(mode=mode, sparse=sparse, device=device, wdtype=wdtype, dtype=dtype, test_backward=test_backward)
        self._test_EmbeddingBag_vs_Embedding(N, D, B, L, **kwargs)
        for max_norm in (None, 3):
            for p in itertools.product([1, 2], repeat=4):
                self._test_EmbeddingBag_vs_Embedding(*p, max_norm=max_norm, **kwargs)
        # check that giving illegal input combos raises error
        es = nn.EmbeddingBag(10, 20, mode=mode, sparse=sparse)
        input = torch.ones(3, 4, dtype=dtype)
        offset = torch.arange(0, 3, dtype=odtype)
        self.assertRaises(ValueError, lambda: es(input, offset))
        self.assertRaises(ValueError, lambda: es(input.view(-1)))
        offset[0] = 1
        if self.device_type == "cpu":
            self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
        offset[0] = 0
        offset[-1] = 100
        self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
    @dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
    def test_embedding_bag_device(self, device, dtypes):
        """Run the full EmbeddingBag checks for dense sum/mean/max and sparse
        sum/mean over index (dtypes[0]), offset (dtypes[1]) and weight
        (dtypes[2]) dtype combinations."""
        self._test_EmbeddingBag(device, 'sum', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
        self._test_EmbeddingBag(device, 'mean', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
        self._test_EmbeddingBag(device, 'max', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
        # Sparse backward is skipped for the dtypes below due to precision issues.
        test_backward = False
        if self.device_type == 'cuda':
            # see 'todo' in test_embedding_bag.
            test_backward = dtypes[2] is not torch.float16
        elif self.device_type == 'cpu':
            # TODO: figure out why precision on sparse embeddings isn't the
            # same as for dense.
            test_backward = dtypes[2] is not torch.float
        self._test_EmbeddingBag(
            device,
            'sum',
            True,
            wdtype=dtypes[2],
            dtype=dtypes[0],
            odtype=dtypes[1],
            test_backward=test_backward,
        )
        self._test_EmbeddingBag(
            device,
            'mean',
            True,
            wdtype=dtypes[2],
            dtype=dtypes[0],
            odtype=dtypes[1],
            test_backward=test_backward,
        )
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_embedding_bag_non_contiguous_weight(self, device, dtypes):
weight_tensor = torch.randn(3, 4, dtype=dtypes[2], device=device)
weight_tensor_non_contig = weight_tensor[:, :3] # This is non-contiguous strided.
weight_tensor_contig = weight_tensor_non_contig.clone().contiguous() # Contig-strided.
index = torch.tensor([0, 1, 2], dtype=dtypes[0], device=device)
offsets = torch.tensor([0, 2], dtype=dtypes[1], device=device)
for mode in ['sum', 'mean', 'max']:
output_non_contig = F.embedding_bag(
input=index,
weight=weight_tensor_non_contig,
offsets=offsets,
mode=mode,
)
output_contig = F.embedding_bag(
input=index,
weight=weight_tensor_contig,
offsets=offsets,
mode=mode,
)
self.assertEqual(output_non_contig, output_contig)
    @onlyCUDA
    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
    def test_embedding_bag_bfloat16(self, device, dtypes):
        """Run the sparse sum/mean EmbeddingBag checks with bfloat16 weights on CUDA."""
        self._test_EmbeddingBag(device, 'sum', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
        self._test_EmbeddingBag(device, 'mean', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
@onlyCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_multihead_attention_dtype(self, device, dtype):
embed_dim = 128
num_heads = 8
sl = 10
bs = 8
model = nn.MultiheadAttention(embed_dim, num_heads).cuda().to(dtype)
q = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
k = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
v = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
out = model(q, k, v)
self.assertEqual(q.size(), out[0].size())
self.assertEqual(dtype, out[0].dtype)
    @dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
    @dtypes(torch.float)
    def test_Conv2d_naive_groups(self, device, dtype):
        """A groups=2 Conv2d must match two independent half-sized convolutions
        in forward output and in input/weight/bias gradients."""
        # Check that grouped convolutions matches two half convolutions
        m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)
        i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
        output.backward(grad_output)
        # First half: channels [0, 2) with the corresponding weight/bias slices.
        m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
        m1.weight.data.copy_(m.weight.data[:2])
        m1.bias.data.copy_(m.bias.data[:2])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :2].contiguous())
        # Second half: channels [2, 4).
        m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
        m2.weight.data.copy_(m.weight.data[2:])
        m2.bias.data.copy_(m.bias.data[2:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 2:].contiguous())
        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
    @dtypes(torch.double)
    def test_Conv2d_backward_depthwise(self, device, dtype):
        """gradcheck a depthwise conv2d (groups=2 with one input channel per
        group, weight shape (2, 1, 3, 5)) with and without cuDNN."""
        x = torch.randn(2, 2, 4, 20, device=device, dtype=dtype, requires_grad=True)
        weight = torch.randn(2, 1, 3, 5, device=device, dtype=dtype, requires_grad=True)
        def conv2d_depthwise(x, weight):
            return torch.nn.functional.conv2d(
                x, weight, bias=None, stride=(1, 10), groups=2)
        for cudnn_enabled in [False, True]:
            with torch.backends.cudnn.flags(enabled=cudnn_enabled):
                torch.autograd.gradcheck(conv2d_depthwise, (x, weight))
    def _test_batchnorm_grad(self, device, dtype=torch.double):
        """gradcheck/gradgradcheck F.batch_norm in both training and eval mode
        on a small deterministic (bs, n_feat, size_feat) input."""
        bs, n_feat, size_feat = 4, 5, 6
        input = torch.arange(bs * n_feat * size_feat, device=device,
                             requires_grad=True, dtype=dtype).view(bs, n_feat, size_feat)
        weight = torch.arange(1, n_feat + 1, device=device, requires_grad=True, dtype=dtype)
        bias = torch.arange(n_feat, device=device, requires_grad=True, dtype=dtype)
        running_mean = 1 - torch.arange(n_feat, device=device, dtype=dtype)
        running_var = 2 * torch.arange(n_feat, device=device, dtype=dtype)
        for training in [False, True]:
            _assertGradAndGradgradChecks(self, F.batch_norm, (input, running_mean, running_var, weight, bias,
                                                              training, 0.1, 0.0001))
    def test_batchnorm_grad(self, device):
        """Run the batch_norm gradcheck, and again with cuDNN disabled when
        the device has cuDNN available."""
        self._test_batchnorm_grad(device)
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_batchnorm_grad(device)
    @onlyCUDA
    def test_layernorm_half_precision(self):
        """fp16 layer_norm must exactly match the fp32 computation cast back
        to half (atol=0, rtol=0)."""
        width = 128
        input = torch.rand(1, 5, width, device="cuda", dtype=torch.half) * 0.1
        normalized_shape = (width,)
        weight = torch.ones(width, device="cuda", dtype=torch.half)
        bias = torch.zeros(width, device="cuda", dtype=torch.half)
        eps = 1e-5
        output_fp16 = torch.layer_norm(input, normalized_shape, weight, bias, eps)
        output_fp32 = torch.layer_norm(input.float(), normalized_shape, weight.float(), bias.float(), eps).half()
        self.assertEqual(output_fp16, output_fp32, atol=0, rtol=0)
    @onlyCUDA
    def test_layernorm_weight_bias(self):
        """layer_norm with weight=None must behave like all-ones weight, and
        bias=None like an all-zeros bias."""
        width = 128
        input = torch.rand(1, 5, width, device="cuda", dtype=torch.float32) * 0.1
        normalized_shape = (width,)
        data = torch.randn(width, device="cuda", dtype=torch.float32)
        weight = torch.ones(width, device="cuda", dtype=torch.float32)
        bias = torch.zeros(width, device="cuda", dtype=torch.float32)
        eps = 1e-5
        # None weight vs explicit ones (``data`` serves as the bias here).
        out_none_weight = torch.layer_norm(input, normalized_shape, None, data, eps)
        out_one_weight = torch.layer_norm(input, normalized_shape, weight, data, eps)
        self.assertEqual(out_none_weight, out_one_weight)
        # None bias vs explicit zeros (``data`` serves as the weight here).
        out_none_bias = torch.layer_norm(input, normalized_shape, data, None, eps)
        out_zero_bias = torch.layer_norm(input, normalized_shape, data, bias, eps)
        self.assertEqual(out_none_bias, out_zero_bias)
def test_hardsigmoid_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardsigmoid, (inputs,)))
# currently fails on XLA
@onlyNativeDeviceTypes
def test_hardswish_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardswish, (inputs,)))
    def _test_batchnorm_eval(self, ndim, device, dtype, module_dtype=None):
        """BatchNorm1d in eval mode must be deterministic: two identical
        forward/backward passes give identical outputs and input grads.

        Also checks track_running_stats=False, where a training-mode pass and
        a subsequent eval-mode pass on the same data must agree.
        """
        module_dtype = module_dtype or dtype
        module = nn.BatchNorm1d(3).to(device, module_dtype)
        module.eval()
        data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
        grad = torch.rand([3] * ndim, device=device, dtype=dtype)
        # 1st pass
        res1 = module(data)
        res1.backward(grad)
        grad1 = data.grad.clone()
        # 2nd pass
        if data.grad is not None:
            data.grad.data.zero_()
        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad.clone()
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
        # track_running_stats=False
        module = nn.BatchNorm1d(3, track_running_stats=False).to(device, module_dtype)
        data = torch.rand(4, 3, device=device, dtype=dtype, requires_grad=True)
        grad = torch.rand(4, 3, device=device, dtype=dtype)
        # 1st pass
        res1 = module(data)
        res1.backward(grad)
        grad1 = data.grad.clone()
        # set eval
        module.eval()
        # 2nd pass
        if data.grad is not None:
            data.grad.data.zero_()
        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad.clone()
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
    @dtypes(torch.float)
    @dtypesIfCUDA(torch.float, torch.bfloat16)
    def test_batchnorm_eval(self, device, dtype):
        """Eval-mode determinism for 2D and 3D inputs, with and without cuDNN."""
        self._test_batchnorm_eval(2, device, dtype)
        self._test_batchnorm_eval(3, device, dtype)
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_batchnorm_eval(2, device, dtype)
                self._test_batchnorm_eval(3, device, dtype)
    @onlyCUDA
    @dtypes(torch.bfloat16, torch.half)
    def test_batchnorm_eval_mixed(self, device, dtype):
        """Eval-mode determinism when input dtype differs from the module's
        (float module, reduced-precision input)."""
        # Test bfloat16 input with float module
        self._test_batchnorm_eval(2, device, dtype, torch.float)
        self._test_batchnorm_eval(3, device, dtype, torch.float)
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_batchnorm_eval(2, device, dtype, torch.float)
                self._test_batchnorm_eval(3, device, dtype, torch.float)
    def _test_batchnorm_affine(self, ndim, device, dtype, module_dtype=None):
        """An affine BatchNorm1d with weight=1 and bias=0 must match a
        non-affine one in both output and input gradient."""
        # Compare affine against no-op weights and bias
        module_dtype = module_dtype or dtype
        module = nn.BatchNorm1d(3, affine=False).to(device, module_dtype)
        module_affine = nn.BatchNorm1d(3, affine=True).to(device, module_dtype)
        with torch.no_grad():
            module_affine.weight.fill_(1.0)
            module_affine.bias.zero_()
        data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
        grad = torch.ones_like(data, requires_grad=False)
        # With weights all ones and bias all zeros
        res1 = module_affine(data)
        res1.backward(grad)
        grad1 = data.grad.clone()
        data.grad.zero_()
        # Without any weights or bias
        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
    @dtypes(torch.float)
    @dtypesIfCUDA(torch.float, torch.bfloat16)
    def test_batchnorm_affine(self, device, dtype):
        """Affine no-op equivalence for 2D and 3D inputs, with and without cuDNN."""
        self._test_batchnorm_affine(2, device, dtype)
        self._test_batchnorm_affine(3, device, dtype)
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_batchnorm_affine(2, device, dtype)
                self._test_batchnorm_affine(3, device, dtype)
    @onlyCUDA
    @dtypes(torch.bfloat16, torch.half)
    def test_batchnorm_affine_mixed(self, device, dtype):
        """Affine no-op equivalence with float module and reduced-precision
        input; cuDNN path currently disabled (gh-62034)."""
        cudnn_enabled = [False]
        if self.device_type == 'cuda' and self.has_cudnn():
            # TODO: Test fails with cudnn, see gh-62034
            # cudnn_enabled = [False, True]
            pass
        # Test bfloat16 input with float module
        for enabled in cudnn_enabled:
            with torch.backends.cudnn.flags(enabled=enabled):
                self._test_batchnorm_affine(2, device, dtype, torch.float)
                self._test_batchnorm_affine(3, device, dtype, torch.float)
    def _test_batchnorm_simple_average(self, device, dtype, module_dtype=None):
        """With momentum=None, BatchNorm1d running stats over two batches must
        equal the arithmetic mean of the stats from each batch alone, and
        outputs must match the single-batch passes."""
        module_dtype = module_dtype or dtype
        module = nn.BatchNorm1d(3, momentum=None).to(dtype=module_dtype, device=device)
        zeros = torch.zeros(3, dtype=module_dtype, device=device)
        ones = torch.ones(3, dtype=module_dtype, device=device)
        # Freshly constructed module starts at mean 0, var 1.
        self.assertEqual(module.running_mean, zeros)
        self.assertEqual(module.running_var, ones)
        data1 = torch.rand(4, 3, dtype=dtype, device=device)
        data2 = torch.rand(4, 3, dtype=dtype, device=device)
        # 1st pass
        res1 = module(data1)
        running_mean1 = module.running_mean.clone()
        running_var1 = module.running_var.clone()
        self.assertNotEqual(running_mean1, zeros)
        self.assertNotEqual(running_var1, ones)
        # reset stats
        module.reset_running_stats()
        self.assertEqual(module.running_mean, zeros)
        self.assertEqual(module.running_var, ones)
        # 2nd pass
        res2 = module(data2)
        running_mean2 = module.running_mean.clone()
        running_var2 = module.running_var.clone()
        self.assertNotEqual(running_mean2, zeros)
        self.assertNotEqual(running_var2, ones)
        # reset stats
        module.reset_running_stats()
        self.assertEqual(module.running_mean, zeros)
        self.assertEqual(module.running_var, ones)
        # 3rd (combined) pass
        res3 = module(data1)
        res4 = module(data2)
        self.assertEqual(res3, res1)
        self.assertEqual(res4, res2)
        self.assertEqual(module.running_mean, (running_mean1 + running_mean2) / 2)
        self.assertEqual(module.running_var, (running_var1 + running_var2) / 2)
    @dtypes(torch.float)
    @dtypesIfCUDA(torch.float, torch.bfloat16)
    def test_batchnorm_simple_average(self, device, dtype):
        """Cumulative running-average semantics, with and without cuDNN."""
        self._test_batchnorm_simple_average(device, dtype)
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_batchnorm_simple_average(device, dtype)
    @onlyCUDA
    @dtypes(torch.bfloat16, torch.half)
    def test_batchnorm_simple_average_mixed(self, device, dtype):
        """Cumulative running-average semantics with a float module and
        reduced-precision input."""
        self._test_batchnorm_simple_average(device, dtype, torch.float)
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_batchnorm_simple_average(device, dtype, torch.float)
    def _test_maxpool_indices(self, num_dim, adaptive=False, device="cpu", dtype=torch.float):
        """Check (Adaptive)MaxPool{1,2,3}d with return_indices=True.

        Verifies forward values and indices against hand-computed expectations
        (1D/2D only), gradient routing through the indices, that mutating the
        returned indices invalidates backward, and that -inf inputs are
        propagated with index 0.
        """
        def expected_indices(dim):
            # Hand-computed flat indices for the arange input below; only
            # defined for dim 1 and 2 (3D is not checked against expectations).
            if dim == 1:
                return torch.tensor([1, 3], dtype=torch.double).repeat(2, 2, 1)
            if dim == 2:
                return torch.tensor([[5, 7], [13, 15]], dtype=torch.double).repeat(2, 2, 1, 1)
        def expected_grad(dim):
            # Built recursively: the (dim-1) pattern interleaved with zeros.
            if dim == 1:
                return torch.tensor([0, 1, 0, 1], dtype=torch.double).repeat(2, 2, 1)
            grad = expected_grad(dim - 1)
            zero = torch.zeros(grad.size())
            return torch.stack((zero, grad, zero, grad), 2)
        def expected_output(dim):
            if dim == 1:
                return torch.arange(2, 17, 2).view(2, 2, 2)
            if dim == 2:
                col = torch.arange(6, 63, 8)
                return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)
        if adaptive:
            cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
        else:
            cls_name = 'MaxPool{}d'.format(num_dim)
        module_cls = getattr(nn, cls_name)
        module = module_cls(2, return_indices=True).to(device, dtype=dtype)
        numel = 4 ** (num_dim + 1)
        input = torch.arange(1, numel + 1).view(2, 2, *repeat(4, num_dim)).to(device, dtype=dtype)
        input_var = input.clone().detach().requires_grad_()
        # Check forward
        output, indices = module(input_var)
        if num_dim != 3:
            expected_indices = expected_indices(num_dim)
            expected_output = expected_output(num_dim)
            self.assertEqual(indices.dim(), input.dim())
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(indices.data.squeeze(), expected_indices)
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(output.data.squeeze(), expected_output)
        self.assertTrue(output.requires_grad)
        self.assertFalse(indices.requires_grad)
        # Make sure backward works
        grad_output = torch.ones(output.size(), device=device, dtype=dtype)
        output.backward(grad_output, retain_graph=True)
        expected_grad = expected_grad(num_dim)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(input_var.grad.data, expected_grad.view_as(input))
        # Make sure backward after changing indices will result in an error
        indices.add_(1)
        self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
        # Make sure -Infinity is handled correctly
        t = torch.tensor([[[float("-inf")]]])
        m = nn.MaxPool1d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0], 0)
        t = torch.tensor([[[float("-inf")]]])
        m = nn.MaxPool2d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0], 0)
        t = torch.tensor([[[[float("-inf")]]]])
        m = nn.MaxPool3d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0, 0], 0)
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_MaxPool1d_indices(self, device, dtype):
        """MaxPool1d with return_indices=True (see _test_maxpool_indices)."""
        self._test_maxpool_indices(1, device=device, dtype=dtype)
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_MaxPool2d_indices(self, device, dtype):
        """MaxPool2d with return_indices=True (see _test_maxpool_indices)."""
        self._test_maxpool_indices(2, device=device, dtype=dtype)
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_MaxPool3d_indices(self, device, dtype):
        """MaxPool3d with return_indices=True (see _test_maxpool_indices)."""
        self._test_maxpool_indices(3, device=device, dtype=dtype)
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_AdaptiveMaxPool1d_indices(self, device, dtype):
        """AdaptiveMaxPool1d with return_indices=True (see _test_maxpool_indices)."""
        self._test_maxpool_indices(1, adaptive=True, device=device, dtype=dtype)
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_AdaptiveMaxPool2d_indices(self, device, dtype):
        """AdaptiveMaxPool2d with return_indices=True (see _test_maxpool_indices)."""
        self._test_maxpool_indices(2, adaptive=True, device=device, dtype=dtype)
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_AdaptiveMaxPool3d_indices(self, device, dtype):
        """AdaptiveMaxPool3d with return_indices=True (see _test_maxpool_indices)."""
        self._test_maxpool_indices(3, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_maxpool_indices_no_batch_dim(self, device, dtype):
"""Check that indices with no batch dim is consistent with a single batch."""
max_pool_cases = [
(nn.MaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.MaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.MaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype))]
for module, input in max_pool_cases:
_, indices_no_batch = module(input)
_, indicies_single_batch = module(input.unsqueeze(0))
self.assertEqual(indices_no_batch, indicies_single_batch.squeeze(0))
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float)
    @onlyNativeDeviceTypes  # TODO: Fails on XLA
    def test_max_pool_nan_inf(self, device, dtype):
        """max_pool{1,2,3}d and the adaptive variants must propagate NaN and
        -inf inputs to the output, both with and without autograd tracking."""
        for adaptive in ['', 'adaptive_']:
            for num_dim in [1, 2, 3]:
                fn_name = '{}max_pool{}d'.format(adaptive, num_dim)
                fn = getattr(F, fn_name)
                # Adaptive pooling takes an output size; plain pooling a kernel size.
                x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
                res = fn(x, 1 if adaptive else 3)
                res.backward(torch.randn_like(res))
                self.assertTrue(math.isnan(res.item()))
                x.requires_grad_(False)
                res = fn(x, 1 if adaptive else 3)
                self.assertTrue(math.isnan(res.item()))
                x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
                res2 = fn(x2, 1 if adaptive else 3)
                res2.backward(torch.randn_like(res2))
                self.assertTrue(math.isinf(res2.item()))
                x2.requires_grad_(False)
                res2 = fn(x2, 1 if adaptive else 3)
                self.assertTrue(math.isinf(res2.item()))
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_grid_sample_nan_inf(self, device, dtype):
input = torch.zeros([1, 1, 3, 3], device=device, dtype=dtype)
grid = torch.tensor([[[[nan, 0], [0, inf]]]], device=device, dtype=dtype)
for padding_mode in ('reflection', 'border', 'zeros'):
sample = torch.nn.functional.grid_sample(input=input, grid=grid, mode='nearest',
padding_mode=padding_mode, align_corners=False)
self.assertEqual(sample, torch.zeros([1, 1, 1, 2], device=device, dtype=dtype))
    @expectedFailureMeta  # RuntimeError: Unrecognized tensor type ID: Meta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool2d(self, device):
        """fractional_max_pool2d: output shape, gradcheck/gradgradcheck for
        batched and unbatched inputs, and errors on bad kernel/output sizes."""
        x = torch.randn(1, 2, 7, 7, requires_grad=True, device=device)
        samples = x.new(1, 2, 2).uniform_()
        def func(x):
            return F.fractional_max_pool2d(
                x, (2, 2), output_size=(3, 3), _random_samples=samples)
        self.assertEqual(func(x).shape, (1, 2, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Unbatched (3D) input.
        x = torch.randn(2, 7, 7, requires_grad=True, device=device)
        self.assertEqual(func(x).shape, (2, 3, 3))
        if self.device_type != 'cuda':
            # Reference: https://github.com/pytorch/pytorch/issues/52427
            # Raises -> RuntimeError: TensorAccessor expected 4 dims but tensor has 3
            # on CUDA in gradcheck
            gradcheck(func, [x])
            gradgradcheck(func, [x])
        for kernel_size in [(), (1,)]:
            with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                # Incorrect kernel_size
                F.fractional_max_pool2d(x, kernel_size=kernel_size, output_size=(3, 3), _random_samples=samples)
        err_large_msg = "too large relative to input "
        err_out_size_msg = "output_size must either"
        for output_size, msg in [((9, 3), err_large_msg + "height"),
                                 ((3, 9), err_large_msg + "width"),
                                 ((3,), err_out_size_msg),
                                 ((), err_out_size_msg)]:
            with self.assertRaisesRegex(RuntimeError, msg):
                # Incorrect output_size
                F.fractional_max_pool2d(x, (2, 2), output_size=output_size, _random_samples=samples)
    @expectedFailureMeta  # RuntimeError: Unrecognized tensor type ID: Meta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool3d(self, device):
        """fractional_max_pool3d: output shape, gradcheck/gradgradcheck for
        batched and unbatched inputs, and errors on bad kernel/output sizes."""
        x = torch.randn(1, 2, 7, 7, 7, requires_grad=True, device=device)
        samples = x.new(1, 2, 3).uniform_()
        def func(x):
            return F.fractional_max_pool3d(
                x, (2, 2, 2), output_size=(3, 3, 3), _random_samples=samples)
        self.assertEqual(func(x).shape, (1, 2, 3, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Unbatched (4D) input.
        x = torch.randn(2, 7, 7, 7, requires_grad=True, device=device)
        self.assertEqual(func(x).shape, (2, 3, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        for kernel_size in [(), (1,), (1, 1)]:
            with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                # Incorrect kernel_size
                F.fractional_max_pool3d(x, kernel_size=kernel_size, output_size=(3, 3, 3), _random_samples=samples)
        err_large_msg = "too large relative to input "
        err_out_size_msg = "output_size must either"
        for output_size, msg in [((9, 3, 3), err_large_msg + "time"),
                                 ((3, 9, 3), err_large_msg + "height"),
                                 ((3, 3, 9), err_large_msg + "width"),
                                 ((3, 3), err_out_size_msg),
                                 ((3,), err_out_size_msg),
                                 ((), err_out_size_msg)]:
            with self.assertRaisesRegex(RuntimeError, msg):
                # Incorrect output_size
                F.fractional_max_pool3d(x, (2, 2, 2), output_size=output_size, _random_samples=samples)
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float)
    @onlyNativeDeviceTypes  # TODO: Fails on XLA
    def test_fractional_max_pool_nan_inf(self, device, dtype):
        """FractionalMaxPool{2,3}d must propagate NaN and -inf inputs through
        forward and allow backward on those outputs."""
        for num_dim in [2, 3]:
            fn_name = 'FractionalMaxPool{}d'.format(num_dim)
            fn = getattr(nn, fn_name)(kernel_size=2, output_size=1)
            x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
            res = fn(x)
            res.backward(torch.randn_like(res))
            self.assertTrue(math.isnan(res.item()))
            x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
            res2 = fn(x2)
            res2.backward(torch.randn_like(res2))
            self.assertTrue(math.isinf(res2.item()))
@onlyNativeDeviceTypes # TODO: RuntimeError message different on XLA
def test_pooling_zero_stride(self, device):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
fn = getattr(F, fn_name)
x = torch.ones([1, 2] + num_dim * [4], device=device, dtype=torch.float)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn(x, kernel_size=2, stride=0))
fn_module_name = '{}Pool{}d'.format(op.title(), num_dim)
fn_module = getattr(nn, fn_module_name)(kernel_size=2, stride=0)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn_module(x))
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_pool_large_size(self, device, dtype):
        """Pooling over an input dimension too large to represent exactly in
        float32 must still compute the correct output length."""
        for op in ('max', 'avg'):
            for num_dim in [1, 2, 3]:
                fn_name = '{}_pool{}d'.format(op, num_dim)
                fn = getattr(F, fn_name)
                # 16777217 is the smallest integer not expressible in float32
                x = torch.ones([1, 1, 16777217] + (num_dim - 1) * [1],
                               device=device, dtype=dtype)
                res = fn(x, 1, stride=1, padding=0)
                # check if the output shape was still computed correctly
                self.assertEqual(x.shape[2], res.shape[2])
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_pool_invalid_size(self, device, dtype):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
if op == 'max':
# New implementation without indices supports empty tensors
# TODO(Heitor) change once with_indices code is updated
fn_name += '_with_indices'
fn = getattr(F, fn_name)
# use a configuration that gives zero outputs only
# when doing a correct floor division by the stride
x = torch.ones([1, 1] + num_dim * [4],
device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"too small|smaller than"):
try:
res = fn(x, 3, stride=2, padding=0, dilation=2)
except TypeError:
# some implementations do not support dilation
res = fn(x, 6, stride=2, padding=0)
    def test_CTCLoss_empty_target(self, device):
        """CTC loss with zero-length targets must be non-negative and equal
        the negative sum of the blank (index 0) log-probabilities over the
        full input length."""
        # All targets empty.
        target_lengths = [0, 0, 0]
        input_lengths = [50, 50, 50]
        targets = torch.randint(1, 15, (0,), dtype=torch.long, device=device)
        log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
        loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
        self.assertTrue((loss >= 0).all().item())
        self.assertEqual(-log_probs.sum(0)[:, 0], loss)
        # Mixed: only batch element 1 has a non-empty target.
        target_lengths = [0, 9, 0]
        input_lengths = [50, 50, 50]
        targets = torch.randint(1, 15, (9,), dtype=torch.long, device=device)
        log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
        loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
        self.assertTrue((loss >= 0).all().item())
        self.assertEqual(-log_probs.sum(0)[[0, 2], 0], loss[[0, 2]])
    # Merge into OpInfo?
    @skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
                         https://github.com/pytorch/pytorch/issues/34870""")
    def test_ctc_loss(self, device):
        """gradcheck ctc_loss through log_softmax across varying input/target
        lengths, including configurations where some or all targets have
        zero length."""
        batch_size = 64
        num_labels = 101
        target_length = 15
        gradcheck_input_size = 10
        ZERO_NONE = 0
        ZERO_SOME = 1
        ZERO_ALL = 2
        # input_length, vary_lengths, zero_lengths
        tests = [(150, False, ZERO_NONE),
                 (150, True, ZERO_NONE),
                 (50, True, ZERO_SOME),
                 (50, True, ZERO_ALL)]
        if 'cuda' in device:
            tests += [(50, False, ZERO_NONE),
                      (50, True, ZERO_NONE),
                      (150, True, ZERO_SOME),
                      (150, True, ZERO_ALL)]
        for input_length, vary_lengths, zero_mode in tests:
            targets = torch.randint(1, num_labels, (batch_size, target_length),
                                    device=device, dtype=torch.long)
            # gradcheck differentiates only a small seed vector ``x``; the full
            # log-prob tensor is synthesized from it inside ctc_after_softmax.
            x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
            tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                       device=device)
            input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                              if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
            if zero_mode == ZERO_ALL:
                target_lengths = [0 for _ in range(batch_size)]
            else:
                target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                                   if vary_lengths else target_length) for _ in range(batch_size)]
                if zero_mode == ZERO_SOME:
                    idxes = torch.randint(0, batch_size, (10,))
                    for i in idxes:
                        target_lengths[i] = 0
            def ctc_after_softmax(x):
                x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                          .view(input_length, batch_size, num_labels))
                log_probs = torch.log_softmax(x_full, 2)
                return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
            gradcheck(ctc_after_softmax, [x])
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfCudnnVersionLessThan(7600)
    def test_ctc_loss_cudnn(self, device):
        """Compare the cuDNN CTC loss gradient against the native implementation."""
        batch_size = 16
        input_length = 30
        num_labels = 101
        target_length = 15
        targets = torch.randint(1, num_labels, (batch_size * target_length,),
                                device='cuda', dtype=torch.long)
        log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
        log_probs.requires_grad_()
        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]
        grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
        # Native reference: force the non-cuDNN path.
        with torch.backends.cudnn.flags(enabled=False):
            loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
            grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
        # int32 CPU targets select the cuDNN path (asserted via grad_fn below).
        loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                                  input_lengths, target_lengths, reduction='none')
        self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
        grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
        self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_empty_dropout(self, device):
x = torch.tensor([]).to(device)
out = torch.nn.functional.dropout(x)
self.assertEqual(out.size(), x.size())
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float)
    @tf32_on_and_off(0.005)
    def test_variable_sequence(self, device, dtype):
        """Compare packed-sequence LSTM results against per-sequence evaluation.

        Outputs, final hidden states, input gradients, and parameter gradients
        must all match between running a PackedSequence through one LSTM and
        running each variable-length sequence separately through a deep copy.
        """
        def pad(var, length):
            # Zero-pad along the time dimension (dim 0) up to `length`.
            if var.size(0) == length:
                return var
            return torch.cat([var, var.new_zeros(length - var.size(0), *var.size()[1:])])
        def maybe_index_tuple(maybe_tuple_of_tensors, index):
            # Select batch element `index` from an (h0, c0) tuple; None passes through.
            if maybe_tuple_of_tensors is None:
                return None
            return tuple(maybe_tuple_of_tensors[j][:, index:index + 1, :].contiguous()
                         for j in range(2))
        def check_lengths(lengths, enforce_sorted, use_default_hiddens, proj_size):
            input_size = 3
            hidden_size = 4
            num_layers = 2
            bidirectional = True
            max_length = max(lengths)
            x_leaf = torch.randn(max_length, len(lengths), input_size, device=device,
                                 dtype=dtype, requires_grad=True)
            num_directions = 2 if bidirectional else 1
            # `lstm` consumes the packed batch; `lstm2` is an identical copy
            # used for the per-sequence reference computation.
            lstm = nn.LSTM(input_size, hidden_size, bidirectional=bidirectional,
                           num_layers=num_layers, proj_size=proj_size).to(device, dtype)
            lstm2 = deepcopy(lstm).to(device, dtype)
            x = x_leaf
            hidden0 = None
            if not use_default_hiddens:
                # With projections, h0 has proj_size features while c0 keeps hidden_size.
                real_hidden_size = hidden_size if proj_size == 0 else proj_size
                hidden0 = (torch.randn(num_directions * num_layers, len(lengths), real_hidden_size,
                                       device=device, dtype=dtype),
                           torch.randn(num_directions * num_layers, len(lengths), hidden_size,
                                       device=device, dtype=dtype))
            # Compute sequences separately
            seq_outs = []
            seq_hiddens = []
            for i, l in enumerate(lengths):
                hidden_i = maybe_index_tuple(hidden0, i)
                out, hid = lstm2(x[:l, i:i + 1], hidden_i)
                out_pad = pad(out, max_length)
                seq_outs.append(out_pad)
                seq_hiddens.append(hid)
            seq_out = torch.cat(seq_outs, 1)
            seq_hidden = tuple(torch.cat(hids, 1) for hids in zip(*seq_hiddens))
            # Use packed format
            packed = rnn_utils.pack_padded_sequence(x, lengths, enforce_sorted=enforce_sorted)
            packed_out, packed_hidden = lstm(packed, hidden0)
            unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed_out)
            # Check forward
            prec = dtype2prec_DONTUSE[dtype]
            self.assertEqual(packed_hidden, seq_hidden, atol=prec, rtol=0)
            self.assertEqual(unpacked, seq_out, atol=prec, rtol=0)
            self.assertEqual(unpacked_len, lengths, atol=prec, rtol=0)
            # Check backward
            seq_out.sum().backward()
            grad_x = x_leaf.grad.data.clone()
            x_leaf.grad.data.zero_()
            unpacked.sum().backward()
            self.assertEqual(x_leaf.grad, grad_x, atol=dtype2prec_DONTUSE[dtype], rtol=0)
            for p1, p2 in zip(lstm.parameters(), lstm2.parameters()):
                prec = dtype2prec_DONTUSE[dtype]
                if dtype == torch.float16:
                    # fp16 parameter gradients accumulate more rounding error.
                    prec = 4e-2
                self.assertEqual(p1.grad, p2.grad, atol=prec, rtol=0)
        tests = [
            # enforce_sorted, lengths
            [True, [5]],
            [False, [5]],
            [True, [10, 10, 6, 2, 2, 1, 1]],
            [False, [10, 10, 6, 2, 2, 1, 1]],
            [False, [2, 1, 3, 2, 10, 5, 3]],
        ]
        for enforce_sorted, seq_lens, in tests:
            for use_default_hiddens in (True, False):
                for proj_size in [0, 2]:
                    check_lengths(seq_lens, enforce_sorted, use_default_hiddens, proj_size)
def _test_batchnorm_update_stats(self, device, dtype=torch.float):
module = nn.BatchNorm1d(3).to(device, dtype)
data = torch.rand(4, 3, device=device, dtype=dtype)
# training pass
old_running_mean = module.running_mean.clone()
old_running_var = module.running_var.clone()
old_num_batches_tracked = module.num_batches_tracked.clone()
module(data)
self.assertNotEqual(old_running_mean, module.running_mean)
self.assertNotEqual(old_running_var, module.running_var)
self.assertEqual(old_num_batches_tracked + 1, module.num_batches_tracked)
# eval pass
module.eval()
old_running_mean = module.running_mean.clone()
old_running_var = module.running_var.clone()
old_num_batches_tracked = module.num_batches_tracked.clone()
module(data)
self.assertEqual(old_running_mean, module.running_mean)
self.assertEqual(old_running_var, module.running_var)
self.assertEqual(old_num_batches_tracked, module.num_batches_tracked)
    def test_batchnorm_update_stats(self, device):
        """Run the running-stats check; on CUDA, repeat with cuDNN disabled."""
        self._test_batchnorm_update_stats(device)
        if self.device_type == 'cuda' and self.has_cudnn():
            # Also exercise the native (non-cuDNN) batch norm path.
            with torch.backends.cudnn.flags(enabled=False):
                self._test_batchnorm_update_stats(device)
def test_multi_margin_loss_errors(self, device):
self.assertRaises(RuntimeError,
lambda: nn.functional.multi_margin_loss(torch.randn(5, device=device),
torch.zeros(3, device=device)))
    def _test_bfloat16_ops(self, op, device, inp_dims=(), prec=1e-2, scale_factor=None):
        """Run `op` in fp32 and bfloat16 and compare outputs and input gradients.

        `scale_factor`, when given, rescales a bfloat16-representable random
        input so that large magnitudes (e.g. softmax overflow cases) are
        exercised while keeping the fp32/bf16 inputs bitwise comparable.
        """
        # fp32 compute
        input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True)
        if scale_factor is not None:
            # Draw in bfloat16 first so the scaled value round-trips exactly to fp32.
            input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_()
        out1 = op(input1)
        grad_input1 = torch.randn_like(out1, device=device)
        out1.backward(grad_input1)
        # bfloat16 compute
        # NOTE(review): Module.bfloat16() casts parameters in place; the ops
        # passed here appear to be parameter-free — confirm before reusing `op`.
        op_bfp16 = op.bfloat16()
        input2 = input1.detach().bfloat16().requires_grad_()
        grad_input2 = grad_input1.bfloat16()
        out2 = op_bfp16(input2)
        out2.backward(grad_input2)
        # exact_dtype=False: comparing bfloat16 results against the fp32 reference.
        self.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False)
        self.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False)
@onlyCUDA
def test_activations_bfloat16(self, device):
self._test_bfloat16_ops(torch.nn.ReLU(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Threshold(0.1, 20), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.ELU(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Softplus(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Hardshrink(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Softshrink(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.LeakyReLU(), device, inp_dims=(5), prec=1e-2)
@onlyCUDA
def test_pooling_bfloat16(self, device):
self._test_bfloat16_ops(torch.nn.AvgPool1d(3, stride=2), device, inp_dims=(8, 4, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AvgPool2d(3, stride=2), device, inp_dims=(8, 4, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AvgPool3d(3, stride=2), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool1d(3), device, inp_dims=(8, 4, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool2d((3, 5)), device, inp_dims=(8, 4, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool3d((3, 5, 7)), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)
@onlyNativeDeviceTypes
def test_softmax_bfloat16(self, device):
for dim in [0, 1, 2, 3]:
self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=1e-2)
# test softmax with large input value which casues exp() to overflow
self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=0.05, scale_factor=1000.0)
@onlyCUDA
@skipCUDAIfRocmVersionLessThan((4, 3))
@skipCUDAIfNotMiopenSuggestNHWC
@skipCUDAIfCudnnVersionLessThan(7603)
@dtypes(torch.half, torch.float)
def test_conv_cudnn_nhwc(self, device, dtype):
def helper(n, c, h, w, out_channels, kernel_size, groups):
input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device)\
.to(memory_format=torch.channels_last)
input.requires_grad_()
conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)\
.to(device='cuda', dtype=dtype, memory_format=torch.channels_last)
for p in conv.parameters():
p.data = torch.randint_like(p, -3, 3)
# use FP64 channels-first conv as reference
ref_input = input.detach().clone().contiguous().double().requires_grad_()
ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)
# load_state_dict will restore the stride & memory_layout on ref_conv.weight.
ref_conv.load_state_dict(conv.state_dict())
ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)
out = conv(input)
ref_out = ref_conv(ref_input)
grad = torch.randint_like(out, -3, 3)
ref_grad = grad.detach().clone().double().contiguous()
out.backward(grad)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ref_input.grad.is_contiguous())
self.assertTrue(ref_conv.weight.grad.is_contiguous())
self.assertEqual(out, ref_out, exact_dtype=False)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1)
helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8)
helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1)
helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(8005)
@dtypes(torch.half, torch.float)
def test_conv_cudnn_ndhwc(self, device, dtype):
def helper(n, c, d, h, w, out_channels, kernel_size, groups):
input = torch.randint(-2, 2, (n, c, d, h, w), dtype=dtype, device=device)\
.to(memory_format=torch.channels_last_3d)
input.requires_grad_()
conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)\
.to(device='cuda', dtype=dtype, memory_format=torch.channels_last_3d)
for p in conv.parameters():
p.data = torch.randint_like(p, -2, 2)
# use FP64 channels-first conv as reference
ref_input = input.detach().clone().contiguous().double().requires_grad_()
ref_conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)
# load_state_dict will restore the stride & memory_layout on ref_conv.weight.
ref_conv.load_state_dict(conv.state_dict())
ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)
out = conv(input)
ref_out = ref_conv(ref_input)
grad = torch.randint_like(out, -2, 2)
ref_grad = grad.detach().clone().double().contiguous()
out.backward(grad)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last_3d))
self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last_3d))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ref_input.grad.is_contiguous())
self.assertTrue(ref_conv.weight.grad.is_contiguous())
self.assertEqual(out, ref_out, exact_dtype=False)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
helper(2, 8, 4, 4, 4, out_channels=4, kernel_size=3, groups=1)
helper(2, 8, 4, 4, 4, out_channels=8, kernel_size=3, groups=8)
helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=1)
helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=16)
    def _run_conv(self, layer, device, inp, grad, ref_conv, ref_input, ref_out,
                  input_format, weight_format, grad_format, output_format):
        """Re-run a convolution under specific memory formats and compare to a reference.

        `layer` is the conv class (e.g. nn.Conv2d). The weights are copied from
        `ref_conv`, then input/weight/grad are forced into the requested memory
        formats; the forward output (value and `output_format` layout) and all
        gradients are checked against the precomputed reference tensors.
        """
        conv = layer(inp.size(1), grad.size(1),
                     ref_conv.weight.size(2)).float().to(device)
        # load_state_dict will restore the stride & memory_layout on ref_conv.weight.
        conv.load_state_dict(ref_conv.state_dict())
        # resize_ with an explicit memory_format pins the desired physical layout.
        weight_data = conv.weight.detach().clone().contiguous(memory_format=weight_format)
        conv.weight.data = weight_data.resize_(weight_data.size(), memory_format=weight_format)
        input = inp.clone().contiguous(memory_format=input_format)
        input.resize_(input.size(), memory_format=input_format)
        input = input.requires_grad_()
        grad = grad.contiguous(memory_format=grad_format)
        grad.resize_(grad.size(), memory_format=grad_format)
        out = conv(input)
        out.backward(grad)
        self.assertTrue(out.is_contiguous(memory_format=output_format))
        self.assertEqual(out, ref_out)
        self.assertEqual(conv.weight.grad, ref_conv.weight.grad)
        self.assertEqual(conv.bias.grad, ref_conv.bias.grad)
        self.assertEqual(input.grad, ref_input.grad)
def _test_conv_cudnn_nhwc_nchw(self, layer, n, c, h, w, k, filter_size, device):
data = torch.randint(1, 10, (n, c, h, w), dtype=torch.float32, device=device)
ref_input = data.clone().contiguous().requires_grad_(True)
ref_conv = layer(c, k, filter_size).float().to(device)
ref_out = ref_conv(ref_input)
grad = torch.randint(1, 10, ref_out.size(), dtype=torch.float32, device="cuda")
ref_out.backward(grad)
for w_f in [torch.contiguous_format, torch.channels_last]:
for g_f in [torch.contiguous_format, torch.channels_last]:
for input_format in [torch.contiguous_format, torch.channels_last]:
output_format = torch.contiguous_format
# Older versions of CudNN have Channels Last support disabled
if torch.backends.cudnn.version() >= 7603:
if input_format == torch.channels_last:
output_format = torch.channels_last
# This is because we have N111 weight that cannot handle
# the ambiguous memory_format
if w_f == torch.channels_last:
if layer == nn.Conv2d and filter_size * c != 1:
output_format = torch.channels_last
if layer == nn.ConvTranspose2d and filter_size * k != 1:
output_format = torch.channels_last
self._run_conv(layer, device, data, grad, ref_conv, ref_input,
ref_out, input_format, w_f, g_f, output_format)
@onlyCUDA
@skipCUDAIfRocmVersionLessThan((4, 3))
@skipCUDAIfNotMiopenSuggestNHWC
@skipCUDAIfCudnnVersionLessThan(7603)
@tf32_on_and_off(0.05)
def test_conv_cudnn_mismatch_memory_format(self, device):
configs = [
[4, 2, 8, 8, 4, 2],
[4, 1, 8, 8, 4, 2],
[1, 1, 8, 8, 4, 2],
[4, 2, 2, 8, 4, 1],
[4, 2, 1, 8, 4, 1],
[4, 2, 8, 8, 4, 1],
[4, 1, 8, 8, 4, 1],
]
for n, c, h, w, k, filter_size in configs:
self._test_conv_cudnn_nhwc_nchw(nn.Conv2d, n, c, h, w, k, filter_size, device)
self._test_conv_cudnn_nhwc_nchw(nn.ConvTranspose2d, n, c, h, w, k, filter_size, device)
# torch.half is erroring out on Windows with CUDA 10.1 + cuDNN 7.6.4
# returning CUDNN_STATUS_BAD_PARAM
# Disabling that specific test for now [see issue # 33918]
@onlyCUDA
@skipCUDAIfNoCudnn
@dtypes(torch.float, torch.double)
def test_conv_cudnn_nhwc_support(self, device, dtype):
input = torch.randn((1, 16, 1, 1), dtype=dtype, device="cuda", requires_grad=True)
weight = torch.randn((8, 16, 3, 3), dtype=dtype, device="cuda", requires_grad=True)
weight = weight.to(memory_format=torch.channels_last)
o = torch.conv2d(input, weight, None, (2, 1), (1, 1), (1, 1), 1)
self.assertTrue(o.is_contiguous(memory_format=torch.channels_last))
o.sum().backward()
# Test that faster algorithms used for inference produce the same results
# Validates depthwise3x3 bug reported in https://github.com/pytorch/pytorch/issues/60176
@onlyCPU
@dtypes(torch.float)
def test_conv2d_no_grad(self, device, dtype):
for batch in [1, 2, 3]:
for groups in [1, 2, 4]:
input = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device)
m = nn.Conv2d(groups, 8, kernel_size=(3, 3), groups=groups, dtype=dtype, device=device)
with torch.no_grad():
output_ng = m(input)
output = m(input)
self.assertEqual(output, output_ng, rtol=1e-2, atol=1e-5)
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfNoCudnn
    @dtypes(torch.float, torch.float16)
    @precisionOverride({torch.half: 0.002, torch.float: 1e-4})
    def test_cudnn_convolution_relu(self, device, dtype):
        """Fused cudnn_convolution_relu must equal conv2d followed by relu."""
        for batch, groups, image_size, kernel_size, memory_format in \
                product((1, 2, 3),
                        (1, 2, 4),
                        ((1, 1), (8, 8)),
                        ((1, 1), (3, 3)),
                        (torch.channels_last, torch.contiguous_format)):
            # Skip shapes where the kernel does not fit in the image.
            if image_size[0] < kernel_size[0]:
                continue
            inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)
            w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)
            # Reference computed before the memory-format conversion.
            conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
            inp = inp.to(memory_format=memory_format)
            w = w.to(memory_format=memory_format)
            cudnn_out = torch.cudnn_convolution_relu(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
            # The fused op must preserve the requested memory format.
            self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))
            self.assertEqual(conv2d_out.relu(), cudnn_out)
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfNoCudnn
    @dtypes(torch.float, torch.float16)
    @precisionOverride({torch.half: 0.002, torch.float: 1e-4})
    def test_cudnn_convolution_add_relu(self, device, dtype):
        """Fused cudnn_convolution_add_relu must equal relu(conv2d(x) + alpha * z)."""
        for batch, groups, image_size, kernel_size, memory_format in \
                product((1, 2, 3),
                        (1, 2, 4),
                        ((1, 1), (8, 8)),
                        ((1, 1), (3, 3)),
                        (torch.channels_last, torch.contiguous_format)):
            # Skip shapes where the kernel does not fit in the image.
            if image_size[0] < kernel_size[0]:
                continue
            inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)
            w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)
            # Reference computed before the memory-format conversion.
            conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
            alpha = 2.0
            z = torch.randn_like(conv2d_out)
            inp = inp.to(memory_format=memory_format)
            w = w.to(memory_format=memory_format)
            z = z.to(memory_format=memory_format)
            cudnn_out = torch.cudnn_convolution_add_relu(inp, w, z, alpha, None, (1, 1), (0, 0), (1, 1), 1)
            # The fused op must preserve the requested memory format.
            self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))
            self.assertEqual(F.relu(conv2d_out + alpha * z), cudnn_out)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7603)
def test_convert_conv2d_weight_memory_format(self, device):
input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device=device)
model = nn.Sequential(
nn.Conv2d(8, 4, 3),
nn.BatchNorm2d(4)).to(device).float()
for memory_format in [torch.channels_last, torch.contiguous_format]:
model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)
out = model(input)
self.assertTrue(out.is_contiguous(memory_format=memory_format))
model = nn.Sequential(
nn.ConvTranspose2d(8, 4, 3),
nn.BatchNorm2d(4)).to(device).float()
for memory_format in [torch.channels_last, torch.contiguous_format]:
model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)
out = model(input)
self.assertTrue(out.is_contiguous(memory_format=memory_format))
    def test_conv_double_backward_strided_with_3D_input_and_weight(self, device):
        # Test that _convolution_double_backward() outputs the correct grad shapes
        # for 3D input / weight when stride > 1. This is an ad-hoc regression test for a
        # specific case that was uncovered during the convolution consolidation effort.
        # The test can be safely deleted if _convolution_double_backward() is removed.
        input = torch.randn(2, 3, 6, device=device)
        weight = torch.randn(3, 3, 3, device=device)
        bias = torch.randn(3, device=device)
        stride = (2,)
        padding = (1,)
        dilation = (1,)
        transposed = False
        output_padding = (0,)
        groups = 1
        output = torch.ops.aten.convolution(input, weight, bias, stride, padding, dilation, transposed,
                                            output_padding, groups)
        # ggI/ggW/ggB: incoming grads w.r.t. the input/weight/bias gradients;
        # gO: gradient w.r.t. the convolution output.
        ggI = torch.randn(input.shape, device=device)
        ggW = torch.randn(weight.shape, device=device)
        ggB = torch.randn(bias.shape, device=device)
        gO = torch.randn(output.shape, device=device)
        output_mask = [True, True, True]
        grad_grad_output, grad_input, grad_weight = torch.ops.aten._convolution_double_backward(
            ggI, ggW, ggB, gO, weight, input, stride, padding, dilation, transposed,
            output_padding, groups, output_mask)
        # Make sure the correct shapes are computed.
        self.assertEqual(grad_grad_output.shape, gO.shape)
        self.assertEqual(grad_input.shape, input.shape)
        self.assertEqual(grad_weight.shape, weight.shape)
def test_nll_loss_mismatched_batch(self, device):
x = torch.randn((10, 3), requires_grad=True, device=device)
# t should have size (10,)
t = torch.zeros((3,), dtype=torch.int64, device=device)
with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):
F.nll_loss(x, t)
def test_nll_loss_out_of_bounds_ignore_index(self, device):
x = torch.randn(6, 3, requires_grad=True, device=device)
t = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
for reduction in ['mean', 'none']:
F.nll_loss(x, t, ignore_index=255, reduction=reduction).sum().backward()
def test_nll_loss_invalid_target_dim(self, device):
x = torch.randn((10, 3), device=device)
t = torch.zeros((10, 2), dtype=torch.int64, device=device)
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.nll_loss(x, t)
def test_nll_loss_invalid_weights(self, device):
x = torch.randn((10, 3), device=device)
t = torch.empty(10, dtype=torch.int64, device=device).random_(0, 3)
invalid_weights = [
torch.randn(4, device=device),
torch.randn(1, 3, device=device),
]
msg = "weight tensor should be defined either for all 3 classes or no classes"
for weight in invalid_weights:
with self.assertRaisesRegex(RuntimeError, msg):
F.nll_loss(x, t, weight=weight)
    def _nll_loss_helper(self, input_size, reduction, expected, device):
        """Check F.nll_loss output against `expected` and that backward runs.

        `input_size` is (N, C, *extra_dims); the target is randomly drawn from
        the C classes.
        """
        input = torch.rand(input_size, requires_grad=True, device=device)
        num_channels = input_size[1]
        target_size = (input_size[0], ) + tuple(input_size[2:])
        target = torch.randint(num_channels, target_size, device=device)
        output = F.nll_loss(input, target, reduction=reduction)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(output, expected)
        output.sum().backward()
        # The gradient must cover the full input, even for empty inputs.
        self.assertEqual(input.grad.size(), input.size())
def test_nll_loss_empty_tensor_reduction_none(self, device):
self._nll_loss_helper([0, 3], "none", torch.empty([0], device=device), device)
self._nll_loss_helper([0, 3, 5, 7], "none", torch.empty([0, 5, 7], device=device), device)
self._nll_loss_helper([2, 3, 0, 7], "none", torch.empty([2, 0, 7], device=device), device)
self._nll_loss_helper([2, 3, 5, 0], "none", torch.empty([2, 5, 0], device=device), device)
self._nll_loss_helper([2, 3, 5, 7, 0], "none", torch.empty([2, 5, 7, 0], device=device), device)
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_empty_tensor_reduction_mean(self, device):
nan = torch.tensor(float('nan'), device=device)
self._nll_loss_helper([0, 3], "mean", nan, device)
self._nll_loss_helper([0, 3, 5, 7], "mean", nan, device)
self._nll_loss_helper([2, 3, 0, 7], "mean", nan, device)
self._nll_loss_helper([2, 3, 5, 0], "mean", nan, device)
self._nll_loss_helper([2, 3, 5, 7, 0], "mean", nan, device)
def test_nll_loss_empty_tensor_reduction_sum(self, device):
zero = torch.tensor(0, device=device)
self._nll_loss_helper([0, 3], "sum", zero, device)
self._nll_loss_helper([0, 3, 5, 7], "sum", zero, device)
self._nll_loss_helper([2, 3, 0, 7], "sum", zero, device)
self._nll_loss_helper([2, 3, 5, 0], "sum", zero, device)
self._nll_loss_helper([2, 3, 5, 7, 0], "sum", zero, device)
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_total_weight_is_zero(self, device):
def helper(input_size):
input = torch.ones(input_size, requires_grad=True, device=device)
num_channels = input_size[1]
target_size = (input_size[0], ) + tuple(input_size[2:])
target = torch.zeros(target_size, dtype=torch.long, device=device)
weight = torch.zeros([num_channels], device=device)
self.assertEqual(F.nll_loss(input, target, weight, reduction="sum").item(), 0.)
self.assertEqual(F.nll_loss(input, target, weight, reduction="mean").item(), float("nan"))
self.assertEqual(F.nll_loss(input, target, weight, reduction="none"), torch.zeros(target.shape, device=device))
helper([2, 3])
helper([2, 3, 5, 7])
helper([2, 3, 5, 7, 9])
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_all_ignored(self, device):
def helper(input_size):
input = torch.ones(input_size, device=device)
num_channels = input_size[1]
target_size = (input_size[0], ) + tuple(input_size[2:])
target = torch.zeros(target_size, dtype=torch.long, device=device)
self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction="sum").item(), 0)
self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction="mean").item(), float("nan"))
self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction="none"), torch.zeros(target.shape, device=device))
helper([2, 3])
helper([2, 3, 5, 7])
helper([2, 3, 5, 7, 9])
    def test_nll_loss_byte_target_matches_long(self, device):
        """NLLLoss with a uint8 target must match the int64 target results.

        Compares both the loss value and the gradient w.r.t. the logits for
        every reduction mode.
        """
        N, C = 10, 4
        input = torch.randn(N, C, device=device, requires_grad=True)
        target = torch.empty(N, dtype=torch.long, device=device).random_(0, C)
        def compute_result_and_gradient(reduction, target_dtype):
            # Use a fresh leaf so each dtype run gets an independent gradient.
            input_ = input.detach()
            input_.requires_grad_()
            prob = F.log_softmax(input_, dim=-1)
            loss = nn.NLLLoss(reduction=reduction)
            result = loss(prob, target.to(target_dtype))
            result.sum().backward()
            return result, input_.grad
        for reduction in ["none", "mean", "sum"]:
            result_long, grad_long = compute_result_and_gradient(reduction, torch.long)
            result_byte, grad_byte = compute_result_and_gradient(reduction, torch.uint8)
            self.assertEqual(result_long, result_byte)
            self.assertEqual(grad_long, grad_byte)
    def test_cross_entropy_loss_prob_target_all_reductions(self, device):
        """CrossEntropyLoss with probability targets must match the reference
        implementation for every reduction, with and without class weights."""
        # Test with k-dimensional loss.
        for k in range(5):
            N, C = 5, 4
            other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
            input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            target = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            weight = torch.randn(C, device=device).abs()
            for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
                m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
                output = m(input, target)
                output_ref = loss_reference_fns['CrossEntropyLoss'](
                    input, target, reduction=reduction, weight=w)
                self.assertEqual(output, output_ref)
def test_cross_entropy_loss_prob_target_unit_weights(self, device):
# Test with k-dimensional loss.
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
for reduction in ['none', 'mean', 'sum']:
# Ensure result with unit weights is equivalent to result without weights.
m = torch.nn.CrossEntropyLoss(reduction=reduction)
unit_weight = torch.ones(C, device=device, dtype=target.dtype)
m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)
output = m(input, target)
output_unit = m_unit(input, target)
self.assertEqual(output, output_unit)
def test_cross_entropy_loss_index_target_unit_weights(self, device):
# Test with k-dimensional loss.
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
for reduction in ['none', 'mean', 'sum']:
# Ensure result with unit weights is equivalent to result without weights.
m = torch.nn.CrossEntropyLoss(reduction=reduction)
unit_weight = torch.ones(C, device=device, dtype=input.dtype)
m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)
output = m(input, target)
output_unit = m_unit(input, target)
self.assertEqual(output, output_unit)
def test_cross_entropy_loss_one_hot_target(self, device):
# Test with k-dimensional loss.
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
weight = torch.randn(C, device=device).abs()
# Get one-hot representation of the target.
target_one_hot = F.one_hot(target, num_classes=C).to(input.dtype)
# Need to put the C dim at index 1.
target_one_hot = target_one_hot.permute(0, -1, *range(1, target_one_hot.dim() - 1))
for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
# Skip this case for now because soft and hard label CE are not consistent
# in the way they apply class weights (see issue #61309).
if reduction == 'mean' and weight is not None:
continue
# Ensure loss computed with class indices matches loss
# computed with one-hot class probs.
m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
output = m(input, target)
output_one_hot = m(input, target_one_hot)
self.assertEqual(output, output_one_hot)
def test_cross_entropy_label_smoothing_errors(self, device):
N, C = 3, 4
input_args = [
(torch.randn((N, C), device=device), torch.arange(0, C, device=device)),
(torch.randn((N, C), device=device), torch.randn(N, C, device=device))
]
for input_arg in input_args:
loss = nn.CrossEntropyLoss(label_smoothing=1.2)
with self.assertRaisesRegex(RuntimeError,
r"label_smoothing must be between 0\.0"):
loss(*input_arg)
    def test_cross_entropy_label_smoothing_consistent_index_target_and_probs(self, device):
        """Label smoothing with index targets must equal manually smoothed prob targets."""
        N, C = 10, 4
        ks = range(5)
        reductions = ['none', 'mean', 'sum']
        label_smoothings = [0.05, 0.15]
        for k, reduction, label_smoothing in product(ks, reductions, label_smoothings):
            other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
            input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
            # construct target probability that should have the same result as label_smoothing
            target_proba = F.one_hot(target, num_classes=C)
            # Need to put the C dim at index 1.
            target_proba = target_proba.permute(0, -1, *range(1, target_proba.dim() - 1))
            target_mask = (target_proba == 1)
            target_proba = target_proba.to(dtype=input.dtype)
            # y_k^ls = y_k * (1 - label_smoothing) + label_smoothing / n_classes
            # Apply the smoothing formula to the one-hot representation in place.
            target_proba.masked_fill_(target_mask, 1 - label_smoothing + label_smoothing / C)
            target_proba.masked_fill_(~target_mask, label_smoothing / C)
            loss = nn.CrossEntropyLoss(reduction=reduction)
            output_with_prob = loss(input, target_proba)
            loss = nn.CrossEntropyLoss(
                reduction=reduction, label_smoothing=label_smoothing)
            output_with_index = loss(input, target)
            self.assertEqual(output_with_prob, output_with_index,
                             rtol=1e-07, atol=1e-05)
    def test_cross_entropy_label_smoothing_with_probs(self, device):
        """label_smoothing with probability targets must equal manual smoothing."""
        N, C = 10, 4
        ks = range(5)
        reductions = ['none', 'mean', 'sum']
        label_smoothings = [0.05, 0.15]
        # Test with k-dimensional loss.
        for k, label_smoothing in product(ks, label_smoothings):
            other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
            input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            # NOTE(review): this target is a log-softmax (log-probabilities, not a
            # probability distribution). The smoothing identity checked below is
            # linear in the target, so the comparison still holds — confirm that
            # log-probabilities rather than probabilities are intended here.
            target = F.log_softmax(torch.randn(N, C, *other_dims, device=device), dim=1)
            for reduction in reductions:
                # use with label_smoothing
                loss = nn.CrossEntropyLoss(reduction=reduction, label_smoothing=label_smoothing)
                output_with_smoothing = loss(input, target)
                # manually smoothing target
                # class_proba^ls = class_proba * (1 - label_smoothing) +
                #                  label_smoothing / n_classes
                target_with_smoothing = target * (1 - label_smoothing) + label_smoothing / C
                loss = nn.CrossEntropyLoss(reduction=reduction)
                output_with_manual_smoothing = loss(input, target_with_smoothing)
                self.assertEqual(output_with_smoothing, output_with_manual_smoothing)
    def test_cross_entropy_label_smoothing_weight_ignore_indices(self, device):
        """Ignored target entries must not affect label-smoothed cross entropy.

        `inp1` and `inp2` differ only in the row whose target is the
        ignore_index, so their losses must be equal; with reductions the
        denominator must also exclude the ignored row.
        """
        reductions = ['none', 'sum', 'mean']
        label_smoothings = [0.05, 0.15]

        weight = torch.tensor([0.3, 0.6], device=device)
        inp1 = torch.tensor([[0.3, 0.4], [1, 2]], device=device)
        inp2 = torch.tensor([[0.3, 0.6], [1, 2]], device=device)

        targ_default_ignore_index = torch.tensor([-100, 1], device=device)
        targ_negative_ignore_index = torch.tensor([-2, 1], device=device)
        targ_positive_ignore_index = torch.tensor([2, 1], device=device)

        # NOTE: the loop rebinds `weight`, shadowing the tensor above; the
        # (None, weight) tuple is evaluated once before the loop, so this is safe.
        for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, weight)):
            def check_equal(loss, inp_targ_1, inp_targ_2):
                inp1, targ1 = inp_targ_1
                inp2, targ2 = inp_targ_2
                l1 = loss(inp1, targ1)
                l2 = loss(inp2, targ2)
                self.assertEqual(l1, l2)

            # Default ignore_index
            loss = nn.CrossEntropyLoss(reduction=reduction,
                                       label_smoothing=label_smoothing,
                                       weight=weight)
            check_equal(loss, (inp1, targ_default_ignore_index), (inp2, targ_default_ignore_index))
            if reduction != 'none':
                # Check that we correctly tally the denominator for `mean`
                # i.e. we don't count the ignored_idx at all.
                check_equal(loss, (inp1, targ_default_ignore_index), (inp2[1:], targ_default_ignore_index[1:]))

            # negative ignore_index
            loss = nn.CrossEntropyLoss(reduction=reduction,
                                       label_smoothing=label_smoothing,
                                       ignore_index=-2,
                                       weight=weight)
            check_equal(loss, (inp1, targ_negative_ignore_index), (inp2, targ_negative_ignore_index))
            if reduction != 'none':
                # Check that we correctly tally the denominator for `mean`
                # i.e. we don't count the ignored_idx at all.
                check_equal(loss, (inp1, targ_negative_ignore_index), (inp2[1:], targ_negative_ignore_index[1:]))

            # positive ignore_index
            loss = nn.CrossEntropyLoss(reduction=reduction,
                                       label_smoothing=label_smoothing,
                                       ignore_index=2,
                                       weight=weight)
            check_equal(loss, (inp1, targ_positive_ignore_index), (inp2, targ_positive_ignore_index))
            if reduction != 'none':
                # Check that we correctly tally the denominator for `mean`
                # i.e. we don't count the ignored_idx at all.
                check_equal(loss, (inp1, targ_positive_ignore_index), (inp2[1:], targ_positive_ignore_index[1:]))
def test_softshrink_negative(self, device):
    """Softshrink constructed with a negative lambda must raise at call time."""
    data = torch.randn(5, device=device, requires_grad=True)
    shrink = torch.nn.Softshrink(-1)
    expected_msg = r'lambda must be greater or equal to 0, but found to be -1\.'
    with self.assertRaisesRegex(RuntimeError, expected_msg):
        shrink(data)
def test_fold(self, device):
    """Check F.fold gradients via forward/reverse-mode gradcheck, and bfloat16
    parity against float32 on CPU."""
    def test_dtype(fn, input, dtype):
        # Run `fn` in `dtype` and in float32, then compare outputs and grads.
        input = input.detach().clone().to(dtype=dtype).requires_grad_(True)
        input2 = input.detach().clone().float().requires_grad_(True)
        out = fn(input)
        out.sum().backward()
        out2 = fn(input2)
        out2.sum().backward()
        self.assertEqual(out.dtype, dtype)
        self.assertEqual(input.grad.dtype, dtype)
        # Loose absolute tolerance: bfloat16 only has ~3 decimal digits.
        self.assertEqual(out, out2.to(dtype=dtype), atol=0.05, rtol=0)
        self.assertEqual(input.grad, input2.grad.to(dtype=dtype))

    def func(x):
        return F.fold(x, output_size=(4, 5), kernel_size=(2, 2))

    # Multiple fixed seeds keep the check deterministic while still sampling
    # several random inputs.
    seeds = (44, 83, 71, 25, 999)
    for sd in seeds:
        torch.manual_seed(sd)
        x = torch.randn(1, 12, 12, device=device, requires_grad=True)
        gradcheck(func, [x], check_forward_ad=True)
        gradgradcheck(func, [x], check_fwd_over_rev=True)
        if device == 'cpu':
            test_dtype(func, x, torch.bfloat16)
def test_logsigmoid_out(self, device):
    """F.logsigmoid with an `out=` tensor must match the out-free result."""
    # Passing `out=` isn't actually documented, but used to be broken:
    # https://github.com/pytorch/pytorch/issues/36499
    src = torch.randn(2, 3, device=device).t()
    reference = F.logsigmoid(src)
    empty_out = torch.randn(0, device=device)
    self.assertEqual(reference, F.logsigmoid(src, out=empty_out))
    noncontig_out = torch.randn(2, 3, device=device).t()
    self.assertEqual(reference, F.logsigmoid(src, out=noncontig_out))
def test_maxpool3d_non_square_backward(self, device):
    """Every input position must receive gradient from a 1x1x1 max_pool3d."""
    # A previous CUDA routine of this backward computed the kernel launch grid
    # with the last two dimensions interchanged, so the tail along the longer
    # dim was ignored.  Stretch each dimension in turn and check all grads.
    for long_dim in (2, 3, 4):
        dims = [32, 32, 32, 32]
        dims[long_dim] = 256
        inp = torch.randn(tuple(dims), device=device, requires_grad=True)
        F.max_pool3d(inp, kernel_size=(1, 1, 1)).sum().backward()
        self.assertEqual(inp.grad, torch.ones_like(inp.grad))
# Check that clip_grad_norm_ raises an error if the total norm of the
# parameters' gradients is non-finite
def test_clip_grad_norm_error_if_nonfinite(self, device):
    """clip_grad_norm_ must raise exactly when the total norm is nonfinite
    and error_if_nonfinite=True, and must leave grads untouched on error."""
    norms_pos = [0.1, 1, 2, 3.5, inf]
    norms_neg = [-0.1, -1, -2, -3.5]
    norms_except_0 = norms_pos + norms_neg
    norms_all = norms_except_0 + [0]

    # Each entry in test_cases has the following values, in this order:
    #
    # grad_only_one_elem    If True, only one element of the parameter's
    #                       gradient is set to the scalar grad, and the
    #                       rest of the elements are 0. If False, all grad
    #                       elements are equal to the scalar.
    #
    # prefix_finite_grad_param  If True, prefix a parameter that has a grad
    #                           of 1.
    #
    # scalars           Scalars to use as the parameter's grad, through
    #                   multiplication
    #
    # norms_nonfinite   Norm types that should produce nonfinite total norm
    #
    # norms_finite      Norm types that should produce finite total norm
    test_cases = [
        # Test errors from an infinite grad
        (False, False, [inf, -inf], norms_except_0, [0]),
        (False, True, [inf, -inf], norms_pos, norms_neg + [0]),
        (True, False, [inf, -inf], norms_pos, norms_neg + [0]),
        (True, True, [inf, -inf], norms_pos, norms_neg + [0]),

        # Test errors from a NaN grad
        (False, False, [nan], norms_except_0, [0]),
        (False, True, [nan], norms_except_0, [0]),
        (True, False, [nan], norms_except_0, [0]),
        (True, True, [nan], norms_except_0, [0]),

        # Test a grad that should never error
        (False, False, [2e22, -2e22], [], norms_all),
        (False, True, [2e22, -2e22], [], norms_all),
        (True, False, [2e22, -2e22], [], norms_all),
        (True, True, [2e22, -2e22], [], norms_all),

        # Test a grad that will overflow to inf for only some norm orders
        (False, False, [2e200, -2e200], [3.5, 2, -2, -3.5], [inf, 1, 0.1, 0, -1, -0.1]),
        (False, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
        (True, False, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
        (True, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
    ]

    def gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param):
        # Build a parameter list whose grads are derived from `scalar`.
        param = torch.ones(10, dtype=torch.float64, device=device, requires_grad=True)

        if grad_only_one_elem:
            param[1].mul(scalar).sum().backward()
        else:
            param.mul(scalar).sum().backward()

        if prefix_finite_grad_param:
            prefix_param = torch.ones(1, dtype=torch.float64, device=device, requires_grad=True)
            prefix_param.mul(1).sum().backward()
            parameters = [prefix_param, param]
        else:
            parameters = [param]

        return parameters

    def run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, is_norm_nonfinite):
        # BUGFIX: a stray comma after the first f-string used to turn `msg`
        # into a 2-tuple of strings; build one concatenated message instead.
        msg = (
            f'norm_type: {norm_type}, '
            f'error_if_nonfinite: {error_if_nonfinite}, '
            f'scalar: {scalar}, '
            f'grad_only_one_elem: {grad_only_one_elem}, '
            f'prefix_finite_grad_param: {prefix_finite_grad_param}, '
            f'is_norm_nonfinite: {is_norm_nonfinite}')

        parameters = gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param)

        # Should only throw an error if the total norm is expected to be
        # nonfinite and `error_if_nonfinite=True`
        if is_norm_nonfinite and error_if_nonfinite:
            error_msg = f'The total norm of order {float(norm_type)} for gradients'

            grads_before = [p.grad.clone() for p in parameters]

            with self.assertRaisesRegex(RuntimeError, error_msg, msg=msg):
                clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=True)

            # Grad should not change if error is thrown
            grads_after = [p.grad for p in parameters]
            self.assertEqual(grads_before, grads_after, msg=msg)
        else:
            clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=error_if_nonfinite)

    for grad_only_one_elem, prefix_finite_grad_param, scalars, norms_nonfinite, norms_finite in test_cases:
        for error_if_nonfinite in [False, True]:
            for norm_type, scalar in product(norms_nonfinite, scalars):
                run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, True)

            for norm_type, scalar in product(norms_finite, scalars):
                run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, False)
@onlyCUDA
@deviceCountAtLeast(2)
def test_clip_grad_norm_multi_device(self, devices):
    """clip_grad_norm_ over parameters spread across two devices must match
    the result for an identical single-device model."""
    class TestModel(nn.Module):
        def __init__(self):
            super(TestModel, self).__init__()
            self.layer1 = nn.Linear(10, 10)
            self.layer2 = nn.Linear(10, 10)

    # One model split across devices, one reference model on a single device.
    test_model = TestModel()
    test_model.layer1.to(devices[0])
    test_model.layer2.to(devices[1])
    ref_model = TestModel().to(devices[0])
    for norm_type in [2., math.inf]:
        # Give every parameter an all-ones grad before clipping.
        for p in test_model.parameters():
            p.grad = torch.ones_like(p)
        for p in ref_model.parameters():
            p.grad = torch.ones_like(p)
        norm = clip_grad_norm_(test_model.parameters(), 0.5, norm_type=norm_type)
        expected = clip_grad_norm_(ref_model.parameters(), 0.5, norm_type=norm_type)
        self.assertEqual(norm, expected)
        for p, pe in zip(test_model.parameters(), ref_model.parameters()):
            self.assertEqual(p.grad.to(devices[0]), pe.grad)
def test_elu_inplace_overlap(self, device):
    """In-place elu on internally overlapping memory must raise."""
    # expand() makes every row alias the same underlying storage.
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.elu(overlapping, inplace=True)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.elu_(overlapping)
# Merge into OpInfo?
@onlyNativeDeviceTypes
def test_elu_inplace_with_neg_alpha(self, device):
    """elu_/celu_ with a negative alpha cannot be differentiated in place;
    backward must ask for the out-of-place version."""
    for inplace_fn in (torch.nn.functional.elu_, torch.nn.functional.celu_):
        src = torch.tensor([-1., 1.], device=device, requires_grad=True)
        result = inplace_fn(src.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            result.backward(torch.ones(2, device=device))
@expectedFailureMeta  # https://github.com/pytorch/pytorch/issues/54897
def test_hardswish_inplace_overlap(self, device):
    """In-place hardswish on internally overlapping memory must raise."""
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.hardswish(overlapping, inplace=True)
def test_silu_inplace_overlap(self, device):
    """In-place silu on internally overlapping memory must raise."""
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.silu(overlapping, inplace=True)
@onlyNativeDeviceTypes
def test_mish_inplace_overlap(self, device):
    """In-place mish on internally overlapping memory must raise."""
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.mish(overlapping, inplace=True)
def test_softplus_inplace_overlap(self, device):
    """softplus writing into its own overlapping input must raise."""
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.softplus(overlapping, out=overlapping)
def test_softplus_low_threshold(self, device):
    """Gradcheck Softplus with a threshold low enough to exercise the
    linear branch of the op."""
    model = torch.nn.Softplus(threshold=1).double()
    probe = torch.tensor(0.9, device=device, dtype=torch.double,
                         requires_grad=True)
    _ = model(probe)
    torch.autograd.gradcheck(model, probe)
def test_softshrink_inplace_overlap(self, device):
    """softshrink writing into its own overlapping input must raise."""
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.softshrink(overlapping, out=overlapping)
def test_leaky_relu_inplace_overlap(self, device):
    """In-place leaky_relu on internally overlapping memory must raise."""
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.leaky_relu(overlapping, inplace=True)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.leaky_relu_(overlapping)
# Merge into OpInfo?
def test_leaky_relu_inplace_with_neg_slope(self, device):
    """leaky_relu_/rrelu_ with a negative slope cannot be differentiated in
    place; backward must ask for the out-of-place version."""
    cases = (
        (torch.nn.functional.leaky_relu_, (-2,)),
        (torch.nn.functional.rrelu_, (-5.0, 1.0)),
    )
    for inplace_fn, extra_args in cases:
        src = torch.tensor([-1., 1.], device=device, requires_grad=True)
        result = inplace_fn(src.clone(), *extra_args)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            result.backward(torch.ones(2, device=device))
# Merge into OpInfo?
def test_leaky_relu_inplace_with_zero_slope(self, device):
    """In-place leaky_relu with zero slope: gradient is 0 for x <= 0 and 1
    for x > 0, in both the default dtype and bfloat16."""
    for dtype_kwargs in ({}, {'dtype': torch.bfloat16}):
        src = torch.tensor([-2., 0., 2.], device=device,
                           requires_grad=True, **dtype_kwargs)
        out = torch.nn.functional.leaky_relu_(src.clone(), 0.0)
        out.backward(torch.ones(3, device=device))
        expected = torch.tensor([0., 0., 1.], device=device, **dtype_kwargs)
        self.assertEqual(src.grad, expected)
def test_threshold_inplace_overlap(self, device):
    """In-place threshold is allowed on overlapping memory because the op is
    idempotent — neither call should raise."""
    overlapping = torch.randn((1, 6), device=device).expand((6, 6))
    F.threshold(overlapping, 0.5, 0.5, inplace=True)
    F.threshold_(overlapping, 0.5, 0.5)
@onlyNativeDeviceTypes
def test_triplet_margin_with_distance_loss_default_parity(self, device):
    """Parity of the distance-agnostic triplet loss against the classic one
    under default distance."""
    # Test for `nn.TripletMarginWithDistanceLoss` and
    # `F.triplet_margin_with_distance_loss`.  Checks
    # for parity against the respective non-distance-agnostic
    # implementations of triplet margin loss (``nn.TripletMarginLoss`
    # and `F.triplet_margin_loss`) under *default args*.

    for extra_args in \
            itertools.product((0.5, 1, 1.5), (True, False), ('none', 'mean', 'sum')):
        kwargs = {'margin': extra_args[0], 'swap': extra_args[1], 'reduction': extra_args[2]}

        anchor = torch.randn(5, 10, device=device, requires_grad=True)
        positive = torch.randn(5, 10, device=device, requires_grad=True)
        negative = torch.randn(5, 10, device=device, requires_grad=True)

        # Test forward, functional
        expected = F.triplet_margin_loss(anchor, positive, negative, **kwargs)
        actual = F.triplet_margin_with_distance_loss(anchor, positive, negative, **kwargs)
        self.assertEqual(actual, expected, rtol=1e-6, atol=1e-6)

        # Test forward, module
        loss_ref = nn.TripletMarginLoss(**kwargs)
        loss_op = nn.TripletMarginWithDistanceLoss(**kwargs)
        self.assertEqual(loss_op(anchor, positive, negative),
                         loss_ref(anchor, positive, negative),
                         rtol=1e-6, atol=1e-6)

        # Test backward
        self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
            a, p, n, **kwargs), (anchor, positive, negative)))
        self.assertTrue(gradcheck(lambda a, p, n: loss_op(a, p, n),
                        (anchor, positive, negative)))
@onlyNativeDeviceTypes
def test_triplet_margin_with_distance_loss(self, device):
    """Functional vs module vs jit-traced parity for the distance-agnostic
    triplet loss across several distance functions and reductions."""
    # Test for parity between `nn.TripletMarginWithDistanceLoss` and
    # `F.triplet_margin_with_distance_loss`.
    pairwise_distance = nn.PairwiseDistance()

    def cosine_distance(x, y):
        return 1.0 - F.cosine_similarity(x, y)

    # A module, a named function and a lambda — all must be accepted.
    distance_functions = (pairwise_distance, cosine_distance,
                          lambda x, y: 1.0 - F.cosine_similarity(x, y))

    reductions = ('mean', 'none', 'sum')
    margins = (1.0, 1.5, 0.5)
    swaps = (True, False)

    for distance_fn, reduction, margin, swap \
            in itertools.product(distance_functions, reductions, margins, swaps):
        anchor = torch.randn(5, 10, device=device, requires_grad=True)
        positive = torch.randn(5, 10, device=device, requires_grad=True)
        negative = torch.randn(5, 10, device=device, requires_grad=True)

        # Test backward
        self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
            a, p, n, distance_function=distance_fn, reduction=reduction, margin=margin, swap=swap),
            (anchor, positive, negative)))
        loss_op = nn.TripletMarginWithDistanceLoss(distance_function=distance_fn,
                                                   reduction=reduction, margin=margin, swap=swap)
        self.assertTrue(gradcheck(lambda a, p, n: loss_op(
            a, p, n), (anchor, positive, negative)))
        traced_loss_op = torch.jit.trace(loss_op, (anchor, positive, negative))
        self.assertTrue(gradcheck(lambda a, p, n: traced_loss_op(
            a, p, n), (anchor, positive, negative)))

        # Test forward parity
        functional = F.triplet_margin_with_distance_loss(anchor, positive, negative,
                                                         distance_function=distance_fn,
                                                         reduction=reduction, margin=margin, swap=swap)
        modular = loss_op(anchor, positive, negative)
        traced = traced_loss_op(anchor, positive, negative)
        self.assertEqual(functional, modular, atol=1e-6, rtol=1e-6)
        self.assertEqual(traced, modular, atol=1e-6, rtol=1e-6)
def test_to_complex(self, device):
    """Module.to must convert parameter dtypes through complex types and
    warn (once) when moving to a complex dtype."""
    module = nn.Linear(3, 5).to(device)
    # .to(device) on an already-placed module returns the same object.
    self.assertIs(module, module.to(device))
    for dtype in (torch.cfloat, torch.cdouble, torch.float):
        module.to(dtype)
        self.assertIs(module.weight.dtype, dtype)
    with warnings.catch_warnings(record=True) as caught:
        # Trigger warning
        module.to(torch.cfloat)
        # Check warning occurs
        self.assertEqual(len(caught), 1)
        self.assertTrue("Complex modules are a new feature" in str(caught[-1].message))
@skipMeta
@dtypes(torch.float32, torch.float64)
def test_module_to_empty(self, device, dtype):
    """Round-trip a module between the meta device and a real device using
    Module.to_empty."""
    class MyModule(nn.Module):
        def __init__(self, in_features, out_features, device=None, dtype=None):
            super().__init__()
            factory_kwargs = {"device": device, "dtype": dtype}
            self.weight = nn.Parameter(torch.randn(in_features, out_features, **factory_kwargs))

        def forward(self, x):
            return x @ self.weight

    # Test meta module instantiation.
    input = torch.randn(5, 10, device=device, dtype=dtype)
    m = MyModule(10, 1, device='meta', dtype=dtype)
    m(input)

    # Test materializing meta module on a real device.
    m.to_empty(device=device)
    m(input)
    # to_empty leaves the weights uninitialized; initialize before reusing.
    with torch.no_grad():
        torch.nn.init.kaiming_uniform_(m.weight)
    m(input)

    # Test creating meta module from materialized module.
    m.to_empty(device='meta')
    m(input)
@skipMeta
def test_skip_init(self, device):
    """nn.utils.skip_init must place the module on the requested device
    while skipping weight initialization."""
    torch.manual_seed(1)
    initialized = torch.nn.Linear(5, 1)
    initialized.to(device)
    torch.manual_seed(1)
    uninitialized = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1, device=device)
    # Same placement...
    self.assertEqual(initialized.weight.device, uninitialized.weight.device)
    # ...but different values despite the identical seed, since init was skipped.
    self.assertFalse(torch.allclose(initialized.weight, uninitialized.weight))
def test_adaptive_pool_invalid(self, device):
    """Adaptive avg-pool modules must reject negative output_size entries."""
    cases = (
        (torch.nn.AdaptiveAvgPool1d, torch.randn(1, 1, 1, device=device), (-1,)),
        (torch.nn.AdaptiveAvgPool2d, torch.randn(1, 1, 1, 1, device=device), (-1, 0)),
        (torch.nn.AdaptiveAvgPool3d, torch.randn(1, 1, 1, 1, 1, device=device), (-1, 0, 2)),
    )
    for pool_cls, tensor, output_size in cases:
        with self.assertRaisesRegex(RuntimeError,
                                    r"elements of output_size must be greater than or equal to 0"):
            pool_cls(output_size)(tensor)
class TestModuleGlobalHooks(TestCase):
    """Tests for the module-global forward/backward hook registries in
    torch.nn.modules.module (register_module_forward_hook and friends)."""

    def tearDown(self):
        # Reset the global hook registries so one test cannot leak hooks
        # into the next.
        nn.modules.module._global_backward_hooks = OrderedDict()
        nn.modules.module._global_forward_hooks = OrderedDict()
        nn.modules.module._global_forward_pre_hooks = OrderedDict()

    def test_module_global_hooks(self):
        """Global forward/backward hooks fire for every module instance and
        accumulate the expected call counts as hooks are added and removed."""
        module = nn.Sigmoid

        module_1 = module()
        module_2 = module()
        module_3 = module()

        input = torch.ones(5, 5, requires_grad=True)

        counter = {
            'forwards': 0,
            'backwards': 0
        }

        def fw_hook(inc, h_module, input, output):
            self.assertIsInstance(input, tuple)
            self.assertTrue(isinstance(output, torch.Tensor))
            self.assertTrue(isinstance(h_module, module))
            self.assertEqual(input[0], torch.ones(5, 5))
            # sigmoid(1) == 1 / (1 + 1/e)
            self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
            counter['forwards'] += inc

        def bw_hook(inc, h_module, grad_input, grad_output):
            self.assertIsInstance(grad_input, tuple)
            self.assertIsInstance(grad_output, tuple)
            self.assertTrue(isinstance(h_module, module))
            self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
            counter['backwards'] += inc

        # One global forward hook -> 3 forwards counted for 3 modules.
        test_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(1, *args))

        module_1(input)
        module_2(input)
        module_3(input)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 0)

        test_bwd = nn.modules.module.register_module_backward_hook(
            lambda *args: bw_hook(1, *args))

        output_1 = module_1(input)
        output_2 = module_2(input)
        output_3 = module_3(input)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 0)

        output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
        output_2.backward(torch.ones(5, 5) * 2, retain_graph=False)
        output_3.backward(torch.ones(5, 5) * 2, retain_graph=False)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 3)

        # Re-running backward on a retained graph fires the hook again.
        output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 4)

        # Second forward hook adds 2 per call: 3 forwards * (1 + 2) = +9.
        test2_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(2, *args))

        output = module_1(input)
        output = module_2(input)
        output = module_3(input)
        self.assertEqual(counter['forwards'], 15)
        self.assertEqual(counter['backwards'], 4)

        test2_bwd = nn.modules.module.register_module_backward_hook(lambda *args: bw_hook(2, *args))

        module_1(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 18)
        self.assertEqual(counter['backwards'], 7)

        test2_bwd.remove()

        module_2(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 21)
        self.assertEqual(counter['backwards'], 8)

        test2_fwd.remove()

        module_3(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 22)
        self.assertEqual(counter['backwards'], 9)

        test_fwd.remove()
        test_bwd.remove()

    def test_module_global_hook_invalid_outputs(self):
        """A global backward hook returning the wrong number of gradients
        must raise."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)

        def bw_fail1(self, grad_input, grad_output):
            # Returns one grad too few.
            return grad_input[:-1]

        def bw_fail2(self, grad_input, grad_output):
            # Returns one grad too many.
            return grad_input + (torch.randn(2, 2),)

        with nn.modules.module.register_module_backward_hook(bw_fail1):
            with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
                module(input).sum().backward()

        with nn.modules.module.register_module_backward_hook(bw_fail2):
            with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
                module(input).sum().backward()

    def test_module_backward_global_hook_writeable(self):
        """A global backward hook may rewrite grad_input; the rewritten
        values must propagate to the leaf gradient."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.sigmoid(input)

        def bw_hook(module, grad_input, grad_output):
            for grad in grad_input:
                self.assertTrue(isinstance(grad, torch.Tensor))
            for grad in grad_output:
                self.assertTrue(isinstance(grad, torch.Tensor))
            # Double every incoming gradient.
            return tuple(gi * 2 for gi in grad_input)

        nn.modules.module.register_module_backward_hook(bw_hook)
        module(input).backward(torch.ones(5, 5))
        # d/dx sigmoid(x) == sigmoid(x) * (1 - sigmoid(x)), doubled by the hook.
        expected_grad = sig_x * (1 - sig_x) * 2
        self.assertEqual(input.grad, expected_grad)

    def test_module_global_forward_preforward_hook_writeable(self):
        """Global pre-forward and forward hooks may rewrite the module's
        input and output; both forward and backward must reflect it."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.sigmoid(input)

        def forward_pre_hook(m, input):
            # Replace the module input with relu(input).
            return torch.nn.functional.relu(input[0])

        def forward_hook(m, input, output):
            # Negate the module output.
            return -output

        nn.modules.module.register_module_forward_pre_hook(forward_pre_hook)
        nn.modules.module.register_module_forward_hook(forward_hook)
        output = module(input)
        expected_res = -torch.sigmoid(torch.nn.functional.relu(input))
        self.assertEqual(output, expected_res)
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        # relu zeroes the gradient wherever input <= 0.
        mask = (input > 0).double()
        expected_grad = -sig_x * (1 - sig_x) * 2 * mask
        self.assertEqual(input.grad, expected_grad)

    def test_module_forward_preforward_hook_removable(self):
        """
        This test is to test when multiple pre-forward hook functions can be
        registered successfully and used correctly, if the handle can be removable
        during the pre-forward hook function call.
        """
        module = nn.Sigmoid()

        def removable_hook(m, input):
            nonlocal handle
            handle.remove()
            return input

        def removable_hook_2(m, input):
            nonlocal handle_2
            handle_2.remove()
            return input

        handle = module.register_forward_pre_hook(removable_hook)
        handle_2 = module.register_forward_pre_hook(removable_hook_2)

        # make sure hook register is successful
        self.assertEqual(len(handle.hooks_dict_ref()), 2)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 2)

        input = torch.randn(2, 2)
        output = module(input)
        self.assertEqual(torch.sigmoid(input), output)

        # make sure hook removal is successful
        self.assertFalse(handle.id in handle.hooks_dict_ref())
        self.assertFalse(handle_2.id in handle.hooks_dict_ref())
        self.assertEqual(len(handle.hooks_dict_ref()), 0)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 0)

    def test_module_forward_forward_hook_removable(self):
        """
        This test is to test when multiple forward hook functions can be registered
        successfully and used correctly, if the handle can be removable during the
        forward hook function call.
        """
        module = nn.Sigmoid()

        def removable_hook(m, input, output):
            nonlocal handle
            handle.remove()
            return output

        def removable_hook_2(m, input, output):
            nonlocal handle_2
            handle_2.remove()
            return output

        handle = module.register_forward_hook(removable_hook)
        handle_2 = module.register_forward_hook(removable_hook_2)

        # make sure hook register is successful
        self.assertEqual(len(handle.hooks_dict_ref()), 2)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 2)

        input = torch.randn(2, 2)
        output = module(input)
        self.assertEqual(torch.sigmoid(input), output)

        # make sure hook removal is successful
        self.assertFalse(handle.id in handle.hooks_dict_ref())
        self.assertFalse(handle_2.id in handle.hooks_dict_ref())
        self.assertEqual(len(handle.hooks_dict_ref()), 0)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 0)

    def test_global_and_local_hooks_order(self):
        """Global hooks must run before module-local hooks of the same kind."""
        module = nn.Sigmoid()

        global_forward_pre_called = False
        local_forward_pre_called = False
        global_forward_called = False
        local_forward_called = False
        global_backward_called = False
        local_backward_called = False

        def global_forward_pre_hook(m, input):
            nonlocal global_forward_pre_called
            self.assertTrue(not local_forward_pre_called)
            global_forward_pre_called = True
            return input

        def local_forward_pre_hook(m, input):
            nonlocal local_forward_pre_called
            self.assertTrue(global_forward_pre_called)
            local_forward_pre_called = True
            return input

        def global_forward_hook(m, input, output):
            nonlocal global_forward_called
            self.assertTrue(not local_forward_called)
            global_forward_called = True
            return output

        def local_forward_hook(m, input, output):
            nonlocal local_forward_called
            self.assertTrue(global_forward_called)
            local_forward_called = True
            return output

        def global_backward_hook(m, input, output):
            nonlocal global_backward_called
            self.assertTrue(not local_backward_called)
            global_backward_called = True
            return input

        def local_backward_hook(m, input, output):
            nonlocal local_backward_called
            self.assertTrue(global_backward_called)
            local_backward_called = True
            return input

        input = torch.randn(5, 5, requires_grad=True)
        nn.modules.module.register_module_forward_pre_hook(global_forward_pre_hook)
        module.register_forward_pre_hook(local_forward_pre_hook)
        nn.modules.module.register_module_forward_hook(global_forward_hook)
        module.register_forward_hook(local_forward_hook)
        nn.modules.module.register_module_backward_hook(global_backward_hook)
        module.register_backward_hook(local_backward_hook)

        output = module(input)
        self.assertTrue(local_forward_called and local_forward_pre_called and global_forward_called and global_forward_pre_called)

        output.backward(torch.ones(5, 5), retain_graph=True)
        self.assertTrue(local_backward_called and global_backward_called)
class LazyModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
    """Minimal concrete lazy module used as a fixture by the lazy-module tests."""
class TestLazyModules(TestCase):
@suppress_warnings
def test_lazy_module_parameter(self):
    """State-dict round trips for a module holding an UninitializedParameter."""
    module = LazyModule()
    module.register_parameter('test_param', UninitializedParameter())
    self.assertTrue(module.has_uninitialized_params())
    state_dict = module.state_dict()
    self.assertIsInstance(state_dict['test_param'], UninitializedParameter)
    new_module = LazyModule()
    # An error is raised when there is an attempt to replace an existing parameter
    # with an uninitialized one
    new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
    with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
        new_module.load_state_dict(state_dict)
    # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
    new_module = LazyModule()
    new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
    module.load_state_dict(new_module.state_dict())
    self.assertEqual(module.test_param, torch.ones((5, 5)))
    # Uninitialized parameters are left unchanged
    module = LazyModule()
    module.register_parameter('test_param', UninitializedParameter())
    self.assertTrue(module.has_uninitialized_params())
    new_module = LazyModule()
    new_module.register_parameter('test_param', UninitializedParameter())
    module.load_state_dict(new_module.state_dict())
    self.assertTrue(module.has_uninitialized_params())
@suppress_warnings
def test_lazy_module_buffer(self):
    """State-dict round trips for a module holding an UninitializedBuffer."""
    module = LazyModule()
    module.register_buffer('test_buffer', UninitializedBuffer())
    self.assertTrue(module.has_uninitialized_params())
    state_dict = module.state_dict()
    self.assertIsInstance(state_dict['test_buffer'], UninitializedBuffer)
    new_module = LazyModule()
    # An error is raised when there is an attempt to replace an existing buffer
    # with an uninitialized one
    new_module.register_buffer('test_buffer', torch.ones(5, 5))
    with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
        new_module.load_state_dict(state_dict)
    # Uninitialized buffers are overriden when the state dict to be loaded contains a valid one
    new_module = LazyModule()
    new_module.register_buffer('test_buffer', torch.ones(5, 5))
    module.load_state_dict(new_module.state_dict())
    self.assertEqual(module.test_buffer, torch.ones((5, 5)))
    # Uninitialized buffers are left unchanged when both sides are uninitialized.
    module = LazyModule()
    module.register_buffer('test_buffer', UninitializedBuffer())
    self.assertTrue(module.has_uninitialized_params())
    new_module = LazyModule()
    new_module.register_buffer('test_buffer', UninitializedBuffer())
    # BUGFIX: the state dict was loaded twice here; once is sufficient.
    module.load_state_dict(new_module.state_dict())
    self.assertTrue(module.has_uninitialized_params())
@suppress_warnings
def test_lazy_module_jit_param(self):
    """Scripting a lazy module with an uninitialized parameter must be rejected."""
    lazy = LazyModule()
    lazy.register_parameter('test_param', UninitializedParameter())
    self.assertTrue(lazy.has_uninitialized_params())
    with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
        torch.jit.script(lazy)
@suppress_warnings
def test_lazy_module_jit_buffer(self):
    """Scripting a lazy module with an uninitialized buffer must be rejected."""
    lazy = LazyModule()
    lazy.register_buffer('test_buffer', UninitializedBuffer())
    self.assertTrue(lazy.has_uninitialized_params())
    with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
        torch.jit.script(lazy)
@suppress_warnings
def test_lazy_share_memory_param(self):
    """share_memory() must be rejected while a parameter is uninitialized."""
    lazy = LazyModule()
    lazy.register_parameter('test_param', UninitializedParameter())
    self.assertTrue(lazy.has_uninitialized_params())
    with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
        lazy.share_memory()
@suppress_warnings
def test_lazy_share_memory_buffer(self):
    """share_memory() must be rejected while a buffer is uninitialized."""
    lazy = LazyModule()
    lazy.register_buffer('test_buffer', UninitializedBuffer())
    self.assertTrue(lazy.has_uninitialized_params())
    with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
        lazy.share_memory()
@suppress_warnings
def test_linear(self):
    """LazyLinear must materialize into a regular Linear on the first forward."""
    module = nn.LazyLinear(10)
    self.assertIsInstance(module.weight, UninitializedParameter)
    self.assertIsInstance(module.bias, UninitializedParameter)
    input = torch.ones(5, 5)
    module(input)
    # After one forward the instance has mutated into the materialized class.
    self.assertIsInstance(module, nn.Linear)
    self.assertNotIsInstance(module, nn.LazyLinear)
    self.assertTrue(module.weight.shape == (10, 5))
    self.assertTrue(module.bias.shape == (10,))
    y = module(input)
    self.assertTrue(torch.equal(torch.nn.functional.linear(input, module.weight, module.bias), y))
@suppress_warnings
def test_lazy_linear_pickle(self):
    """LazyLinear must survive pickling both before and after materialization."""
    module = nn.LazyLinear(10)
    self.assertIsInstance(module.weight, UninitializedParameter)
    self.assertIsInstance(module.bias, UninitializedParameter)
    # Pickling an unmaterialized module keeps it lazy.
    module = pickle.loads(pickle.dumps(module))
    self.assertIsInstance(module, nn.LazyLinear)
    self.assertIsInstance(module.weight, UninitializedParameter)
    self.assertIsInstance(module.bias, UninitializedParameter)
    input = torch.ones(5, 5)
    module(input)  # fully materialized
    # Pickling after materialization yields a plain Linear.
    new_module = pickle.loads(pickle.dumps(module))
    self.assertIsInstance(new_module, nn.Linear)
    self.assertNotIsInstance(new_module, nn.LazyLinear)
    self.assertTrue(new_module.weight.shape == (10, 5))
    self.assertNotIsInstance(new_module.weight, UninitializedParameter)
    self.assertTrue(new_module.bias.shape == (10,))
    self.assertNotIsInstance(new_module.bias, UninitializedParameter)
@suppress_warnings
def test_linear_state(self):
    """Loading a materialized Linear state dict into a LazyLinear initializes
    its parameters; the reverse direction must fail."""
    module = nn.Linear(5, 10)
    lazy_module = nn.LazyLinear(10)
    lazy_module.load_state_dict(module.state_dict())
    # Parameters have been initialized but the module won't become a full
    # Linear one until the first iteration. This is due to
    # limitations on the state_dict loading logic
    self.assertFalse(lazy_module.has_uninitialized_params())
    self.assertTrue(lazy_module.weight.shape == (10, 5))
    self.assertTrue(lazy_module.bias.shape == (10,))

    module = nn.Linear(5, 10)
    lazy_module = nn.LazyLinear(10)
    with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
        module.load_state_dict(lazy_module.state_dict())
def _check_lazy_conv(self, cls, lazy_cls, func, init_args, input_shape,
                     expected_weight_shape, expected_bias_shape):
    """Shared helper: verify `lazy_cls(*init_args)` materializes into `cls`
    on the first forward, with the expected weight/bias shapes, and that its
    output matches the functional `func`."""
    module = lazy_cls(*init_args)
    self.assertIsInstance(module.weight, UninitializedParameter)
    if module.bias is not None:
        self.assertIsInstance(module.bias, UninitializedParameter)
    input = torch.ones(*input_shape)
    module(input)
    self.assertIsInstance(module, cls)
    self.assertNotIsInstance(module, lazy_cls)
    self.assertEqual(module.weight.shape, expected_weight_shape)
    if module.bias is not None:
        self.assertEqual(module.bias.shape, expected_bias_shape)
    y = module(input)
    self.assertTrue(torch.equal(func(input, module.weight, module.bias), y))
def _check_lazy_conv_pickle(self, cls, lazy_cls, init_args, input_shape,
                            expected_weight_shape, expected_bias_shape):
    """Shared helper: verify a lazy conv survives pickling both before and
    after materialization."""
    module = lazy_cls(*init_args)
    self.assertIsInstance(module.weight, UninitializedParameter)
    if module.bias is not None:
        self.assertIsInstance(module.bias, UninitializedParameter)
    # Pickling an unmaterialized module keeps it lazy.
    module = pickle.loads(pickle.dumps(module))
    self.assertIsInstance(module, lazy_cls)
    self.assertIsInstance(module.weight, UninitializedParameter)
    if module.bias is not None:
        self.assertIsInstance(module.bias, UninitializedParameter)
    input = torch.ones(*input_shape)
    module(input)  # fully materialized
    # Pickling after materialization yields the concrete conv class.
    new_module = pickle.loads(pickle.dumps(module))
    self.assertIsInstance(new_module, cls)
    self.assertNotIsInstance(new_module, lazy_cls)
    self.assertEqual(new_module.weight.shape, expected_weight_shape)
    self.assertNotIsInstance(new_module.weight, UninitializedParameter)
    if new_module.bias is not None:
        self.assertEqual(new_module.bias.shape, expected_bias_shape)
        self.assertNotIsInstance(new_module.bias, UninitializedParameter)
    def _check_lazy_conv_state(self, gen_module, gen_lazy_module,
                               expected_weight_shape, expected_bias_shape):
        # Helper: loading a materialized state dict into a lazy module must
        # initialize its parameters; the reverse direction must fail because
        # an uninitialized state dict carries no shapes.
        module = gen_module()
        lazy_module = gen_lazy_module()
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a full
        # Conv one until the first iteration. This is due to
        # limitations on the state_dict loading logic
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertEqual(lazy_module.weight.shape, expected_weight_shape)
        if lazy_module.bias is not None:
            self.assertEqual(lazy_module.bias.shape, expected_bias_shape)
        # Loading an uninitialized state dict into a regular module must raise.
        module = gen_module()
        lazy_module = gen_lazy_module()
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())
def test_lazy_pre_forward_hook(self):
"""
This test is to test whether lazymodule can register other pre-forward hook
functions successfully.
"""
class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
def __init__(self):
super().__init__()
def initialize_parameters(self, input):
return None
def forward(self, input):
return input
def hook_function(module, input):
return input[0] + 1
module = TestModule()
module.register_forward_pre_hook(hook_function)
output = module(torch.zeros(2, 2))
self.assertEqual(output, torch.ones(2, 2))
def test_lazy_forward_hook(self):
"""
This test is to test whether lazymodule can register other forward hook
functions successfully.
"""
class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
def __init__(self):
super().__init__()
def initialize_parameters(self, input):
return None
def forward(self, input):
return input
def hook_function(module, input, output):
return input[0] + 1
module = TestModule()
module.register_forward_hook(hook_function)
output = module(torch.zeros(2, 2))
self.assertEqual(output, torch.ones(2, 2))
    @suppress_warnings
    def test_lazy_conv1d(self):
        # LazyConv1d(32, 2) fed (192, 16, 50) must materialize weight (32, 16, 2), bias (32,).
        self._check_lazy_conv(nn.Conv1d, nn.LazyConv1d, torch.nn.functional.conv1d,
                              (32, 2), (192, 16, 50), (32, 16, 2), (32,))
    @suppress_warnings
    def test_lazy_conv1d_pickle(self):
        # Pickle round-trip before/after LazyConv1d materialization.
        self._check_lazy_conv_pickle(nn.Conv1d, nn.LazyConv1d, (32, 2), (192, 16, 50),
                                     (32, 16, 2), (32,))
    @suppress_warnings
    def test_lazy_conv1d_state(self):
        # State-dict interchange between Conv1d and LazyConv1d.
        self._check_lazy_conv_state(lambda: nn.Conv1d(16, 32, 2),
                                    lambda: nn.LazyConv1d(32, 2),
                                    (32, 16, 2), (32,))
    @suppress_warnings
    def test_lazy_conv2d(self):
        # LazyConv2d(32, 2) fed (192, 16, 8, 6) must materialize weight (32, 16, 2, 2), bias (32,).
        self._check_lazy_conv(nn.Conv2d, nn.LazyConv2d, torch.nn.functional.conv2d,
                              (32, 2), (192, 16, 8, 6), (32, 16, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv2d_pickle(self):
        # Pickle round-trip before/after LazyConv2d materialization.
        self._check_lazy_conv_pickle(nn.Conv2d, nn.LazyConv2d, (32, 2), (192, 16, 8, 6),
                                     (32, 16, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv2d_state(self):
        # State-dict interchange between Conv2d and LazyConv2d.
        self._check_lazy_conv_state(lambda: nn.Conv2d(16, 32, 2),
                                    lambda: nn.LazyConv2d(32, 2),
                                    (32, 16, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv3d(self):
        # LazyConv3d(32, 2) fed (192, 16, 8, 7, 6) must materialize weight (32, 16, 2, 2, 2), bias (32,).
        self._check_lazy_conv(nn.Conv3d, nn.LazyConv3d, torch.nn.functional.conv3d,
                              (32, 2), (192, 16, 8, 7, 6), (32, 16, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv3d_pickle(self):
        # Pickle round-trip before/after LazyConv3d materialization.
        self._check_lazy_conv_pickle(nn.Conv3d, nn.LazyConv3d, (32, 2), (192, 16, 8, 7, 6),
                                     (32, 16, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv3d_state(self):
        # State-dict interchange between Conv3d and LazyConv3d.
        self._check_lazy_conv_state(lambda: nn.Conv3d(16, 32, 2),
                                    lambda: nn.LazyConv3d(32, 2),
                                    (32, 16, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transposed1d(self):
        # NOTE(review): named "transposed" while the sibling pickle/state tests
        # say "transpose"; left as-is to keep the test id stable.
        # Transposed conv weight layout is (in, out, k), hence (16, 32, 2).
        self._check_lazy_conv(nn.ConvTranspose1d, nn.LazyConvTranspose1d, torch.nn.functional.conv_transpose1d,
                              (32, 2), (192, 16, 50), (16, 32, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose1d_pickle(self):
        # Pickle round-trip before/after LazyConvTranspose1d materialization.
        self._check_lazy_conv_pickle(nn.ConvTranspose1d, nn.LazyConvTranspose1d, (32, 2),
                                     (192, 16, 50), (16, 32, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose1d_state(self):
        # State-dict interchange between ConvTranspose1d and LazyConvTranspose1d.
        self._check_lazy_conv_state(lambda: nn.ConvTranspose1d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose1d(32, 2),
                                    (16, 32, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose2d(self):
        # Transposed conv weight layout is (in, out, k, k), hence (16, 32, 2, 2).
        self._check_lazy_conv(nn.ConvTranspose2d, nn.LazyConvTranspose2d, torch.nn.functional.conv_transpose2d,
                              (32, 2), (192, 16, 8, 6), (16, 32, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose2d_pickle(self):
        # Pickle round-trip before/after LazyConvTranspose2d materialization.
        self._check_lazy_conv_pickle(nn.ConvTranspose2d, nn.LazyConvTranspose2d, (32, 2),
                                     (192, 16, 8, 6), (16, 32, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose2d_state(self):
        # State-dict interchange between ConvTranspose2d and LazyConvTranspose2d.
        self._check_lazy_conv_state(lambda: nn.ConvTranspose2d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose2d(32, 2),
                                    (16, 32, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose3d(self):
        # Transposed conv weight layout is (in, out, k, k, k), hence (16, 32, 2, 2, 2).
        self._check_lazy_conv(nn.ConvTranspose3d, nn.LazyConvTranspose3d, torch.nn.functional.conv_transpose3d,
                              (32, 2), (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose3d_pickle(self):
        # Pickle round-trip before/after LazyConvTranspose3d materialization.
        self._check_lazy_conv_pickle(nn.ConvTranspose3d, nn.LazyConvTranspose3d, (32, 2),
                                     (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose3d_state(self):
        # State-dict interchange between ConvTranspose3d and LazyConvTranspose3d.
        self._check_lazy_conv_state(lambda: nn.ConvTranspose3d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose3d(32, 2),
                                    (16, 32, 2, 2, 2), (32,))
    def _check_lazy_norm(self, cls, lazy_cls, input_shape):
        # Helper: for every affine/track_running_stats combination, the lazy
        # norm module must expose uninitialized params/buffers, materialize
        # into `cls` on the first forward, and then match a freshly built
        # `cls` module parameter-for-parameter and output-for-output.
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                if affine:
                    self.assertIsInstance(lazy_module.weight, UninitializedParameter)
                    self.assertIsInstance(lazy_module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertIsInstance(lazy_module.running_mean, UninitializedBuffer)
                    self.assertIsInstance(lazy_module.running_var, UninitializedBuffer)
                input = torch.ones(*input_shape)
                lazy_output = lazy_module(input)
                self.assertIsInstance(lazy_module, cls)
                self.assertNotIsInstance(lazy_module, lazy_cls)
                # num_features is inferred from dim 1 of the input.
                num_features = input_shape[1]
                module = cls(num_features, affine=affine, track_running_stats=track_running_stats)
                expected_output = module(input)
                self.assertEqual(lazy_output, expected_output)
                if module.weight is not None:
                    self.assertEqual(lazy_module.weight.shape, module.weight.shape)
                    self.assertEqual(lazy_module.weight, module.weight)
                if module.bias is not None:
                    self.assertEqual(lazy_module.bias.shape, module.bias.shape)
                    self.assertEqual(lazy_module.bias, module.bias)
                if module.running_mean is not None:
                    self.assertEqual(lazy_module.running_mean.shape, module.running_mean.shape)
                    self.assertEqual(lazy_module.running_mean, module.running_mean)
                if module.running_var is not None:
                    self.assertEqual(lazy_module.running_var.shape, module.running_var.shape)
                    self.assertEqual(lazy_module.running_var, module.running_var)
                if module.num_batches_tracked is not None:
                    self.assertEqual(lazy_module.num_batches_tracked.shape, module.num_batches_tracked.shape)
                    self.assertEqual(lazy_module.num_batches_tracked, module.num_batches_tracked)
    def _check_lazy_norm_pickle(self, cls, lazy_cls, input_shape):
        # Helper: pickling a lazy norm module keeps it lazy/uninitialized
        # before the first forward, and yields a plain `cls` module after.
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                module = pickle.loads(pickle.dumps(module))
                self.assertIsInstance(module, lazy_cls)
                if affine:
                    self.assertIsInstance(module.weight, UninitializedParameter)
                    self.assertIsInstance(module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertIsInstance(module.running_mean, UninitializedBuffer)
                    self.assertIsInstance(module.running_var, UninitializedBuffer)
                input = torch.ones(*input_shape)
                module(input)  # fully materialized
                module = pickle.loads(pickle.dumps(module))
                self.assertNotIsInstance(module, lazy_cls)
                self.assertIsInstance(module, cls)
                if affine:
                    self.assertNotIsInstance(module.weight, UninitializedParameter)
                    self.assertNotIsInstance(module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertNotIsInstance(module.running_mean, UninitializedBuffer)
                    self.assertNotIsInstance(module.running_var, UninitializedBuffer)
    def _check_lazy_batchnorm_state(self, cls, lazy_cls):
        # Helper: loading a materialized batchnorm state dict into a lazy one
        # initializes all params/buffers; the reverse direction must raise.
        module = cls(10)
        lazy_module = lazy_cls(affine=True, track_running_stats=True)
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a full
        # Conv one until the first iteration. This is due to
        # limitations on the state_dict loading logic
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertEqual(lazy_module.weight.shape, (10,))
        self.assertEqual(lazy_module.bias.shape, (10,))
        self.assertEqual(lazy_module.running_mean.shape, (10,))
        self.assertEqual(lazy_module.running_var.shape, (10,))
        # Loading an uninitialized state dict into a regular module must raise.
        module = cls(10)
        lazy_module = lazy_cls()
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())
    def _check_lazy_instancenorm_state(self, cls, lazy_cls):
        # Helper: same as the batchnorm state check, but covering every
        # affine/track_running_stats combination InstanceNorm supports.
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                module = cls(10, affine=affine, track_running_stats=track_running_stats)
                lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                lazy_module.load_state_dict(module.state_dict())
                # Parameters have been initialized but the module won't become a full
                # InstanceNorm one until the first iteration. This is due to
                # limitations on the state_dict loading logic
                self.assertFalse(lazy_module.has_uninitialized_params())
                if affine:
                    self.assertEqual(lazy_module.weight.shape, (10,))
                    self.assertEqual(lazy_module.bias.shape, (10,))
                if track_running_stats:
                    self.assertEqual(lazy_module.running_mean.shape, (10,))
                    self.assertEqual(lazy_module.running_var.shape, (10,))
        # Loading an uninitialized state dict into a regular module must raise.
        module = cls(10, affine=True, track_running_stats=True)
        lazy_module = lazy_cls(affine=True, track_running_stats=True)
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())
    def test_lazy_batchnorm1d(self):
        # BatchNorm1d accepts both (N, C, L) and (N, C) inputs; cover both.
        self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
        self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))
    def test_lazy_batchnorm1d_pickle(self):
        # Pickle round-trip for both (N, C, L) and (N, C) input ranks.
        self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
        self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))
def test_lazy_batchnorm1d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)
self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)
    def test_lazy_batchnorm2d(self):
        # LazyBatchNorm2d materialization/equivalence on an (N, C, H, W) input.
        self._check_lazy_norm(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))
    def test_lazy_batchnorm2d_pickle(self):
        # Pickle round-trip before/after LazyBatchNorm2d materialization.
        self._check_lazy_norm_pickle(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))
def test_lazy_batchnorm2d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)
self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)
    def test_lazy_batchnorm3d(self):
        # LazyBatchNorm3d materialization/equivalence on an (N, C, D, H, W) input.
        self._check_lazy_norm(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))
    def test_lazy_batchnorm3d_pickle(self):
        # Pickle round-trip before/after LazyBatchNorm3d materialization.
        self._check_lazy_norm_pickle(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))
def test_lazy_batchnorm3d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)
self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)
    def test_lazy_instancenorm1d(self):
        # LazyInstanceNorm1d materialization/equivalence on an (N, C, L) input.
        self._check_lazy_norm(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))
    def test_lazy_instancenorm1d_pickle(self):
        # Pickle round-trip before/after LazyInstanceNorm1d materialization.
        self._check_lazy_norm_pickle(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))
def test_lazy_instancenorm1d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)
self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)
    def test_lazy_instancenorm2d(self):
        # LazyInstanceNorm2d materialization/equivalence on an (N, C, H, W) input.
        self._check_lazy_norm(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))
    def test_lazy_instancenorm2d_pickle(self):
        # Pickle round-trip before/after LazyInstanceNorm2d materialization.
        self._check_lazy_norm_pickle(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))
def test_lazy_instancenorm2d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)
self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)
    def test_lazy_instancenorm3d(self):
        # LazyInstanceNorm3d materialization/equivalence on an (N, C, D, H, W) input.
        self._check_lazy_norm(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))
    def test_lazy_instancenorm3d_pickle(self):
        # Pickle round-trip before/after LazyInstanceNorm3d materialization.
        self._check_lazy_norm_pickle(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))
def test_lazy_instancenorm3d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)
self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)
    @suppress_warnings
    def test_materialize_dtype(self):
        # Materialization adopts the module's current dtype: float64 by default
        # (this file sets torch.set_default_dtype(torch.double) at import),
        # and float16 once the module has been converted with .half().
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.dtype == torch.float64)
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.half()
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.dtype == torch.float16)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @suppress_warnings
    def test_materialize_device(self):
        # Materialization adopts the module's current device: CPU by default,
        # CUDA after .cuda().
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.device.type == 'cpu')
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.cuda()
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.device.type == 'cuda')
    @suppress_warnings
    def test_chained_initialization(self):
        # Two stacked LazyLinear layers must materialize in sequence during a
        # single forward: layer 2's in_features comes from layer 1's output.
        class MyNetwork(torch.nn.Module):
            def __init__(self):
                super(MyNetwork, self).__init__()
                self.linear_1 = torch.nn.LazyLinear(15)
                self.linear_2 = torch.nn.LazyLinear(10)
            def forward(self, x):
                y = self.linear_1(x)
                return self.linear_2(y)
        net = MyNetwork()
        net(torch.ones(5, 10))
        self.assertTrue(net.linear_1.weight.shape == (15, 10))
        self.assertTrue(net.linear_1.bias.shape == (15,))
        self.assertTrue(net.linear_2.weight.shape == (10, 15))
        self.assertTrue(net.linear_2.bias.shape == (10,))
    @suppress_warnings
    def test_optimizer_pass(self):
        # An optimizer built over an UninitializedParameter must pick up the
        # materialized Parameter (same object identity in param_groups) and be
        # able to step afterwards.
        optimizers = [torch.optim.Adadelta, torch.optim.Adagrad, torch.optim.Adam,
                      torch.optim.AdamW, torch.optim.Adamax,
                      torch.optim.ASGD, torch.optim.SGD, torch.optim.Rprop,
                      torch.optim.RMSprop, torch.optim.LBFGS]
        def run_step(module, optim):
            self.assertIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
            module.test_param.materialize(10)
            self.assertIsInstance(optim.param_groups[0]['params'][0], Parameter)
            self.assertNotIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
            for p in module.parameters():
                p.grad = torch.rand_like(p)
            if isinstance(optim, torch.optim.LBFGS):
                optim.step(lambda: 1.0)  # LBFGS requires a closure
            else:
                optim.step()
        for optim_cls in optimizers:
            module = LazyModule()
            module.register_parameter('test_param', UninitializedParameter())
            if optim_cls is torch.optim.SGD:
                # SGD has no default lr; pass one explicitly.
                optim = optim_cls(module.parameters(), lr=0.0)
            elif optim_cls is torch.optim.Adagrad:
                # Adagrad is expected to reject uninitialized parameters at
                # construction time.
                with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
                    optim = optim_cls(module.parameters())
                continue
            else:
                optim = optim_cls(module.parameters())
            run_step(module, optim)
    @suppress_warnings
    def test_weight_norm(self):
        # weight_norm must refuse a module whose parameters are uninitialized.
        m = nn.LazyLinear(7)
        with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
            m = torch.nn.utils.weight_norm(m)
    @suppress_warnings
    def test_spectral_norm(self):
        # spectral_norm must refuse a module whose parameters are uninitialized.
        m = nn.LazyLinear(7)
        with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
            m = torch.nn.utils.spectral_norm(m)
    @suppress_warnings
    def test_invalid_functions(self):
        # Tensor ops on an UninitializedParameter must raise rather than
        # operate on a placeholder with no data.
        param = torch.nn.parameter.UninitializedParameter()
        with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
            torch.empty_like(param)
        with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
            torch.add(param, param)
        with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
            param + param
class TestFunctionalPickle(TestCase):
    # Regression tests for pickling torch.nn.functional callables (gh-38137).
    def test_pickle_softsign(self):
        # The only success criterion is that pickling does not raise.
        pickled = pickle.dumps(F.softsign)
class TestStateDictHooks(TestCase):
    # Tests for Module._register_load_state_dict_pre_hook: hooks run before
    # load_state_dict applies the incoming state, optionally receiving the
    # module itself when registered with with_module=True.
    def test_load_state_dict_pre_hook(self):
        m = nn.Linear(10, 10)
        m_state_dict = m.state_dict()
        m_load = nn.Linear(10, 10)
        hook_called = 0
        def hook_without_module(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
            self.assertEqual(m_state_dict, state_dict)
            nonlocal hook_called
            hook_called += 1
        def hook_with_module(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
            self.assertEqual(m_state_dict, state_dict)
            self.assertTrue(m_load is module)
            nonlocal hook_called
            hook_called += 1
        hook_called = 0
        m_load._register_load_state_dict_pre_hook(hook_without_module)
        m_load.load_state_dict(m_state_dict)
        self.assertEqual(1, hook_called)
        hook_called = 0
        # Both hooks remain registered, so the second load fires both.
        m_load._register_load_state_dict_pre_hook(hook_with_module, True)
        m_load.load_state_dict(m_state_dict)
        self.assertEqual(2, hook_called)
    def test_load_state_dict_module_pre_hook(self):
        hook_called = 0
        # Test with module instance method as hook
        class MyModule(nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.foo = torch.nn.Parameter(torch.rand(10))
            def my_pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
                assert [] == error_msgs
                assert [] == unexpected_keys
                assert [] == missing_keys
                assert strict
                nonlocal hook_called
                hook_called += 1
            def my_pre_load_hook_with_module(
                self,
                module,
                state_dict,
                prefix,
                local_metadata,
                strict,
                missing_keys,
                unexpected_keys,
                error_msgs,
            ):
                assert [] == error_msgs
                assert [] == unexpected_keys
                assert [] == missing_keys
                assert strict
                # with_module=True must pass the module the hook was
                # registered on, i.e. the same instance as `self` here.
                assert self is module
                nonlocal hook_called
                hook_called += 1
        m = MyModule()
        state_dict = m.state_dict()
        hook_called = 0
        m._register_load_state_dict_pre_hook(m.my_pre_load_hook)
        m.load_state_dict(state_dict)
        self.assertEqual(1, hook_called)
        hook_called = 0
        # Both hooks remain registered, so the second load fires both.
        m._register_load_state_dict_pre_hook(m.my_pre_load_hook_with_module, True)
        m.load_state_dict(state_dict)
        self.assertEqual(2, hook_called)
instantiate_device_type_tests(TestNNDeviceType, globals())
instantiate_parametrized_tests(TestNN)
if __name__ == '__main__':
run_tests()
| 46.473553 | 132 | 0.584683 |
import math
import random
import string
import unittest
import io
import unittest.mock as mock
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import repeat, product
from functools import reduce, partial
from operator import mul
from collections import OrderedDict
import torch
torch.set_default_dtype(torch.double)
from torch._six import inf, nan
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
import torch.nn.utils.parametrize as parametrize
import torch.nn.utils.prune as prune
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn import Parameter
from torch.nn.parameter import UninitializedParameter, UninitializedBuffer
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_fp_dtypes, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
skipIfRocmVersionLessThan, skipIfNotMiopenSuggestNHWC, TEST_NUMPY, TEST_SCIPY, TEST_WITH_ROCM, download_file, \
get_function_arglist, load_tests, \
suppress_warnings, TemporaryFileName, TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, \
ctcloss_reference, new_module_tests, single_batch_reference_fn
from torch.testing._internal.common_device_type import expectedFailureXLA, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfNoCudnn, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, skipCUDAIfRocmVersionLessThan, skipCUDAIfNotMiopenSuggestNHWC, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types, \
disableMkldnn, skipCPUIfNoMkldnn, disablecuDNN, skipCUDAIfMiopen, skipCUDAIfNoMiopen
from torch.nn import MultiheadAttention
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
if TEST_SCIPY:
from scipy import stats
import scipy.ndimage
if TEST_NUMPY:
import numpy as np
class PackedSequenceTest(TestCase):
    # Tests for torch.nn.utils.rnn.PackedSequence: type casts, sorting
    # requirements, total_length padding, and device/dtype moves.

    # Maps tensor type name -> (constructor, name of the casting method).
    _type_by_name = {
        'torch.DoubleTensor': (torch.DoubleTensor, 'double'),
        'torch.FloatTensor': (torch.FloatTensor, 'float'),
        'torch.LongTensor': (torch.LongTensor, 'long'),
        'torch.IntTensor': (torch.IntTensor, 'int'),
        'torch.ShortTensor': (torch.ShortTensor, 'short'),
        'torch.CharTensor': (torch.CharTensor, 'char'),
        'torch.ByteTensor': (torch.ByteTensor, 'byte'),
    }
    def __init__(self, *args, **kwargs):
        super(PackedSequenceTest, self).__init__(*args, **kwargs)
        self.batch_size = 5
        self.max_length = 6
    def _ordered_sequence(self, tensor_type):
        # Random-length random-valued sequences, sorted longest-first
        # (the ordering pack_padded_sequence(enforce_sorted=True) requires).
        seqs = [tensor_type(random.randint(1, self.max_length))
                for _ in range(self.batch_size)]
        if tensor_type == torch.ByteTensor:
            # ByteTensor is unsigned; keep values in [0, 256).
            seqs = [s.random_(0, 256) for s in seqs]
        else:
            seqs = [s.random_(-128, 128) for s in seqs]
        ordered = sorted(seqs, key=len, reverse=True)
        return ordered
    def _padded_sequence(self, tensor_type):
        # Returns (padded tensor of shape (max_len, batch), per-sequence lengths).
        ordered = self._ordered_sequence(tensor_type)
        lengths = [len(i) for i in ordered]
        padded_tensor = rnn_utils.pad_sequence(ordered)
        return padded_tensor, lengths
    def test_type_casts(self):
        # Casting a PackedSequence (e.g. .float(), .byte()) must produce a
        # sequence that unpacks to the expected tensor type.
        for _, (input_type, _) in self._type_by_name.items():
            for expected_type_str, (_, cast_str) in self._type_by_name.items():
                for enforce_sorted in [True, False]:
                    padded, lengths = self._padded_sequence(input_type)
                    packed = rnn_utils.pack_padded_sequence(
                        padded, lengths, enforce_sorted=enforce_sorted)
                    masked = getattr(packed, cast_str)()
                    unpacked, lengths_out = rnn_utils.pad_packed_sequence(masked)
                    self.assertEqual(unpacked.type(), expected_type_str)
    def test_wrong_order(self):
        # enforce_sorted=True must reject batches not sorted longest-first.
        a = torch.ones(25, 300)
        b = torch.ones(22, 300)
        b_a = rnn_utils.pad_sequence([b, a])
        self.assertRaises(
            RuntimeError,
            lambda: rnn_utils.pack_padded_sequence(b_a, [22, 25], enforce_sorted=True))
    def test_total_length(self):
        # pad_packed_sequence(total_length=...) must reject lengths shorter
        # than the longest sequence, and otherwise right-pad with zeros.
        padded, lengths = self._padded_sequence(torch.FloatTensor)
        max_length = max(lengths)
        packed = rnn_utils.pack_padded_sequence(padded, lengths)
        # test ValueError if total_length < max_length
        for total_length in (-1, 0, max_length - 1):
            for batch_first in (True, False):
                def err_fn():
                    rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                  total_length=total_length)
                self.assertRaisesRegex(ValueError,
                                       r'Expected total_length to be at least the '
                                       r'length of the longest sequence in input',
                                       err_fn)
        # test that pad_packed_sequence returns results of correct length
        for batch_first in (True, False):
            no_extra_pad, _ = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
            for total_length_delta in (0, 1, 8):
                total_length = max_length + total_length_delta
                unpacked, lengths_out = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                                      total_length=total_length)
                self.assertEqual(lengths, lengths_out)
                self.assertEqual(unpacked.size(1 if batch_first else 0), total_length)
                if total_length_delta == 0:
                    ref_output = no_extra_pad
                elif batch_first:
                    extra_pad = no_extra_pad.new_zeros(self.batch_size, total_length_delta)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 1)
                else:
                    extra_pad = no_extra_pad.new_zeros(total_length_delta, self.batch_size)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 0)
                self.assertEqual(unpacked, ref_output)
    def test_to(self):
        # .to()/.cpu()/.cuda() on a PackedSequence must be no-ops (same object)
        # when nothing changes, and value-preserving when moving device/dtype.
        for enforce_sorted in (True, False):
            padded, lengths = self._padded_sequence(torch.IntTensor)
            a = rnn_utils.pack_padded_sequence(
                padded, lengths, enforce_sorted=enforce_sorted).cpu()
            self.assertIs(a, a.to('cpu'))
            self.assertIs(a, a.cpu())
            self.assertIs(a, a.to('cpu', dtype=torch.int32))
            self.assertEqual(a.long(), a.to(torch.int64))
            if torch.cuda.is_available():
                for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                    b = a.cuda(device=cuda)
                    self.assertIs(b, b.to(cuda))
                    self.assertIs(b, b.cuda())
                    self.assertEqual(a, b.to('cpu'))
                    self.assertEqual(b, a.to(cuda))
                    self.assertEqual(a, b.to('cpu', dtype=torch.int32))
                    self.assertIs(b, b.to(dtype=torch.int32))
                    self.assertEqual(b.long(), b.to(dtype=torch.int64))
    def test_to_memory_format(self):
        # Conversion to channels_last must affect 4-D parameters.
        m = torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2, bias=True)
        m = m.to(memory_format=torch.channels_last)
        for param in m.parameters():
            if param.dim() == 4:
                self.assertTrue(param.is_contiguous(memory_format=torch.channels_last))
class TestAvgPool(TestCase):
    # Tests avg_pool1d/2d/3d against naive unfold-based reference
    # implementations, plus divisor_override and ceil_mode edge cases.
    def _sum_pool2d(self, x, kernel_size):
        # Reference: sum over each non-overlapping kernel window (stride ==
        # kernel_size) via unfold; returns shape (N, num_windows).
        windows = torch.nn.functional.unfold(x, kernel_size=kernel_size, stride=kernel_size)
        return torch.sum(windows, dim=1)
    def _sum_pool3d(self, x, kernel_size):
        # Reference 3-D sum pool: collapse depth in groups of kernel_size[0]
        # (dropping a trailing partial group), then 2-D sum-pool each slab.
        h = kernel_size[0]
        splited_x = [t.sum(0) for t in x.split(h) if t.size(0) == h]
        splited_x = [self._sum_pool2d(t.unsqueeze(0).unsqueeze(0), kernel_size[1:]) for t in splited_x]
        joined_x = torch.cat(splited_x)
        return joined_x.view(1, joined_x.numel())
    def _avg_pool2d(self, x, kernel_size):
        # Reference average pool = sum pool / window element count.
        size = reduce((lambda x, y: x * y), kernel_size)
        return self._sum_pool2d(x, kernel_size) / size
    def _avg_pool3d(self, x, kernel_size):
        size = reduce((lambda x, y: x * y), kernel_size)
        return self._sum_pool3d(x, kernel_size) / size
    def test_doubletensor_avg_pool2d(self):
        # avg_pool2d must match the reference for every kernel size up to the
        # full input extent.
        n, m = 5, 8
        input = torch.rand(1, 1, n, m)
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                actual = torch.nn.functional.avg_pool2d(input[0], (i, j))
                actual = actual.view(1, actual.numel())
                expected = self._avg_pool2d(input, (i, j))
                self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_avg_pool2d_with_zero_divisor(self):
        # divisor_override=0 must be rejected.
        self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
                               lambda: F.avg_pool2d(torch.zeros(3, 3, 3), (2, 2), divisor_override=0))
    def test_doubletensor_avg_pool2d_with_divisor(self):
        # divisor_override replaces the window element count in the average.
        n, m = 3, 3
        input = torch.rand(1, 1, n, m)
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                for divisor in [1, 7, i * j]:
                    actual = F.avg_pool2d(input[0], (i, j), divisor_override=divisor)
                    actual = actual.view(1, actual.numel())
                    expected = self._sum_pool2d(input, (i, j)) / divisor
                    self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_doubletensor_avg_pool3d(self):
        h, w, d = 5, 6, 7
        input = torch.rand(h, w, d)
        for i in range(1, h + 1):
            for j in range(1, w + 1):
                for k in range(1, d + 1):
                    actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k))
                    actual = actual.view(1, actual.numel())
                    expected = self._avg_pool3d(input, (i, j, k))
                    self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_doubletensor_avg_pool3d_with_divisor(self):
        h, w, d = 6, 5, 7
        input = torch.rand(h, w, d)
        for i in range(1, h + 1):
            for j in range(1, w + 1):
                for k in range(1, d + 1):
                    # NOTE(review): divisor candidates use i * j, not i * j * k;
                    # the expected value uses the same divisor so the check is
                    # self-consistent — presumably intentional, verify.
                    for divisor in [1, 7, i * j]:
                        actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k), divisor_override=divisor)
                        actual = actual.view(1, actual.numel())
                        expected = self._sum_pool3d(input, (i, j, k)) / divisor
                        self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_avg_pool3d_with_zero_divisor(self):
        self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
                               lambda: F.avg_pool3d(torch.zeros(3, 3, 3, 3), (2, 2, 2), divisor_override=0))
    def test_avg_pool1d_ceil_mode(self):
        # Regression: ceil_mode windows that fall entirely in padding must not
        # produce NaNs (CPU and, when available, CUDA).
        x = 10 * torch.randn((1, 16, 4))
        y = torch.nn.functional.avg_pool1d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool1d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
            self.assertTrue(not torch.isnan(y).any())
    def test_avg_pool2d_ceil_mode(self):
        x = 10 * torch.randn((1, 16, 4, 4))
        y = torch.nn.functional.avg_pool2d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
            padding=(0, 1), stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool2d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
                padding=(0, 1), stride=2)
            self.assertTrue(not torch.isnan(y).any())
    def test_avg_pool3d_ceil_mode(self):
        x = 10 * torch.randn((1, 16, 4, 4, 4))
        y = torch.nn.functional.avg_pool3d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool3d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
            self.assertTrue(not torch.isnan(y).any())
class TestNN(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
def _forward(self, module, input: _TensorOrTensors):
with freeze_rng_state():
if isinstance(input, tuple):
return module(*input)
else:
return module(input)
    def _backward(self, module, input: _TensorOrTensors, output, grad_output, create_graph=False):
        # Backprop `grad_output` through `output` and return the input
        # gradient data (tuple-in -> tuple-out; None where no grad exists).
        output.backward(grad_output, retain_graph=True, create_graph=create_graph)
        if isinstance(input, tuple):
            return tuple(i.grad.data if i.grad is not None else None for i in input)
        else:
            return input.grad.data if input.grad is not None else None
def _forward_criterion(self, criterion, input, target, extra_args=None):
if extra_args is None:
extra_args = tuple()
if isinstance(input, tuple):
args = input + (target,) + extra_args
output = criterion(*args)
else:
output = criterion(input, target, *extra_args)
return output
    def _backward_criterion(self, criterion, input, output, target, gradOutput=None, extra_args=None):
        # Zero any existing input grads, backprop `gradOutput` (default: a
        # scalar one cast to the output's dtype/device) through the criterion,
        # and return the resulting input gradient data.
        if extra_args is None:
            extra_args = tuple()
        input_tuple = input if isinstance(input, tuple) else (input,)
        output_tuple = output if isinstance(output, tuple) else (output,)
        for i in input_tuple:
            if i.grad is not None:
                i.grad.data.zero_()
        args = input_tuple + (target,) + extra_args
        if gradOutput is None:
            gradOutput = torch.ones(())
        criterion(*args).backward(gradOutput.to(output_tuple[0]))
        if isinstance(input, tuple):
            return tuple(i.grad.data for i in input)
        else:
            return input.grad.data
def _zero_grad_parameters(self, module):
for p in module.parameters():
if p.grad is not None:
with torch.no_grad():
p.grad.zero_()
p.grad.detach_()
def _get_parameters(self, module):
params = []
d_params = []
for p in module.parameters():
params.append(p)
d_params.append(p.grad)
return params, d_params
    def _create_basic_net(self):
        # Fixture: returns (leaf Layer, Net containing it, Sequential of the
        # same Net twice) — each level owning one parameter and one buffer.
        class Layer(nn.Module):
            def __init__(self):
                super(Layer, self).__init__()
                self.layer_dummy_param = Parameter(torch.empty(3, 5))
                self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7))
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.l1 = Layer()
                self.dummy_param = Parameter(torch.empty(3, 5))
                self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1))
        l = Layer()
        n = Net()
        s = nn.Sequential(n, n)  # the same Net instance appears twice
        return l, n, s
def test_requires_grad_(self):
m = self._create_basic_net()[-1]
assert len(list(m.buffers())) > 0, 'invalid test'
assert all(not b.requires_grad for b in m.buffers()) > 0, 'invalid test'
assert len(list(m.parameters())) > 0, 'invalid test'
assert all(p.requires_grad for p in m.parameters()) > 0, 'invalid test'
for requires_grad in (False, True):
self.assertIs(m.requires_grad_(requires_grad), m)
for p in m.parameters():
self.assertEqual(p.requires_grad, requires_grad)
for b in m.buffers():
self.assertFalse(b.requires_grad)
    def test_module_backcompat(self):
        # Loading a serialized module from an older PyTorch release must still
        # work (NOTE: downloads a fixture from download.pytorch.org).
        from torch.serialization import SourceChangeWarning
        path = download_file('https://download.pytorch.org/test_data/linear.pt')
        with warnings.catch_warnings():
            # The pickled source differs from the current one; suppress the
            # resulting SourceChangeWarning.
            warnings.simplefilter('ignore', SourceChangeWarning)
            m = torch.load(path)
        input = torch.randn(2, 3, dtype=torch.float)
        self.assertEqual(m(input).size(), (2, 5))
    def test_conv_backcompat(self):
        # Loading a legacy serialized Conv2d must still work (NOTE: downloads
        # a fixture from download.pytorch.org).
        from torch.serialization import SourceChangeWarning
        path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', SourceChangeWarning)
            # The legacy file was pickled with a text encoding; decode as utf-8.
            m = torch.load(path, encoding='utf-8')
        input = torch.randn((1, 1, 1, 1), dtype=torch.float)
        self.assertEqual(m(input).size(), (1, 1, 1, 1))
def test_share_memory(self):
    """share_memory() moves every parameter and buffer into shared storage."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.p = nn.Parameter(torch.eye(5))
            self.par = nn.ParameterList()
            self.par.append(nn.Parameter(torch.randn(10)))

        def forward(self, inp):
            return inp.clone()

    net = Net()
    # Nothing is shared before the call...
    for p in net.parameters():
        self.assertFalse(p.storage().is_shared())
    for b in net.buffers():
        self.assertFalse(b.storage().is_shared())
    net.share_memory()
    # ...everything is shared after it.
    for p in net.parameters():
        self.assertTrue(p.storage().is_shared())
    for b in net.buffers():
        self.assertTrue(b.storage().is_shared())
def _test_hooks(self, backward_register_fn):
    """Shared scenario checking forward/backward hook firing counts and removal.

    backward_register_fn: name of the backward-hook registration method to
    exercise ('register_backward_hook' or 'register_full_backward_hook').
    """
    module = nn.Sigmoid()
    input = torch.ones(5, 5, requires_grad=True)
    # Each hook increments its counter by a per-hook amount so the totals
    # encode exactly which hooks fired how many times.
    counter = {
        'forwards': 0,
        'backwards': 0
    }

    def fw_hook(inc, h_module, input, output):
        # Forward hooks receive (module, tuple-of-inputs, output).
        self.assertIsInstance(input, tuple)
        self.assertTrue(isinstance(output, torch.Tensor))
        self.assertTrue(h_module is module)
        self.assertEqual(input[0], torch.ones(5, 5))
        # sigmoid(1) == 1 / (1 + 1/e)
        self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
        counter['forwards'] += inc

    def bw_hook(inc, h_module, grad_input, grad_output):
        self.assertIsInstance(grad_input, tuple)
        self.assertIsInstance(grad_output, tuple)
        self.assertTrue(h_module is module)
        self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
        counter['backwards'] += inc

    # First forward hook: +1 per forward call.
    test_fwd = module.register_forward_hook(lambda *args: fw_hook(1, *args))
    module(input)
    module(input)
    self.assertEqual(counter['forwards'], 2)
    self.assertEqual(counter['backwards'], 0)
    # First backward hook: +1 per backward pass.
    test_bwd = getattr(module, backward_register_fn)(
        lambda *args: bw_hook(1, *args))
    output = module(input)
    self.assertEqual(counter['forwards'], 3)
    self.assertEqual(counter['backwards'], 0)
    output.backward(torch.ones(5, 5) * 2, retain_graph=True)
    self.assertEqual(counter['forwards'], 3)
    self.assertEqual(counter['backwards'], 1)
    # Backward through the retained graph fires the backward hook again.
    output.backward(torch.ones(5, 5) * 2, retain_graph=True)
    self.assertEqual(counter['forwards'], 3)
    self.assertEqual(counter['backwards'], 2)
    # Second forward hook adds +2, so each forward now contributes 3 total.
    test2_fwd = module.register_forward_hook(lambda *args: fw_hook(2, *args))
    output = module(input)
    self.assertEqual(counter['forwards'], 6)
    self.assertEqual(counter['backwards'], 2)
    # Second backward hook adds +2, so each backward now contributes 3 total.
    test2_bwd = getattr(module, backward_register_fn)(lambda *args: bw_hook(2, *args))
    module(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 9)
    self.assertEqual(counter['backwards'], 5)
    # Removing a hook stops its contribution immediately.
    test2_bwd.remove()
    module(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 12)
    self.assertEqual(counter['backwards'], 6)
    test2_fwd.remove()
    module(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 13)
    self.assertEqual(counter['backwards'], 7)
    test_fwd.remove()
    test_bwd.remove()
def test_hooks(self):
    """Run the shared hook scenario for both backward-hook registration APIs."""
    for register_fn in ("register_backward_hook", "register_full_backward_hook"):
        self._test_hooks(register_fn)
def test_hook_cpp(self):
    """Full backward hooks also fire for modules backed by C++ kernels (BatchNorm1d)."""
    bn = nn.BatchNorm1d(5)

    def hook(module, grad_inputs, grad_outputs):
        # BatchNorm1d has one tensor input and one tensor output.
        self.assertEqual(len(grad_inputs), 1)
        self.assertEqual(len(grad_outputs), 1)
        self.assertEqual(module, bn)

    bn.register_full_backward_hook(hook)
    output = bn(torch.randn(5, 5, requires_grad=True))
    output.sum().backward()
def test_hook_invalid_outputs(self):
    """A backward hook returning the wrong number of gradients raises at backward time."""
    module = nn.Sigmoid()
    input = torch.randn(5, 5, requires_grad=True)

    def bw_fail1(self, grad_input, grad_output):
        # One gradient too few.
        return grad_input[:-1]

    def bw_fail2(self, grad_input, grad_output):
        # One gradient too many.
        return grad_input + (torch.randn(2, 2),)

    with module.register_backward_hook(bw_fail1):
        with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
            module(input).sum().backward()

    with module.register_backward_hook(bw_fail2):
        with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
            module(input).sum().backward()
def test_hook_requires_grad(self):
    """Registering a full backward hook must not change inputs' requires_grad flags."""
    test_self = self

    class MyModule(nn.Module):
        def forward(self, arg1, arg2, arg3):
            # The detached middle argument must stay grad-free inside forward.
            test_self.assertTrue(arg1.requires_grad)
            test_self.assertFalse(arg2.requires_grad)
            test_self.assertTrue(arg3.requires_grad)
            return arg1.sum() + arg2.sum() + arg3.sum()

    inp = torch.rand(2, requires_grad=True)
    mod = MyModule()

    # Same call, with and without the hook installed.
    mod(inp, inp.detach(), inp)
    mod.register_full_backward_hook(lambda mod, gI, gO: None)
    mod(inp, inp.detach(), inp)
def test_hook_no_requires_grad(self):
    """Full backward hooks fire even when no input requires grad; grad_input is all None."""
    mod = nn.Linear(2, 3)
    inp = torch.rand(1, 2)

    # return_val selects the hook's return branch per backward pass below.
    return_val = "None"
    hook_called = [0]

    def hook(mod, grad_input, grad_output):
        hook_called[0] += 1
        for gI in grad_input:
            self.assertIsNone(gI)
        for gO in grad_output:
            self.assertEqual(gO.size(), (1, 3))

        if return_val == "grad_input":
            return grad_input
        elif return_val == "invalid":
            # A real tensor where no input required grad -> error below.
            return inp
        elif return_val == "None":
            return None
        else:
            raise RuntimeError("Invalid return_val string")

    mod.register_full_backward_hook(hook)

    # Returning None is allowed.
    mod(inp).sum().backward()
    self.assertEqual(hook_called[0], 1)

    # Returning the unchanged (all-None) grad_input is also allowed.
    return_val = "grad_input"
    mod(inp).sum().backward()
    self.assertEqual(hook_called[0], 2)

    # Returning an actual tensor is rejected.
    return_val = "invalid"
    with self.assertRaisesRegex(RuntimeError, "where no input requires gradient"):
        mod(inp).sum().backward()
def test_hook_last_arg_requires_grad(self):
    """Full backward hooks work when only the *last* forward argument requires grad."""
    mod = nn.L1Loss()
    inp = torch.rand(1, requires_grad=True)
    mod.register_full_backward_hook(lambda m, gI, gO: None)

    try:
        # First argument detached, second requires grad.
        mod(inp.detach(), inp)
    except Exception as ex:
        self.fail("Unexpected exception: %s" % ex)
def test_hook_extra_input(self):
    """Non-tensor forward arguments/outputs appear as None in the hook's grad tuples."""
    class MyModule(nn.Module):
        def forward(self, non_tensor, tensor):
            return tensor.clone(), non_tensor

    inp = torch.rand(2, requires_grad=True)
    mod = MyModule()

    def hook(mod, grad_input, grad_output):
        # Positions mirror forward(): input 0 and output 1 are non-tensors.
        self.assertIsNone(grad_input[0])
        self.assertIsInstance(grad_input[1], torch.Tensor)
        self.assertIsInstance(grad_output[0], torch.Tensor)
        self.assertIsNone(grad_output[1])

    mod.register_full_backward_hook(hook)
    out, _ = mod(True, inp)
    out.sum().backward()
def test_hook_inplace(self):
    """In-place modification of tensors wrapped by a full backward hook is detected."""
    class MyModule(nn.Module):
        def forward(self, inp, do_inplace):
            self.inp = inp  # keep the hook-wrapped input alive for later checks
            if do_inplace:
                inp += 1
            return inp.clone()

    hook_called = [0]

    def hook(mod, grad_input, grad_output):
        hook_called[0] += 1

    inp = torch.rand(10, requires_grad=True)
    mod = MyModule()
    mod.register_full_backward_hook(hook)

    # Ordinary out-of-place use works and fires the hook.
    mod(inp, False).sum().backward()
    self.assertEqual(hook_called[0], 1)

    # Modifying the wrapped input in place inside forward is an error.
    with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
                                "a view and is being modified inplace."):
        mod(inp.clone(), True)

    # Modifying the base after forward, then touching the saved view, is an error.
    local_inp = inp.clone()
    out = mod(local_inp, False)
    local_inp[0] *= 1
    with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
                                "a view and its base or another view"):
        mod.inp + 2

    # Modifying the wrapped output in place is an error too.
    out = mod(inp, False)
    with self.assertRaisesRegex(RuntimeError, "BackwardHookFunctionBackward is a view "
                                "and is being modified inplace."):
        out += 1
def test_hook_non_full_warning(self):
    """Legacy register_backward_hook warns on module shapes it cannot handle."""
    def noop(*args):
        pass

    a = torch.rand(2, requires_grad=True)
    b = torch.rand(2, requires_grad=True)

    # Input is a list, not a Tensor/tuple-of-Tensors.
    class MyModule(nn.Module):
        def forward(self, l):
            return l[0].clone(), l[1].clone()

    m = MyModule()
    m.register_backward_hook(noop)
    with self.assertWarnsRegex(UserWarning, "does not take as input a single Tensor or a tuple of Tensors"):
        m([a, b])

    # Output is a list, not a Tensor/tuple-of-Tensors.
    class MyModule(nn.Module):
        def forward(self, a, b):
            return [a.clone(), b.clone()]

    m = MyModule()
    m.register_backward_hook(noop)
    with self.assertWarnsRegex(UserWarning, "does not return a single Tensor or a tuple of Tensors"):
        m(a, b)

    # Outputs come from two independent autograd Nodes.
    class MyModule(nn.Module):
        def forward(self, a, b):
            return a.clone(), b.clone()

    m = MyModule()
    m.register_backward_hook(noop)
    with self.assertWarnsRegex(UserWarning, "outputs are generated by different autograd Nodes"):
        m(a, b)

    # Forward builds more than one autograd Node.
    class MyModule(nn.Module):
        def forward(self, a):
            return a.clone().clone()

    m = MyModule()
    m.register_backward_hook(noop)
    with self.assertWarnsRegex(UserWarning, "the forward contains multiple autograd Nodes"):
        m(a)
def test_hook_backward_size(self):
    """grad_input/grad_output passed to a full backward hook keep the user-facing sizes."""
    # forward mixes several ops and produces an output whose shape differs
    # from both inputs, so the hook must see the *module-level* shapes.
    class MyModule(nn.Module):
        def forward(self, arg1, arg2):
            tmp = arg1.sum() * arg2
            tmp = tmp + arg2.sum() * arg1.sum()
            tmp = tmp.sum().view(1)
            tmp = tmp.expand(8).contiguous()
            return tmp

    module = MyModule()
    inp1 = torch.randn(5, 5, requires_grad=True)
    inp2 = torch.randn(10, 10, requires_grad=True)

    def bw_hook(module, grad_input, grad_output):
        self.assertEqual(len(grad_input), 2)
        self.assertEqual(grad_input[0].size(), torch.Size([5, 5]))
        self.assertEqual(grad_input[1].size(), torch.Size([10, 10]))
        self.assertEqual(len(grad_output), 1)
        self.assertEqual(grad_output[0].size(), torch.Size([8]))

    with module.register_full_backward_hook(bw_hook):
        module(inp1, inp2).sum().backward()
def test_hook_backward_writeable(self):
    """A legacy backward hook may rewrite grad_input; the returned values are used."""
    module = nn.Sigmoid()
    input = torch.randn(5, 5, requires_grad=True)
    sig_x = torch.nn.functional.sigmoid(input)

    def bw_hook(module, grad_input, grad_output):
        for grad in grad_input:
            self.assertTrue(isinstance(grad, torch.Tensor))
        for grad in grad_output:
            self.assertTrue(isinstance(grad, torch.Tensor))
        # Double every incoming gradient.
        return tuple(gi * 2 for gi in grad_input)

    module.register_backward_hook(bw_hook)
    module(input).backward(torch.ones(5, 5))
    # d/dx sigmoid(x) == sigmoid(x) * (1 - sigmoid(x)); the hook doubled it.
    expected_grad = sig_x * (1 - sig_x) * 2
    self.assertEqual(input.grad, expected_grad)
def test_hook_forward_preforward_writable(self):
    """Forward pre-hooks and forward hooks may replace the module's input/output."""
    module = nn.Sigmoid()
    input = torch.randn(5, 5, requires_grad=True)
    sig_x = torch.nn.functional.sigmoid(input)

    def forward_pre_hook(m, input):
        # Replace the input with its ReLU before the module runs.
        return torch.nn.functional.relu(input[0])

    def forward_hook(m, input, output):
        # Negate the module's output after it runs.
        return -output

    module.register_forward_pre_hook(forward_pre_hook)
    module.register_forward_hook(forward_hook)
    output = module(input)
    expected_res = -torch.nn.functional.sigmoid(torch.nn.functional.relu(input))
    self.assertEqual(output, expected_res)
    output.backward(torch.ones(5, 5) * 2, retain_graph=True)
    # Gradients only flow where ReLU passed the input through (input > 0).
    mask = (input > 0).double()
    expected_grad = -sig_x * (1 - sig_x) * 2 * mask
    self.assertEqual(input.grad, expected_grad)
def test_to(self):
    """Module.to returns self when device/dtype already match; conversions round-trip."""
    m = nn.Linear(3, 5)
    # No-op moves return the same object, not a copy.
    self.assertIs(m, m.to('cpu'))
    self.assertIs(m, m.to('cpu', dtype=torch.float32))
    self.assertEqual(m.double(), m.to(torch.float64))
    # NOTE(review): copy=True is expected to be rejected by Module.to here —
    # confirm against the nn.Module.to signature.
    self.assertRaises(RuntimeError, lambda: m.to('cpu', copy=True))

    if torch.cuda.is_available():
        for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
            m2 = m.cuda(device=cuda)
            self.assertIs(m2, m2.to(cuda))
            self.assertEqual(m, m2.to('cpu'))
            self.assertEqual(m2, m.to(cuda))
            self.assertIs(m2, m2.to(dtype=torch.float32))
            self.assertEqual(m2.double(), m2.to(dtype=torch.float64))
def test_zero_grad(self):
    """zero_grad zeroes existing grads; set_to_none=True drops them entirely."""
    i = torch.randn(2, 5, requires_grad=True)
    module = nn.Linear(5, 5)
    # With no parameter requiring grad, zero_grad is a no-op.
    for p in module.parameters():
        p.requires_grad = False
    module.zero_grad()

    module.weight.requires_grad = True
    # Grad that was never populated stays None.
    module.zero_grad()
    self.assertIsNone(module.weight.grad)

    module(i).sum().backward()
    self.assertIsNotNone(module.weight.grad)
    self.assertGreater(module.weight.grad.data.abs().sum(), 0)
    module.zero_grad()
    self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())

    # Enabling grad on the bias after the fact: its grad is still None until
    # the next backward populates it.
    module.bias.requires_grad = True
    module.zero_grad()
    self.assertIsNotNone(module.weight.grad)
    self.assertIsNone(module.bias.grad)
    module(i).sum().backward()
    self.assertIsNotNone(module.weight.grad)
    self.assertIsNotNone(module.bias.grad)
    self.assertGreater(module.weight.grad.data.abs().sum(), 0)
    self.assertGreater(module.bias.grad.data.abs().sum(), 0)
    module.zero_grad()
    self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
    self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())

    # set_to_none=True releases the grad tensors instead of zeroing them.
    module.zero_grad(set_to_none=True)
    self.assertIsNone(module.weight.grad)
def test_no_grad(self):
    """Outputs computed under torch.no_grad() don't require grad and can't backprop."""
    for dtype in [torch.bfloat16, torch.float, torch.double]:
        module = nn.Conv2d(2, 5, kernel_size=3, padding=1).to(dtype)
        input = torch.randn(1, 2, 10, 10).to(dtype)
        x = input
        y = input.clone()

        # Normal mode: output tracks gradients and backward works.
        output = module(x)
        self.assertTrue(output.requires_grad)
        output.backward(torch.ones(1, 5, 10, 10))

        # no_grad mode: output is detached and backward raises.
        with torch.no_grad():
            output2 = module(y)
            self.assertFalse(output2.requires_grad)
            self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))
def test_invalid_conv1d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(4\). ' +
r'Kernel size: \(10\). Kernel size can\'t be greater than actual input size'):
module(input)
# Negative stride check
module = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_mismatch_shape_conv2d(self):
x = torch.randn(1, 10, 1, 28, 28)
w = torch.randn(6, 1, 5, 5)
with self.assertRaisesRegex(RuntimeError,
r'Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d, but got ' +
r'input of size: \[1, 10, 1, 28, 28\]'):
F.conv2d(x, w)
def test_conv2d_discontiguous_weight(self):
    """conv2d must produce correct results with a non-contiguous weight tensor."""
    # Test for https://github.com/pytorch/pytorch/issues/55781
    x = torch.ones(64, 16, 16, 16)
    # The strided slice [..., ::2] makes the weight non-contiguous.
    weight = torch.arange(0, 1.0, 1 / 2.0 ** 10).reshape(32, 16, 1, 2)[:, :, :, ::2]
    self.assertFalse(weight.is_contiguous())
    y = torch.nn.functional.conv2d(x, weight, None)
    if torch.backends.mkldnn.is_available():
        # Disable MKLDNN explicitly, so that either NNPACK or THCNN will be used
        with torch.backends.mkldnn.flags(enabled=False):
            y_ = torch.nn.functional.conv2d(x, weight, None)
            self.assertEqual(y, y_)
    self.assertEqual(y.sum(), 4186112.)
def test_invalid_conv2d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
input = torch.empty(1, 1, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)
input = torch.randn(1, 3, 1, 1)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(1 x 1\). ' +
r'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
module(input)
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_invalid_conv3d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
input = torch.empty(1, 1, 4, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
module = torch.nn.Conv3d(1, 1, kernel_size=3, stride=-2)
input = torch.empty(1, 1, 4, 4, 4)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_Conv1d_module_same_padding(self):
    """nn.Conv1d with padding='same' matches F.conv1d and rejects invalid configs."""
    x = torch.rand(1, 1, 20)
    # Module output must match the functional 'same' conv with its own weights.
    module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                       padding='same')
    expect = F.conv1d(x, module.weight, module.bias, padding='same')
    self.assertEqual(expect, module(x))

    # 'same' combined with dilation.
    module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                       padding='same', dilation=2)
    expect = F.conv1d(x, module.weight, module.bias, padding='same', dilation=2)
    self.assertEqual(expect, module(x))

    # Non-default padding_mode: the module pre-pads, then runs a 'valid' conv.
    # Even kernel -> asymmetric [4, 5] padding to keep the output length.
    module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                       padding='same', padding_mode='replicate')
    x_padded = F.pad(x, [4, 5], mode='replicate')
    expect = F.conv1d(x_padded, module.weight, module.bias, padding='valid')
    self.assertEqual(expect, module(x))
    self.assertEqual(x.size(), expect.size())

    # Unknown padding string is rejected at construction.
    with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
        module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
    # padding='same' is incompatible with strided convolution.
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
def test_Conv2d_module_same_padding(self):
    """nn.Conv2d with padding='same' matches F.conv2d and rejects strided configs."""
    x = torch.rand(1, 1, 9, 20)
    # Module output must match the functional 'same' conv with its own weights.
    module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 10),
                       padding='same')
    expect = F.conv2d(x, module.weight, module.bias, padding='same')
    self.assertEqual(expect, module(x))

    # 'same' combined with per-dimension dilation.
    module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
                       padding='same', dilation=(1, 2))
    expect = F.conv2d(x, module.weight, module.bias, padding='same', dilation=(1, 2))
    self.assertEqual(expect, module(x))

    # Non-default padding_mode: the module pre-pads, then runs a 'valid' conv.
    module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
                       padding='same', padding_mode='reflect')
    x_padded = F.pad(x, [1, 2, 1, 1], mode='reflect')
    expect = F.conv2d(x_padded, module.weight, module.bias, padding='valid')
    self.assertEqual(expect, module(x))
    self.assertEqual(x.size(), expect.size())

    # Unknown padding string is rejected at construction.
    with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
    # padding='same' is incompatible with any stride != 1, in any dimension.
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 3))
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(4, 1))
def test_Conv3d_module_same_padding(self):
    """nn.Conv3d with padding='same' matches F.conv3d and rejects strided configs."""
    x = torch.rand(1, 1, 4, 4, 4)
    # Module output must match the functional 'same' conv with its own weights.
    module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
                       padding='same')
    expect = F.conv3d(x, module.weight, module.bias, padding='same')
    self.assertEqual(expect, module(x))

    # 'same' combined with per-dimension dilation.
    module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
                       padding='same', dilation=(3, 2, 1))
    expect = F.conv3d(x, module.weight, module.bias, padding='same', dilation=(3, 2, 1))
    self.assertEqual(expect, module(x))

    # Non-default padding_mode: the module pre-pads, then runs a 'valid' conv.
    module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
                       padding='same', padding_mode='circular')
    x_padded = F.pad(x, [1, 2, 1, 1, 0, 1], mode='circular')
    expect = F.conv3d(x_padded, module.weight, module.bias, padding='valid')
    self.assertEqual(expect, module(x))
    self.assertEqual(x.size(), expect.size())

    # Unknown padding string is rejected at construction.
    with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
        module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
    # padding='same' is incompatible with any stride != 1, in any dimension.
    # Fix: these four checks previously constructed nn.Conv2d (with
    # 3-element strides) by copy-paste, so the Conv3d error path was never
    # actually exercised.
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 1, 3))
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 4, 1))
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(5, 1, 1))
def _test_alpha_dropout(self, cls, input):
    """Alpha dropout should approximately preserve the input's mean and std for any p."""
    mean = input.mean()
    std = input.std()

    for p in [0.2, 0.5, 0.8]:
        module = cls(p)
        input_var = input.detach().clone().requires_grad_()
        output = module(input_var)
        # Loose tolerance: the self-normalizing property is statistical.
        self.assertLess(abs(output.data.mean() - mean), 0.1)
        self.assertLess(abs(output.data.std() - std), 0.1)
        # Smoke-check the backward pass.
        output.backward(input)
def test_parameters_and_named_parameters(self):
    """named_parameters() yields dotted names; recurse=False limits to direct members."""
    def names(named_parameters):
        return [k for k, _ in named_parameters]

    l, n, s = self._create_basic_net()

    self.assertEqual(len(list(l.parameters())), 1)
    self.assertEqual(
        names(l.named_parameters()),
        ['layer_dummy_param'])

    self.assertEqual(len(list(n.parameters())), 2)
    self.assertEqual(
        names(n.named_parameters()),
        ['dummy_param', 'l1.layer_dummy_param'])

    self.assertEqual(len(list(n.parameters(recurse=False))), 1)
    self.assertEqual(
        names(n.named_parameters(recurse=False)),
        ['dummy_param'])

    # The sequential contains the same net twice; duplicates are reported once.
    self.assertEqual(len(list(s.parameters())), 2)
    self.assertEqual(
        names(s.named_parameters()),
        ['0.dummy_param', '0.l1.layer_dummy_param'])
def test_buffers_and_named_buffers(self):
    """named_buffers() yields dotted names; recurse=False limits to direct members."""
    def names(named_buffers):
        return [k for k, _ in named_buffers]

    l, n, s = self._create_basic_net()

    self.assertEqual(len(list(l.buffers())), 1)
    self.assertEqual(
        names(l.named_buffers()),
        ['layer_dummy_buf'])

    self.assertEqual(len(list(n.buffers())), 2)
    self.assertEqual(
        names(n.named_buffers()),
        ['dummy_buf', 'l1.layer_dummy_buf'])

    self.assertEqual(len(list(n.buffers(recurse=False))), 1)
    self.assertEqual(
        names(n.named_buffers(recurse=False)),
        ['dummy_buf'])

    # The sequential contains the same net twice; duplicates are reported once.
    self.assertEqual(len(list(s.buffers())), 2)
    self.assertEqual(
        names(s.named_buffers()),
        ['0.dummy_buf', '0.l1.layer_dummy_buf'])
def test_call_supports_python_dict_output(self):
    """A module's forward may return a dict; backward hooks still fire."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.l1 = nn.Linear(10, 20)
            self.register_backward_hook(self.hook)
            self.check_backward_hook_flag = False

        # NOTE(review): parameter names look swapped relative to the hook
        # convention (module, grad_input, grad_output); harmless here since
        # the gradients are unused.
        def hook(self, module, grad_out, grad_in):
            self.check_backward_hook_flag = True

        def forward(self, inputs):
            return {"output": self.l1(inputs).sum()}

    net = Net()
    model_output = net(torch.randn([5, 10]))
    model_output["output"].backward()
    self.assertTrue(net.check_backward_hook_flag)
def test_children(self):
l1 = nn.Linear(2, 2)
l2 = nn.Linear(2, 2)
l3 = nn.Linear(2, 2)
l4 = nn.Linear(2, 2)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential(l1, l2, l1, l2, subnet)
self.assertEqual(list(s.children()), [l1, l2, subnet])
def test_train_errors_for_invalid_mode(self):
class SubclassNet(nn.Module):
def __init__(self):
super(SubclassNet, self).__init__()
self.l1 = nn.Linear(2, 2)
def forward(self, inputs):
return self.l1(inputs)
subclass_net = SubclassNet()
sequential_net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
error_modes = ["invalid_str", torch.device('cpu')]
modules_to_check = [subclass_net, sequential_net]
for error_mode, module in itertools.product(error_modes, modules_to_check):
with self.assertRaises(ValueError):
module.train(error_mode)
def test_dir(self):
linear = nn.Linear(2, 2)
linear._test_submodule = nn.Linear(2, 2)
linear._test_parameter = Parameter(torch.empty(2, 2))
linear.register_buffer('_test_buffer', torch.empty(2, 2))
keys = dir(linear)
self.assertIn('_test_submodule', keys)
self.assertIn('_test_parameter', keys)
self.assertIn('_test_buffer', keys)
for key in keys:
self.assertTrue(hasattr(linear, key))
def test_repr(self):
    """repr() of modules shows config for leaves and indents nested children."""
    # empty Sequential
    empty_sequential = nn.Sequential()
    expected_repr_empty = 'Sequential()'
    self.assertEqual(repr(empty_sequential), expected_repr_empty)

    # leaf module: one line with its constructor arguments
    linear = nn.Linear(1, 1)
    expected_repr_linear = 'Linear(in_features=1, out_features=1, bias=True)'
    self.assertEqual(repr(linear), expected_repr_linear)

    # container repr lists indexed children on indented lines
    sequential = nn.Sequential(linear)
    expected_repr_sequential = 'Sequential(\n' \
        ' (0): Linear(in_features=1, out_features=1, bias=True)\n' \
        ')'
    self.assertEqual(repr(sequential), expected_repr_sequential)
def test_dir_digit(self):
model = nn.Sequential(nn.Linear(2, 2))
keys = dir(model)
self.assertNotIn('0', keys)
def test_named_children(self):
l1 = nn.Linear(2, 2)
l2 = nn.Linear(2, 2)
l3 = nn.Linear(2, 2)
l4 = nn.Linear(2, 2)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential()
with self.assertRaises(KeyError):
s.add_module('', l1)
with self.assertRaises(KeyError):
s.add_module('name.with.dot', l1)
s.add_module('layer1', l1)
s.add_module('layer2', l2)
s.add_module('layer3', l1)
s.add_module('layer4', l2)
s.add_module('subnet', subnet)
self.assertEqual(list(s.named_children()), [('layer1', l1), ('layer2', l2), ('subnet', subnet)])
def test_modules(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = l
self.l2 = l
self.param = torch.empty(3, 5)
l = nn.Linear(10, 20)
n = Net()
s = nn.Sequential(n, n, n, n)
self.assertEqual(list(s.modules()), [s, n, l])
def test_named_modules(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = l
self.l2 = l
self.param = torch.empty(3, 5)
self.block = block
l = nn.Linear(10, 20)
l1 = nn.Linear(10, 20)
l2 = nn.Linear(10, 20)
block = nn.Sequential()
block.add_module('linear1', l1)
block.add_module('linear2', l2)
n = Net()
s = nn.Sequential(n, n)
self.assertEqual(list(s.named_modules()), [('', s), ('0', n), ('0.l1', l),
('0.block', block), ('0.block.linear1', l1),
('0.block.linear2', l2)])
self.assertEqual(list(s.named_modules(remove_duplicate=False)), [
('', s), ('0', n), ('0.l1', l), ('0.l2', l),
('0.block', block), ('0.block.linear1', l1),
('0.block.linear2', l2),
('1', n), ('1.l1', l), ('1.l2', l),
('1.block', block), ('1.block.linear1', l1),
('1.block.linear2', l2)])
def test_register_buffer_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'buffer name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_buffer(1, torch.rand(5))
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_buffer(None, torch.rand(5))
def test_register_buffer_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
del m.attribute_name
m.register_parameter('attribute_name', nn.Parameter())
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
del m.attribute_name
m.add_module('attribute_name', nn.Module())
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
def test_register_buffer_raises_error_if_not_tensor(self):
m = nn.Module()
with self.assertRaises(TypeError):
m.register_buffer('attribute_name', 5)
def test_register_buffer_allows_overwriting_with_same_name(self):
    """Re-registering under an existing buffer name replaces the value (None allowed)."""
    m = nn.Module()
    buffer1 = torch.rand(5)
    buffer2 = buffer1 + 5
    buffer3 = None
    m.register_buffer('buffer_name', buffer1)
    self.assertEqual(m.buffer_name, buffer1)
    m.register_buffer('buffer_name', buffer2)
    self.assertEqual(m.buffer_name, buffer2)
    # None is a legal buffer value and clears the previous tensor.
    m.register_buffer('buffer_name', buffer3)
    self.assertEqual(m.buffer_name, buffer3)
def test_get_buffer(self):
    """get_buffer retrieves a registered buffer by name."""
    m = nn.Module()
    buffer1 = torch.randn(2, 3)
    buffer2 = torch.randn(4, 5)
    m.register_buffer('foo', buffer1)
    m.register_buffer('bar', buffer2)
    self.assertEqual(buffer1, m.get_buffer('foo'))
    self.assertEqual(buffer2, m.get_buffer('bar'))
def test_get_buffer_from_submodules(self):
    """get_buffer resolves dotted paths through nested submodules."""
    class MyModule(nn.Module):
        def __init__(self, foo, bar):
            super().__init__()
            self.sub = Sub(foo, bar)

    class Sub(nn.Module):
        def __init__(self, foo, bar):
            super().__init__()
            self.register_buffer('foo', foo)
            self.subsub = SubSub(bar)

    class SubSub(nn.Module):
        def __init__(self, bar):
            super().__init__()
            self.register_buffer('bar', bar)

    foo = torch.randn(2, 3)
    bar = torch.randn(4, 5)
    m = MyModule(foo, bar)
    # One and two levels of nesting.
    self.assertEqual(foo, m.get_buffer('sub.foo'))
    self.assertEqual(bar, m.get_buffer('sub.subsub.bar'))
def test_buffer_not_persistent(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
def test_buffer_not_persistent_del(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
del m.buf
self.assertTrue(len(list(m.buffers())) == 0)
def test_buffer_not_persistent_overwrite(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
m.register_buffer('buf', torch.rand(5))
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 1)
m.register_buffer('buf', torch.rand(5), persistent=False)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
def test_buffer_not_persistent_assign(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
m.buf = None
self.assertTrue(len(list(m.buffers())) == 0)
self.assertTrue(len(m.state_dict()) == 0)
m.buf = torch.rand(5)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
m.buf = nn.Parameter(torch.rand(5))
self.assertTrue(len(list(m.buffers())) == 0)
self.assertTrue(len(m.state_dict()) == 1)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_load_state_dict_invalid(self):
    """load_state_dict rejects checkpoint values that are not tensor-like."""
    m = torch.nn.Linear(2, 2, bias=False)

    # A raw ndarray value is rejected.
    state_dict = {'weight': np.random.randn(2, 2)}
    with self.assertRaisesRegex(RuntimeError,
                                "expected torch.Tensor or Tensor-like object from checkpoint but received"):
        m.load_state_dict(state_dict)
    # Nested tuples are rejected too.
    state_dict = {'weight': ((1., 1.), (2., 2.))}
    with self.assertRaisesRegex(RuntimeError,
                                "expected torch.Tensor or Tensor-like object from checkpoint but received"):
        m.load_state_dict(state_dict)
def test_buffer_not_persistent_load(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
m.load_state_dict({})
def test_register_parameter_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'parameter name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_parameter(1, nn.Parameter())
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_parameter(None, nn.Parameter())
def test_register_parameter_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
del m.attribute_name
m.register_buffer('attribute_name', torch.rand(5))
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
del m.attribute_name
m.add_module('attribute_name', nn.Module())
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
def test_register_parameter_allows_overwriting_with_same_name(self):
    """Re-registering under an existing parameter name replaces the value (None allowed)."""
    m = nn.Module()
    param1 = nn.Parameter(torch.rand(5))
    param2 = nn.Parameter(param1.data + 5)
    param3 = None
    m.register_parameter('param_name', param1)
    self.assertEqual(m.param_name, param1)
    m.register_parameter('param_name', param2)
    self.assertEqual(m.param_name, param2)
    # None is a legal parameter value and clears the previous one.
    m.register_parameter('param_name', param3)
    self.assertEqual(m.param_name, param3)
def test_add_module_raises_error_if_attr_exists(self):
methods_to_test = ['add_module', 'register_module']
for fn in methods_to_test:
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
getattr(m, fn)('attribute_name', nn.Module())
del m.attribute_name
m.register_buffer('attribute_name', torch.rand(5))
with self.assertRaises(KeyError):
getattr(m, fn)('attribute_name', nn.Module())
del m.attribute_name
m.register_parameter('attribute_name', nn.Parameter())
with self.assertRaises(KeyError):
getattr(m, fn)('attribute_name', nn.Module())
@unittest.expectedFailure
def test_getattr_with_property(self):
    """An AttributeError raised inside a @property should name the missing attribute.

    Marked expectedFailure: presumably nn.Module's attribute lookup masks the
    inner AttributeError and reports the property's name instead — confirm
    against Module.__getattr__.
    """
    class Model(nn.Module):
        @property
        def some_property(self):
            return self.something_that_doesnt_exist

    model = Model()

    with self.assertRaisesRegex(
            AttributeError,
            r"'Model' object has no attribute 'something_that_doesnt_exist'"):
        model.some_property
def test_Sequential_getitem(self):
    """Sequential supports int, tensor-int, and slice indexing (slices return Sequentials)."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3, l4)
    # Integer indexing returns the stored module itself.
    self.assertIs(n[0], l1)
    self.assertIs(n[1], l2)
    self.assertIs(n[2], l3)
    self.assertIs(n[3], l4)
    # 0-dim integer tensors are accepted as indices.
    self.assertIs(n[torch.tensor(3, dtype=torch.int64)], l4)
    # Slices (including negative and reversed) produce new Sequentials.
    self.assertEqual(n[1:], nn.Sequential(l2, l3, l4))
    self.assertEqual(n[3:], nn.Sequential(l4))
    self.assertEqual(n[:-1], nn.Sequential(l1, l2, l3))
    self.assertEqual(n[:-3], nn.Sequential(l1))
    self.assertEqual(n[::-1], nn.Sequential(l4, l3, l2, l1))
def test_Sequential_setitem(self):
    """Sequential item assignment supports positive, negative, and tensor-int indices."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3)
    n[0] = l4
    n[-1] = l4
    n[torch.tensor(1, dtype=torch.int16)] = l1
    self.assertIs(n[0], l4)
    self.assertIs(n[1], l1)
    self.assertIs(n[2], l4)
def test_Sequential_setitem_named(self):
    """Index assignment on an OrderedDict-built Sequential rebinds the existing names."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(OrderedDict([
        ('linear1', l1),
        ('linear2', l2),
        ('linear3', l3),
    ]))

    n[0] = l4
    n[-1] = l4
    # The original attribute names now resolve to the newly assigned module.
    self.assertEqual(n.linear1, l4)
    self.assertEqual(n.linear3, l4)
def test_Sequential_delitem(self):
    """del on Sequential accepts both single indices and slices."""
    layers = [nn.Linear(10, 20), nn.Linear(20, 30),
              nn.Linear(30, 40), nn.Linear(40, 50)]
    seq = nn.Sequential(*layers)

    # Drop the last layer by negative index.
    del seq[-1]
    self.assertEqual(seq, nn.Sequential(*layers[:3]))
    # Drop every other remaining layer starting at index 1.
    del seq[1::2]
    self.assertEqual(seq, nn.Sequential(layers[0], layers[2]))
def test_Sequential_append(self):
    """append mutates the container in place and returns it for chaining."""
    a = nn.Linear(10, 20)
    b = nn.Linear(20, 30)
    c = nn.Linear(30, 40)
    d = nn.Linear(40, 50)

    seq = nn.Sequential(a, b, c)
    returned = seq.append(d)
    self.assertEqual(seq, nn.Sequential(a, b, c, d))
    # append returns the very same container, which enables chaining.
    self.assertEqual(returned, nn.Sequential(a, b, c, d))
    self.assertEqual(nn.Sequential(a).append(b).append(d),
                     nn.Sequential(a, b, d))
def test_ModuleList(self):
    """Exercise nn.ModuleList against a plain Python list kept in lockstep:
    every mutation is applied to both containers and check() verifies that
    they still agree (by identity, not equality)."""
    modules = [nn.ReLU(), nn.Linear(5, 5)]
    module_list = nn.ModuleList(modules)

    def check():
        # The ModuleList must mirror `modules` in length, iteration order,
        # children() order, and integer indexing.
        self.assertEqual(len(module_list), len(modules))
        for m1, m2 in zip(modules, module_list):
            self.assertIs(m1, m2)
        for m1, m2 in zip(modules, module_list.children()):
            self.assertIs(m1, m2)
        for i in range(len(modules)):
            self.assertIs(module_list[i], modules[i])

    check()
    # In-place concatenation with a plain list.
    modules += [nn.Conv2d(3, 4, 3)]
    module_list += [modules[-1]]
    check()
    # Out-of-place concatenation with another ModuleList.
    modules = modules + [nn.Conv2d(3, 4, 3, bias=False), nn.GELU()]
    module_list = module_list + nn.ModuleList(modules[-2:])
    check()
    # insert / append / extend.
    modules.insert(1, nn.Linear(3, 2))
    module_list.insert(1, modules[1])
    check()
    modules.append(nn.Tanh())
    module_list.append(modules[-1])
    check()
    next_modules = [nn.Linear(5, 5), nn.Sigmoid()]
    modules.extend(next_modules)
    module_list.extend(next_modules)
    check()
    # Item assignment with positive and negative indices.
    modules[2] = nn.Conv2d(5, 3, 2)
    module_list[2] = modules[2]
    check()
    modules[-1] = nn.Conv2d(5, 2, 1)
    module_list[-1] = modules[-1]
    check()
    # A 0-dim integer tensor works as an index for both set and get.
    idx = torch.tensor(2, dtype=torch.int32)
    modules[2] = nn.Conv2d(5, 3, 2)
    module_list[idx] = modules[2]
    self.assertIs(module_list[idx], modules[2])
    check()
    # Slicing returns new ModuleLists over the selected entries.
    self.assertEqual(module_list[1:], nn.ModuleList(modules[1:]))
    self.assertEqual(module_list[3:], nn.ModuleList(modules[3:]))
    self.assertEqual(module_list[:-1], nn.ModuleList(modules[:-1]))
    self.assertEqual(module_list[:-3], nn.ModuleList(modules[:-3]))
    self.assertEqual(module_list[::-1], nn.ModuleList(modules[::-1]))
    # del supports both single indices and slices.
    del module_list[-1]
    self.assertEqual(module_list, nn.ModuleList(modules[:-1]))
    del module_list[1::2]
    self.assertEqual(module_list, nn.ModuleList(modules[:-1][0::2]))
    # Concatenating/extending with a bare Module (not an iterable) is an error.
    with self.assertRaises(TypeError):
        module_list += nn.ReLU()
    with self.assertRaises(TypeError):
        module_list.extend(nn.ReLU())

    l1 = nn.Linear(1, 2)
    l2 = nn.Linear(2, 3)
    l3 = nn.Linear(3, 2)
    l4 = nn.Linear(2, 3)
    subnet = nn.Sequential(l3, l4)
    s = nn.Sequential(
        OrderedDict([
            ("layer1", l1),
            ("layer2", l2),
            ("layer3", l3),
            ("layer4", l4),
            ("subnet_layer", subnet)
        ])
    )
    # extend() accepts an arbitrary iterable of modules, e.g. s.modules().
    modules = list(s.modules())
    module_list = nn.ModuleList()
    module_list.extend(s.modules())
    check()

    # ModuleList is a pure container: calling it is not implemented.
    self.assertRaises(NotImplementedError, module_list)
    self.assertRaises(NotImplementedError, module_list, torch.rand(1, 3))
def test_ModuleDict(self):
    """Exercise nn.ModuleDict against a plain OrderedDict kept in lockstep:
    every mutation is applied to both containers and check() verifies that
    they still agree (by identity, not equality)."""
    modules = OrderedDict([
        ('act', nn.ReLU()),
        ('conv', nn.Conv2d(10, 10, 5)),
        ('fc', nn.Linear(5, 5)),
    ])

    module_dict = nn.ModuleDict(modules)

    def check():
        # Same length, same key order, and identical values through every
        # access path: children(), iteration, keys(), items(), values().
        self.assertEqual(len(module_dict), len(modules))
        for k1, m2 in zip(modules, module_dict.children()):
            self.assertIs(modules[k1], m2)
        for k1, k2 in zip(modules, module_dict):
            self.assertIs(modules[k1], module_dict[k2])
        for k in module_dict:
            self.assertIs(module_dict[k], modules[k])
        for k in module_dict.keys():
            self.assertIs(module_dict[k], modules[k])
        for k, v in module_dict.items():
            self.assertIs(modules[k], v)
        for k1, m2 in zip(modules, module_dict.values()):
            self.assertIs(modules[k1], m2)
        for k in modules.keys():
            self.assertTrue(k in module_dict)
    check()

    # __setitem__ overwriting an existing key.
    modules['conv'] = nn.Conv2d(3, 4, 3)
    module_dict['conv'] = modules['conv']
    check()

    # update() from a list of pairs (also overwrites existing 'act').
    next_modules = [
        ('fc2', nn.Linear(5, 5)),
        ('act', nn.Sigmoid()),
    ]
    modules.update(next_modules)
    module_dict.update(next_modules)
    check()

    # update() from an OrderedDict.
    next_modules = OrderedDict([
        ('fc3', nn.Linear(5, 5)),
        ('act2', nn.Sigmoid()),
    ])
    modules.update(next_modules)
    module_dict.update(next_modules)
    check()

    # update() from a plain dict.
    next_modules = {
        'fc4': nn.Linear(5, 5),
        'act3': nn.Sigmoid()
    }
    modules.update(next_modules.items())
    module_dict.update(next_modules)
    check()

    # update() from another ModuleDict.
    next_modules = nn.ModuleDict([
        ('fc5', nn.Linear(5, 5)),
        ('act4', nn.Sigmoid()),
    ])
    modules.update(next_modules)
    module_dict.update(next_modules)
    check()

    # __delitem__.
    del module_dict['fc']
    del modules['fc']
    check()

    # Invalid update()/__setitem__ arguments are rejected.
    with self.assertRaises(TypeError):
        module_dict.update(nn.ReLU())

    with self.assertRaises(TypeError):
        module_dict.update([nn.ReLU()])

    with self.assertRaises(ValueError):
        module_dict.update([[nn.ReLU()]])

    with self.assertRaises(TypeError):
        module_dict[1] = nn.ReLU()

    # The constructor also accepts an iterable of (name, module) pairs.
    s = nn.Sequential(modules)
    module_dict = nn.ModuleDict(s.named_children())
    check()

    # pop() returns the stored module object itself.
    c = module_dict.pop('conv')
    self.assertIs(c, modules['conv'])
    modules.pop('conv')
    check()

    # clear() empties the container.
    module_dict.clear()
    self.assertEqual(len(module_dict), 0)
    modules.clear()
    check()

    # ModuleDict is a pure container: calling it is not implemented.
    self.assertRaises(NotImplementedError, module_dict)
    self.assertRaises(NotImplementedError, module_dict, torch.rand(1, 3))
def test_ParameterList(self):
    """Exercise nn.ParameterList against a plain Python list kept in
    lockstep: every mutation is applied to both containers and check()
    verifies that they still agree (by identity, not equality)."""
    def make_param():
        return Parameter(torch.randn(10, 10))
    parameters = [make_param(), make_param()]
    param_list = nn.ParameterList(parameters)

    def check():
        # Same length; identical entries via iteration, parameters(), and
        # integer indexing.
        self.assertEqual(len(parameters), len(param_list))
        for p1, p2 in zip(parameters, param_list):
            self.assertIs(p1, p2)
        for p1, p2 in zip(parameters, param_list.parameters()):
            self.assertIs(p1, p2)
        for i in range(len(parameters)):
            self.assertIs(parameters[i], param_list[i])

    check()
    # In-place concatenation with a plain list.
    parameters += [make_param()]
    param_list += [parameters[-1]]
    check()
    parameters.append(make_param())
    param_list.append(parameters[-1])
    check()
    next_params = [make_param(), make_param()]
    parameters.extend(next_params)
    param_list.extend(next_params)
    check()
    # Item assignment with positive and negative indices.
    parameters[2] = make_param()
    param_list[2] = parameters[2]
    check()
    parameters[-1] = make_param()
    param_list[-1] = parameters[-1]
    check()
    # A 0-dim integer tensor works as an index for both set and get.
    idx = torch.tensor(2, dtype=torch.int32)
    parameters[2] = make_param()
    param_list[idx] = parameters[2]
    self.assertIs(param_list[idx], parameters[2])
    check()
    # Slicing returns new ParameterLists over the selected entries.
    self.assertEqual(param_list[1:], nn.ParameterList(parameters[1:]))
    self.assertEqual(param_list[3:], nn.ParameterList(parameters[3:]))
    self.assertEqual(param_list[:-1], nn.ParameterList(parameters[:-1]))
    self.assertEqual(param_list[:-3], nn.ParameterList(parameters[:-3]))
    self.assertEqual(param_list[::-1], nn.ParameterList(parameters[::-1]))
    # Concatenating/extending with a bare Parameter is a TypeError.
    with self.assertRaises(TypeError):
        param_list += make_param()
    with self.assertRaises(TypeError):
        param_list.extend(make_param())

    l1 = nn.Linear(1, 2)
    l2 = nn.Linear(2, 3)
    l3 = nn.Linear(3, 2)
    l4 = nn.Linear(2, 3)
    subnet = nn.Sequential(l3, l4)
    s = nn.Sequential(
        OrderedDict([
            ("layer1", l1),
            ("layer2", l2),
            ("layer3", l3),
            ("layer4", l4),
            ("subnet_layer", subnet)
        ])
    )
    # extend() accepts an arbitrary iterable, e.g. a parameters() generator.
    parameters = list(s.parameters())
    param_list = nn.ParameterList()
    param_list.extend(s.parameters())
    check()
def test_ParameterDict(self):
    """Exercise nn.ParameterDict against a plain OrderedDict kept in
    lockstep: every mutation is applied to both containers and check()
    verifies that they still agree (by identity, not equality)."""
    parameters = OrderedDict([
        ('p1', Parameter(torch.randn(10, 10))),
        ('p2', Parameter(torch.randn(10, 10))),
        ('p3', Parameter(torch.randn(10, 10))),
    ])

    parameter_dict = nn.ParameterDict(parameters)

    def check():
        # Same length, same key order, and identical values through every
        # access path: parameters(), iteration, keys(), items(), values().
        self.assertEqual(len(parameter_dict), len(parameters))
        for k1, m2 in zip(parameters, parameter_dict.parameters()):
            self.assertIs(parameters[k1], m2)
        for k1, k2 in zip(parameters, parameter_dict):
            self.assertIs(parameters[k1], parameter_dict[k2])
        for k in parameter_dict:
            self.assertIs(parameter_dict[k], parameters[k])
        for k in parameter_dict.keys():
            self.assertIs(parameter_dict[k], parameters[k])
        for k, v in parameter_dict.items():
            self.assertIs(v, parameters[k])
        for k1, m2 in zip(parameters, parameter_dict.values()):
            self.assertIs(parameters[k1], m2)
        for k in parameters.keys():
            self.assertTrue(k in parameter_dict)

    check()

    # __setitem__ with a new key.
    parameters['p4'] = Parameter(torch.randn(10, 10))
    parameter_dict['p4'] = parameters['p4']
    check()

    # update() from a list of pairs (also overwrites existing 'p2').
    next_parameters = [
        ('p5', Parameter(torch.randn(10, 10))),
        ('p2', Parameter(torch.randn(10, 10))),
    ]
    parameters.update(next_parameters)
    parameter_dict.update(next_parameters)
    check()

    # update() from an OrderedDict (insertion order preserved).
    next_parameters = OrderedDict([
        ('p6', Parameter(torch.randn(10, 10))),
        ('p5', Parameter(torch.randn(10, 10))),
    ])
    parameters.update(next_parameters)
    parameter_dict.update(next_parameters)
    check()

    # update() from a plain dict: ParameterDict inserts the new keys in
    # sorted order, so the mirror is updated with sorted items.
    next_parameters = {
        'p8': Parameter(torch.randn(10, 10)),
        'p7': Parameter(torch.randn(10, 10))
    }
    parameters.update(sorted(next_parameters.items()))
    parameter_dict.update(next_parameters)
    check()

    # update() from another ParameterDict.
    next_parameters = nn.ParameterDict([
        ('p10', Parameter(torch.randn(10, 10))),
        ('p9', Parameter(torch.randn(10, 10))),
    ])
    parameters.update(next_parameters)
    parameter_dict.update(next_parameters)
    check()

    # __delitem__.
    del parameter_dict['p3']
    del parameters['p3']
    check()

    # Invalid update()/__setitem__ arguments are rejected.
    with self.assertRaises(TypeError):
        parameter_dict.update(1)

    with self.assertRaises(TypeError):
        parameter_dict.update([1])

    with self.assertRaises(ValueError):
        parameter_dict.update(Parameter(torch.randn(10, 10)))

    with self.assertRaises(TypeError):
        parameter_dict[1] = Parameter(torch.randn(10, 10))

    # pop() returns the stored Parameter object itself.
    p_pop = parameter_dict.pop('p4')
    self.assertIs(p_pop, parameters['p4'])
    parameters.pop('p4')
    check()

    # reversed() yields keys in reverse insertion order.
    forward = list(iter(parameter_dict))
    backward = list(reversed(parameter_dict))
    self.assertEqual(len(forward), len(backward))
    n = len(forward)
    for i in range(n):
        self.assertIs(forward[i], backward[n - i - 1])
    check()

    # copy() is shallow: same Parameter objects, but independent key sets.
    copy = parameter_dict.copy()
    for key in parameter_dict:
        self.assertTrue(key in copy)
        self.assertEqual(parameter_dict[key], copy[key])
        self.assertIs(parameter_dict[key], copy[key])
    check()

    parameter_dict["p20"] = Parameter(torch.randn(10, 10))
    copy["p21"] = Parameter(torch.randn(9, 10))
    self.assertTrue("p20" in parameter_dict)
    self.assertFalse("p20" in copy)
    self.assertFalse("p21" in parameter_dict)
    self.assertTrue("p21" in copy)
    parameter_dict.pop("p20")
    check()

    # popitem() removes and returns the most recently inserted pair.
    p = Parameter(torch.randn(10, 10))
    parameter_dict['p12'] = p
    p_popitem = parameter_dict.popitem()
    self.assertEqual(p_popitem[0], 'p12')
    self.assertIs(p_popitem[1], p)

    # setdefault() keeps an existing entry and inserts a missing one.
    assert 'p11' not in parameter_dict
    parameters['p11'] = Parameter(torch.randn(10, 10))
    p_setdefault = parameter_dict.setdefault('p11', parameters['p11'])
    self.assertIs(p_setdefault, parameters['p11'])
    p = Parameter(torch.randn(10, 10))
    # The key already exists, so the default must NOT replace it.
    self.assertFalse(parameter_dict.setdefault('p11', p) is p)
    # setdefault with no default stores None under the new key.
    self.assertIs(parameter_dict.setdefault('p26'), None)
    del parameter_dict['p26']
    check()

    # In-place union |= with a non-empty dict (overwrites p2/p3).
    parameters2 = OrderedDict([
        ('p13', Parameter(torch.randn(10, 10))),
        ('p2', Parameter(torch.randn(10, 10))),
        ('p3', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict |= parameter_dict2
    check()

    # |= with an empty dict is a no-op.
    parameters2 = OrderedDict()
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict |= parameter_dict2
    check()

    parameters2 = OrderedDict([
        ('p14', Parameter(torch.randn(10, 10))),
        ('p15', Parameter(torch.randn(10, 10))),
        ('p13', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict |= parameter_dict2
    check()

    # Out-of-place union | (left operand first).
    parameters2 = OrderedDict([
        ('p20', Parameter(torch.randn(10, 10))),
        ('p21', Parameter(torch.randn(10, 10))),
        ('p22', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict = parameter_dict | parameter_dict2
    check()

    # Out-of-place union with operands swapped.
    parameters2 = OrderedDict([
        ('p23', Parameter(torch.randn(10, 10))),
        ('p24', Parameter(torch.randn(10, 10))),
        ('p25', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters2.update(parameters)
    parameters = parameters2
    parameter_dict = parameter_dict2 | parameter_dict
    check()

    # get() with and without a default value.
    parameters['p17'] = Parameter(torch.randn(10, 10))
    parameter_dict['p17'] = parameters['p17']
    self.assertIs(parameters['p17'], parameter_dict.get('p17'))
    temp_param = Parameter(torch.randn(10, 10))
    self.assertIs(parameters['p17'], parameter_dict.get('p17', temp_param))
    self.assertIs(None, parameter_dict.get('p18'))
    self.assertIs(temp_param, parameter_dict.get('p18', temp_param))
    check()

    # clear() empties the container.
    parameter_dict.clear()
    self.assertEqual(len(parameter_dict), 0)
    parameters.clear()
    check()

    # fromkeys() with the implicit None value and with an explicit default.
    parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'])
    self.assertEqual({'p19': None, 'p20': None}, parameter_dict2)
    check()

    parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'], temp_param)
    self.assertEqual({'p19': temp_param, 'p20': temp_param}, parameter_dict2)
    check()
def test_add_module(self):
    """add_module/register_module accept a None placeholder, new names,
    and overwrites, and reject non-module values and non-string names."""
    for method_name in ('add_module', 'register_module'):
        linear = nn.Linear(10, 20)
        net = nn.Module()
        net.l = linear
        net.l2 = linear
        register = getattr(net, method_name)

        # None is a legal placeholder submodule.
        register('empty', None)
        self.assertEqual(net.l, linear)
        self.assertEqual(net.l2, linear)
        self.assertEqual(net.empty, None)

        # A fresh name becomes accessible as an attribute.
        register('l3', linear)
        self.assertEqual(net.l3, linear)

        # Re-registering an existing name replaces the stored module.
        other = nn.Linear(20, 10)
        register('l', other)
        self.assertEqual(net.l, other)

        # Non-module values and non-string names are rejected.
        self.assertRaises(TypeError, lambda: register('x', 'non-module'))
        self.assertRaisesRegex(TypeError, 'module name should be a string. Got int',
                               lambda: register(1, linear))
        self.assertRaisesRegex(TypeError, 'module name should be a string. Got NoneType',
                               lambda: register(None, linear))
def test_module_to_argparse(self):
    """Module.to must reject argument combinations outside its overload set
    (device/dtype/tensor with optional non_blocking) with TypeError."""
    net = nn.Sequential(nn.Linear(3, 3))
    cpu = torch.device('cpu')
    bad_invocations = (
        lambda: net.to(cpu, True),                      # bool where dtype expected
        lambda: net.to(torch.long),                     # non-floating dtype
        lambda: net.to(None, True),                     # bool where dtype expected
        lambda: net.to(cpu, torch.long, True),          # non-floating dtype
        lambda: net.to(cpu, dtype=torch.long, non_blocking=True),
        lambda: net.to([]),                             # not a device/dtype/tensor
        lambda: net.to({}, non_blocking=True),
        lambda: net.to(torch.tensor(3, dtype=torch.long), non_blocking=True),
        lambda: net.to(cpu, torch.tensor(3, dtype=torch.long), non_blocking=True),
    )
    for invoke in bad_invocations:
        with self.assertRaises(TypeError):
            invoke()
def test_RNN_nonlinearity(self):
    """RNN defaults to tanh, honors an explicit relu, and rejects unknown
    nonlinearity names at construction time."""
    self.assertEqual(torch.nn.RNN(1, 10).nonlinearity, 'tanh')
    self.assertEqual(torch.nn.RNN(1, 10, nonlinearity='relu').nonlinearity, 'relu')
    with self.assertRaisesRegex(ValueError, 'Unknown nonlinearity'):
        torch.nn.RNN(1, 10, nonlinearity='garbage')
def test_module_apply_inplace_op(self):
    """_apply with an in-place op must bump the version counter of the
    parameters (and of their gradients), so autograd graphs that saved the
    pre-mutation tensors fail loudly on backward."""
    def add_one_inplace(t):
        return t.add_(1.0)

    # Parameter case: build a graph node that saved m.weight, then mutate
    # the weight in place via _apply and check the version counter moved.
    m = nn.Linear(20, 10)
    pvm = m.weight.mul(m.weight)
    m_weight_version_saved = m.weight._version
    m = m._apply(add_one_inplace)
    self.assertGreater(m.weight._version, m_weight_version_saved)
    # Backward through the stale graph must report the in-place change.
    with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
        pvm.backward(torch.randn(10, 20))

    # Same check for the parameters' gradients.
    m = nn.Linear(20, 10)
    m.weight.grad = torch.randn(10, 20).requires_grad_()
    pgm = m.weight.grad.mul(m.weight.grad)
    m_weight_grad_version_saved = m.weight.grad._version
    m = m._apply(add_one_inplace)
    self.assertGreater(m.weight.grad._version, m_weight_grad_version_saved)
    with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
        pgm.backward(torch.randn(10, 20))
def test_overwrite_module_params_on_conversion(self):
    # Test that if the conversion function passed to `module._apply()`
    # changes the TensorImpl type of `module`'s parameters, the `module`'s
    # parameters are always overwritten, regardless of the value of
    # `torch.__future__.get_overwrite_module_params_on_conversion()`.
    m = nn.Linear(20, 10)
    m.weight.grad = torch.randn(10, 20)
    weight_ref = m.weight
    weight_grad_ref = m.weight.grad
    m = m._apply(lambda t: torch.sparse_coo_tensor(torch.zeros([2, 1]), torch.ones([1]), torch.Size([10, 20])))
    self.assertNotEqual(weight_ref.layout, m.weight.layout)
    self.assertNotEqual(weight_grad_ref.layout, m.weight.grad.layout)

    # Test that under the current default settings
    # (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),
    # a view to a module's parameters is not pointing to the same storage as
    # its base variable after converting the module to a different dtype.
    m = nn.Linear(20, 10).float()
    mw = m.weight[:]
    m.double()
    with torch.no_grad():
        mw[0][0] = 5
    self.assertTrue(mw[0][0].dtype == torch.float)
    self.assertTrue(mw._base[0][0].dtype == torch.double)

    try:
        torch.__future__.set_overwrite_module_params_on_conversion(True)

        # With the overwrite future enabled, the same view/base pair stays
        # consistent after converting the module to a different dtype.
        m = nn.Linear(20, 10).float()
        mw = m.weight[:]
        m.double()
        with torch.no_grad():
            mw[0][0] = 5
        self.assertTrue(mw[0][0] == mw._base[0][0])

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # `float_module.double()` doesn't preserve previous references to
        # `float_module`'s parameters or gradients.
        m = nn.Linear(20, 10).float()
        m.weight.grad = torch.randn(10, 20).float()
        weight_ref = m.weight
        weight_grad_ref = m.weight.grad
        m.double()
        self.assertNotEqual(weight_ref.dtype, m.weight.dtype)
        self.assertNotEqual(weight_grad_ref.dtype, m.weight.grad.dtype)

        def add_one_inplace(t):
            return t.add_(1.0)

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # applying an in-place operation to a module would bump the module's
        # original parameters' version counter.
        m = nn.Linear(20, 10)
        pvm = m.weight.mul(m.weight)
        weight_ref = m.weight
        m_weight_version_saved = weight_ref._version
        m = m._apply(add_one_inplace)
        # Test that the in-place operation bumps the original parameter's version counter
        self.assertGreater(weight_ref._version, m_weight_version_saved)
        with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
            pvm.backward(torch.randn(10, 20))

        # Same check for the module's original parameters' gradients'
        # version counter.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20).requires_grad_()
        pgm = m.weight.grad.mul(m.weight.grad)
        weight_grad_ref = m.weight.grad
        m_weight_grad_version_saved = weight_grad_ref._version
        m = m._apply(add_one_inplace)
        self.assertGreater(weight_grad_ref._version, m_weight_grad_version_saved)
        with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
            pgm.backward(torch.randn(10, 20))

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # applying an out-of-place operation to a module doesn't bump
        # the module's original parameters' version counter.
        m = nn.Linear(20, 10)
        weight_ref = m.weight
        m_weight_version_saved = weight_ref._version
        m = m._apply(lambda t: torch.randn(t.shape))
        self.assertEqual(weight_ref._version, m_weight_version_saved)

        # Same check for the module's original parameters' gradients'
        # version counter.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20).requires_grad_()
        weight_grad_ref = m.weight.grad
        m_weight_grad_version_saved = weight_grad_ref._version
        m = m._apply(lambda t: torch.randn(t.shape))
        self.assertEqual(weight_grad_ref._version, m_weight_grad_version_saved)
    finally:
        # Always restore the global default so other tests are unaffected.
        torch.__future__.set_overwrite_module_params_on_conversion(False)
def test_type(self):
    """float()/double()/to() convert floating-point parameters (and move
    devices when CUDA is available) while the integral 'indices' buffer
    keeps its Long dtype throughout; buffers do follow device moves."""
    l = nn.Linear(10, 20)
    net = nn.Module()
    net.l = l
    net.l2 = l  # the same submodule registered twice; converted once, visible via both names
    net.add_module('empty', None)
    net.register_buffer('indices', torch.LongTensor(1))
    net.float()
    self.assertIsInstance(l.weight.data, torch.FloatTensor)
    self.assertIsInstance(l.bias.data, torch.FloatTensor)
    # Integral buffers are untouched by floating-point conversions.
    self.assertIsInstance(net.indices, torch.LongTensor)
    net.double()
    self.assertIsInstance(l.weight.data, torch.DoubleTensor)
    self.assertIsInstance(l.bias.data, torch.DoubleTensor)
    self.assertIsInstance(net.indices, torch.LongTensor)
    net.to(torch.half)
    self.assertIsInstance(l.weight.data, torch.HalfTensor)
    self.assertIsInstance(l.bias.data, torch.HalfTensor)
    self.assertIsInstance(net.indices, torch.LongTensor)
    if TEST_CUDA:
        net.float().cuda()
        self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
        # Buffers follow the device move but keep their dtype.
        self.assertIsInstance(net.indices, torch.cuda.LongTensor)
        net.cpu()
        self.assertIsInstance(l.weight.data, torch.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.FloatTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        # to() with device, dtype and non_blocking given positionally.
        net.to("cuda", torch.double, True)
        self.assertIsInstance(l.weight.data, torch.cuda.DoubleTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.DoubleTensor)
        self.assertIsInstance(net.indices, torch.cuda.LongTensor)
        # to(tensor) adopts the tensor's device and dtype.
        net.to(torch.empty(1, device="cuda:0", dtype=torch.half))
        self.assertIsInstance(l.weight.data, torch.cuda.HalfTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.HalfTensor)
        self.assertIsInstance(net.indices, torch.cuda.LongTensor)
    net.to(torch.device("cpu"), non_blocking=True)
    self.assertIsInstance(l.weight.data, torch.HalfTensor)
    self.assertIsInstance(l.bias.data, torch.HalfTensor)
    self.assertIsInstance(net.indices, torch.LongTensor)
    net.to(torch.float)
    self.assertIsInstance(l.weight.data, torch.FloatTensor)
    self.assertIsInstance(l.bias.data, torch.FloatTensor)
    net.to(torch.DoubleTensor(1))
    self.assertIsInstance(l.weight.data, torch.DoubleTensor)
    self.assertIsInstance(l.bias.data, torch.DoubleTensor)
    if TEST_CUDA:
        net.to(device='cuda', dtype=torch.float)
        self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
def test_non_leaf_parameters(self):
    """A Parameter attribute cannot be overwritten with a non-leaf tensor,
    but a fresh Parameter is always accepted."""
    source = nn.Linear(10, 10)
    target = nn.Linear(10, 10)

    # `source.weight + 2` is an autograd result, not a Parameter.
    with self.assertRaises(TypeError):
        target.weight = source.weight + 2

    # Re-assigning a genuine Parameter must succeed.
    target.weight = Parameter(torch.randn(10, 10))
def test_clip_grad_norm(self):
    """clip_grad_norm_ returns the pre-clip total norm, scales large
    gradients down to max_norm, leaves small gradients untouched, and
    accepts a single tensor as well as an iterable of parameters."""
    l = nn.Linear(10, 10)
    max_norm = 2

    def compute_norm(norm_type):
        # Reference total-norm computation over all parameter gradients.
        norm_type = float(norm_type)
        if norm_type != inf:
            total_norm = 0
            for p in l.parameters():
                total_norm += p.grad.data.abs().pow(norm_type).sum()
            return pow(total_norm, 1. / norm_type)
        else:
            # inf-norm: the largest absolute gradient entry.
            return max(p.grad.data.abs().max() for p in l.parameters())

    def compare_scaling(grads):
        # All gradients must be scaled by one and the same factor.
        p_scale = [p.grad.data.div(g).view(-1) for p, g in zip(l.parameters(), grads)]
        scale = torch.cat(p_scale)
        self.assertEqual(scale.std(), 0)
        return scale[0]

    # Large gradients: clipping must bring the norm down to max_norm.
    grads = torch.arange(1., 101).view(10, 10), torch.ones(10).div(1000)
    for norm_type in [0.5, 1.5, 2, 4, 'inf']:
        for p, g in zip(l.parameters(), grads):
            p._grad = g.clone().view_as(p.data)
        norm_before = compute_norm(norm_type)
        norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
        norm_after = compute_norm(norm_type)
        # The returned value is the norm *before* clipping.
        self.assertEqual(norm, norm_before)
        self.assertEqual(norm_after, max_norm)
        self.assertLessEqual(norm_after, norm_before)
        compare_scaling(grads)

    # Small gradients: already below max_norm, so clipping is a no-op.
    grads = torch.rand(10, 10).div(10000), torch.ones(10).div(500)
    for norm_type in [0.5, 1.5, 2, 4, 'inf']:
        for p, g in zip(l.parameters(), grads):
            p.grad.data.copy_(g)
        norm_before = compute_norm(norm_type)
        norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
        norm_after = compute_norm(norm_type)
        self.assertEqual(norm, norm_before)
        self.assertEqual(norm_before, norm_after)
        self.assertLessEqual(norm_after, max_norm)
        scale = compare_scaling(grads)
        self.assertEqual(scale, 1)

    # A bare tensor and a one-element list must be clipped identically.
    p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
    g = torch.arange(1., 101).view(10, 10)
    p1._grad = g.clone()
    p2._grad = g.clone()
    for norm_type in [0.5, 1.5, 2, 4, 'inf']:
        clip_grad_norm_(p1, max_norm, norm_type=norm_type)
        clip_grad_norm_([p2], max_norm, norm_type=norm_type)
        self.assertEqual(p1.grad, p2.grad)
def test_clip_grad_value(self):
    """clip_grad_value_ clamps every gradient entry into [-clip, clip],
    skips parameters without gradients, and accepts a bare tensor as well
    as an iterable of parameters."""
    layer = nn.Linear(10, 10)
    clip_value = 2.5

    grad_w = torch.arange(-50., 50).view(10, 10).div_(5)
    grad_b = torch.ones(10).mul_(2)
    # Run once with both grads present and once with the bias grad absent.
    for grads in ([grad_w, grad_b], [grad_w, None]):
        for param, grad in zip(layer.parameters(), grads):
            param._grad = None if grad is None else grad.clone().view_as(param.data)

        clip_grad_value_(layer.parameters(), clip_value)
        for param in layer.parameters():
            if param.grad is None:
                continue  # grad-less parameters are ignored by the clipper
            self.assertLessEqual(param.grad.data.max(), clip_value)
            self.assertGreaterEqual(param.grad.data.min(), -clip_value)

    # A single tensor and a one-element list must be treated identically.
    t1, t2 = torch.randn(10, 10), torch.randn(10, 10)
    shared_grad = torch.arange(-50., 50).view(10, 10).div_(5)
    t1._grad = shared_grad.clone()
    t2._grad = shared_grad.clone()
    clip_grad_value_(t1, clip_value)
    clip_grad_value_([t2], clip_value)
    self.assertEqual(t1.grad, t2.grad)
def test_parameters_to_vector(self):
    """Flattening all parameters of a conv + linear model yields the
    expected total element count."""
    model = nn.Sequential(nn.Conv2d(3, 10, 5), nn.Linear(10, 20))
    vec = parameters_to_vector(model.parameters())
    # conv: 10*3*5*5 + 10 = 760; linear: 20*10 + 20 = 220; total = 980.
    self.assertEqual(vec.size(0), 980)
def test_vector_to_parameters(self):
    """vector_to_parameters writes a flat vector back in parameter order."""
    model = nn.Sequential(nn.Conv2d(3, 10, 5), nn.Linear(10, 20))
    flat = torch.arange(0., 980)
    vector_to_parameters(flat, model.parameters())

    # The first five entries must land in the first row of the conv kernel.
    sample = next(model.parameters())[0, 0, 0]
    self.assertTrue(torch.equal(sample.data, flat.data[:5]))
@skipIfNoLapack
def test_register_and_remove_parametrization(self):
    r"""register_parametrization / remove_parametrizations round-trip:
    registering exposes the parametrized view of the tensor, and removing
    with leave_parametrized=False restores the original parameter with the
    same identity and the module's original class."""
    class Skew(nn.Module):
        # Maps any square matrix onto a skew-symmetric one.
        def forward(self, X):
            X = X.tril(-1)
            return X - X.T

    class Orthogonal(nn.Module):
        # Cayley map: sends a skew-symmetric matrix to an orthogonal one.
        def forward(self, X):
            Id = torch.eye(X.size(0), device=X.device)
            return torch.linalg.solve(Id + X, Id - X).contiguous()

    class Resize(nn.Module):
        # Shape-changing parametrization (8x8 -> 1x8); needs unsafe=True.
        def forward(self, X):
            return X[[0]]

    class NoResize(nn.Module):
        # Identity (shape-preserving) parametrization.
        def forward(self, X):
            return X

    class FirstZero(nn.Module):
        # Zeroes out the first element of a vector.
        def forward(self, x):
            return torch.cat([x.new_zeros(1), x[1:]])

    class LastZero(nn.Module):
        # Zeroes out the last element of a vector.
        def forward(self, x):
            return torch.cat([x[:-1], x.new_zeros(1)])

    model = nn.Linear(8, 8)
    initial_weight_id = id(model.weight)
    initial_bias_id = id(model.bias)
    initial_model = deepcopy(model)

    # A shape-changing parametrization without unsafe=True must fail...
    with self.assertRaisesRegex(ValueError, "Registering a parametrization may not change the shape of the tensor"):
        parametrize.register_parametrization(model, "weight", Resize())
    # ...and the failed registration must leave the model usable.
    model(torch.ones(8, 8))

    # Shape-changing parametrization with the unsafe flag.
    parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    # The parametrized tensor is no longer a plain parameter of the module.
    self.assertNotIn("weight", model._parameters)
    A = model.weight
    self.assertTrue(A.shape[0] == 1)
    # Removing restores the original parameter (same id) and class.
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # An unsafe parametrization followed by a safe one.
    parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
    parametrize.register_parametrization(model, "weight", NoResize(), unsafe=False)
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    A = model.weight
    self.assertTrue(A.shape[0] == 1)
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # A shape-preserving parametrization registered with unsafe=True.
    parametrize.register_parametrization(model, "weight", Skew(), unsafe=True)
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    # Result should be skew-symmetric
    A = model.weight
    self.assertEqual(A, -A.T)
    # Remove and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # Test one parametrization
    parametrize.register_parametrization(model, "weight", Skew())
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    # Result should be skew-symmetric
    A = model.weight
    self.assertEqual(A, -A.T)
    # Remove and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # Test two parametrizations at the same time and removing them
    parametrize.register_parametrization(model, "weight", Skew())
    parametrize.register_parametrization(model, "weight", Orthogonal())
    # Result should be orthogonal
    X = model.weight
    Id = torch.eye(X.size(0), device=X.device)
    self.assertEqual(X.T @ X, Id)
    # Structure tests
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertIn("weight", model.parametrizations)
    self.assertNotIn("weight", model._parameters)
    # Remove
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)

    # Add everything
    parametrize.register_parametrization(model, "weight", Skew())
    parametrize.register_parametrization(model, "weight", Orthogonal())
    parametrize.register_parametrization(model, "bias", FirstZero())
    parametrize.register_parametrization(model, "bias", LastZero())

    # Basic tests
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertTrue(parametrize.is_parametrized(model, "bias"))
    self.assertEqual(model.bias[0].item(), 0.)
    self.assertEqual(model.bias[-1].item(), 0.)
    self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened
    # Should not throw
    sgd = torch.optim.SGD(model.parameters(), lr=0.01)

    weight_copy = model.weight.clone()
    bias_copy = model.bias.clone()
    sgd.zero_grad()
    (model.weight.T @ model.bias).sum().backward()
    sgd.step()
    self.assertNotEqual(model.weight, weight_copy)
    self.assertNotEqual(model.bias, bias_copy)

    # Remove first parametrization.
    # Check that the model is still parametrized and so is the second parameter
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertTrue(parametrize.is_parametrized(model))             # Still parametrized
    self.assertFalse(parametrize.is_parametrized(model, "weight"))  # Parametrization removed
    self.assertTrue(parametrize.is_parametrized(model, "bias"))     # Still parametrized
    self.assertEqual(model.bias[0].item(), 0.)                      # Still parametrized
    self.assertEqual(model.bias[-1].item(), 0.)                     # Still parametrized
    self.assertNotEqual(model.weight, initial_model.weight)         # Has been updated
    self.assertEqual(id(model.weight), initial_weight_id)           # Keeps the same id
    self.assertEqual(len(list(model.parameters())), 2)              # Nothing weird has happened
    # Should not throw
    weight_copy = model.weight.clone()
    bias_copy = model.bias.clone()
    sgd.zero_grad()
    (model.weight.T @ model.bias).sum().backward()
    sgd.step()
    self.assertNotEqual(model.weight, weight_copy)
    self.assertNotEqual(model.bias, bias_copy)

    # Remove the second parametrization.
    # Check that the module is not parametrized
    parametrize.remove_parametrizations(model, "bias", leave_parametrized=False)
    self.assertFalse(parametrize.is_parametrized(model))    # Not parametrized
    self.assertNotEqual(model.bias, initial_model.bias)     # Has been updated
    self.assertNotEqual(model.bias[0].item(), 0.)           # Not parametrized
    self.assertNotEqual(model.bias[-1].item(), 0.)          # Not parametrized
    self.assertEqual(id(model.bias), initial_bias_id)       # Keeps the same id
    self.assertFalse(hasattr(model, "parametrizations"))    # Not parametrized the module
    self.assertEqual(model.__class__, nn.Linear)            # Restores the previous class
    self.assertEqual(len(list(model.parameters())), 2)      # Nothing weird has happened
    # Should not throw; things are updated
    weight_copy = model.weight.clone()
    bias_copy = model.bias.clone()
    sgd.zero_grad()
    (model.weight.T @ model.bias).sum().backward()
    sgd.step()
    self.assertNotEqual(model.weight, weight_copy)
    self.assertNotEqual(model.bias, bias_copy)

    # Test leave_parametrized=True
    for _ in range(2):
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        # We didn't change the dtype nor had multiple inputs, so the id should be the same
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(id(model.bias), initial_bias_id)

        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)
def test_register_and_remove_nested_parametrization(self):
    """A parametrization can itself be parametrized: registering on the
    `original` tensor of an already-parametrized weight nests correctly,
    and both levels can be removed, restoring the original classes."""
    class Skew(nn.Module):
        def forward(self, X):
            X = X.tril(-1)
            return X - X.T

    model = nn.Linear(8, 8)
    parametrize.register_parametrization(model, "weight", Skew())
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    # Result should be skew-symmetric
    A = model.weight
    self.assertEqual(A, -A.T)

    # Add nested parametrization on the `original` tensor of the first one
    param_mod = model.parametrizations.weight
    self.assertFalse(hasattr(param_mod, "parametrizations"))
    self.assertFalse(parametrize.is_parametrized(param_mod))
    self.assertFalse(parametrize.is_parametrized(param_mod, "original"))

    parametrize.register_parametrization(param_mod, "original", Skew())
    self.assertTrue(hasattr(param_mod, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(param_mod))
    self.assertTrue(parametrize.is_parametrized(param_mod, "original"))
    self.assertNotIn("original", param_mod._parameters)
    # The nested result should also be skew-symmetric
    A = param_mod.original
    self.assertEqual(A, -A.T)

    # Remove the inner parametrization, then the outer one; each removal
    # restores the previous class of the parametrized object
    parametrize.remove_parametrizations(param_mod, "original", leave_parametrized=False)
    self.assertFalse(hasattr(param_mod, "parametrizations"))
    self.assertEqual(param_mod.__class__, parametrize.ParametrizationList)

    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)
def test_register_and_remove_buffer_parametrization(self):
    """Parametrizations can be registered on (and removed from) a buffer,
    not only on an `nn.Parameter`; composition of two parametrizations
    applies them in registration order."""
    class FirstZero(nn.Module):
        def forward(self, x):
            return torch.cat([x.new_zeros(1), x[1:]])

    class LastZero(nn.Module):
        def forward(self, x):
            return torch.cat([x[:-1], x.new_zeros(1)])

    model = nn.Linear(8, 8)
    # Replace the bias Parameter with a buffer of the same name
    delattr(model, "bias")
    model.register_buffer("bias", torch.ones(8))

    parametrize.register_parametrization(model, "bias", FirstZero())
    parametrize.register_parametrization(model, "bias", LastZero())

    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "bias"))
    # First and last entries are zeroed, the rest untouched
    self.assertEqual(model.bias[0].item(), 0.)
    self.assertEqual(model.bias[-1].item(), 0.)
    self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
    # Only `weight` is a Parameter; the bias buffer contributes none
    self.assertEqual(len(list(model.parameters())), 1)

    parametrize.remove_parametrizations(model, "bias", leave_parametrized=True)
    self.assertFalse(parametrize.is_parametrized(model))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    # After removal with leave_parametrized=True the values are preserved
    self.assertEqual(model.bias[0].item(), 0.)
    self.assertEqual(model.bias[-1].item(), 0.)
    self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
    self.assertEqual(len(list(model.parameters())), 1)
@skipIfNoLapack
def test_serialization_parametrization(self):
    """A parametrized module's ``state_dict`` round-trips through
    ``torch.save`` / ``load_state_dict``, while saving the module object
    itself raises (its class is generated dynamically).

    Note: line ``kipIfNoLapack`` was a corrupted ``@skipIfNoLapack``
    decorator; restored here (the test uses ``torch.linalg.solve``).
    """
    # A parametrization through the Cayley map composed with a fixed
    # orthogonal matrix B
    class Orthogonal(nn.Module):
        def __init__(self, n):
            super().__init__()
            self.register_buffer("id", torch.eye(n))
            self.register_buffer("B", torch.empty(n, n))
            init.orthogonal_(self.B)

        def forward(self, X):
            # Cayley map: (Id + A)^{-1} (Id - A) with A skew-symmetric
            A = X.triu(1)
            A = A - A.T
            return self.B @ torch.linalg.solve(self.id + A, self.id - A)

    def get_model():
        model = torch.nn.Sequential(
            torch.nn.Linear(5, 5),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 1),
        )
        parametrize.register_parametrization(model[0], "weight", Orthogonal(5))
        return model

    model = get_model()
    prev_weight = model[0].weight
    prev_B = model[0].parametrizations.weight[0].B

    new_model = get_model()
    with TemporaryFileName() as fname:
        torch.save(model.state_dict(), fname)
        new_model.load_state_dict(torch.load(fname))

    # Integrity checks: parametrization, weight and buffer survive the trip
    self.assertTrue(parametrize.is_parametrized(new_model[0], "weight"))
    self.assertEqual(prev_weight, new_model[0].weight)
    self.assertEqual(prev_B, new_model[0].parametrizations.weight[0].B)

    # Saving the whole parametrized module (not its state_dict) must raise
    with self.assertRaisesRegex(RuntimeError, "state_dict"):
        with TemporaryFileName() as fname:
            torch.save(model, fname)
@skipIfNoLapack
def test_initialization_parametrization(self):
    """Assigning to a parametrized tensor goes through ``right_inverse``:
    invalid values raise, valid values update the `original` tensor(s).

    Note: line ``kipIfNoLapack`` was a corrupted ``@skipIfNoLapack``
    decorator; restored here (the test uses ``torch.linalg.solve``).
    """
    class Skew(nn.Module):
        def forward(self, X):
            A = X.triu(1)
            return A - A.T

        def is_skew(self, A):
            return torch.allclose(A, -A.T, atol=1e-6)

        def right_inverse(self, X):
            if not self.is_skew(X):
                raise ValueError("The matrix is not skew-symmetric.")
            return X.triu(1)

    # Cayley map centered at the (buffer) base point B
    class Orthogonal(nn.Module):
        def __init__(self, n):
            super().__init__()
            self.register_buffer("B", torch.eye(n))

        def forward(self, X):
            Id = torch.eye(X.size(0))
            return self.B @ torch.linalg.solve(Id + X, Id - X)

        def is_orthogonal(self, X):
            Id = torch.eye(X.size(0))
            return torch.allclose(X.T @ X, Id, atol=1e-4)

        def right_inverse(self, X):
            if not self.is_orthogonal(X):
                raise ValueError("The input is not orthogonal.")
            # Cayley map of zero is the identity, so store X in B
            self.B = X
            return torch.zeros_like(X)

    N = 5
    model = nn.Linear(N, N)

    # Make the weight skew-symmetric before registering the parametrization
    skew = Skew()
    with torch.no_grad():
        model.weight.set_(skew(model.weight))
    parametrize.register_parametrization(model, "weight", skew)

    X = torch.rand(N, N)
    # X is not skew-symmetric, so assignment raises via right_inverse
    with self.assertRaises(ValueError):
        model.weight = X
    # Make X skew-symmetric; assignment now succeeds
    X = X - X.T
    model.weight = X
    self.assertEqual(model.parametrizations.weight.original, X.triu(1))
    self.assertEqual(model.weight, X)

    parametrize.register_parametrization(model, "weight", Orthogonal(N))
    X = torch.rand(N, N)
    # X is not orthogonal, so assignment raises
    with self.assertRaises(ValueError):
        model.weight = X
    init.orthogonal_(X)
    model.weight = X
    self.assertEqual(model.weight, X)
    # Orthogonal.right_inverse stored X in B and zeroed the original
    self.assertEqual(model.parametrizations.weight.original, torch.zeros_like(X))
def test_errors_unparametrized_tensor_parametrization(self):
    """Every invalid registration on a NOT-yet-parametrized tensor must
    raise and leave the module unparametrized with its weight untouched."""
    module = nn.Linear(3, 4)
    weight_init = module.weight.clone()

    class Identity(nn.Module):
        def forward(self, x):
            return x

    # Registering on a non-existing parameter throws
    with self.assertRaisesRegex(ValueError, "does not have a parameter"):
        parametrize.register_parametrization(module, "foo", Identity())
    self.assertFalse(parametrize.is_parametrized(module))
    # Removing parametrizations from an unparametrized tensor throws
    with self.assertRaisesRegex(ValueError, "does not have a parametrization"):
        parametrize.remove_parametrizations(module, "bias")
    self.assertFalse(parametrize.is_parametrized(module))

    # A valid parametrization with several inputs
    class Sum(nn.Module):
        def forward(self, x, y):
            return x + y

        def right_inverse(self, z):
            return z, torch.zeros_like(z)

    parametrize.register_parametrization(module, "weight", Sum())
    # Cannot remove a multi-input parametrization with leave_parametrized=False
    with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
        parametrize.remove_parametrizations(module, "weight", leave_parametrized=False)
    parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)

    # forward arity does not match what right_inverse returns
    class WrongNumberParams(nn.Module):
        def forward(self, x, y, z):
            return x + y + z

        def right_inverse(self, w):
            return w, torch.zeros_like(w)

    with self.assertRaisesRegex(TypeError, "positional argument"):
        parametrize.register_parametrization(module, "weight", WrongNumberParams())
    self.assertFalse(parametrize.is_parametrized(module))

    # right_inverse does not return a Tensor (or sequence of Tensors)
    class WrongRightInverse(Identity):
        def right_inverse(self, z):
            return None

    with self.assertRaisesRegex(ValueError, "Tensor or a Sequence of"):
        parametrize.register_parametrization(module, "weight", WrongRightInverse())
    self.assertFalse(parametrize.is_parametrized(module))

    # right_inverse returns a sequence containing a non-Tensor element
    class WrongRightInverseSequence(nn.Module):
        def forward(self, x, y):
            return x

        def right_inverse(self, z):
            return None, z

    with self.assertRaisesRegex(ValueError, "of the sequence with type"):
        parametrize.register_parametrization(module, "weight", WrongRightInverseSequence())
    self.assertFalse(parametrize.is_parametrized(module))

    # A parametrization from one tensor to one tensor that changes the dtype
    class ChangeDtypeInverse(nn.Module):
        def forward(self, x):
            return x.float()

        def right_inverse(self, w):
            return w.bool()

    # For parametrizations that return one tensor, right_inverse may not change the dtype
    with self.assertRaisesRegex(ValueError, "outputs one tensor, it may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
    self.assertFalse(parametrize.is_parametrized(module))

    # forward doesn't return a tensor
    class NotTensor(nn.Module):
        def forward(self, x):
            return 2

    with self.assertRaisesRegex(ValueError, "must return a tensor"):
        parametrize.register_parametrization(module, "weight", NotTensor())
    self.assertFalse(parametrize.is_parametrized(module))

    # forward changes the dtype
    class ChangeDtype(nn.Module):
        def forward(self, x):
            return x.bool()

    with self.assertRaisesRegex(ValueError, "may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtype())
    self.assertFalse(parametrize.is_parametrized(module))

    # forward changes the shape
    class ChangeShape(nn.Module):
        def forward(self, x):
            return x[:-1]

    with self.assertRaisesRegex(ValueError, "may not change the shape"):
        parametrize.register_parametrization(module, "weight", ChangeShape())
    self.assertFalse(parametrize.is_parametrized(module))

    # Multi-input parametrization whose forward changes the dtype
    class ChangeDtypeMulti(nn.Module):
        def forward(self, x, y):
            return (x + y).bool()

        def right_inverse(self, w):
            return w, w + 1

    with self.assertRaisesRegex(ValueError, "may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtypeMulti())
    self.assertFalse(parametrize.is_parametrized(module))

    # right_inverse may return a sequence of length one
    class SequenceLen1(nn.Module):
        def forward(self, x):
            return x

        def right_inverse(self, w):
            return (w,)

    parametrize.register_parametrization(module, "weight", SequenceLen1())
    self.assertTrue(hasattr(module.parametrizations.weight, "original0"))
    self.assertFalse(hasattr(module.parametrizations.weight, "original1"))
    _ = module.weight  # Does not throw
    self.assertTrue(parametrize.is_parametrized(module))
    parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)

    # None of the operations above should have altered the weight
    self.assertFalse(parametrize.is_parametrized(module))
    self.assertEqual(module.weight, weight_init)
def test_errors_parametrized_tensor_parametrization(self):
    """Test errors when registering a parametrization on an ALREADY
    parametrized tensor: the failed registration must leave the existing
    (single Identity) parametrization intact."""
    class Identity(nn.Module):
        def forward(self, x):
            return x

    module = nn.Linear(3, 4)
    parametrize.register_parametrization(module, "weight", Identity())

    # forward has to return a tensor
    class WrongReturn(nn.Module):
        def forward(self, x):
            return x, x

    with self.assertRaisesRegex(ValueError, "must return a tensor"):
        parametrize.register_parametrization(module, "weight", WrongReturn())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

    # forward cannot change the dtype
    class ChangeDtype(nn.Module):
        def forward(self, x):
            return x.bool()

    with self.assertRaisesRegex(ValueError, "may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtype())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

    # forward cannot change the shape
    class ChangeShape(nn.Module):
        def forward(self, x):
            return x[:-1]

    with self.assertRaisesRegex(ValueError, "may not change the shape"):
        parametrize.register_parametrization(module, "weight", ChangeShape())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

    # The following checks are mostly due to bugs in the code of the parametrization

    # right_inverse has to return a tensor
    class WrongReturnInverse(Identity):
        def right_inverse(self, x):
            return x, x

    with self.assertRaisesRegex(ValueError, "right_inverse must return a tensor"):
        parametrize.register_parametrization(module, "weight", WrongReturnInverse())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

    # right_inverse cannot change the dtype
    class ChangeDtypeInverse(Identity):
        def right_inverse(self, x):
            return x.bool()

    with self.assertRaisesRegex(ValueError, "must have the same dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

    # right_inverse cannot change the shape
    class ChangeShapeInverse(Identity):
        def right_inverse(self, x):
            return x[:-1]

    with self.assertRaisesRegex(ValueError, "must have the same shape"):
        parametrize.register_parametrization(module, "weight", ChangeShapeInverse())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_multiple_inputs_parametrization(self):
    """Parametrizations whose right_inverse returns several tensors
    create `original0`, `original1`, ... parameters; they compose with
    single-input parametrizations, train, and can be removed (only with
    leave_parametrized=True)."""
    # A parametrization with several outputs
    class RankOne(nn.Module):
        def forward(self, x, y):
            # Form a rank-1 matrix from a pair of vectors
            return x.unsqueeze(-1) @ y.unsqueeze(-2)

        def right_inverse(self, Y):
            # We project the given matrix onto the rank 1 matrices
            U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
            # S is ordered in a decreasing way.
            s0_sqrt = S[0].sqrt().unsqueeze(-1)
            return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt

    # Simple parametrisation
    class Double(nn.Module):
        def forward(self, x):
            return 2.0 * x

        def right_inverse(self, w):
            return 0.5 * w

    model = nn.Linear(3, 3)
    # Test one parametrization
    parametrize.register_parametrization(model, "weight", RankOne())
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertTrue(hasattr(model.parametrizations.weight, "original0"))
    self.assertIn("original0", model.parametrizations.weight._parameters)
    self.assertTrue(hasattr(model.parametrizations.weight, "original1"))
    self.assertIn("original1", model.parametrizations.weight._parameters)
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    # Result should be rank 1
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)

    with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
        # Cannot remove a parametrization with multiple inputs and not leave it parametrized
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    # Remove parametrization and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)
    self.assertFalse(parametrize.is_parametrized(model))
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
    self.assertIn("weight", model._parameters)

    # Registering parametrizations with one input on top of one with multiple inputs should work
    init_weight = model.weight.clone()
    parametrize.register_parametrization(model, "weight", RankOne())
    # Projecting a rank 1 matrix onto the matrices of rank one does not change the matrix
    self.assertEqual(init_weight, model.weight)
    parametrize.register_parametrization(model, "weight", Double())
    # The matrix now is twice the initial matrix
    self.assertEqual(2.0 * init_weight, model.weight)
    # Multiplying by a scalar does not change the rank
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)

    # The model has now three parameters (original0, original1, bias)
    self.assertEqual(len(list(model.parameters())), 3)

    sgd = torch.optim.SGD(model.parameters(), lr=0.1)

    # Test backward. Should not throw
    for _ in range(2):
        sgd.zero_grad()
        loss = (model.weight.T @ model.bias).sum()
        loss.backward()
        sgd.step()

    # Same drill as before, removing should work as expected
    with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
        # Cannot remove a parametrization with multiple inputs and not leave it parametrized
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    # Remove parametrization and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)
    self.assertFalse(parametrize.is_parametrized(model))
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
    self.assertIn("weight", model._parameters)

    # The model has now two parameters (weight, bias)
    self.assertEqual(len(list(model.parameters())), 2)

    # Test backward. Should not throw
    sgd = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(2):
        sgd.zero_grad()
        loss = (model.weight.T @ model.bias).sum()
        loss.backward()
        sgd.step()
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_caching_parametrization(self):
    """Inside `parametrize.cached()`, repeated accesses to a parametrized
    tensor return the very same object (the forward is computed once)."""
    # Define a couple matrix parametrizations
    class Skew(nn.Module):
        def forward(self, X):
            X = X.tril(-1)
            return X - X.T

    class Orthogonal(nn.Module):
        def forward(self, X):
            # Cayley map of a skew-symmetric matrix is orthogonal
            Id = torch.eye(X.size(0), device=X.device)
            return torch.linalg.solve(Id + X, Id - X)

    model = nn.Linear(5, 5)
    parametrize.register_parametrization(model, "weight", Skew())
    parametrize.register_parametrization(model, "weight", Orthogonal())

    # Test that the caching system works
    with parametrize.cached():
        X = model.weight
        Y = model.weight
        self.assertEqual(id(X), id(Y))
def test_parametrization_same_training_mode(self):
    """Newly registered parametrizations are put in the same training
    mode as the module they are registered on, regardless of the mode
    the parametrization object was in."""
    class Identity(nn.Module):
        def forward(self, X):
            return X

    module = nn.Linear(4, 4)
    module.eval()
    parametrize.register_parametrization(module, "weight", Identity())
    # Module in eval -> parametrization registered in eval
    self.assertFalse(module.parametrizations.weight[0].training)
    module.train()
    # Even a parametrization created in eval mode is switched to train
    parametrize.register_parametrization(module, "weight", Identity().eval())
    self.assertTrue(module.parametrizations.weight[0].training)
    self.assertTrue(module.parametrizations.weight[1].training)
# torch/nn/utils/prune.py
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount_init(self):
    """`prune._validate_pruning_amount_init` accepts non-negative ints
    and floats in [0, 1]; anything else raises."""
    # a value that is neither float nor int should raise TypeError
    with self.assertRaises(TypeError):
        prune._validate_pruning_amount_init(amount="I'm a string")

    # a float outside [0, 1] should raise ValueError
    with self.assertRaises(ValueError):
        prune._validate_pruning_amount_init(amount=1.1)
    with self.assertRaises(ValueError):
        prune._validate_pruning_amount_init(amount=20.)

    # a negative int should raise ValueError
    with self.assertRaises(ValueError):
        prune._validate_pruning_amount_init(amount=-10)

    # all of these should pass without errors because they're valid amounts
    prune._validate_pruning_amount_init(amount=0.34)
    prune._validate_pruning_amount_init(amount=1500)
    prune._validate_pruning_amount_init(amount=0)
    prune._validate_pruning_amount_init(amount=0.)
    prune._validate_pruning_amount_init(amount=1)
    prune._validate_pruning_amount_init(amount=1.)
    self.assertTrue(True)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount(self):
    """`prune._validate_pruning_amount` checks an integer amount against
    the number of units in the tensor; float amounts are fractions and
    are always compatible."""
    # if amount is int and amount > tensor_size, raise ValueError
    with self.assertRaises(ValueError):
        prune._validate_pruning_amount(amount=20, tensor_size=19)

    # amount is a float so this should not raise an error
    prune._validate_pruning_amount(amount=0.3, tensor_size=0)

    # this is okay
    prune._validate_pruning_amount(amount=19, tensor_size=20)
    prune._validate_pruning_amount(amount=0, tensor_size=0)
    prune._validate_pruning_amount(amount=1, tensor_size=1)
    self.assertTrue(True)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_compute_nparams_to_prune(self):
    """`prune._compute_nparams_toprune`: an int amount is an absolute
    number of units, a float amount is a fraction of the tensor size."""
    self.assertEqual(
        prune._compute_nparams_toprune(amount=0, tensor_size=15),
        0
    )
    self.assertEqual(
        prune._compute_nparams_toprune(amount=10, tensor_size=15),
        10
    )
    # if 1 is int, means 1 unit
    self.assertEqual(
        prune._compute_nparams_toprune(amount=1, tensor_size=15),
        1
    )
    # if 1. is float, means 100% of units
    self.assertEqual(
        prune._compute_nparams_toprune(amount=1., tensor_size=15),
        15
    )
    # fractional amounts are rounded: 0.4 * 17 = 6.8 -> 7
    self.assertEqual(
        prune._compute_nparams_toprune(amount=0.4, tensor_size=17),
        7
    )
def test_random_pruning_sizes(self):
    """Pruning creates `<name>_mask` and `<name>_orig` tensors of the
    same size as the tensor being pruned."""
    # fixturize test
    # TODO: add other modules
    modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
    names = ['weight', 'bias']

    for m in modules:
        for name in names:
            with self.subTest(m=m, name=name):
                original_tensor = getattr(m, name)
                prune.random_unstructured(m, name=name, amount=0.1)
                # mask has the same size as tensor being pruned
                self.assertEqual(
                    original_tensor.size(),
                    getattr(m, name + '_mask').size()
                )
                # 'orig' tensor has the same size as the original tensor
                self.assertEqual(
                    original_tensor.size(),
                    getattr(m, name + '_orig').size()
                )
                # new tensor has the same size as the original tensor
                self.assertEqual(
                    original_tensor.size(),
                    getattr(m, name).size()
                )
def test_random_pruning_orig(self):
    """After pruning, `<name>_orig` holds the tensor as it was prior to
    pruning."""
    # fixturize test
    # TODO: add other modules
    modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
    names = ['weight', 'bias']

    for m in modules:
        for name in names:
            with self.subTest(m=m, name=name):
                # tensor prior to pruning
                original_tensor = getattr(m, name)
                prune.random_unstructured(m, name=name, amount=0.1)
                self.assertEqual(
                    original_tensor,
                    getattr(m, name + '_orig')
                )
def test_random_pruning_new_weight(self):
    """The pruned tensor equals the elementwise product orig * mask."""
    # fixturize test
    # TODO: add other modules
    modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
    names = ['weight', 'bias']

    for m in modules:
        for name in names:
            with self.subTest(m=m, name=name):
                # tensor prior to pruning
                original_tensor = getattr(m, name)
                prune.random_unstructured(m, name=name, amount=0.1)
                # weight = weight_orig * weight_mask
                self.assertEqual(
                    getattr(m, name),
                    getattr(m, name + '_orig')
                    * getattr(m, name + '_mask').to(
                        dtype=original_tensor.dtype
                    ),
                )
def test_identity_pruning(self):
    """`prune.identity` installs an all-ones mask: outputs and gradients
    must be identical to the unpruned module."""
    input_ = torch.ones(1, 5)
    m = nn.Linear(5, 2)
    y_prepruning = m(input_)  # output prior to pruning

    # compute grad pre-pruning and check it's equal to all ones
    y_prepruning.sum().backward()
    old_grad_weight = m.weight.grad.clone()
    self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
    old_grad_bias = m.bias.grad.clone()
    self.assertEqual(old_grad_bias, torch.ones_like(m.bias))

    # remove grads
    m.zero_grad()

    # force the mask to be made of all 1s
    prune.identity(m, name="weight")

    # with mask of 1s, output should be identical to no mask
    y_postpruning = m(input_)
    self.assertEqual(y_prepruning, y_postpruning)

    # with mask of 1s, grad should be identical to no mask
    y_postpruning.sum().backward()
    self.assertEqual(old_grad_weight, m.weight_orig.grad)
    self.assertEqual(old_grad_bias, m.bias.grad)

    # calling forward twice in a row shouldn't change output
    y1 = m(input_)
    y2 = m(input_)
    self.assertEqual(y1, y2)
def test_random_pruning_0perc(self):
    """With the mask computation mocked to return all ones, pruning must
    behave exactly like no pruning at all."""
    input_ = torch.ones(1, 5)
    m = nn.Linear(5, 2)
    y_prepruning = m(input_)  # output prior to pruning

    # compute grad pre-pruning and check it's equal to all ones
    y_prepruning.sum().backward()
    old_grad_weight = m.weight.grad.clone()  # don't grab pointer!
    self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
    old_grad_bias = m.bias.grad.clone()
    self.assertEqual(old_grad_bias, torch.ones_like(m.bias))

    # remove grads
    m.zero_grad()

    # mock the mask computation so the mask is all ones (amount is ignored)
    with mock.patch(
        "torch.nn.utils.prune.RandomUnstructured.compute_mask"
    ) as compute_mask:
        compute_mask.return_value = torch.ones_like(m.weight)
        prune.random_unstructured(m, name='weight', amount=0.9)

    # with mask of 1s, output should be identical to no mask
    y_postpruning = m(input_)
    self.assertEqual(y_prepruning, y_postpruning)

    # with mask of 1s, grad should be identical to no mask
    y_postpruning.sum().backward()
    self.assertEqual(old_grad_weight, m.weight_orig.grad)
    self.assertEqual(old_grad_bias, m.bias.grad)

    # calling forward twice in a row shouldn't change output
    y1 = m(input_)
    y2 = m(input_)
    self.assertEqual(y1, y2)
def test_random_pruning(self):
    """With a fixed mock mask, gradients are zero at the masked entries
    and an SGD-style update leaves those entries of weight_orig intact."""
    input_ = torch.ones(1, 5)
    m = nn.Linear(5, 2)

    # define custom mask to assign with mock: zero out two entries
    mask = torch.ones_like(m.weight)
    mask[1, 0] = 0
    mask[0, 3] = 0

    with mock.patch(
        "torch.nn.utils.prune.RandomUnstructured.compute_mask"
    ) as compute_mask:
        compute_mask.return_value = mask
        prune.random_unstructured(m, name='weight', amount=0.9)

    y_postpruning = m(input_)
    y_postpruning.sum().backward()
    self.assertEqual(m.weight_orig.grad, mask)  # all 1s, except for masked units
    self.assertEqual(m.bias.grad, torch.ones_like(m.bias))

    # make sure that weight_orig update doesn't modify [1, 0] and [0, 3]
    old_weight_orig = m.weight_orig.clone()
    # manual SGD update with lr=1
    learning_rate = 1.
    for p in m.parameters():
        p.data.sub_(p.grad.data * learning_rate)
    self.assertEqual(old_weight_orig[1, 0], m.weight_orig[1, 0])
    self.assertEqual(old_weight_orig[0, 3], m.weight_orig[0, 3])
def test_random_pruning_forward(self):
    """Forward uses the pruned weight: with only two surviving entries,
    each output is that entry plus the bias (input is all ones)."""
    input_ = torch.ones(1, 5)
    m = nn.Linear(5, 2)

    # define custom mask to assign with mock: only two entries survive
    mask = torch.zeros_like(m.weight)
    mask[1, 0] = 1
    mask[0, 3] = 1

    with mock.patch(
        "torch.nn.utils.prune.RandomUnstructured.compute_mask"
    ) as compute_mask:
        compute_mask.return_value = mask
        prune.random_unstructured(m, name='weight', amount=0.9)

    yhat = m(input_)
    self.assertEqual(yhat[0, 0], m.weight_orig[0, 3] + m.bias[0])
    self.assertEqual(yhat[0, 1], m.weight_orig[1, 0] + m.bias[1])
def test_remove_pruning_forward(self):
    """`prune.remove` makes the pruning permanent without changing the
    module's output."""
    input_ = torch.ones(1, 5)
    m = nn.Linear(5, 2)

    # define custom mask to assign with mock: zero out two entries
    mask = torch.ones_like(m.weight)
    mask[1, 0] = 0
    mask[0, 3] = 0

    with mock.patch(
        "torch.nn.utils.prune.RandomUnstructured.compute_mask"
    ) as compute_mask:
        compute_mask.return_value = mask
        prune.random_unstructured(m, name='weight', amount=0.9)

    y_postpruning = m(input_)

    prune.remove(m, 'weight')

    y_postremoval = m(input_)
    self.assertEqual(y_postpruning, y_postremoval)
def test_pruning_id_consistency(self):
    """Pruning and later removing the pruning keep the same Parameter
    object (identity check via id())."""
    m = nn.Linear(5, 2, bias=False)
    tensor_id = id(list(m.parameters())[0])

    prune.random_unstructured(m, name="weight", amount=0.9)
    # pruning renames the parameter to weight_orig but keeps the object
    self.assertEqual(tensor_id, id(list(m.parameters())[0]))

    prune.remove(m, "weight")
    self.assertEqual(tensor_id, id(list(m.parameters())[0]))
def test_random_pruning_pickle(self):
    """A pruned module survives a pickle round-trip and keeps its type."""
    modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
    names = ['weight', 'bias']

    for m in modules:
        for name in names:
            with self.subTest(m=m, name=name):
                prune.random_unstructured(m, name=name, amount=0.1)
                m_new = pickle.loads(pickle.dumps(m))
                self.assertIsInstance(m_new, type(m))
def test_multiple_pruning_calls(self):
    """Calling a pruning method twice on the same tensor turns the hook
    into a PruningContainer; successive masks combine (zeros stay zero)."""
    m = nn.Conv3d(2, 2, 2)
    prune.l1_unstructured(m, name='weight', amount=0.1)
    weight_mask0 = m.weight_mask  # save for the combination check below

    # prune again: the forward pre-hook becomes a PruningContainer
    prune.ln_structured(m, name='weight', amount=0.3, n=2, dim=0)
    hook = next(iter(m._forward_pre_hooks.values()))
    self.assertIsInstance(
        hook,
        torch.nn.utils.prune.PruningContainer
    )
    # the container tracks the tensor name
    self.assertEqual(hook._tensor_name, 'weight')

    # container length equals the number of pruning calls, entries
    # appear in call order with the expected types
    self.assertEqual(len(hook), 2)
    self.assertIsInstance(hook[0], torch.nn.utils.prune.L1Unstructured)
    self.assertIsInstance(hook[1], torch.nn.utils.prune.LnStructured)

    # all entries zeroed by the first mask stay zero in the combined mask
    self.assertTrue(torch.all(m.weight_mask[weight_mask0 == 0] == 0))

    # prune a third time; the container keeps the tensor name
    prune.ln_structured(m, name='weight', amount=0.1, n=float('inf'), dim=1)
    hook = next(iter(m._forward_pre_hooks.values()))
    self.assertEqual(hook._tensor_name, 'weight')
def test_pruning_container(self):
    """PruningContainer only accepts pruning methods that act on the same
    tensor name; non-method objects raise TypeError."""
    # create an empty container
    container = prune.PruningContainer()
    container._tensor_name = 'test'
    self.assertEqual(len(container), 0)

    p = prune.L1Unstructured(amount=2)
    p._tensor_name = 'test'

    # adding a pruning method with a matching tensor name works
    container.add_pruning_method(p)

    # mismatched tensor name raises ValueError
    q = prune.L1Unstructured(amount=2)
    q._tensor_name = 'another_test'
    with self.assertRaises(ValueError):
        container.add_pruning_method(q)

    # objects that are not pruning methods raise TypeError
    with self.assertRaises(TypeError):
        container.add_pruning_method(10)
    with self.assertRaises(TypeError):
        container.add_pruning_method('ugh')
def test_pruning_container_compute_mask(self):
container = prune.PruningContainer()
container._tensor_name = 'test'
p = prune.L1Unstructured(amount=2)
p._tensor_name = 'test'
container.add_pruning_method(p)
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
sk)
q = prune.LnStructured(amount=1, n=2, dim=0)
q._tensor_name = 'test'
container.add_pruning_method(q)
expected_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
sk)
r = prune.LnStructured(amount=1, n=2, dim=1)
r._tensor_name = 'test'
container.add_pruning_method(r)
expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
sk)
def test_l1_unstructured_pruning(self):
    """L1 unstructured pruning removes the entries with lowest absolute
    value; repeated calls prune the next-smallest surviving entries."""
    m = nn.Linear(4, 2)
    # set the weight matrix by hand so the result is deterministic
    m.weight = torch.nn.Parameter(
        torch.tensor(
            [[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
        )
    )

    prune.l1_unstructured(m, 'weight', amount=2)
    # |1| and |-1| are the two smallest magnitudes
    expected_weight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
                                   dtype=m.weight.dtype)
    self.assertEqual(expected_weight, m.weight)

    # pruning again removes the next two smallest magnitudes (2 and -2)
    prune.l1_unstructured(m, 'weight', amount=2)
    expected_weight = torch.tensor([[0, 0, 3, 4], [-4, -3, 0, 0]],
                                   dtype=m.weight.dtype)
    self.assertEqual(expected_weight, m.weight)
def test_l1_unstructured_pruning_with_importance_scores(self):
    """L1 unstructured pruning ranks entries by the supplied importance
    scores rather than by the weight values themselves."""
    m = nn.Linear(4, 2)
    # set the weight matrix by hand so the result is deterministic
    m.weight = torch.nn.Parameter(
        torch.tensor(
            [[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
        )
    )
    importance_scores = torch.tensor(
        [[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
    )

    prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
    # entries with |score| == 1 are pruned first
    expected_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
                                   dtype=m.weight.dtype)
    self.assertEqual(expected_weight, m.weight)

    # pruning again removes the entries with the next smallest |score|
    prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
    expected_weight = torch.tensor([[1, 0, 0, 4], [-4, 0, 0, -1]],
                                   dtype=m.weight.dtype)
    self.assertEqual(expected_weight, m.weight)
def test_unstructured_pruning_same_magnitude(self):
    """Even when many entries tie in magnitude, exactly the requested
    number of units is pruned."""
    AMOUNT = 0.2
    p = prune.L1Unstructured(amount=AMOUNT)
    # random tensor with entries in {-2, 0, 2}: lots of ties
    t = 2 * torch.randint(low=-1, high=2, size=(10, 7))
    nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.nelement())

    computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
    nparams_pruned = torch.sum(computed_mask == 0)
    self.assertEqual(nparams_toprune, nparams_pruned)
def test_random_structured_pruning_amount(self):
    """Random structured pruning removes whole slices along the given
    dim: with 0.6 of 2 slices, exactly 1 slice (20 entries) is zeroed."""
    AMOUNT = 0.6
    AXIS = 2
    p = prune.RandomStructured(amount=AMOUNT, dim=AXIS)
    t = 2 * torch.randint(low=-1, high=2, size=(5, 4, 2)).to(
        dtype=torch.float32
    )
    # NOTE(review): nparams_toprune is computed but never asserted against
    nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.shape[AXIS])

    computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
    # check that one slice is fully pruned, the other is left untouched
    remaining_axes = [_ for _ in range(len(t.shape)) if _ != AXIS]
    per_column_sums = sorted(
        torch.sum(computed_mask == 0, axis=remaining_axes)
    )
    assert per_column_sums == [0, 20]
def test_ln_structured_pruning(self):
    """Ln structured pruning on a hand-crafted Conv2d weight: masks
    computed by hand for pruning along dim 1 (L2) and dim -1 (L1)."""
    m = nn.Conv2d(3, 1, 2)
    m.weight.data = torch.tensor(
        [[[[1., 2.], [1., 2.5]],
          [[0.5, 1.], [0.1, 0.1]],
          [[-3., -5.], [0.1, -1.]]]]
    )
    # expected effect of pruning 1 of the 3 channels by L2-norm
    expected_mask_axis1 = torch.ones_like(m.weight)
    expected_mask_axis1[:, 1] = 0.

    prune.ln_structured(m, 'weight', amount=1, n=2, dim=1)
    self.assertEqual(expected_mask_axis1, m.weight_mask)

    # expected effect of also pruning 1 of the 2 columns along the last
    # axis by L1-norm (masks accumulate via the PruningContainer)
    expected_mask_axis3 = expected_mask_axis1
    expected_mask_axis3[:, :, :, 0] = 0.

    prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1)
    self.assertEqual(expected_mask_axis3, m.weight_mask)
def test_ln_structured_pruning_importance_scores(self):
    """Ln structured pruning ranks slices by the supplied importance
    scores rather than by the weight values."""
    m = nn.Conv2d(3, 1, 2)
    m.weight.data = torch.tensor(
        [[[[1., 2.], [1., 2.5]],
          [[0.5, 1.], [0.1, 0.1]],
          [[-3., -5.], [0.1, -1.]]]]
    )
    importance_scores = torch.tensor(
        [[[[10., 1.], [10., 1.]],
          [[30., 3.], [30., 3.]],
          [[-20., -2.], [-20., -2.]]]]
    )
    # channel 0 has the lowest L2 norm of importance scores -> pruned
    expected_mask_axis1 = torch.ones_like(m.weight)
    expected_mask_axis1[:, 0] = 0.

    prune.ln_structured(m, 'weight', amount=1, n=2, dim=1, importance_scores=importance_scores)
    self.assertEqual(expected_mask_axis1, m.weight_mask)

    # column 1 has the lowest L1 norm of importance scores -> pruned
    expected_mask_axis3 = expected_mask_axis1
    expected_mask_axis3[:, :, :, 1] = 0.
    prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1, importance_scores=importance_scores)
    self.assertEqual(expected_mask_axis3, m.weight_mask)
def test_remove_pruning(self):
    """`prune.remove` makes pruning permanent: the `_orig` parameter and
    `_mask` buffer disappear, the pruned values become the parameter."""
    modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
    names = ['weight', 'bias']

    for m in modules:
        for name in names:
            with self.subTest(m=m, name=name):
                # first prune: parameter is reparametrized as orig * mask
                prune.random_unstructured(m, name, amount=0.5)
                self.assertIn(name + "_orig", dict(m.named_parameters()))
                self.assertIn(name + "_mask", dict(m.named_buffers()))
                self.assertNotIn(name, dict(m.named_parameters()))
                self.assertTrue(hasattr(m, name))
                pruned_t = getattr(m, name)

                # then remove pruning: back to a plain parameter
                prune.remove(m, name)
                self.assertIn(name, dict(m.named_parameters()))
                self.assertNotIn(name + "_orig", dict(m.named_parameters()))
                self.assertNotIn(name + "_mask", dict(m.named_buffers()))
                final_t = getattr(m, name)

                # the values are the pruned ones
                self.assertEqual(pruned_t, final_t)
def test_remove_pruning_exception(self):
    """`prune.remove` on a module that was never pruned raises ValueError."""
    modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
    names = ['weight', 'bias']

    for m in modules:
        for name in names:
            with self.subTest(m=m, name=name):
                # the module isn't pruned
                self.assertFalse(prune.is_pruned(m))
                # since it isn't pruned, pruning can't be removed from it
                with self.assertRaises(ValueError):
                    prune.remove(m, name)
def test_global_pruning(self):
m = nn.Linear(4, 2)
n = nn.Linear(3, 1)
# modify the weight matrices by hand
m.weight = torch.nn.Parameter(
torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
dtype=torch.float32)
)
n.weight = torch.nn.Parameter(
torch.tensor([[0, 0.1, -2]]).to(
dtype=torch.float32)
)
params_to_prune = (
(m, 'weight'),
(n, 'weight'),
)
# prune the 4 smallest weights globally by L1 magnitude
prune.global_unstructured(
params_to_prune,
pruning_method=prune.L1Unstructured,
amount=4
)
expected_mweight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_mweight, m.weight)
expected_nweight = torch.tensor([[0, 0, -2]]).to(dtype=n.weight.dtype)
self.assertEqual(expected_nweight, n.weight)
    def test_global_pruning_importance_scores(self):
        """Global unstructured pruning driven by per-parameter importance
        scores: the 4 entries with the globally-smallest |score| are zeroed,
        even where that differs from the weights' own magnitudes."""
        m = nn.Linear(4, 2)
        n = nn.Linear(3, 1)
        # modify the weight matrices by hand
        m.weight = torch.nn.Parameter(
            torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
                dtype=torch.float32)
        )
        m_importance_scores = torch.tensor(
            [[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
        )
        n.weight = torch.nn.Parameter(
            torch.tensor([[0, 0.1, -2]]).to(
                dtype=torch.float32)
        )
        n_importance_scores = torch.tensor([[0, 10., -0.2]]).to(dtype=torch.float32)
        params_to_prune = (
            (m, 'weight'),
            (n, 'weight'),
        )
        # scores are keyed by the same (module, name) pairs as params_to_prune
        importance_scores = {
            (m, 'weight'): m_importance_scores,
            (n, 'weight'): n_importance_scores,
        }
        # prune the 4 entries with the smallest importance scores globally
        # (scores 1, -1 in m and 0, -0.2 in n)
        prune.global_unstructured(
            params_to_prune,
            pruning_method=prune.L1Unstructured,
            amount=4,
            importance_scores=importance_scores,
        )
        expected_m_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
                                         dtype=m.weight.dtype)
        self.assertEqual(expected_m_weight, m.weight)
        expected_n_weight = torch.tensor([[0, 0.1, 0]]).to(dtype=n.weight.dtype)
        self.assertEqual(expected_n_weight, n.weight)
def test_custom_from_mask_pruning(self):
# new mask
mask = torch.tensor([[0, 1, 1, 0], [0, 0, 1, 1]])
# old mask
default_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 1, 1]])
# some tensor (not actually used)
t = torch.rand_like(mask.to(dtype=torch.float32))
p = prune.CustomFromMask(mask=mask)
computed_mask = p.compute_mask(t, default_mask)
expected_mask = torch.tensor([[0, 0, 0, 0], [0, 0, 1, 1]]).to(
dtype=t.dtype
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(computed_mask, expected_mask)
def test_pruning_rollback(self):
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
with mock.patch(
"torch.nn.utils.prune.L1Unstructured.compute_mask"
) as compute_mask:
compute_mask.side_effect = Exception('HA!')
with self.assertRaises(Exception):
prune.l1_unstructured(m, name=name, amount=0.9)
self.assertTrue(
name in dict(m.named_parameters())
)
self.assertFalse(
name + '_mask' in dict(m.named_buffers())
)
self.assertFalse(
name + '_orig' in dict(m.named_parameters())
)
def test_pruning_serialization_model(self):
# create a model
model = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
# check that everything looks normal before pruning
self.assertNotIn('0.weight_orig', model.state_dict())
self.assertNotIn('0.weight_mask', model.state_dict())
self.assertIn('0.weight', model.state_dict())
# prune one of its parameters
prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', model.state_dict())
self.assertIn('0.weight_mask', model.state_dict())
self.assertNotIn('0.weight', model.state_dict())
self.assertTrue(hasattr(model[0], 'weight'))
pruned_weight = model[0].weight
with TemporaryFileName() as fname:
torch.save(model, fname)
new_model = torch.load(fname)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', new_model.state_dict())
self.assertIn('0.weight_mask', new_model.state_dict())
self.assertNotIn('0.weight', new_model.state_dict())
self.assertTrue(hasattr(new_model[0], 'weight'))
self.assertEqual(pruned_weight, new_model[0].weight)
    def test_pruning_serialization_state_dict(self):
        """After ``prune.remove`` makes pruning permanent, the state_dict
        regains the original parameter names and can be loaded into a
        fresh, never-pruned copy of the architecture."""
        # create a model
        model = torch.nn.Sequential(
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 1),
        )
        # check that everything looks normal before pruning
        self.assertNotIn('0.weight_orig', model.state_dict())
        self.assertNotIn('0.weight_mask', model.state_dict())
        self.assertIn('0.weight', model.state_dict())
        # prune one of its parameters
        prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
        # check that the original weight and the new mask are present
        self.assertIn('0.weight_orig', model.state_dict())
        self.assertIn('0.weight_mask', model.state_dict())
        self.assertNotIn('0.weight', model.state_dict())
        self.assertTrue(hasattr(model[0], 'weight'))
        pruned_weight = model[0].weight
        # make pruning permanent and restore parameter names as in base
        # architecture
        prune.remove(module=model[0], name='weight')
        # check that the original weight and the new mask are no longer present
        self.assertNotIn('0.weight_orig', model.state_dict())
        self.assertNotIn('0.weight_mask', model.state_dict())
        self.assertIn('0.weight', model.state_dict())
        # save the state dict of model and reload it into new_model
        new_model = torch.nn.Sequential(
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 1),
        )
        with TemporaryFileName() as fname:
            torch.save(model.state_dict(), fname)
            new_model.load_state_dict(torch.load(fname))
        # check that the original weight and the new mask are not present in
        # new_model either.
        self.assertNotIn('0.weight_orig', new_model.state_dict())
        self.assertNotIn('0.weight_mask', new_model.state_dict())
        self.assertIn('0.weight', new_model.state_dict())
        # the loaded weight matches the pruned (masked) values
        self.assertEqual(pruned_weight, new_model[0].weight)
def test_prune(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
importance_scores = torch.tensor(
[[1, 2, 3, 4], [1.5, 1.6, 1.7, 1.8]]
).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask, importance_scores=importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores_mimic_default(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor_without_importance_scores = p.prune(t, default_mask)
pruned_tensor_with_importance_scores = p.prune(t, default_mask, importance_scores=t)
self.assertEqual(pruned_tensor_without_importance_scores, pruned_tensor_with_importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor_without_importance_scores)
def test_rnn_pruning(self):
l = torch.nn.LSTM(32, 32)
# This Module has 4 parameters called:
# 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0'
# Pruning one of them causes one of the weights to become a tensor
prune.l1_unstructured(l, 'weight_ih_l0', 0.5)
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 3
)
# Removing the pruning reparametrization restores the Parameter
prune.remove(l, 'weight_ih_l0')
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 4
)
# Make sure that, upon removal of the reparametrization, the
# `._parameters` and `.named_parameters` contain the right params.
# Specifically, the original weight ('weight_ih_l0') should be placed
# back in the parameters, while the reparametrization component
# ('weight_ih_l0_orig') should be removed.
assert 'weight_ih_l0' in l._parameters
assert l._parameters['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in l._parameters
assert 'weight_ih_l0' in dict(l.named_parameters())
assert dict(l.named_parameters())['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in dict(l.named_parameters())
    def test_rnn_weight_norm(self):
        """Applying/removing weight_norm on an LSTM's flat weights converts
        the targeted entry of ``_flat_weights`` to a tensor and back, and
        restores the original parameter layout after removal."""
        def check_weight_norm(l, name, num_params):
            # This Module has 4 or 5 parameters called:
            # 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0', weight_hr_l0
            # Applying weight norm on one of them causes it to become a tensor
            l = torch.nn.utils.weight_norm(l, name=name)
            self.assertEqual(
                sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
                num_params - 1,
            )
            # Removing the weight norm reparametrization restores the Parameter
            l = torch.nn.utils.remove_weight_norm(l, name=name)
            self.assertEqual(
                sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
                num_params,
            )
            # Make sure that, upon removal of the reparametrization, the
            # `._parameters` and `.named_parameters` contain the right params.
            # Specifically, the original weight ('weight_ih_l0') should be placed
            # back in the parameters, while the reparametrization components
            # ('weight_ih_l0_v' and 'weight_ih_l0_g') should be removed.
            self.assertTrue(name in l._parameters)
            self.assertIsNotNone(l._parameters[name])
            self.assertTrue(name + '_v' not in l._parameters)
            self.assertTrue(name + '_g' not in l._parameters)
            self.assertTrue(name in dict(l.named_parameters()))
            self.assertIsNotNone(dict(l.named_parameters())[name])
            self.assertTrue(name + '_v' not in dict(l.named_parameters()))
            self.assertTrue(name + '_g' not in dict(l.named_parameters()))
        # plain LSTM has 4 flat weights; proj_size adds 'weight_hr_l0'
        check_weight_norm(torch.nn.LSTM(32, 32), 'weight_ih_l0', 4)
        check_weight_norm(torch.nn.LSTM(32, 32, proj_size=16), 'weight_hr_l0', 5)
    def test_weight_norm(self):
        """weight_norm on a Linear: g/v shapes for dim=0, dim=1 and
        dim=None, output invariance, clean removal, and the error raised
        when registering the hook twice."""
        input = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        expected_output = m(input)
        # add weight normalization
        m = torch.nn.utils.weight_norm(m)
        self.assertEqual(m.weight_v.size(), m.weight.size())
        self.assertEqual(m.weight_g.size(), (7, 1))
        # reparametrization must not change the forward output
        self.assertEqual(m(input), expected_output)
        # remove weight norm
        m = torch.nn.utils.remove_weight_norm(m)
        self.assertFalse(hasattr(m, 'weight_g'))
        self.assertFalse(hasattr(m, 'weight_v'))
        self.assertEqual(m(input), expected_output)
        # test with dim=1
        m = torch.nn.utils.weight_norm(m, dim=1)
        self.assertEqual(m.weight_v.size(), m.weight.size())
        self.assertEqual(m.weight_g.size(), (1, 5))
        self.assertEqual(m(input), expected_output)
        # test with dim=None
        m = nn.Linear(5, 7)
        expected_output = m(input)
        m = torch.nn.utils.weight_norm(m, dim=None)
        self.assertEqual(m(input), expected_output)
        # double registration is rejected
        with self.assertRaisesRegex(RuntimeError, 'register two weight_norm hooks'):
            m = torch.nn.utils.weight_norm(m)
            m = torch.nn.utils.weight_norm(m)
def test_parameterlistdict_setting_attributes(self):
with warnings.catch_warnings(record=True) as w:
mod = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
self.assertTrue(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
mod.train()
mod.eval()
self.assertTrue(len(w) == 0)
with self.assertWarnsRegex(UserWarning,
r"Setting attributes on ParameterList is not supported"):
torch.nn.utils.weight_norm(mod, "0")
with warnings.catch_warnings(record=True) as w:
mod = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
self.assertTrue(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
mod.train()
mod.eval()
self.assertTrue(len(w) == 0)
with self.assertWarnsRegex(UserWarning,
r"Setting attributes on ParameterDict is not supported"):
torch.nn.utils.weight_norm(mod, "b")
def test_parameterlistdict_pickle(self):
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
del m._initialized
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
# Test whether loading from older checkpoints works without triggering warnings
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
del m._initialized
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
# Test whether loading from older checkpoints works without triggering warnings
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
def test_weight_norm_pickle(self):
m = torch.nn.utils.weight_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
    def test_spectral_norm(self):
        """Legacy hook-based spectral_norm: registration/removal of the
        weight_orig/weight_u/weight_v reparametrization, double-registration
        error, and train/eval forward+backward behavior on CPU and (when
        available) DataParallel over two GPUs."""
        input = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        m = torch.nn.utils.spectral_norm(m)
        self.assertEqual(m.weight_u.size(), torch.Size([m.weight.size(0)]))
        # weight_orig should be trainable
        self.assertTrue(hasattr(m, 'weight_orig'))
        self.assertTrue('weight_orig' in m._parameters)
        # weight_u should be just a reused buffer
        self.assertTrue(hasattr(m, 'weight_u'))
        self.assertTrue('weight_u' in m._buffers)
        self.assertTrue('weight_v' in m._buffers)
        # weight should be a plain attribute, not counted as a buffer or a param
        self.assertFalse('weight' in m._buffers)
        self.assertFalse('weight' in m._parameters)
        # it should also be sharing storage as `weight_orig`
        self.assertEqual(m.weight_orig.storage(), m.weight.storage())
        self.assertEqual(m.weight_orig.size(), m.weight.size())
        self.assertEqual(m.weight_orig.stride(), m.weight.stride())
        m = torch.nn.utils.remove_spectral_norm(m)
        self.assertFalse(hasattr(m, 'weight_orig'))
        self.assertFalse(hasattr(m, 'weight_u'))
        # weight should be converted back as a parameter
        self.assertTrue(hasattr(m, 'weight'))
        self.assertTrue('weight' in m._parameters)
        # registering spectral_norm twice on the same parameter must fail
        with self.assertRaisesRegex(RuntimeError, 'register two spectral_norm hooks'):
            m = torch.nn.utils.spectral_norm(m)
            m = torch.nn.utils.spectral_norm(m)
        # test correctness in training/eval modes and cpu/multi-gpu settings
        for apply_dp in (True, False):
            if apply_dp:
                if not TEST_MULTIGPU:
                    continue
                device = torch.device('cuda:0')
                def maybe_wrap(m):
                    return torch.nn.DataParallel(m, [0, 1])
            else:
                device = torch.device('cpu')
                def maybe_wrap(m):
                    return m
            for requires_grad in (True, False):
                m = nn.Linear(3, 4).to(device)
                m.weight.requires_grad_(requires_grad)
                m = torch.nn.utils.spectral_norm(m)
                wrapped_m = maybe_wrap(m)
                self.assertTrue(hasattr(m, 'weight_u'))
                u0 = m.weight_u.clone()
                v0 = m.weight_v.clone()
                # TEST TRAINING BEHAVIOR
                # assert that u and v are updated
                input = torch.randn(2, 3, device=device)
                out = wrapped_m(input)
                self.assertNotEqual(u0, m.weight_u)
                self.assertNotEqual(v0, m.weight_v)
                # assert that backprop reaches weight_orig
                # can't use gradcheck directly because each forward mutates
                # the u/v buffers (the function changes as we run it)
                if requires_grad:
                    torch.autograd.grad(out.sum(), m.weight_orig)
                # for gradcheck, pin u/v to saved values at the start of fn
                # so the function under test is deterministic
                saved_u = m.weight_u.clone()
                saved_v = m.weight_v.clone()
                def fn(input):
                    m.weight_u.data.copy_(saved_u)
                    m.weight_v.data.copy_(saved_v)
                    out0 = wrapped_m(input)
                    out1 = wrapped_m(input)
                    return out0 + out1
                gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)
                # removing the reparametrization keeps the forward output
                pre_remove_out = wrapped_m(input)
                m = torch.nn.utils.remove_spectral_norm(m)
                self.assertEqual(wrapped_m(input), pre_remove_out)
                m = torch.nn.utils.spectral_norm(m)
                for _ in range(3):
                    pre_remove_out = wrapped_m(input)
                    m = torch.nn.utils.remove_spectral_norm(m)
                    self.assertEqual(wrapped_m(input), pre_remove_out)
                    m = torch.nn.utils.spectral_norm(m)
                wrapped_m(input)
                last_train_out = wrapped_m(input)
                last_train_u = m.weight_u.clone()
                last_train_v = m.weight_v.clone()
                wrapped_m.zero_grad()
                wrapped_m.eval()
                # eval must reproduce the last training output and must not
                # update the u/v power-iteration buffers
                eval_out0 = wrapped_m(input)
                self.assertEqual(eval_out0, last_train_out)
                self.assertEqual(eval_out0, wrapped_m(input))
                self.assertEqual(last_train_u, m.weight_u)
                self.assertEqual(last_train_v, m.weight_v)
                # FIXME: the code below is flaky when executed with DataParallel
                # see https://github.com/pytorch/pytorch/issues/13818
                if apply_dp:
                    continue
                # test backward works with multiple forwards in mixed training
                # and eval modes
                # it uses training mode so we need to reset `u` and `v` vectors
                # to same value at beginning for finite difference test to pass
                saved_u = m.weight_u.clone()
                saved_v = m.weight_v.clone()
                def fn(input):
                    m.weight_u.data.copy_(saved_u)
                    m.weight_v.data.copy_(saved_v)
                    wrapped_m.train()
                    out0 = wrapped_m(input)
                    wrapped_m.eval()
                    out1 = wrapped_m(input)
                    wrapped_m.train()
                    out2 = wrapped_m(input)
                    wrapped_m.eval()
                    out3 = wrapped_m(input)
                    return out0 + out1 + out2 + out3
                gradcheck(fn, (input.clone().requires_grad_(),))
                # assert that backprop reaches weight_orig in eval
                if requires_grad:
                    def fn(weight):
                        return wrapped_m(input)
                    gradcheck(fn, (m.weight_orig,))
    def test_new_spectral_norm(self):
        """Parametrization-based spectral_norm
        (``torch.nn.utils.parametrizations.spectral_norm``): buffer/parameter
        layout, repeated registration, removal semantics, and train/eval
        forward+backward behavior on CPU and (when available) DataParallel."""
        input = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        m = torch.nn.utils.parametrizations.spectral_norm(m)
        spectral_norm_m = m.parametrizations.weight[0]
        self.assertEqual(spectral_norm_m._u.size(), torch.Size([m.weight.size(0)]))
        # .parametrizations.weight.original should be trainable
        self.assertTrue(hasattr(m.parametrizations.weight, 'original'))
        self.assertTrue('original' in m.parametrizations.weight._parameters)
        # u should be just a reused buffer
        self.assertTrue(hasattr(spectral_norm_m, '_u'))
        self.assertTrue('_u' in spectral_norm_m._buffers)
        self.assertTrue('_v' in spectral_norm_m._buffers)
        # weight should be a plain attribute, not counted as a buffer or a param
        self.assertIsNotNone(m.weight)
        self.assertFalse('weight' in m._buffers)
        self.assertFalse('weight' in m._parameters)
        # it should also be sharing storage as `weight_orig`
        # self.assertEqual(m.parametrizations.weight.original.storage(), m.weight.storage())
        self.assertEqual(m.parametrizations.weight.original.size(), m.weight.size())
        self.assertEqual(m.parametrizations.weight.original.stride(), m.weight.stride())
        m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
        # spectral_norm is the only parametrization
        self.assertFalse(hasattr(m, 'parametrizations'))
        self.assertTrue('weight' in m._parameters)
        # We can register spectral_norm multiple times on the same parameter
        # and on multiple parameters in the same module
        m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
        m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
        m = torch.nn.utils.parametrizations.spectral_norm(m, 'bias')
        # If we remove the parametrization on bias, weight is still parametrized
        # Removing a parametrization runs forward in eval mode if leave_parametrized=True
        m = torch.nn.utils.parametrize.remove_parametrizations(m, 'bias')
        self.assertTrue('bias' in m._parameters)
        self.assertTrue(hasattr(m, 'parametrizations'))
        self.assertFalse('weight' in m._parameters)
        m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
        # Neither weight and bias are parametrized
        self.assertFalse(hasattr(m, 'parametrizations'))
        self.assertTrue('weight' in m._parameters)
        self.assertFalse(torch.nn.utils.parametrize.is_parametrized(m))
        # test correctness in training/eval modes and cpu/multi-gpu settings
        for apply_dp in (True, False):
            if apply_dp:
                if not TEST_MULTIGPU:
                    continue
                device = torch.device('cuda:0')
                def maybe_wrap(m):
                    return torch.nn.DataParallel(m, [0, 1])
            else:
                device = torch.device('cpu')
                def maybe_wrap(m):
                    return m
            for requires_grad in (True, False):
                def get_modules():
                    # fresh spectral-normed Linear plus its (possibly
                    # DataParallel-wrapped) view and the parametrization obj
                    m = nn.Linear(3, 4).to(device)
                    m.weight.requires_grad_(requires_grad)
                    m = torch.nn.utils.parametrizations.spectral_norm(m)
                    wrapped_m = maybe_wrap(m)
                    spectral_norm_m = m.parametrizations.weight[0]
                    return m, wrapped_m, spectral_norm_m
                input = torch.randn(2, 3, device=device)
                m, wrapped_m, spectral_norm_m = get_modules()
                self.assertTrue(hasattr(spectral_norm_m, '_u'))
                u0 = spectral_norm_m._u.clone()
                v0 = spectral_norm_m._v.clone()
                # TEST TRAINING BEHAVIOR
                # We perform GD first to modify the initial matrix
                opt = torch.optim.SGD(wrapped_m.parameters(), lr=0.1)
                opt.zero_grad()
                wrapped_m(input).sum().backward()
                opt.step()
                out = wrapped_m(input)
                if requires_grad:
                    # run forward again and assert that u and v are updated
                    self.assertNotEqual(u0, spectral_norm_m._u)
                    self.assertNotEqual(v0, spectral_norm_m._v)
                # assert that backprop reaches original weight
                # can't use gradcheck directly because each forward mutates
                # the u/v buffers (the function changes as we run it)
                if requires_grad:
                    torch.autograd.grad(out.sum(), m.parametrizations.weight.original)
                # for gradcheck, pin u/v to saved values at the start of fn
                # so the function under test is deterministic
                saved_u = spectral_norm_m._u.clone()
                saved_v = spectral_norm_m._v.clone()
                def fn(input):
                    spectral_norm_m._u.data.copy_(saved_u)
                    spectral_norm_m._v.data.copy_(saved_v)
                    out0 = wrapped_m(input)
                    out1 = wrapped_m(input)
                    return out0 + out1
                fn(input.clone().requires_grad_()).sum().backward()
                gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)
                # avoid doing another power iteration
                m, wrapped_m, _ = get_modules()
                pre_remove_out = wrapped_m(input)
                m.eval()
                m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
                self.assertEqual(wrapped_m(input), pre_remove_out)
                torch.nn.utils.parametrizations.spectral_norm(m)
                # NOTE(review): unlike the block just above, this loop does
                # not re-register spectral_norm after removing it, so after
                # the first iteration `m` is unparametrized — verify the
                # loop body behaves as intended on later iterations.
                for _ in range(3):
                    pre_remove_out = wrapped_m(input)
                    m.eval()
                    m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
                    self.assertEqual(wrapped_m(input), pre_remove_out)
                # TEST EVAL BEHAVIOR
                m, wrapped_m, spectral_norm_m = get_modules()
                wrapped_m(input)
                last_train_out = wrapped_m(input)
                last_train_u = spectral_norm_m._u.clone()
                last_train_v = spectral_norm_m._v.clone()
                wrapped_m.zero_grad()
                wrapped_m.eval()
                eval_out0 = wrapped_m(input)
                # assert eval gives same result as last training iteration
                self.assertEqual(eval_out0, last_train_out)
                # assert doing more iterations in eval doesn't change things
                self.assertEqual(eval_out0, wrapped_m(input))
                self.assertEqual(last_train_u, spectral_norm_m._u)
                self.assertEqual(last_train_v, spectral_norm_m._v)
                if apply_dp:
                    continue
                # mixed train/eval forwards must still be differentiable;
                # pin u/v so the finite-difference check is deterministic
                saved_u = spectral_norm_m._u.clone()
                saved_v = spectral_norm_m._v.clone()
                def fn(input):
                    spectral_norm_m._u.data.copy_(saved_u)
                    spectral_norm_m._v.data.copy_(saved_v)
                    wrapped_m.train()
                    out0 = wrapped_m(input)
                    wrapped_m.eval()
                    out1 = wrapped_m(input)
                    wrapped_m.train()
                    out2 = wrapped_m(input)
                    wrapped_m.eval()
                    out3 = wrapped_m(input)
                    return out0 + out1 + out2 + out3
                gradcheck(fn, (input.clone().requires_grad_(),))
                if requires_grad:
                    def fn(weight):
                        return wrapped_m(input)
                    gradcheck(fn, (m.parametrizations.weight.original,))
    def test_new_spectral_norm_load_state_dict(self):
        """state_dict round-trip for the parametrization-based spectral
        norm: exact key set, strict vs non-strict loading with keys
        progressively deleted, and identical train/eval outputs after
        reloading into a freshly re-parametrized module."""
        for activate_times in (0, 3):
            inp = torch.randn(2, 3)
            m = nn.Linear(3, 5)
            snm = torch.nn.utils.parametrizations.spectral_norm(m)
            snm.train()
            # optionally run a few forwards so u/v move off their init values
            for _ in range(activate_times):
                snm(inp)
            state_dict = deepcopy(snm.state_dict())
            self.assertEqual({
                'parametrizations.weight.original',
                'bias',
                'parametrizations.weight.0._v',
                'parametrizations.weight.0._u'
            }, set(state_dict.keys()))
            # strict loading rejects unknown keys; non-strict tolerates
            # unknown and progressively missing keys
            non_strict_state_dict = deepcopy(state_dict)
            non_strict_state_dict['nonsense'] = 'nonsense'
            with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
                snm.load_state_dict(non_strict_state_dict, strict=True)
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['parametrizations.weight.original']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['parametrizations.weight.0._u']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['parametrizations.weight.0._v']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            non_strict_state_dict['weight'] = snm.weight.detach().clone()
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict._metadata['parametrizations.weight.0']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['weight']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['bias']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            # re-parametrize from scratch, load the saved state, and record
            # reference outputs across eval/train/eval transitions
            m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
            snm = torch.nn.utils.parametrizations.spectral_norm(m)
            snm.load_state_dict(state_dict)
            with torch.no_grad():
                snm.eval()
                out0_eval = snm(inp)
                snm.train()
                out1_train = snm(inp)
                out2_train = snm(inp)
                snm.eval()
                out3_eval = snm(inp)
            # loading the same state again must reproduce those outputs
            m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
            snm = torch.nn.utils.parametrizations.spectral_norm(m)
            snm.load_state_dict(state_dict)
            with torch.no_grad():
                snm.eval()
                self.assertEqual(out0_eval, snm(inp))
                snm.train()
                self.assertEqual(out1_train, snm(inp))
                self.assertEqual(out2_train, snm(inp))
                snm.eval()
                self.assertEqual(out3_eval, snm(inp))
    @skipIfNoLapack
    def test_spectral_norm_load_state_dict(self):
        """state_dict round-trip for the legacy hook-based spectral norm,
        including backward compatibility with the pre-versioning format
        (no 'spectral_norm' metadata, plain 'weight' key, no 'weight_v')."""
        inp = torch.randn(2, 3)
        for activate_times in (0, 3):
            m = nn.Linear(3, 5)
            snm = torch.nn.utils.spectral_norm(m)
            snm.train()
            # optionally run a few forwards so u/v move off their init values
            for _ in range(activate_times):
                snm(inp)
            version_latest_ref_state_dict = deepcopy(snm.state_dict())
            self.assertEqual({'weight_orig', 'bias', 'weight_u', 'weight_v'}, set(version_latest_ref_state_dict.keys()))
            # strict loading rejects unknown keys; non-strict tolerates
            # unknown and progressively missing keys
            non_strict_state_dict = deepcopy(version_latest_ref_state_dict)
            non_strict_state_dict['nonsense'] = 'nonsense'
            with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
                snm.load_state_dict(non_strict_state_dict, strict=True)
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['weight_orig']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['weight_u']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['weight_v']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            non_strict_state_dict['weight'] = snm.weight.detach().clone()
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict._metadata['']['spectral_norm']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['weight']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict['bias']
            snm.load_state_dict(non_strict_state_dict, strict=False)
            # craft an old-format ("version None") state dict: no metadata
            # version entry, no weight_v buffer, plain 'weight' key
            version_none_state_dict = deepcopy(version_latest_ref_state_dict)
            self.assertIn('spectral_norm', version_none_state_dict._metadata[''])
            del version_none_state_dict._metadata['']['spectral_norm']
            del version_none_state_dict['weight_v']
            version_none_state_dict['weight'] = snm.weight.detach().clone()
            # latest-format dicts, with and without the version metadata
            for version_latest_with_metadata in [True, False]:
                version_latest_state_dict = deepcopy(version_latest_ref_state_dict)
                if not version_latest_with_metadata:
                    del version_latest_state_dict._metadata['']['spectral_norm']
                # reference outputs from the pristine latest-format dict
                m = torch.nn.utils.remove_spectral_norm(snm)
                snm = torch.nn.utils.spectral_norm(m)
                snm.load_state_dict(version_latest_ref_state_dict)
                with torch.no_grad():
                    snm.eval()
                    out0_eval = snm(inp)
                    snm.train()
                    out1_train = snm(inp)
                    out2_train = snm(inp)
                    snm.eval()
                    out3_eval = snm(inp)
                # the old-format dict must reproduce those outputs (only
                # checked once the power iteration has actually run)
                m = torch.nn.utils.remove_spectral_norm(snm)
                snm = torch.nn.utils.spectral_norm(m)
                snm.load_state_dict(version_none_state_dict)
                if activate_times > 0:
                    with torch.no_grad():
                        snm.eval()
                        self.assertEqual(out0_eval, snm(inp))
                        snm.train()
                        self.assertEqual(out1_train, snm(inp))
                        self.assertEqual(out2_train, snm(inp))
                        snm.eval()
                        self.assertEqual(out3_eval, snm(inp))
                # the latest-format dict (with or without metadata) must too
                m = torch.nn.utils.remove_spectral_norm(snm)
                snm = torch.nn.utils.spectral_norm(m)
                snm.load_state_dict(version_latest_state_dict)
                with torch.no_grad():
                    snm.eval()
                    self.assertEqual(out0_eval, snm(inp))
                    snm.train()
                    self.assertEqual(out1_train, snm(inp))
                    self.assertEqual(out2_train, snm(inp))
                    snm.eval()
                    self.assertEqual(out3_eval, snm(inp))
def test_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.spectral_norm(m)
x = m(inp)
self.assertEqual(m.weight_u.shape, m.weight_orig[0, :, 0, 0].shape)
def test_new_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.parametrizations.spectral_norm(m)
snm = m.parametrizations.weight[0]
x = m(inp)
self.assertEqual(snm._u.shape, m.parametrizations.weight.original[0, :, 0, 0].shape)
def test_spectral_norm_forward(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.spectral_norm(m)
_weight, _bias, _u = m.weight_orig, m.bias, m.weight_u
_weight_mat = _weight.view(_weight.size(0), -1)
_v = torch.mv(_weight_mat.t(), _u)
_v = F.normalize(_v, dim=0, eps=1e-12)
_u = torch.mv(_weight_mat, _v)
_u = F.normalize(_u, dim=0, eps=1e-12)
_weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
out_hat = torch.nn.functional.linear(input, _weight, _bias)
expect_out = m(input)
self.assertEqual(expect_out, out_hat)
def test_new_spectral_norm_forward(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.parametrizations.spectral_norm(m)
snm = m.parametrizations.weight[0]
_weight = m.parametrizations.weight.original
_bias, _v = m.bias, snm._v
_weight_mat = _weight.view(_weight.size(0), -1)
_u = torch.mv(_weight_mat, _v)
_u = F.normalize(_u, dim=0, eps=1e-12)
_v = torch.mv(_weight_mat.t(), _u)
_v = F.normalize(_v, dim=0, eps=1e-12)
_weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
out_hat = torch.nn.functional.linear(input, _weight, _bias)
expect_out = m(input)
self.assertEqual(expect_out, out_hat)
def test_spectral_norm_pickle(self):
m = torch.nn.utils.spectral_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
    @skipIfNoLapack
    def test_orthogonal_parametrization(self):
        """Exhaustive check of torch.nn.utils.parametrizations.orthogonal.

        Sweeps square/tall/wide shapes, real/complex dtypes, Linear vs
        Conv2d, the three parametrization schemes ("matrix_exp", "cayley",
        "householder"), and use_trivialization on/off, verifying:
        initialization to the Q factor, assignment through the
        parametrization, orthogonality after optimizer steps, and the
        expected gradient sparsity structure on the original tensor.
        """
        def assert_is_orthogonal(X):
            # X^H X must be the identity (up to a dtype-scaled epsilon);
            # wide matrices are transposed first so columns are orthonormal.
            n, k = X.size(-2), X.size(-1)
            if n < k:
                X = X.mT
                n, k = k, n
            Id = torch.eye(k, dtype=X.dtype, device=X.device).expand(*(X.size()[:-2]), k, k)
            eps = 10 * n * torch.finfo(X.dtype).eps
            torch.testing.assert_allclose(X.mH @ X, Id, atol=eps, rtol=0.)
        def assert_weight_allclose_Q(weight, W):
            # weight must equal the Q of W's QR decomposition, with Q's sign
            # fixed so R has a positive diagonal (makes Q unique).
            wide_matrix = W.size(-2) < W.size(-1)
            if wide_matrix:
                W = W.mT
            Q, R = torch.linalg.qr(W)
            Q *= R.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
            if wide_matrix:
                Q = Q.mT
            torch.testing.assert_allclose(Q, weight, atol=1e-5, rtol=0.)
        for shape, dtype, use_linear in product(((4, 4), (5, 3), (3, 5)),
                                                (torch.float32, torch.complex64),
                                                (True, False)):
            # Complex Conv2d is not exercised here.
            if not use_linear and dtype.is_complex:
                continue
            if use_linear:
                input = torch.randn(3, shape[0], dtype=dtype)
            else:
                input = torch.randn(2, 2, shape[0] + 2, shape[1] + 1, dtype=dtype)
            for parametrization, use_trivialization in product(("matrix_exp", "cayley", "householder"),
                                                               (False, True)):
                # Assignment/initialization through the parametrization is
                # only supported with a trivialization (or householder).
                can_initialize = use_trivialization or parametrization == "householder"
                if use_linear:
                    m = nn.Linear(*shape, dtype=dtype)
                else:
                    m = nn.Conv2d(2, 3, shape, dtype=dtype)
                w_init = m.weight.clone()
                if parametrization == "householder" and m.weight.is_complex():
                    # Householder does not support complex tensors: must raise.
                    msg = "householder parametrization does not support complex tensors"
                    with self.assertRaisesRegex(ValueError, msg):
                        torch.nn.utils.parametrizations.orthogonal(m,
                                                                   "weight",
                                                                   parametrization,
                                                                   use_trivialization=use_trivialization)
                    continue
                wide_matrix = w_init.size(-2) < w_init.size(-1)
                torch.nn.utils.parametrizations.orthogonal(m,
                                                           "weight",
                                                           parametrization,
                                                           use_trivialization=use_trivialization)
                # Registering the parametrization must preserve the shape
                # and produce an orthogonal weight.
                self.assertEqual(w_init.shape, m.weight.shape)
                assert_is_orthogonal(m.weight)
                if can_initialize:
                    # Initialization maps the original weight to its Q factor.
                    assert_weight_allclose_Q(m.weight, w_init)
                # Assigning an orthogonal matrix through the parametrization.
                X = torch.randn_like(m.weight)
                if wide_matrix:
                    X = X.mT
                w_new = torch.linalg.qr(X).Q
                if wide_matrix:
                    w_new = w_new.mT
                if can_initialize:
                    m.weight = w_new
                    torch.testing.assert_allclose(w_new, m.weight, atol=1e-5, rtol=0.)
                else:
                    msg = "assign to the matrix exponential or the Cayley parametrization"
                    with self.assertRaisesRegex(NotImplementedError, msg):
                        m.weight = w_new
                # Assigning a non-orthogonal matrix projects onto its Q factor.
                w_new = torch.randn_like(m.weight)
                if can_initialize:
                    m.weight = w_new
                    assert_weight_allclose_Q(m.weight, w_new)
                else:
                    msg = "assign to the matrix exponential or the Cayley parametrization"
                    with self.assertRaisesRegex(NotImplementedError, msg):
                        m.weight = w_new
                opt = torch.optim.SGD(m.parameters(), lr=0.1)
                for _ in range(2):
                    opt.zero_grad()
                    m(input).norm().backward()
                    grad = m.parametrizations.weight.original.grad
                    self.assertIsNotNone(grad)
                    # The gradient on the original tensor must be triangular
                    # (the redundant half of the skew parametrization is zero)...
                    if grad.size(-2) >= grad.size(-1):
                        zeros_grad = grad.triu(1)
                    else:
                        zeros_grad = grad.tril(-1)
                    self.assertEqual(zeros_grad, torch.zeros_like(zeros_grad))
                    # ...and its (real part of the) diagonal must vanish.
                    diag_grad = grad.diagonal(dim1=-2, dim2=-1)
                    if grad.is_complex():
                        diag_grad = diag_grad.real
                    self.assertEqual(diag_grad, torch.zeros_like(diag_grad))
                    opt.step()
                # Orthogonality must survive optimizer updates.
                assert_is_orthogonal(m.weight)
    @skipIfNoLapack
    def test_orthogonal_errors(self):
        """Error paths of parametrizations.orthogonal: unknown scheme name,
        non-matrix target tensor, and shape-mismatched assignment through
        the registered parametrization."""
        m = nn.Linear(3, 4)
        with self.assertRaisesRegex(ValueError, "has to be one of"):
            torch.nn.utils.parametrizations.orthogonal(m, "weight", "foo")
        with self.assertRaisesRegex(ValueError, "Expected a matrix"):
            # bias is 1-D; orthogonal parametrization needs >= 2 dims.
            torch.nn.utils.parametrizations.orthogonal(m, "bias")
        torch.nn.utils.parametrizations.orthogonal(m, "weight")
        with self.assertRaisesRegex(ValueError, "matrices of shape"):
            # Assigning a 5x5 matrix to a 4x3 parametrized weight must fail.
            m.weight = torch.randn(5, 5)
        # Leave the module unparametrized for hygiene.
        torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
def test_threshold_int(self):
x = torch.tensor([-3, -2, -1, 0, 1, 2, 3])
expected = torch.tensor([99, 99, 99, 99, 1, 2, 3])
self.assertEqual(F.threshold(x, 0, 99), expected)
def test_threshold_bfloat16(self):
x = torch.randn(100)
for threshold in [0, -0.5, 0.5, float('inf'), float('-inf'), float('nan')]:
expected = F.threshold(x, threshold, 0).bfloat16().float()
res_bf16 = F.threshold(x.bfloat16(), threshold, 0).float()
self.assertEqual(res_bf16, expected)
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_embedding_max_norm_unsorted_repeating_indices(self):
        """CPU and CUDA must agree on max_norm renormalization even when the
        lookup indices are unsorted and heavily repeated (the in-place
        renorm must not race on duplicate rows on the GPU)."""
        def create_embedding(device):
            # Fixed seed so both devices start from identical weights.
            torch.manual_seed(0)
            return torch.nn.Embedding(
                num_embeddings=20,
                embedding_dim=64,
                max_norm=1.0).to(device)
        # Indices [0, 1] repeated 2000 times each -> many duplicates.
        ix = torch.arange(2, device='cpu', dtype=torch.long).repeat(2000)
        out_cpu = create_embedding('cpu')(ix)
        ix = ix.to('cuda')
        out = create_embedding('cuda')(ix)
        self.assertEqual(out.cpu(), out_cpu)
def test_embedding_sparse_basic(self):
embedding = nn.Embedding(10, 20, sparse=True)
input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long)
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
def test_embedding_sparse_empty_tensor(self):
embedding = nn.Embedding(0, 0, sparse=True)
input = torch.tensor([], dtype=torch.int64)
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
embedding = nn.Embedding(10, 0, sparse=True)
input = torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]])
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
    def test_move_sparse_half_embedding(self):
        """Moving a sparse Embedding across dtypes/devices must keep its
        metadata (embedding_dim / num_embeddings) intact."""
        embedding = nn.Embedding(10, 3, sparse=True)
        self.assertEqual(embedding.weight.device.type, 'cpu')
        # NOTE(review): float64 presumably relies on this test module setting
        # the default dtype to double at import time — confirm against the
        # file header (not visible in this chunk).
        self.assertEqual(embedding.weight.dtype, torch.float64)
        embedding.to(torch.float16)
        self.assertEqual(embedding.weight.dtype, torch.float16)
        # Metadata must survive the dtype move.
        self.assertEqual(embedding.embedding_dim, 3)
        self.assertEqual(embedding.num_embeddings, 10)
        if torch.cuda.is_available():
            # Round-trip across devices as well, when a GPU is present.
            embedding.to('cuda')
            self.assertEqual(embedding.weight.device.type, 'cuda')
            embedding.to('cpu')
            self.assertEqual(embedding.weight.device.type, 'cpu')
def test_embedding_max_norm(self):
embedding = nn.Embedding(22, 5, max_norm=1.0)
input = torch.tensor([2, 8, 8, 6], dtype=torch.long)
output = embedding(input)
self.assertEqual(output[1], output[2])
self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())
def test_embedding_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embedding = nn.Embedding.from_pretrained(a)
self.assertEqual(a, embedding.weight.data)
input = torch.LongTensor([0, 1])
output = embedding(input)
self.assertEqual(a, output)
def test_embedding_bag_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embedding = nn.EmbeddingBag.from_pretrained(a)
self.assertEqual(a, embedding.weight)
input = torch.tensor([0, 1], dtype=torch.long)
output = embedding(input, torch.arange(input.size(0)))
self.assertEqual(a, output)
def test_embedding_from_pretrained_padding_idx(self):
padding_idx = 2
padding_vec = torch.ones(3) * 7
embeddings = torch.rand(4, 3, requires_grad=True)
with torch.no_grad():
embeddings[padding_idx] = padding_vec
embedding_nn = nn.Embedding.from_pretrained(embeddings, padding_idx=padding_idx)
self.assertEqual(embedding_nn.weight[padding_idx], padding_vec)
def test_embedding_bag_from_pretrained_padding_idx(self):
padding_idx = 2
embeddings = torch.rand(4, 3, requires_grad=True)
embedding_nn = nn.EmbeddingBag.from_pretrained(embeddings, padding_idx=padding_idx)
self.assertEqual(embedding_nn.weight, embeddings)
def test_embedding_from_pretrained_options(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
opts = {
"max_norm": 2.,
"norm_type": .5,
"scale_grad_by_freq": False,
"sparse": True
}
embedding = nn.Embedding.from_pretrained(a, **opts)
input = torch.LongTensor([0, 1])
output = embedding(input)
self.assertEqual(a, output)
self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
self.assertTrue(output.data.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())
    def test_embedding_functional(self):
        """F.embedding must match the nn.Embedding module, both with and
        without a padding_idx."""
        a = torch.tensor([
            [1, 3, 2],
            [0, 2, 1]
        ], dtype=torch.long)
        embeddings = torch.rand(4, 3, requires_grad=True)
        # Module with the same weights as the functional call.
        embed_old = torch.nn.Embedding(4, 3)
        embed_old.weight.data = embeddings.data
        res_old = embed_old(a)
        res_F = F.embedding(a, embeddings)
        self.assertEqual(res_old, res_F)
        # Same comparison with a padding index in play.
        embed_old = torch.nn.Embedding(4, 3)
        embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
        res_old = embed_old(a)
        res_F = F.embedding(a, embeddings, padding_idx=2)
        self.assertEqual(res_old, res_F)
    def test_embedding_bag_functional(self):
        """F.embedding_bag must match the nn.EmbeddingBag module, both with
        and without a padding_idx."""
        a = torch.tensor([
            [1, 3, 2],
            [0, 2, 1]
        ], dtype=torch.long)
        embeddings = torch.rand(4, 3, requires_grad=True)
        # Module with the same weights as the functional call.
        embed_old = torch.nn.EmbeddingBag(4, 3)
        embed_old.weight = torch.nn.Parameter(embeddings)
        res_old = embed_old(a)
        res_F = F.embedding_bag(a, embeddings)
        self.assertEqual(res_old, res_F)
        # Same comparison with a padding index in play.
        embed_old = torch.nn.EmbeddingBag(4, 3)
        embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
        res_old = embed_old(a)
        res_F = F.embedding_bag(a, embeddings, padding_idx=2)
        self.assertEqual(res_old, res_F)
    def test_embedding_bag_padding_idx_error(self):
        """Out-of-range padding_idx must raise (with the documented message)
        and in-range values must not, for both F.embedding_bag and the
        nn.EmbeddingBag module."""
        a = torch.tensor([
            [1, 3, 2],
            [0, 2, 1]
        ], dtype=torch.long)
        num_embeddings = 4
        num_features = 3
        embeddings = torch.rand(num_embeddings, num_features, requires_grad=True)
        functional_err_msg = r'padding_idx must be within the number of embeddings'
        module_err_msg = r'padding_idx must be within num_embeddings'
        # Sweep a little beyond the valid [-num_embeddings, num_embeddings) range.
        for padding_idx in range(-(num_embeddings + 2), (num_embeddings + 2)):
            if (padding_idx < -num_embeddings) or (padding_idx >= num_embeddings):
                with self.assertRaisesRegex(RuntimeError, functional_err_msg):
                    F.embedding_bag(a, embeddings, padding_idx=padding_idx)
                with self.assertRaisesRegex(AssertionError, module_err_msg):
                    torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
            else:
                # Valid values must evaluate / construct without raising.
                F.embedding_bag(a, embeddings, padding_idx=padding_idx)
                torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
    @unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                         'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
                         ' with instruction set support avx2 or newer.')
    def test_fb_fc_packed(self):
        """fbgemm's fp16-packed linear op must match a plain numpy
        ``X @ W.T + b`` within fp16-quantization tolerance."""
        X = np.random.rand(16, 16).astype(np.float32) - 0.5
        W = np.random.rand(16, 16).astype(np.float32) - 0.5
        b = np.random.rand(16).astype(np.float32) - 0.5
        def fc_op(X, W, b):
            # Reference fully-connected op in float32 numpy.
            return np.dot(X, W.T) + b
        x_tensor = torch.tensor(X)
        w_tensor = torch.tensor(W)
        b_tensor = torch.tensor(b)
        # Pack the weight to fbgemm's fp16 layout, then run the fused op.
        packed_w_tensor = torch.fbgemm_pack_gemm_matrix_fp16(w_tensor)
        actual_output = torch.fbgemm_linear_fp16_weight(x_tensor, packed_w_tensor, b_tensor)
        expected_output = fc_op(X, W, b)
        # Loose tolerance: the weight is quantized to fp16 inside the packed op.
        torch.testing.assert_close(torch.from_numpy(expected_output), actual_output.cpu(), atol=1e-3, rtol=1e-3)
def test_embeddingbag_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embeddingbag = nn.EmbeddingBag.from_pretrained(a)
self.assertEqual(a, embeddingbag.weight.data)
input = torch.LongTensor([[0, 1]])
output = embeddingbag(input)
self.assertEqual(a.mean(0, keepdim=True), output)
def test_embeddingbag_from_pretrained_options(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
opts = {
"max_norm": 2.,
"norm_type": .5,
"scale_grad_by_freq": False,
"mode": "max",
"sparse": False
}
embeddingbag = nn.EmbeddingBag.from_pretrained(a, **opts)
input = torch.LongTensor([[0, 1]])
output = embeddingbag(input)
self.assertEqual(a.max(0, keepdim=True)[0], output)
self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
self.assertTrue(a.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())
def test_AlphaDropout(self):
input = torch.randn(5000)
self._test_alpha_dropout(nn.AlphaDropout, input)
def test_FeatureAlphaDropout(self):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
d = random.randint(1, 2)
num_features = 1000
input = torch.randn(num_features, b, d, w, h)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
input = torch.randn(50, 20, 64, 64)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
def test_pad_scalar_error(self):
inputs = torch.tensor(0., requires_grad=True)
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1, 1)))
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1,)))
    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    @parametrize_test("average_attn_weights", [True, False])
    def test_multihead_attention(self, average_attn_weights):
        """Compare F.multi_head_attention_forward against a pure-numpy
        reference implementation over 100 randomized configurations per
        scenario: bias k/v, zero-attn, key padding masks, static (saved)
        k/v, differing kdim/vdim, and byte vs float masks."""
        def _scaled_dot_attn_ref(Q, K, V, dims, unseen_mask=None, key_padding_mask=None,
                                 average_attn_weights=average_attn_weights):
            # Numpy reference for scaled dot-product attention with optional
            # additive -inf masking; returns (output, attention weights).
            QKT = _batchmatmul(
                Q,
                np.transpose(K, axes=[0, 1, 3, 2])
                / np.sqrt(dims[3], dtype=np.float32),  # divide by sqrt(d_head)
            )
            b1, b2, s1, s2 = QKT.shape
            if unseen_mask is not None or key_padding_mask is not None:
                # Masked positions get -inf before the softmax.
                for i in range(b1):
                    for j in range(b2):
                        for m in range(s1):
                            for n in range(s2):
                                if unseen_mask is not None and unseen_mask[m][n] == 0:
                                    QKT[i, j, m, n] = -np.inf
                                if key_padding_mask is not None and key_padding_mask[i][n]:
                                    QKT[i, j, m, n] = -np.inf
            reference = _softmax(QKT)
            ref_attn_weight = reference
            if average_attn_weights:
                # Average the attention weights over the head dimension.
                ref_attn_weight = np.sum(ref_attn_weight, axis=1) / b2
            reference = _batchmatmul(reference, V)
            return reference, ref_attn_weight
        def _batchmatmul(a, b):
            # Naive batched matmul over the two leading batch dimensions.
            assert a.shape[0] == b.shape[0]
            assert a.shape[1] == b.shape[1]
            retval = np.zeros(
                (a.shape[0], a.shape[1], a.shape[2], b.shape[3]), dtype=np.float32
            )
            for i in range(a.shape[0]):
                for j in range(a.shape[1]):
                    retval[i, j, :, :] = np.matmul(a[i, j, :, :], b[i, j, :, :])
            return retval
        def _softmax(x):
            # Row-wise numerically-stable softmax; -inf rows may produce
            # nan/0 which numpy warns about, hence the seterr.
            np.seterr(invalid='ignore')
            output = np.zeros(x.shape, dtype=np.float64)
            for i in range(x.shape[0]):
                for j in range(x.shape[1]):
                    for k in range(x.shape[2]):
                        x_curr = x[i, j, k, :]
                        e_x = np.exp(x_curr - np.amax(x_curr))
                        output[i, j, k, :] = e_x / np.sum(e_x)
            return output
        def _split_heads_ref(X, dims, nheads, d_head):
            # [batch, seq, d_model] -> [batch, nheads, seq, d_head]
            X_split = np.reshape(X, dims[:2] + [nheads, d_head])
            X_split_transposed = np.transpose(X_split, [0, 2, 1, 3])
            reference = np.reshape(X_split_transposed, [dims[0], nheads, dims[1], d_head])
            return reference
        def _combine_heads_ref(X, dims, nheads, d_head):
            # [batch, nheads, seq, d_head] -> [batch, seq, d_model]
            X_transposed = np.transpose(X, [0, 2, 1, 3])
            reference = np.reshape(X_transposed, dims[:2] + [nheads * d_head])
            return reference
        def _fc(X, X_weight, X_bias):
            # Numpy linear layer using the module's torch parameters.
            X_fc_b = X_bias.detach().numpy()
            X_fc_w = X_weight.detach().numpy()
            return np.matmul(X, np.transpose(X_fc_w)) + X_fc_b
        def _create_src_lengths_mask(batch_size, src_lengths):
            # [batch] lengths -> [batch, max_len] 0/1 validity mask.
            max_srclen = src_lengths.max()
            src_indices = torch.arange(0, max_srclen).unsqueeze(0).to(src_lengths)
            src_indices = src_indices.expand(batch_size, max_srclen)
            src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
            return (src_indices < src_lengths).int().detach()
        def _multihead_attn_test_helper(add_key_padding_mask=False, add_bias_kv=False, add_zero_attn=False,
                                        saved_kv=False, same_embed_dim=False, byte_mask=False,
                                        average_attn_weights=average_attn_weights):
            for _ in range(100):
                # Random sizes for this iteration.
                batch_sz, seq_len = [random.randint(2, 10) for r in range(2)]
                d_head = random.randint(3, 10)
                nheads = random.randint(3, 10)
                d_model = d_head * nheads
                if same_embed_dim:
                    kv_dim = d_model
                else:
                    kv_dim = random.randint(5, 20)
                dims = [batch_sz, seq_len, kv_dim]
                saved_k = None
                saved_k_tensor = None
                saved_v = None
                saved_v_tensor = None
                if saved_kv:
                    # Pre-projected ("static") key/value tensors.
                    saved_k = np.random.rand(batch_sz * nheads, seq_len, d_head)
                    saved_k_tensor = torch.from_numpy(saved_k).to(torch.get_default_dtype())
                    saved_v = np.random.rand(batch_sz * nheads, seq_len, d_head)
                    saved_v_tensor = torch.from_numpy(saved_v).to(torch.get_default_dtype())
                key_padding_mask = None
                key_padding_mask_tensor = None
                if add_key_padding_mask:
                    # Same random padding pattern repeated across the batch.
                    seq_mask = np.random.randint(0, 2, (1, seq_len))
                    key_padding_mask = (np.repeat(seq_mask, batch_sz, axis=0) == 1)
                    key_padding_mask_tensor = torch.from_numpy(key_padding_mask)
                    if byte_mask:
                        key_padding_mask_tensor = key_padding_mask_tensor.byte()
                decoder_state = np.random.rand(batch_sz, d_model)
                K = np.random.rand(*dims)
                V = K
                # Single-step query (decoder state) attending over K/V.
                Q = np.expand_dims(decoder_state, 1)
                attn_mask = np.random.randint(0 , 2, size=(1, seq_len))
                attn_mask_tensor = torch.from_numpy(attn_mask).float()
                if byte_mask:
                    attn_mask_tensor = (attn_mask_tensor == 0).byte()
                else:
                    # Float masks are additive: 0 keeps, -inf blocks.
                    attn_mask_tensor.masked_fill_(attn_mask_tensor == 0, float('-inf'))
                    attn_mask_tensor.masked_fill_(attn_mask_tensor > 0, float('0.0'))
                    attn_mask_tensor = attn_mask_tensor.double()
                decoder_state_tensor = torch.from_numpy(decoder_state).to(torch.get_default_dtype())
                source_hid_tensor = torch.from_numpy(K).to(torch.get_default_dtype()).transpose(0, 1)
                multihead_attn_module = MultiheadAttention(d_model, nheads,
                                                           add_bias_kv=add_bias_kv,
                                                           add_zero_attn=add_zero_attn,
                                                           kdim=kv_dim, vdim=kv_dim)
                if add_bias_kv:
                    bias_k = multihead_attn_module.bias_k.detach().numpy()
                    bias_v = multihead_attn_module.bias_v.detach().numpy()
                else:
                    bias_k = None
                    bias_v = None
                # Inputs in [seq, batch, dim] layout for the functional API.
                _Q = decoder_state_tensor.unsqueeze(1).transpose(0, 1)
                _V = source_hid_tensor
                _K = source_hid_tensor
                if multihead_attn_module._qkv_same_embed_dim:
                    result, result_weight = torch.nn.functional.multi_head_attention_forward(
                        _Q, _K, _V,
                        d_model, nheads,
                        multihead_attn_module.in_proj_weight, multihead_attn_module.in_proj_bias,
                        multihead_attn_module.bias_k, multihead_attn_module.bias_v,
                        multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
                        multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
                        multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
                        static_k=saved_k_tensor, static_v=saved_v_tensor,
                        average_attn_weights=average_attn_weights)
                else:
                    # Separate q/k/v projection weights when kdim/vdim differ.
                    result, result_weight = torch.nn.functional.multi_head_attention_forward(
                        _Q, _K, _V,
                        d_model, nheads,
                        None, multihead_attn_module.in_proj_bias,
                        multihead_attn_module.bias_k, multihead_attn_module.bias_v,
                        multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
                        multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
                        multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
                        True, multihead_attn_module.q_proj_weight,
                        multihead_attn_module.k_proj_weight, multihead_attn_module.v_proj_weight,
                        static_k=saved_k_tensor, static_v=saved_v_tensor,
                        average_attn_weights=average_attn_weights)
                result = result.squeeze(0).detach().numpy()
                # Recover the projection weights used above for the reference.
                if multihead_attn_module._qkv_same_embed_dim:
                    q_proj_weight = multihead_attn_module.in_proj_weight[:d_model]
                    k_proj_weight = multihead_attn_module.in_proj_weight[d_model:(d_model * 2)]
                    v_proj_weight = multihead_attn_module.in_proj_weight[(d_model * 2):]
                else:
                    q_proj_weight = multihead_attn_module.q_proj_weight
                    k_proj_weight = multihead_attn_module.k_proj_weight
                    v_proj_weight = multihead_attn_module.v_proj_weight
                Q_fc = _fc(Q, q_proj_weight, multihead_attn_module.in_proj_bias[:d_model])
                K_fc = _fc(K, k_proj_weight, multihead_attn_module.in_proj_bias[d_model:(d_model * 2)])
                V_fc = _fc(V, v_proj_weight, multihead_attn_module.in_proj_bias[(d_model * 2):])
                if add_bias_kv:
                    # bias_k/bias_v are appended as one extra source position.
                    K_fc = np.concatenate((K_fc, np.repeat(bias_k, K_fc.shape[0], axis=0)), axis=1)
                    V_fc = np.concatenate((V_fc, np.repeat(bias_v, V_fc.shape[0], axis=0)), axis=1)
                    if attn_mask is not None:
                        attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
                    if key_padding_mask is not None:
                        key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
                    dims[1] += 1
                Q_split = _split_heads_ref(
                    Q_fc, [batch_sz, 1, d_model], nheads, d_head
                )
                if saved_k is not None:
                    # Static k/v are already per-head; just reshape.
                    K_split = np.reshape(saved_k, [dims[0], nheads, dims[1], d_head])
                else:
                    K_split = _split_heads_ref(K_fc, dims, nheads, d_head)
                if saved_v is not None:
                    V_split = np.reshape(saved_v, [dims[0], nheads, dims[1], d_head])
                else:
                    V_split = _split_heads_ref(V_fc, dims, nheads, d_head)
                if add_zero_attn:
                    # Zero-attention adds an all-zero source position.
                    dims[1] += 1
                    K_split = np.concatenate((K_split, np.zeros([K_split.shape[0], K_split.shape[1], 1, K_split.shape[3]])), axis=2)
                    V_split = np.concatenate((V_split, np.zeros([V_split.shape[0], V_split.shape[1], 1, V_split.shape[3]])), axis=2)
                    if attn_mask is not None:
                        attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
                    if key_padding_mask is not None:
                        key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
                attn_heads, ref_attn_weight = _scaled_dot_attn_ref(
                    Q=Q_split,
                    K=K_split,
                    V=V_split,
                    dims=Q_split.shape,
                    unseen_mask=attn_mask,
                    key_padding_mask=key_padding_mask
                )
                combined_attn_heads = _combine_heads_ref(
                    X=attn_heads, dims=[batch_sz, 1], nheads=nheads, d_head=d_head
                )
                reference = _fc(combined_attn_heads, multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias)
                reference = np.squeeze(reference, axis=1)
                # Output and attention weights must match the numpy reference.
                self.assertEqual(tuple(result.shape), (batch_sz, d_model))
                np.testing.assert_allclose(result, reference, atol=1e-5)
                result_weight = result_weight.detach().numpy()
                self.assertEqual(tuple(result_weight.shape), tuple(ref_attn_weight.shape))
                np.testing.assert_allclose(result_weight, ref_attn_weight, atol=1e-5)
        # Individual scenarios, each a thin wrapper over the helper above.
        def test_multihead_attn_add_bias_kv():
            _multihead_attn_test_helper(add_bias_kv=True)
        def test_multihead_attn_add_zero_attn():
            _multihead_attn_test_helper(add_zero_attn=True)
        def test_multihead_attn_no_masking():
            _multihead_attn_test_helper()
        def test_multihead_attn_key_padding_mask():
            _multihead_attn_test_helper(add_key_padding_mask=True)
        def test_multihead_attn_saved_kv():
            _multihead_attn_test_helper(saved_kv=True)
        def test_multihead_attn_add_bias_kv_zero_attn():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
                                        add_zero_attn=True)
        def test_multihead_attn_all_arguments1():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True, saved_kv=True)
        def test_multihead_attn_all_arguments2():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
                                        add_zero_attn=True, saved_kv=True)
        def test_multihead_attn_all_arguments3():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
                                        saved_kv=True, same_embed_dim=True)
        def test_multihead_attn_all_arguments4():
            _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
                                        saved_kv=True, same_embed_dim=True, byte_mask=True)
        test_multihead_attn_add_zero_attn()  # Test MultiheadAttention with add_zero_attn
        test_multihead_attn_add_bias_kv()  # Test MultiheadAttention with add_bias_kv
        test_multihead_attn_no_masking()   # Test MultiheadAttention without masking
        test_multihead_attn_key_padding_mask()  # Test MultiheadAttention with src lengths
        test_multihead_attn_saved_kv()  # Test MultiheadAttention with static kv.
        test_multihead_attn_add_bias_kv_zero_attn()  # Test MultiheadAttention with bias_kv and zero_attn.
        test_multihead_attn_all_arguments1()  # Test MultiheadAttention with all the argument.
        # Combining bias_kv with static (saved) key is explicitly forbidden.
        with self.assertRaisesRegex(AssertionError, "bias cannot be added to static key."):
            test_multihead_attn_all_arguments2()  # Test MultiheadAttention with all the argument.
        test_multihead_attn_all_arguments3()  # Test MultiheadAttention with all the argument.
        test_multihead_attn_all_arguments4()  # Test MultiheadAttention with all the argument.
    def test_multihead_attn_3d_attn_mask(self):
        """A 3-D attn_mask (one 2-D mask per batch item, repeated per head)
        must reproduce the per-sample outputs obtained by running each
        sample separately with its own 2-D mask."""
        embed_dim = 8
        num_heads = 4
        batch_size = 8
        src_len = 3
        tgt_len = 2
        query = torch.rand(batch_size, tgt_len, embed_dim)  # [N, T, D]
        key = torch.rand(batch_size, src_len, embed_dim)  # [N, S, D]
        value = key  # [N, S, D]
        # Random additive mask: 0 keeps a position, -inf blocks it.
        attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float()
        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
        mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)
        # Batched run with the mask expanded to [N * H, T, S].
        attn_mask_3d = torch.repeat_interleave(attn_mask, num_heads, dim=0)
        output_3d = mta_model(query.transpose(0, 1), key.transpose(0, 1), value.transpose(0, 1), attn_mask=attn_mask_3d)[0]
        output_3d = output_3d.transpose(0, 1)  # [N, T, D]
        # Per-sample runs with the corresponding 2-D mask must agree.
        for i in range(0, batch_size):
            output_2d = mta_model(query[i].unsqueeze(0).transpose(0, 1),
                                  key[i].unsqueeze(0).transpose(0, 1),
                                  value[i].unsqueeze(0).transpose(0, 1),
                                  attn_mask=attn_mask[i])[0]
            self.assertEqual(output_3d[i].unsqueeze(0).transpose(0, 1), output_2d)
def test_multihead_attn_no_bias(self):
embed_dim = 8
num_heads = 4
mha = torch.nn.MultiheadAttention(embed_dim, num_heads, bias=False)
self.assertIsNone(mha.in_proj_bias)
self.assertIsNone(mha.out_proj.bias)
    def test_multihead_attn_invalid_shape(self):
        """Shape validation of MultiheadAttention for both batched (3-D) and
        unbatched (2-D) calls: mismatched key/value ranks and wrongly-ranked
        masks must raise with the documented messages."""
        mha = torch.nn.MultiheadAttention(3, 3)
        # Batched (3-D) query determines the expected ranks below.
        query = torch.randn(3, 3, 3)
        key = torch.randn(3, 3, 3)
        value = torch.randn(3, 3, 3)
        msg = "expected `key` and `value` to be 3-D but found 2-D and 3-D tensors respectively"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, torch.randn(3, 3), value)
        msg = "expected `key` and `value` to be 3-D but found 3-D and 2-D tensors respectively"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, torch.randn(3, 3))
        msg = "expected `key_padding_mask` to be `None` or 2-D but found 1-D tensor instead"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, key_padding_mask=torch.tensor([False, True, True], dtype=torch.bool))
        msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))
        # Unbatched (2-D) query flips the expected ranks.
        query = torch.randn(3, 3)
        key = torch.randn(3, 3)
        value = torch.randn(3, 3)
        msg = "expected `key` and `value` to be 2-D but found 3-D and 2-D tensors respectively"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, torch.randn(3, 3, 3), value)
        msg = "expected `key` and `value` to be 2-D but found 2-D and 3-D tensors respectively"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, torch.randn(3, 3, 3))
        msg = "expected `key_padding_mask` to be `None` or 1-D but found 2-D tensor instead"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, key_padding_mask=torch.tensor([[False, True, True] * 2], dtype=torch.bool))
        msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))
        # A 3-D attn_mask with the wrong leading dim must also be rejected.
        msg = r"Expected `attn_mask` shape to be \(3, 3, 3\)"
        with self.assertRaisesRegex(AssertionError, msg):
            mha(query, key, value, attn_mask=torch.randn(4, 3, 3).bernoulli_().to(torch.bool))
def test_normalize(self):
inputs = torch.randn(1, 3, 4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))
inputs = torch.randn((), requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
def test_adaptive_pooling_input_size(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * numel
module = module_cls(output_size)
input = torch.randn(output_size)
self.assertRaises(ValueError, lambda: module(input))
def test_adaptive_pooling_size_none(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * (numel - 1) + (None,)
module = module_cls(output_size)
input = torch.randn((4,) * (numel + 1))
output = module(input)
self.assertEqual(output.size(), (4,) + (2,) * (numel - 1) + (4,))
@unittest.skipIf(TEST_WITH_UBSAN, "signed integer overflow error with UBSAN")
def test_adaptive_pooling_size_overflow(self):
self.assertRaises(
RuntimeError,
lambda: torch.nn.AdaptiveMaxPool1d(0x3fffffffffffffff)(torch.empty([2, 2, 2])))
    def test_adaptive_pooling_avg_nhwc(self):
        """AdaptiveAvgPool2d on a channels-last input must produce a
        channels-last output matching the contiguous reference, for both
        forward values and input gradients."""
        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')
        for device in device_list:
            input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
            input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
            grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
            pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
            # Contiguous clones serve as the reference computation.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
            out = pool(input)
            out.backward(grad)
            ref_out = ref_pool(ref_input)
            ref_out.backward(ref_grad)
            # Memory layout must be preserved on the NHWC path.
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(input.grad, ref_input.grad)
    def test_adaptive_pooling_avg_nhwc_non_contiguous(self):
        """Same channels-last vs contiguous comparison as above, but with a
        strided (every-other-channel) view so the NHWC kernel also handles
        non-contiguous inputs and gradients."""
        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')
        for device in device_list:
            input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
            input = input.contiguous(memory_format=torch.channels_last)
            # Channel slicing makes the input non-contiguous in any layout.
            input = input[:, ::2, :, :].requires_grad_()
            grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
            grad = grad[:, ::2, :, :]
            pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
            # Contiguous clones serve as the reference computation.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
            out = pool(input)
            out.backward(grad)
            ref_out = ref_pool(ref_input)
            ref_out.backward(ref_grad)
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(input.grad, ref_input.grad)
    def test_adaptive_pooling_bfloat16(self):
        """bfloat16 adaptive avg/max pooling must track the float32 result
        (loose tolerance) and preserve dtype and memory format, on both
        contiguous and channels-last inputs."""
        def _test_adaptive_pooling_bfloat16(self, device, mod, memory_format):
            input = torch.randint(1, 10, (3, 19, 8, 8), dtype=torch.float32)
            input = input.to(device).to(memory_format=memory_format).requires_grad_()
            pool = mod((7, 7)).to(device)
            # bfloat16 twin of the same input, gradients included.
            input2 = input.detach().clone().bfloat16().requires_grad_(True)
            out = pool(input)
            out.sum().backward()
            out2 = pool(input2)
            out2.sum().backward()
            self.assertTrue(out2.is_contiguous(memory_format=memory_format))
            self.assertEqual(out2.dtype, torch.bfloat16)
            self.assertEqual(input2.grad.dtype, torch.bfloat16)
            # Loose tolerance: bfloat16 carries ~3 decimal digits.
            self.assertEqual(out, out2.float(), atol=0.1, rtol=0)
            self.assertEqual(input.grad, input2.grad.float(), atol=0.1, rtol=0)
        device_list = ['cpu']
        for device in device_list:
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.contiguous_format)
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.channels_last)
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.contiguous_format)
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.channels_last)
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @largeTensorTest('12GB', device='cuda')
    def test_adaptive_pooling_avg_nhwc_launch_config_backward(self):
        """Channels-last AdaptiveAvgPool2d backward must stay correct when
        one input dimension (2**17 + 1) exceeds the CUDA grid-dimension
        limit, forcing a non-trivial kernel launch configuration."""
        input = torch.randint(1, 10, (1, 32, 2 ** 17 + 1, 32), dtype=torch.float32, device="cuda")
        input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
        grad = torch.randint(1, 10, (1, 32, 10, 32), dtype=torch.float32, device="cuda")
        pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
        # Contiguous clones serve as the reference computation.
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
        out = pool(input)
        out.backward(grad)
        ref_out = ref_pool(ref_input)
        ref_out.backward(ref_grad)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(input.grad, ref_input.grad)
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @largeTensorTest('12GB', device='cuda')
    def test_adaptive_pooling_avg_nhwc_launch_config_forward(self):
        """Channels-last AdaptiveAvgPool2d forward must stay correct when
        the requested output size (2**17 + 1) exceeds the CUDA
        grid-dimension limit."""
        input = torch.randint(1, 10, (1, 32, 16, 16), dtype=torch.float32, device="cuda")
        input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
        pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
        # Contiguous clone serves as the reference computation.
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
        out = pool(input)
        ref_out = ref_pool(ref_input)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@skipIfRocm
def test_broadcast_double_backwards_gpu(self):
tensors = (torch.randn(4, 4, device='cuda', requires_grad=True),
torch.randn(4, 4, device='cuda', requires_grad=True),
torch.randn(4, 4, device='cuda', requires_grad=True))
ast.apply((0, 1), *i), tensors,
check_batched_grad=False)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_not_requiring_grad(self):
variables = [
torch.randn(1, 2, device='cuda', requires_grad=True),
torch.randn(1, 2, device='cuda', requires_grad=False),
torch.randn(1, 2, device='cuda', requires_grad=False),
torch.randn(1, 2, device='cuda', requires_grad=True),
torch.randn(1, 2, device='cuda', requires_grad=True),
]
broadcasted_variables = Broadcast.apply((0, 1), *variables)
for output_idx, broadcasted_var in enumerate(broadcasted_variables):
input_var = variables[output_idx % len(variables)]
self.assertEqual(input_var.requires_grad, broadcasted_var.requires_grad)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_no_grad(self):
    """Inside torch.no_grad() the broadcast copies must not require grad,
    even though the source tensor does."""
    x = torch.randn(1, 2, dtype=torch.float32, requires_grad=True, device='cuda')
    with torch.no_grad():
        broadcasted = Broadcast.apply((0, 1), x)
    self.assertTrue(x.requires_grad)
    for output in broadcasted:
        self.assertFalse(output.requires_grad)
def test_state_dict(self):
    """state_dict must list every parameter/buffer (a shared submodule is
    listed under both names), carry per-module _metadata, skip None members
    and absent biases, and alias — not copy — the underlying storage."""
    l = nn.Linear(5, 5)
    block = nn.Module()
    block.conv = nn.Conv2d(3, 3, 3, bias=False)
    net = nn.Module()
    net.linear1 = l
    net.linear2 = l  # the same Linear registered under two names on purpose
    net.bn = nn.BatchNorm2d(2)
    net.block = block
    net.add_module('empty', None)
    state_dict = net.state_dict()
    self.assertEqual(len(state_dict), 10)
    # One _metadata entry per module: root, linear1, linear2, bn, block, block.conv.
    self.assertEqual(len(state_dict._metadata), 6)
    self.assertIn('', state_dict._metadata)
    self.assertIn('linear1', state_dict._metadata)
    self.assertIn('linear1.weight', state_dict)
    self.assertIn('linear1.bias', state_dict)
    self.assertIn('linear2', state_dict._metadata)
    self.assertIn('linear2.weight', state_dict)
    self.assertIn('linear2.bias', state_dict)
    self.assertIn('block', state_dict._metadata)
    self.assertIn('block.conv', state_dict._metadata)
    self.assertIn('block.conv.weight', state_dict)
    self.assertIn('block.conv.weight', state_dict)
    self.assertNotIn('block.conv.bias', state_dict)  # bias=False above
    self.assertIn('bn', state_dict._metadata)
    self.assertIn('bn.weight', state_dict)
    self.assertIn('bn.bias', state_dict)
    self.assertIn('bn.running_var', state_dict)
    self.assertIn('bn.running_mean', state_dict)
    self.assertIn('bn.num_batches_tracked', state_dict)
    # The None module must contribute nothing.
    self.assertFalse(any(k.startswith('empty') for k in state_dict.keys()))
    for k, v in state_dict.items():
        # Walk the dotted key down the module tree and check the state-dict
        # entry shares storage with the live attribute.
        param = net
        for component in k.split('.'):
            param = getattr(param, component)
        if isinstance(param, Parameter):
            param = param.data
        self.assertEqual(v.data_ptr(), param.data_ptr())

    # A bare module: two entries, root-only metadata with a version tag,
    # values aliasing the parameters.
    l = nn.Linear(5, 5)
    state_dict = l.state_dict()
    self.assertEqual(len(state_dict), 2)
    self.assertEqual(len(state_dict._metadata), 1)
    self.assertIn('', state_dict._metadata)
    self.assertTrue(state_dict._metadata['']['version'] >= 0)
    self.assertEqual(state_dict['weight'].data_ptr(), l.weight.data_ptr())
    self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())
def test_load_state_dict(self):
    """load_state_dict round-trips values, strips DDP-style 'module.'
    prefixes, and reports missing/unexpected keys correctly in both strict
    and non-strict modes; shape mismatches always raise."""
    l = nn.Linear(5, 5)
    block = nn.Module()
    block.conv1 = nn.Conv2d(3, 3, 3, bias=True)
    block.conv2 = nn.Conv2d(3, 3, 3, bias=False)
    net = nn.Module()
    net.linear1 = l
    net.linear2 = l
    net.bn = nn.BatchNorm2d(2)
    net.block = block
    net.add_module('empty', None)
    conv1_bias_dtype = block.conv1.bias.dtype

    # A state dict with a few entries overridden.
    state_dict = net.state_dict()
    state_dict.update({
        'linear1.weight': torch.ones(5, 5),
        'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
        'bn.running_mean': torch.randn(2),
    })
    # The same overrides with DataParallel/DDP-style 'module.' prefixes,
    # stripped via the dedicated helper before loading.
    ddp_state_dict = net.state_dict()
    ddp_state_dict.update({
        'module.linear1.weight': torch.ones(5, 5),
        'module.block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
        'module.bn.running_mean': torch.randn(2),
    })
    torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, 'module.')
    for sd in [state_dict, ddp_state_dict]:
        incompatible_keys = net.load_state_dict(sd)
        self.assertEqual(len(incompatible_keys.missing_keys), 0)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
        self.assertNotIn('Incompatible', str(incompatible_keys))
        self.assertEqual(net.linear1.weight, sd['linear1.weight'])
        self.assertEqual(net.block.conv1.bias, sd['block.conv1.bias'])
        self.assertEqual(net.bn.running_mean, sd['bn.running_mean'])

    # Unknown flat key: strict mode raises, non-strict reports it.
    state_dict = net.state_dict()
    state_dict.update({'extra': torch.ones(5)})
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    incompatible_keys = net.load_state_dict(state_dict, strict=False)
    self.assertEqual(len(incompatible_keys.missing_keys), 0)
    self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
    self.assertIn('extra', incompatible_keys.unexpected_keys)
    self.assertIn('Incompatible', str(incompatible_keys))

    # Unknown dotted key behaves the same way.
    state_dict = net.state_dict()
    state_dict.update({'extra.param': torch.ones(5)})
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    incompatible_keys = net.load_state_dict(state_dict, strict=False)
    self.assertEqual(len(incompatible_keys.missing_keys), 0)
    self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
    self.assertIn('extra.param', incompatible_keys.unexpected_keys)

    # Missing key: strict raises, non-strict reports it.
    state_dict = net.state_dict()
    del state_dict['linear1.weight']
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    incompatible_keys = net.load_state_dict(state_dict, strict=False)
    self.assertEqual(len(incompatible_keys.missing_keys), 1)
    self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
    self.assertIn('linear1.weight', incompatible_keys.missing_keys)

    # Missing and unexpected keys at the same time.
    state_dict.update({'extra.param': torch.ones(5)})
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    incompatible_keys = net.load_state_dict(state_dict, strict=False)
    self.assertEqual(len(incompatible_keys.missing_keys), 1)
    self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
    self.assertIn('linear1.weight', incompatible_keys.missing_keys)
    self.assertIn('extra.param', incompatible_keys.unexpected_keys)

    # Shape mismatch raises in BOTH strict and non-strict modes.
    state_dict = net.state_dict()
    state_dict.update({'bn.running_mean': torch.rand(14, 4)})
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict, strict=False))

    # Non-strict load from a plain dict: listed keys are applied, the
    # nonexistent key is ignored, and everything else is left untouched.
    state_dict = net.state_dict()
    old_state_dict = deepcopy(state_dict)
    state_dict = {
        'linear1.weight': torch.ones(5, 5),
        'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
        'bn.running_mean': torch.randn(2),
        'nonexistent_key': torch.rand(3)
    }
    net.load_state_dict(state_dict, strict=False)
    self.assertEqual(net.linear1.weight, state_dict['linear1.weight'])
    self.assertEqual(net.block.conv1.bias, state_dict['block.conv1.bias'])
    self.assertEqual(net.bn.running_mean, state_dict['bn.running_mean'])
    new_state_dict = net.state_dict()
    del old_state_dict['linear1.weight']
    del old_state_dict['block.conv1.bias']
    del old_state_dict['bn.running_mean']
    for k, v, in old_state_dict.items():
        self.assertTrue(v.equal(new_state_dict[k]))
def test_load_state_dict_BC(self):
bn = nn.BatchNorm2d(3)
state_dict = bn.state_dict()
del state_dict['num_batches_tracked']
state_dict._metadata['']['version'] = 1
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
del state_dict._metadata['']['version']
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
def test_load_state_dict_ref_cycle(self):
import gc
m = torch.nn.LSTM(16, 16, bidirectional=True)
gc.collect()
m.load_state_dict(deepcopy(m).state_dict())
refcycles = gc.collect()
self.assertEqual(refcycles, 0)
def test_load_state_dict_custom(self):
class CustomState(nn.Module):
def __init__(self):
super(CustomState, self).__init__()
self.param = torch.nn.Parameter(torch.ones(1))
self.sub = torch.nn.Linear(5, 5)
def _save_to_state_dict(self, destination, prefix, keep_vars):
destination[prefix + "serialized"] = self.param.data + 1
def _load_from_state_dict(self, state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs):
# skip some of the error handling
self.param.data.copy_(state_dict[prefix + "serialized"] - 1)
# use sequential to verify nesting
m = nn.Sequential(CustomState())
with torch.no_grad():
m[0].param[0] = 10
m[0].sub.weight[0, 0] = 555
state_dict = m.state_dict()
self.assertEqual(state_dict["0.serialized"].item(), 11)
self.assertIn("0.sub.weight", state_dict)
self.assertNotIn("0.param", state_dict)
del m
mm = nn.Sequential(CustomState())
self.assertEqual(mm[0].param[0].item(), 1)
mm.load_state_dict(state_dict)
self.assertEqual(mm[0].param[0].item(), 10)
self.assertEqual(mm[0].sub.weight[0, 0].item(), 555)
def test_extra_state(self):
class SubModule(torch.nn.Module):
def __init__(self, foo):
super().__init__()
self.foo = foo
def get_extra_state(self):
return {
'foo': self.foo
}
def set_extra_state(self, state):
self.foo = state['foo']
class MyModule(torch.nn.Module):
def __init__(self, foo, bar):
super().__init__()
self.sub = SubModule(foo)
self.bar = bar
def get_extra_state(self):
return {
'bar': self.bar
}
def set_extra_state(self, state):
self.bar = state['bar']
# Ensure state_dict contains the extra state by loading it into another module.
m = MyModule(3, 'something')
m2 = MyModule(5, 'something else')
m2.load_state_dict(m.state_dict())
self.assertEqual(m.state_dict(), m2.state_dict())
self.assertEqual(m2.bar, m.bar)
self.assertEqual(m2.sub.foo, m.sub.foo)
def test_extra_state_non_dict(self):
class MyModule(torch.nn.Module):
def __init__(self, foo):
super().__init__()
self.foo = foo
def get_extra_state(self):
return self.foo
def set_extra_state(self, state):
self.foo = state
# Test various types of extra state.
for state in ('something', 5, MyModule(3)):
m = MyModule(state)
m2 = MyModule('something else')
m2.load_state_dict(m.state_dict())
self.assertEqual(m.state_dict(), m2.state_dict())
self.assertEqual(m.foo, m2.foo)
def test_extra_state_missing_set_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def get_extra_state(self):
return {
'foo': 5
}
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Unexpected key'):
m.load_state_dict(m.state_dict())
def test_extra_state_missing_get_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def set_extra_state(self):
pass
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Missing key'):
m.load_state_dict(m.state_dict())
def test_parameter_assignment(self):
l = nn.Linear(5, 5)
def num_params():
return len(list(l.parameters()))
self.assertEqual(num_params(), 2)
new_param = Parameter(torch.randn(5, 5))
l.param_name = new_param
self.assertEqual(num_params(), 3)
self.assertObjectIn(new_param, l.parameters())
var = torch.randn(5, 5)
l.var_name = var
self.assertEqual(num_params(), 3)
self.assertNotIn(id(var), map(id, l.parameters()))
# Make sure Variables are not saved as parameters
l.variable_attr = torch.empty(5, 5)
self.assertEqual(num_params(), 3)
l.param_attr = Parameter(torch.empty(5, 5))
self.assertEqual(num_params(), 4)
# It shouldn't be possible to replace a parameter with a Variable
def assign_var():
l.param_attr = torch.empty(5, 5)
self.assertRaises(TypeError, assign_var)
l.param_attr = None
self.assertEqual(num_params(), 3)
def test_assignment(self):
    """Attribute-assignment semantics on nn.Module: Parameters and child
    Modules are registered in insertion order, a None assignment unregisters
    the name (falling back to __dict__), `del` removes it entirely, and
    registered buffers behave analogously."""
    l = nn.Module()
    a = nn.Parameter(torch.randn(2))
    b = nn.Parameter(torch.randn(3))
    c = nn.Parameter(torch.randn(4))
    q = nn.Linear(4, 4)
    r = nn.Linear(5, 5)
    w = nn.Linear(6, 6)

    def test_assignments(get_list, a, b, c):
        # Shared scenario run once with Parameters and once with Modules;
        # get_list() returns the currently registered parameters/children.
        l.a = None
        self.assertIsNone(l.a)
        self.assertIn('a', l.__dict__)  # plain attribute, not registered
        l.a = a
        self.assertIs(l.a, a)
        self.assertEqual(get_list(), [a])
        self.assertNotIn('a', l.__dict__)  # moved into the registry

        l.b = None
        self.assertIsNone(l.b)
        self.assertIn('b', l.__dict__)
        l.b = b
        self.assertIs(l.b, b)
        self.assertEqual(get_list(), [a, b])
        self.assertNotIn('b', l.__dict__)

        # Setting to None keeps the slot, so restoring 'a' regains its
        # original position ([a, b], not [b, a]).
        l.a = None
        self.assertIsNone(l.a)
        self.assertEqual(get_list(), [b])
        l.a = a
        self.assertIs(l.a, a)
        self.assertEqual(get_list(), [a, b])

        # Replacement keeps the original position.
        l.a = c
        self.assertIs(l.a, c)
        self.assertEqual(get_list(), [c, b])

        # `del` removes the slot entirely, so re-adding appends at the end.
        del l.a
        self.assertFalse(hasattr(l, 'a'))
        l.a = a
        self.assertIs(l.a, a)
        self.assertEqual(get_list(), [b, a])

    test_assignments(lambda: list(l.parameters()), a, b, c)
    del l.a, l.b
    self.assertEqual(list(l.parameters()), [])

    test_assignments(lambda: list(l.children()), q, r, w)
    del l.a, l.b
    self.assertEqual(list(l.children()), [])

    # Buffers: a None assignment keeps the name registered (it stays out of
    # __dict__), and buffers show up in the state dict.
    buf = torch.randn(10)
    l.register_buffer('buf', buf)
    self.assertIs(l.buf, buf)
    l.buf = None
    self.assertIs(l.buf, None)
    self.assertNotIn('buf', l.__dict__)  # still a registered (None) buffer
    l.buf = buf
    self.assertIn('buf', l.state_dict())
    self.assertEqual(l.state_dict()['buf'], buf)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_thnn_conv_strided_padded_dilated(self):
    """With cuDNN disabled, strided/padded/dilated conv and conv_transpose
    (2d and 3d) on CUDA must match the CPU result and pass gradcheck on
    both devices."""
    for convfn, dims, transposed in (
            (torch.nn.functional.conv2d, 2, False),
            (torch.nn.functional.conv_transpose2d, 2, True),
            (torch.nn.functional.conv3d, 3, False),
            (torch.nn.functional.conv_transpose3d, 3, True)):
        for stride, padding, dilation in (
                (2, 0, 1), (1, 1, 1), (2, 1, 1), (1, 0, 2)):
            kwargs = {"stride": stride, "padding": padding, "dilation": dilation}
            # Tiny double-precision operands so gradcheck is cheap and tight.
            inp_shape = (1, 2) + dims * (4,)
            weight_shape = (2, 2) + dims * (1,)
            inputs = torch.randn(inp_shape, dtype=torch.double, device="cuda", requires_grad=True)
            weight = torch.randn(weight_shape, dtype=torch.double, device="cuda", requires_grad=True)
            bias = torch.randn(2, dtype=torch.double, device="cuda", requires_grad=True)
            with torch.backends.cudnn.flags(enabled=False):
                res = convfn(inputs, weight, bias, **kwargs)
            res_cpu = convfn(inputs.cpu(), weight.cpu(), bias.cpu(), **kwargs)
            self.assertEqual(res, res_cpu)
            with torch.backends.cudnn.flags(enabled=False):
                torch.autograd.gradcheck(
                    lambda x, w, b: convfn(x, w, b, **kwargs),
                    (inputs, weight, bias)
                )
                torch.autograd.gradcheck(
                    lambda x, w, b: convfn(x, w, b, **kwargs),
                    (inputs.cpu(), weight.cpu(), bias.cpu())
                )
def test_Conv2d_inconsistent_types(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float)
weights = torch.randn(1, 1, 3, 3, dtype=torch.double)
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
nn.functional.conv2d(inputs.float(), weights.float())
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):
    """With cuDNN disabled, conv2d on CUDA must reject mixed float/double
    operands (input vs weight, and input/weight vs bias); all-float works."""
    inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
    weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
    bias = torch.randn(1, dtype=torch.double, device="cuda")
    with torch.backends.cudnn.flags(enabled=False):
        # Mixed dtypes must raise ...
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
        # ... and uniform float must succeed.
        nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_1x1(self):
in_channels = 2
out_channels = 2
mod = torch.nn.Conv2d(2, 2, 1, bias=False).to(dtype=torch.double)
input = torch.randn(1, in_channels, 5, 5, requires_grad=True, dtype=torch.double)
for enabled in (False, True):
with torch.backends.mkldnn.flags(enabled=enabled):
gradcheck(F.conv2d, (input, mod.weight))
def test_Conv2d_OneDNN(self):
def run_once(group_val=24, dilation=1):
ifm = torch.ones([1, group_val, 6, 6], dtype=torch.float32)
weights = torch.ones([group_val, 1, 3, 3], dtype=torch.float32)
op = torch.nn.Conv2d(
in_channels=group_val,
out_channels=group_val,
kernel_size=[3, 3],
stride=[2, 2],
padding=[1, 1],
dilation=[dilation, dilation],
groups=group_val,
bias=False,
padding_mode='zeros'
)
op.weight.data = weights
res = op(ifm)
grad_in = torch.ones(res.shape, dtype=torch.float32)
res.backward(grad_in)
return op.weight.grad
for gorup_val in (24, 48, 23, 25):
for dilation in (1, 2):
with torch.backends.mkldnn.flags(enabled=False):
without_onednn = run_once(gorup_val, dilation)
with torch.backends.mkldnn.flags(enabled=True):
with_onednn = run_once(gorup_val, dilation)
self.assertEqual(without_onednn, with_onednn)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_cudnn_non_contiguous(self):
    """Conv1d under cuDNN must accept an input whose strides were permuted
    (permute -> contiguous -> permute back) without raising."""
    x = torch.randn(192, 16, 50).cuda()
    x = x.permute(0, 2, 1).contiguous().permute(0, 2, 1)
    m = torch.nn.Conv1d(
        in_channels=16,
        out_channels=32,
        kernel_size=2,
        bias=True).cuda()
    result = m(x)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):
    """Same as the no-cudnn variant, but with cuDNN explicitly enabled:
    mixed float/double operands must raise; all-float must succeed."""
    inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
    weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
    bias = torch.randn(1, dtype=torch.double, device="cuda")
    with torch.backends.cudnn.flags(enabled=True):
        # Mixed dtypes must raise ...
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
        # ... and uniform float must succeed.
        nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_missing_argument(self):
c = nn.Conv2d(3, 3, 3)
self.assertRaises(TypeError, lambda: c(None))
def test_Conv2d_backward_twice(self):
input = torch.randn(2, 3, 5, 5)
c = nn.Conv2d(3, 3, 3)
o1 = c(input)
o1.sum().backward()
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: o1.sum().backward())
def test_conv_modules_raise_error_on_incorrect_input_size(self):
for dtype in [torch.bfloat16, torch.double, torch.float]:
modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype),
nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype),
nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)]
invalid_input_dims = [(1, 4), (1, 4),
(2, 5), (2, 5),
(3, 6), (3, 6)]
for invalid_dims, module in zip(invalid_input_dims, modules):
for dims in invalid_dims:
input = torch.empty(torch.Size((3, ) * dims))
self.assertRaises(RuntimeError, lambda: module(input))
def test_conv_shapecheck(self):
def test(should_raise, module, input_size, dtype):
input = torch.empty(3, *input_size).to(dtype)
if should_raise:
self.assertRaises(RuntimeError, lambda: module(input))
else:
module(input)
for dtype in [torch.bfloat16, torch.float, torch.double]:
test(True, nn.Conv1d(1, 1, 3).to(dtype), (1, 2), dtype)
test(True, nn.Conv1d(1, 1, 3, stride=2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2, stride=2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 3, stride=2, padding=1).to(dtype), (1, 2), dtype)
test(True, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 2, 2), dtype)
test(False, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 3, 3), dtype)
test(False, nn.Conv2d(1, 1, (3, 3), padding=1).to(dtype), (1, 2, 2), dtype)
test(True, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 2, 2, 2), dtype)
test(False, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 3, 3, 3), dtype)
test(False, nn.Conv3d(1, 1, (3, 3, 3), padding=1).to(dtype), (1, 2, 2, 2), dtype)
def test_ConvTranspose2d_output_size(self):
m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
i = torch.randn(2, 3, 6, 6)
for h in range(15, 22):
for w in range(15, 22):
if 18 <= h <= 20 and 18 <= w <= 20:
output = m(i, output_size=(h, w))
self.assertEqual(output.size()[2:], (h, w))
else:
self.assertRaises(ValueError, lambda: m(i, (h, w)))
def test_ConvTranspose2d_output_size_downsample_upsample(self):
    """For every (kernel, dilation, stride, padding) combination in the
    sweep, a ConvTranspose2d given output_size=input.shape must restore the
    spatial size that the matching Conv2d downsampled."""
    b, c, hid_c = 2, 3, 2
    for h in range(13, 24):
        for w in range(13, 17):
            for k in range(2, 5):
                for d in range(1, 5):
                    for s in range(1, 4):
                        for p in range(3):
                            conv = nn.Conv2d(
                                in_channels=c,
                                out_channels=hid_c,
                                kernel_size=k,
                                stride=s,
                                padding=p,
                                dilation=d,
                            )

                            t_conv = nn.ConvTranspose2d(
                                in_channels=hid_c,
                                out_channels=c,
                                kernel_size=k,
                                stride=s,
                                padding=p,
                                dilation=d,
                            )

                            i = torch.randn(b, c, h, w)

                            # output_size disambiguates the stride-induced
                            # ambiguity so the transpose exactly inverts the
                            # spatial downsampling.
                            out = t_conv(conv(i), output_size=i.shape)

                            self.assertEqual(out.size()[2:], i.size()[2:])
def test_ConvTranspose3d_correct_output_size(self):
m = nn.ConvTranspose3d(2, 2, 2)
i = torch.rand(1, 2, 1, 1, 1)
out = m(i, output_size=(1, 2, 2, 2, 2))
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_ConvTranspose2d_half_cublas_gemm(self):
    """ConvTranspose2d in half precision must run forward and backward with
    cuDNN disabled (exercising the non-cuDNN GEMM path, per the test name)."""
    with torch.backends.cudnn.flags(enabled=False):
        inputs = torch.randn(1, 1, 16, 16, device='cuda', dtype=torch.half)
        deconv = nn.ConvTranspose2d(
            1, 1, 3, stride=2, padding=1, output_padding=1).cuda().half()
        output = deconv(inputs)
        output.mean().backward()
@skipIfRocm
def test_Conv2d_groups_nobias(self):
    """Grouped Conv2d (groups=2, bias-free) must equal two independent
    half-channel convolutions in output, input grad, and weight grad, on
    every available device/dtype combination."""
    dev_dtypes = [("cpu", torch.float)]
    if TEST_CUDA:
        dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
    if AMPERE_OR_ROCM:
        dev_dtypes += [("cuda", torch.bfloat16)]
    for device, dtype in dev_dtypes:
        m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False).to(device, dtype)
        i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
        output.backward(grad_output)

        # Group 1: input channels [0, 2) and the first half of the weights.
        m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
        m1.weight.data.copy_(m.weight.data[:2])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :2].contiguous())

        # Group 2: input channels [2, 4) and the second half of the weights.
        m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
        m2.weight.data.copy_(m.weight.data[2:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 2:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         # half accumulates enough error to need a looser bound
                         atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
def test_Conv2d_groups_nobias_v2(self):
    """Grouped Conv2d with several output channels per group (groups=2,
    16 out-channels, bias-free) must equal two independent 8-channel
    convolutions in output, input grad, and weight grad.

    NOTE(review): the original ``def`` line was garbled in the source
    (only ``ps_nobias_v2(self):`` survived); the name is reconstructed
    from the sibling test_Conv2d_groups_nobias. The body is unchanged.
    """
    torch.manual_seed(123)
    dev_dtypes = [("cpu", torch.float)]
    if TEST_CUDA:
        dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
    if AMPERE_OR_ROCM:
        dev_dtypes += [("cuda", torch.bfloat16)]
    for device, dtype in dev_dtypes:
        m = nn.Conv2d(4, 16, kernel_size=3, groups=2, bias=False).to(device, dtype)
        i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 16, 4, 4, device=device, dtype=dtype)
        output.backward(grad_output)

        # Group 1: input channels [0, 2), output channels / weights [0, 8).
        m1 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
        m1.weight.data.copy_(m.weight.data[:8])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :8].contiguous())

        # Group 2: input channels [2, 4), output channels / weights [8, 16).
        m2 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
        m2.weight.data.copy_(m.weight.data[8:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 8:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         # half accumulates enough error to need a looser bound
                         atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
def test_Conv3d_groups_nobias(self):
    """Grouped Conv3d (groups=2, bias-free, CPU float) must equal two
    independent 8-channel convolutions in output, input grad, and weight grad."""
    torch.manual_seed(123)
    m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=False).to("cpu", torch.float)
    i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
    output = m(i)
    grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
    output.backward(grad_output)

    # Group 1: input channels [0, 2), weights [0, 8).
    m1 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
    m1.weight.data.copy_(m.weight.data[:8])
    i1 = i.data[:, :2].contiguous().requires_grad_(True)
    output1 = m1(i1)
    output1.backward(grad_output[:, :8].contiguous())

    # Group 2: input channels [2, 4), weights [8, 16).
    m2 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
    m2.weight.data.copy_(m.weight.data[8:])
    i2 = i.data[:, 2:].contiguous().requires_grad_(True)
    output2 = m2(i2)
    output2.backward(grad_output[:, 8:].contiguous())

    self.assertEqual(output, torch.cat([output1, output2], 1))
    self.assertEqual(i.grad.data,
                     torch.cat([i1.grad.data, i2.grad.data], 1),
                     atol=dtype2prec_DONTUSE[torch.float], rtol=0)
    self.assertEqual(m.weight.grad.data,
                     torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                     atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
def test_Conv3d_groups_wbias(self):
    """Grouped Conv3d WITH bias (groups=2, CPU float) must equal two
    independent 8-channel convolutions in output, input grad, weight grad,
    and bias grad."""
    torch.manual_seed(123)
    m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=True).to("cpu", torch.float)
    i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
    output = m(i)
    grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
    output.backward(grad_output)

    # Group 1: input channels [0, 2), weights/bias [0, 8).
    m1 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
    m1.weight.data.copy_(m.weight.data[:8])
    m1.bias.data.copy_(m.bias.data[:8])
    i1 = i.data[:, :2].contiguous().requires_grad_(True)
    output1 = m1(i1)
    output1.backward(grad_output[:, :8].contiguous())

    # Group 2: input channels [2, 4), weights/bias [8, 16).
    m2 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
    m2.weight.data.copy_(m.weight.data[8:])
    m2.bias.data.copy_(m.bias.data[8:])
    i2 = i.data[:, 2:].contiguous().requires_grad_(True)
    output2 = m2(i2)
    output2.backward(grad_output[:, 8:].contiguous())

    self.assertEqual(output, torch.cat([output1, output2], 1))
    self.assertEqual(i.grad.data,
                     torch.cat([i1.grad.data, i2.grad.data], 1),
                     atol=dtype2prec_DONTUSE[torch.float],
                     rtol=dtype2prec_DONTUSE[torch.float])
    self.assertEqual(m.weight.grad.data,
                     torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                     atol=dtype2prec_DONTUSE[torch.float],
                     rtol=dtype2prec_DONTUSE[torch.float])
    self.assertEqual(m.bias.grad.data,
                     torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                     atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
def test_MaxUnpool2d_output_size(self):
m = nn.MaxPool2d(3, stride=2, return_indices=True)
mu = nn.MaxUnpool2d(3, stride=2)
big_t = torch.rand(1, 1, 6, 6)
big_t[0][0][4][4] = 100
output_big, indices_big = m(big_t)
self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))
small_t = torch.rand(1, 1, 5, 5)
for i in range(0, 4, 2):
for j in range(0, 4, 2):
small_t[:, :, i, j] = 100
output_small, indices_small = m(small_t)
for h in range(3, 10):
for w in range(3, 10):
if 4 <= h <= 6 and 4 <= w <= 6:
size = (h, w)
if h == 6:
size = (1, 1) + size
mu(output_small, indices_small, output_size=size)
else:
self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w)))
def test_max_unpool2d_nhwc_cpu(self):
input = torch.randn(2, 10, 9, 9).float().cpu()
input = input.contiguous(memory_format=torch.channels_last)
ref_input = input.clone().contiguous()
pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
ref_pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
out, ind = pool(input)
ref_out, ref_ind = ref_pool(ref_input)
out.requires_grad_()
ref_out.requires_grad_()
unpool = nn.MaxUnpool2d(3, stride=2).cpu()
ref_unpool = nn.MaxUnpool2d(3, stride=2).cpu()
upout = unpool(out, ind)
ref_upout = ref_unpool(ref_out, ref_ind)
grad = torch.randn(upout.size()).float().cpu()
grad = grad.contiguous(memory_format=torch.channels_last)
ref_grad = grad.clone().contiguous()
upout.backward(grad)
ref_upout.backward(ref_grad)
self.assertTrue(upout.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_upout.is_contiguous())
self.assertTrue(torch.allclose(upout, ref_upout))
self.assertTrue(torch.allclose(out.grad, ref_out.grad))
def test_container_copy(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(4, 5)
def forward(self, input):
return self.linear(input)
input = torch.randn(2, 4)
model = Model()
model_cp = deepcopy(model)
self.assertEqual(model(input).data, model_cp(input).data)
model_cp.linear.weight.data[:] = 2
self.assertNotEqual(model(input).data, model_cp(input).data)
def test_RNN_cell(self):
for module in (nn.RNNCell, nn.GRUCell):
for bias in (True, False):
input = torch.randn(3, 10)
hx = torch.randn(3, 20)
cell = module(10, 20, bias=bias)
for _ in range(6):
hx = cell(input, hx)
hx.sum().backward()
def test_RNN_cell_forward_input_size(self):
input = torch.randn(3, 11)
hx = torch.randn(3, 20)
for module in (nn.RNNCell, nn.GRUCell):
cell = module(10, 20)
self.assertRaises(Exception, lambda: cell(input, hx))
def test_RNN_cell_forward_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 21)
cell_shared_param = (10, 20)
for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
nn.GRUCell(*cell_shared_param)):
self.assertRaises(Exception, lambda: cell(input, hx))
def test_RNN_cell_forward_zero_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 0)
cell_shared_param = (10, 0)
for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
nn.GRUCell(*cell_shared_param)):
self.assertEqual(cell(input, hx).shape, torch.Size([3, 0]))
def _test_loss_equal_input_target_shape(self, cast):
    """Shared driver: each elementwise loss must reject mismatched input and
    target shapes ((3, 5) vs (5, 3)); `cast` moves tensors to the device
    under test."""
    loss_fns = {
        'mse_loss': F.mse_loss,
        'l1_loss': F.l1_loss,
        'smooth_l1_loss': F.smooth_l1_loss,
        'huber_loss': F.huber_loss,
        'kl_div': F.kl_div,
        'poisson_nll_loss': F.poisson_nll_loss,
    }
    input = cast(torch.randn(3, 5))
    target = cast(torch.randn(5, 3))
    for _name, fn in loss_fns.items():
        with self.assertRaises(Exception):
            fn(input, target)
def test_loss_equal_input_target_shape(self):
    """Run the shared shape-mismatch checks with an identity cast (CPU)."""
    self._test_loss_equal_input_target_shape(lambda tensor: tensor)
def test_mse_loss_size_warning(self):
i = torch.randn((10, 1), requires_grad=True)
t = torch.randn((10,))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
F.mse_loss(i, t)
self.assertEqual(len(w), 1)
self.assertIn('Please ensure they have the same size.', str(w[0]))
def test_poisson_nll_loss_reduction_modes(self):
input = torch.tensor([0.5, 1.5, 2.5])
target = torch.tensor([1., 2., 3.])
component_wise_loss = torch.exp(input) - target * input
self.assertEqual(component_wise_loss,
F.poisson_nll_loss(input, target, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.poisson_nll_loss(input, target, reduction='total')
def test_gaussian_nll_loss_reduction_modes(self):
input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
target = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
var = torch.tensor([[0.5, 1., 1.5], [1., 1.5, 2.]])
component_wise_loss = 0.5 * (torch.log(var) + (input - target)**2 / var)
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target, var, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.gaussian_nll_loss(input, target, var, reduction='total')
def test_gaussian_nll_loss_broadcasting(self):
input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
target_full = torch.tensor([[1., 2., 3.], [1., 2., 3.]])
target_part = torch.tensor([[1., 2., 3.]])
var_full = torch.tensor([[0.5, 0.5, 0.5], [1.5, 1.5, 1.5]])
var_part1 = torch.tensor([[0.5], [1.5]])
var_part2 = torch.tensor([0.5, 1.5])
component_wise_loss = 0.5 * (torch.log(var_full) + (input - target_full)**2 / var_full)
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_full, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_full, var_part1, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_full, var_part2, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_part1, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_part2, reduction='none'))
def test_gaussian_nll_loss_args(self):
input = torch.randn(3, 5)
with self.assertRaisesRegex(ValueError, 'var is of incorrect size'):
target = torch.randn(3, 5)
var = torch.ones(3, 3)
torch.nn.functional.gaussian_nll_loss(input, target, var)
with self.assertRaisesRegex(ValueError, 'var has negative entry/entries'):
var = -1 * torch.ones(3, 5)
torch.nn.functional.gaussian_nll_loss(input, target, var)
def test_KLDivLoss_batch_mean(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
prob2 = F.softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean')
l = loss(log_prob1, prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum')(log_prob1, prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
def test_KLDivLoss_batch_mean_log_target(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
log_prob2 = F.log_softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean', log_target=True)
l = loss(log_prob1, log_prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum', log_target=True)(log_prob1, log_prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
def test_CTCLoss_typechecks(self):
    """Float-typed length tensors must be rejected by ctc_loss."""
    tgt_lens = torch.tensor([30, 25, 20])
    in_lens = torch.tensor([50, 50, 50])
    tgts = torch.randint(1, 15, (sum(tgt_lens),), dtype=torch.int)
    lp = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
    # Input lengths must be integral.
    with self.assertRaises(RuntimeError):
        torch.nn.functional.ctc_loss(lp, tgts, in_lens.to(dtype=torch.float), tgt_lens)
    # Target lengths must be integral as well.
    with self.assertRaises(RuntimeError):
        torch.nn.functional.ctc_loss(lp, tgts, in_lens, tgt_lens.to(dtype=torch.float))
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_lengthchecks_cuda(self):
    """2-D padded targets narrower than a declared length must raise (CUDA)."""
    declared_tgt_lens = [30, 25, 20]  # longest claim is 30 labels ...
    declared_in_lens = [50, 50, 50]
    # ... but each padded target row only holds 29 labels.
    padded_targets = torch.randint(1, 15, (3, 29), dtype=torch.long, device='cuda')
    lp = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2)
    with self.assertRaises(RuntimeError):
        torch.nn.functional.ctc_loss(lp, padded_targets, declared_in_lens, declared_tgt_lens)
def test_CTCLoss_lengthchecks_cpu(self):
    """2-D padded targets narrower than a declared length must raise (CPU)."""
    declared_tgt_lens = [30, 25, 20]  # longest claim is 30 labels ...
    declared_in_lens = [50, 50, 50]
    # ... but each padded target row only holds 29 labels.
    padded_targets = torch.randint(1, 15, (3, 29), dtype=torch.int)
    lp = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
    with self.assertRaises(RuntimeError):
        torch.nn.functional.ctc_loss(lp, padded_targets, declared_in_lens, declared_tgt_lens)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_long_targets(self):
    """CPU and native-CUDA ctc_loss must agree, forward and backward, on very
    long inputs and targets (input length 4000, target length 1200)."""
    input_length = 4000
    vocab_size = 3
    batch_size = 4
    target_length = 1200
    # Log-probabilities over the vocabulary at every time step.
    log_probs = torch.randn(input_length, batch_size, vocab_size).log_softmax(2).requires_grad_()
    targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length), dtype=torch.long)
    input_lengths = batch_size * [input_length]
    target_lengths = batch_size * [target_length]
    # CPU reference loss and gradient.
    res_cpu = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                           reduction='sum', zero_infinity=True)
    grad_out = torch.randn_like(res_cpu)
    grad_cpu, = torch.autograd.grad(res_cpu, log_probs, grad_out)
    # cudnn disabled so the native CUDA kernel is the one exercised.
    with torch.backends.cudnn.flags(enabled=False):
        res_gpu = torch.nn.functional.ctc_loss(log_probs.cuda(), targets.cuda(), input_lengths, target_lengths,
                                               reduction='sum', zero_infinity=True)
        grad_gpu, = torch.autograd.grad(res_gpu, log_probs, grad_out.cuda())
    self.assertEqual(res_cpu, res_gpu, atol=1e-4, rtol=0)
    self.assertEqual(grad_cpu, grad_gpu, atol=1e-4, rtol=0)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_critical_target_len(self):
    """CUDA (cudnn enabled) and CPU ctc_loss must agree at target length 256.

    NOTE(review): S=256 presumably sits on a boundary of the cudnn CTC
    implementation (inferred from the test name) — confirm against cudnn docs.
    """
    N = 1    # batch size
    S = 256  # target sequence length
    C = 10   # number of classes
    T = 500  # input sequence length
    target = torch.randint(low=1, high=C, size=(S,), dtype=torch.int)
    input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.int)
    target_lengths = torch.tensor(S, dtype=torch.int)
    inp = torch.randn(T, N, C, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
    # cudnn explicitly enabled so its CTC kernel is eligible for dispatch.
    with cudnn.flags(enabled=True):
        res_gpu = torch.nn.functional.ctc_loss(inp, target, input_lengths, target_lengths, reduction='none')
    res_cpu = torch.nn.functional.ctc_loss(inp.cpu(), target, input_lengths, target_lengths, reduction='none')
    self.assertEqual(res_cpu, res_gpu, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_zero_infinity(self):
    """With zero_infinity=True, infeasible samples (target longer than input)
    must yield consistent losses and NaN-free gradients across the cudnn,
    native CUDA, and CPU implementations."""
    # First batch element declares target length 60 > input length 50, which
    # makes its loss infinite; zero_infinity=True zeroes it instead.
    target_lengths = [60, 25, 20]
    input_lengths = [50, 50, 50]
    targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int, device='cuda')
    log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
    res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                       reduction='sum', zero_infinity=True)
    # Same computation on the native CUDA kernel (cudnn disabled).
    with torch.backends.cudnn.flags(enabled=False):
        res2 = torch.nn.functional.ctc_loss(log_probs, targets.cuda().long(), input_lengths, target_lengths,
                                            reduction='sum', zero_infinity=True)
    res_cpu = torch.nn.functional.ctc_loss(log_probs.cpu(), targets.cpu(), input_lengths, target_lengths,
                                           reduction='sum', zero_infinity=True)
    self.assertEqual(res2, res, atol=1e-4, rtol=0)
    self.assertEqual(res_cpu, res.cpu(), atol=1e-4, rtol=0)
    g1, = torch.autograd.grad(res, log_probs)
    g2, = torch.autograd.grad(res2, log_probs)
    g3, = torch.autograd.grad(res_cpu, log_probs)
    self.assertEqual(g2, g3, atol=1e-4, rtol=0)
    self.assertEqual(g1, g2, atol=1e-4, rtol=0)
    # NaN != NaN, so self-equality detects any NaN entries in the gradient.
    self.assertTrue((g1 == g1).all().item())
def test_RNN_cell_no_broadcasting(self):
    """Mismatched input/hidden shapes must raise instead of silently broadcasting."""
    def expect_shape_error(cell_cls, inp, hx, in_sz, hid_sz):
        # Constructing the cell succeeds; only the forward call should fail.
        cell = cell_cls(in_sz, hid_sz)
        self.assertRaises(RuntimeError, lambda: cell(inp, hx))

    def check_every_cell(hid_sz, bad_hx, good_hx, in_sz, inp):
        expect_shape_error(nn.RNNCell, inp, bad_hx, in_sz, hid_sz)
        expect_shape_error(nn.GRUCell, inp, bad_hx, in_sz, hid_sz)
        # For LSTM the bad tensor may appear as either the h or the c slot.
        expect_shape_error(nn.LSTMCell, inp, (bad_hx, good_hx), in_sz, hid_sz)
        expect_shape_error(nn.LSTMCell, inp, (good_hx, bad_hx), in_sz, hid_sz)

    hid_sz, in_sz = 20, 10
    inp = torch.randn(3, in_sz)
    good_hx = torch.randn(3, hid_sz)

    # Hidden state with the wrong batch dimension.
    check_every_cell(hid_sz, torch.randn(1, hid_sz), good_hx, in_sz, inp)
    # Hidden state with the wrong feature dimension.
    check_every_cell(hid_sz, torch.randn(3, 1), good_hx, in_sz, inp)
    # Input with the wrong feature dimension.
    check_every_cell(hid_sz, good_hx, good_hx, in_sz, torch.randn(3, 1))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_native_dropout_corner_case(self):
    """native_dropout must match dropout — values and gradients — at the
    p=0.0 and p=1.0 corner cases, in train and eval mode, on CPU and CUDA."""
    for train in [True, False]:
        for p in [0.0, 1.0]:
            for device in ["cuda", "cpu"]:
                x = torch.randn(5).to(device=device).requires_grad_()
                x_ref = x.detach().requires_grad_()
                o = torch.native_dropout(x, p, train)[0]
                o_ref = torch.dropout(x_ref, p, train)
                o.sum().backward()
                o_ref.sum().backward()
                # FIX: use unittest assertions rather than bare `assert`,
                # which is stripped under `python -O` (silently passing) and
                # reports no useful failure message.
                self.assertTrue(o.equal(o_ref))
                self.assertTrue(x.grad.equal(x_ref.grad))
def test_invalid_dropout_p(self):
    """Dropout probabilities outside [0, 1] must raise ValueError everywhere."""
    v = torch.ones(1)
    # Module forms: Dropout, Dropout2d, Dropout3d.
    for module_cls in (nn.Dropout, nn.Dropout2d, nn.Dropout3d):
        for bad_p in (-0.1, 1.1):
            self.assertRaises(ValueError, lambda: module_cls(bad_p))
    # The functional form validates p as well.
    for bad_p in (-0.1, 1.1):
        self.assertRaises(ValueError, lambda: F.dropout(v, bad_p))
def test_pad_sequence(self):
    """pad_sequence pads to the longest sequence, honouring batch_first and
    padding_value, for 1-D and higher-dimensional tensors."""
    def zero_pad(t, total_len):
        # Append zero rows along dim 0 until `t` reaches `total_len` rows.
        filler = t.data.new(total_len - t.size(0), *t.size()[1:]).zero_()
        return torch.cat([t.data, filler])

    seq3 = torch.tensor([1, 2, 3])
    seq2 = torch.tensor([4, 5])
    seq1 = torch.tensor([6])

    # Unsorted input, batch_first=True.
    out = rnn_utils.pad_sequence([seq2, seq3, seq1], True)
    self.assertEqual(out, torch.tensor([[4, 5, 0], [1, 2, 3], [6, 0, 0]]))
    # Same input in the default time-major layout.
    out = rnn_utils.pad_sequence([seq2, seq3, seq1])
    self.assertEqual(out, torch.tensor([[4, 5, 0], [1, 2, 3], [6, 0, 0]]).transpose(0, 1))
    # Custom (non-zero) padding value.
    out = rnn_utils.pad_sequence([seq2, seq3, seq1], True, 1)
    self.assertEqual(out, torch.tensor([[4, 5, 1], [1, 2, 3], [6, 1, 1]]))
    # Input already sorted by decreasing length.
    out = rnn_utils.pad_sequence([seq3, seq2, seq1], True)
    self.assertEqual(out, torch.tensor([[1, 2, 3], [4, 5, 0], [6, 0, 0]]))

    # Random shuffled sequences with 0-3 trailing dimensions.
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        extra_dims = [4] * num_dim
        seqs = [torch.rand(i * i, 5, *extra_dims) for i in range(1, maxlen + 1)]
        random.shuffle(seqs)
        ref = torch.stack([zero_pad(s, maxlen * maxlen) for s in seqs])
        # batch-first and time-major must agree with the hand-padded reference.
        self.assertEqual(rnn_utils.pad_sequence(seqs, True), ref)
        self.assertEqual(rnn_utils.pad_sequence(seqs), ref.transpose(0, 1))
def test_unpad_sequence(self):
    """unpad_sequence(pad_sequence(x)) must round-trip the original sequences.

    Covers 1-D sequences and random sequences with 0-3 trailing dimensions,
    in both batch-first and time-major layouts.
    """
    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    sequences = [a, b, c]
    lengths = torch.as_tensor([v.size(0) for v in sequences])
    for batch_first in [True, False]:
        padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)
        unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)
        self.assertEqual(sequences, unpadded_sequences)
    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        trailing_dims = [4] * num_dim
        for i in range(1, maxlen + 1):
            seq_len = i * i
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        random.shuffle(sequences)
        lengths = torch.as_tensor([v.size(0) for v in sequences])
        # FIX: this section previously reused the leftover `batch_first`
        # value (False) from the loop above, so the batch-first layout was
        # never exercised for multi-dimensional inputs. Loop explicitly.
        for batch_first in [True, False]:
            padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)
            unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)
            self.assertEqual(sequences, unpadded_sequences)
def test_pack_sequence(self):
    """pack_sequence: checks packed data/batch_sizes/sort indices for
    hand-built 1-D inputs, the error raised by enforce_sorted=True on
    unsorted input, and round-trip consistency with the pad/pack_padded
    utilities for higher-dimensional random sequences."""
    def _compatibility_test(sequences, lengths, batch_first, enforce_sorted=False):
        # pack_sequence must agree with pad_sequence + pack_padded_sequence,
        # and pad_packed_sequence must invert it back to the padded form.
        padded = rnn_utils.pad_sequence(sequences, batch_first)
        packed = rnn_utils.pack_sequence(sequences, enforce_sorted)
        unpacked = rnn_utils.pad_packed_sequence(packed, batch_first)
        self.assertEqual(padded, unpacked[0])
        pack_padded = rnn_utils.pack_padded_sequence(
            padded, lengths, batch_first, enforce_sorted)
        self.assertEqual(packed, pack_padded)
    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    packed = rnn_utils.pack_sequence([a, b, c], enforce_sorted=False)
    # Packed data interleaves time steps: all step-0 values, then step-1, ...
    expected = torch.tensor([1, 4, 6, 2, 5, 3])
    self.assertEqual(packed.batch_sizes, [3, 2, 1])
    self.assertEqual(packed.data.data, expected)
    self.assertEqual(packed.sorted_indices, [0, 1, 2])
    self.assertEqual(packed.unsorted_indices, [0, 1, 2])
    # Unsorted input: identical packed data, different sort/unsort permutations.
    packed_unsorted = rnn_utils.pack_sequence([b, c, a], enforce_sorted=False)
    self.assertEqual(packed_unsorted.batch_sizes, [3, 2, 1])
    self.assertEqual(packed_unsorted.data.data, expected)
    self.assertEqual(packed_unsorted.sorted_indices, [2, 0, 1])
    self.assertEqual(packed_unsorted.unsorted_indices, [1, 2, 0])
    # single dimensional, enforce_sorted = True
    packed_enforce_sorted = rnn_utils.pack_sequence([a, b, c], enforce_sorted=True)
    self.assertEqual(packed_enforce_sorted.batch_sizes, [3, 2, 1])
    self.assertEqual(packed_enforce_sorted.data.data, expected)
    # Pre-sorted input with enforce_sorted=True carries no permutation tensors.
    self.assertTrue(packed_enforce_sorted.sorted_indices is None)
    self.assertTrue(packed_enforce_sorted.unsorted_indices is None)
    with self.assertRaisesRegex(RuntimeError, 'must be sorted in decreasing order'):
        rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)
    with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
        rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)
    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        lengths = []
        trailing_dims = [4] * num_dim
        # Build sequences in decreasing-length order (required for the
        # enforce_sorted=True branch below).
        for i in range(maxlen, 0, -1):
            seq_len = i * i
            lengths.append(seq_len)
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        unsorted_sequences = [s.clone() for s in sequences]
        random.shuffle(unsorted_sequences)
        unsorted_sequences_lengths = [t.size(0) for t in unsorted_sequences]
        # compatibility with other utilities
        for batch_first in (True, False):
            for enforce_sorted in (True, False):
                _compatibility_test(sequences, lengths, batch_first, enforce_sorted)
            _compatibility_test(unsorted_sequences, unsorted_sequences_lengths,
                                batch_first)
def test_unpack_sequence(self):
    """unpack_sequence must invert pack_sequence for 1-D and N-D tensors."""
    # Hand-built 1-D sequences of decreasing length.
    one_d = [torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])]
    packed = rnn_utils.pack_sequence(one_d, enforce_sorted=False)
    self.assertEqual(one_d, rnn_utils.unpack_sequence(packed))

    # Random sequences with 0-3 trailing dimensions, shuffled so packing
    # must sort (and unpacking must unsort) internally.
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        extra_dims = [4] * num_dim
        seqs = [torch.rand(i * i, 5, *extra_dims) for i in range(1, maxlen + 1)]
        random.shuffle(seqs)
        packed = rnn_utils.pack_sequence(seqs, enforce_sorted=False)
        self.assertEqual(seqs, rnn_utils.unpack_sequence(packed))
def test_pack_padded_sequence(self):
    """pack_padded_sequence: verifies packed data/batch_sizes/unsorted_indices
    against hand-computed expectations, round-trip via pad_packed_sequence,
    gradient routing (gradients flow only into the valid region, padding gets
    zero), and the error messages for unsorted/empty input."""
    def generate_test_case(sorted_lengths, should_shuffle):
        def pad(tensor, length):
            # Zero-pad `tensor` along dim 0 up to `length` rows.
            return torch.cat([tensor, tensor.new(length - tensor.size(0), *tensor.size()[1:]).zero_()])
        max_length = sorted_lengths[0]
        # batch_sizes[t] = number of sequences still "alive" at time step t.
        batch_sizes = [sum(map(bool, filter(lambda x: x >= i, sorted_lengths)))
                       for i in range(1, max_length + 1)]
        offset = 0
        # Sequence i carries values offset by i*100 so packed entries are
        # uniquely identifiable.
        padded = torch.cat([pad(i * 100 + torch.arange(1., 5 * l + 1).view(l, 1, 5), max_length)
                            for i, l in enumerate(sorted_lengths, 1)], 1)
        expected_data = [[torch.arange(1., 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]
                         for n, batch_size in enumerate(batch_sizes)]
        expected_data = list(itertools.chain.from_iterable(expected_data))
        expected_data = torch.stack(expected_data, dim=0)
        if should_shuffle:
            # Shuffle the padded sequence to create an unsorted sequence
            permutation = list(range(len(sorted_lengths)))
            random.shuffle(permutation)
            unsorted_indices = torch.tensor(permutation)
            padded = padded.index_select(1, unsorted_indices)
            lengths = torch.tensor(sorted_lengths).index_select(0, unsorted_indices)
        else:
            unsorted_indices = None
            lengths = sorted_lengths
        return padded.requires_grad_(), lengths, expected_data, batch_sizes, unsorted_indices
    test_cases = [
        # sorted_lengths, should_shuffle
        [[10, 8, 4, 2, 2, 2, 1], False],
        [[11, 10, 8, 6, 4, 3, 1], False],
        [[11, 10, 8, 6, 4, 3, 1], True],
    ]
    for test_case, batch_first in itertools.product(test_cases, (True, False)):
        sorted_lengths, should_shuffle = test_case
        padded, lengths, expected_data, batch_sizes, unsorted_indices = generate_test_case(
            sorted_lengths, should_shuffle)
        src = padded
        if batch_first:
            src = src.transpose(0, 1)
        # check output
        packed = rnn_utils.pack_padded_sequence(src, lengths, batch_first=batch_first,
                                                enforce_sorted=not should_shuffle)
        self.assertEqual(packed.data.data, expected_data)
        self.assertEqual(packed.batch_sizes, batch_sizes)
        self.assertEqual(packed.unsorted_indices, unsorted_indices)
        # test inverse
        unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
        self.assertEqual(unpacked, src)
        self.assertEqual(unpacked_len, lengths)
        # check grad
        if padded.grad is not None:
            padded.grad.data.zero_()
        grad_output = unpacked.data.clone().normal_()
        unpacked.backward(grad_output)
        if batch_first:
            grad_output.transpose_(0, 1)
        for i, l in enumerate(lengths):
            self.assertEqual(padded.grad.data[:l, i], grad_output[:l, i])
            if l < 10:
                # Padding region past each sequence's length receives no grad.
                self.assertEqual(padded.grad.data[l:, i].abs().sum(), 0)
    # test error messages
    with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
        packed = rnn_utils.pack_padded_sequence(torch.randn(3, 3), [1, 3, 2])
    with self.assertRaisesRegex(RuntimeError, 'empty tensor'):
        packed = rnn_utils.pack_padded_sequence(torch.randn(0, 0), [])
def test_LSTM_cell(self):
    """Smoke test: LSTMCell forward/backward runs with and without bias.

    The modules are implemented through autograd, so no Jacobian check is
    needed — the test only verifies that nothing raises.
    """
    for use_bias in (True, False):
        cell = nn.LSTMCell(10, 20, bias=use_bias)
        inp = torch.randn(3, 10)
        h = torch.randn(3, 20)
        c = torch.randn(3, 20)
        # Unroll a few steps, then backprop through the final state.
        for _ in range(6):
            h, c = cell(inp, (h, c))
        (h + c).sum().backward()
def test_LSTM_cell_forward_input_size(self):
    """Forward must fail when the input feature size mismatches the cell."""
    cell = nn.LSTMCell(10, 20)
    bad_input = torch.randn(3, 11)  # cell expects 10 input features
    state = (torch.randn(3, 20), torch.randn(3, 20))
    self.assertRaises(Exception, lambda: cell(bad_input, state))
def test_LSTM_cell_forward_hidden_size(self):
    """Forward must fail when the hidden state size mismatches the cell."""
    cell = nn.LSTMCell(10, 20)
    inp = torch.randn(3, 10)
    bad_h = torch.randn(3, 21)   # hidden size should be 20
    good_c = torch.randn(3, 20)
    # A wrong size in either slot of the (h, c) pair must be rejected.
    self.assertRaises(Exception, lambda: cell(inp, (bad_h, good_c)))
    self.assertRaises(Exception, lambda: cell(inp, (good_c, bad_h)))
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_pack_sequence_batch_sizes_throw(self):
    """PackedSequence construction must reject batch_sizes on a CUDA device."""
    with self.assertRaisesRegex(ValueError, r"batch_sizes should always be on CPU"):
        lstm = nn.LSTM(3, 4, bidirectional=True, num_layers=2).to('cuda')
        data = torch.rand(5, 3, device='cuda')
        cuda_batch_sizes = torch.tensor([1, 1, 1, 1, 1], device='cuda')
        # The constructor itself should raise — batch_sizes lives on CUDA.
        packed = nn.utils.rnn.PackedSequence(data, cuda_batch_sizes)
def test_Transformer_cell(self):
    """Smoke test: a full nn.Transformer forward/backward pass runs with
    attention masks and key-padding masks, for both batch-first and
    time-major input layouts."""
    # this is just a smoke test; these modules are implemented through
    # autograd so no Jacobian test is needed
    d_model = 512
    nhead = 16
    num_encoder_layers = 4
    num_decoder_layers = 3
    dim_feedforward = 256
    dropout = 0.3
    bsz = 8
    seq_length = 35
    tgt_length = 15
    # zip pairs batch_first=True with (batch, seq, feature) shapes and
    # batch_first=False with (seq, batch, feature) shapes.
    for batch_first, src_size, tgt_size in zip((True, False),
                                               [(bsz, seq_length, d_model),
                                                (seq_length, bsz, d_model)],
                                               [(bsz, tgt_length, d_model),
                                                (tgt_length, bsz, d_model)]):
        transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers,
                                     dim_feedforward, dropout, batch_first=batch_first)
        src = torch.randn(src_size)
        src_mask = transformer.generate_square_subsequent_mask(seq_length).double()
        tgt = torch.randn(tgt_size)
        tgt_mask = transformer.generate_square_subsequent_mask(tgt_length).double()
        memory_mask = torch.randn(tgt_length, seq_length).double()
        # Key-padding masks are boolean (True = masked position).
        src_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5
        tgt_key_padding_mask = torch.rand(bsz, tgt_length) >= 0.5
        memory_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5
        output = transformer(src, tgt,
                             src_mask=src_mask,
                             tgt_mask=tgt_mask,
                             memory_mask=memory_mask,
                             src_key_padding_mask=src_key_padding_mask,
                             tgt_key_padding_mask=tgt_key_padding_mask,
                             memory_key_padding_mask=memory_key_padding_mask)
        # Only checks that the backward pass runs without error.
        output.sum().backward()
def test_transformerencoderlayer(self):
    # this is a deterministic test for TransformerEncoderLayer
    """Deterministic regression test for TransformerEncoderLayer.

    All parameters are overwritten with a fixed cosine pattern so outputs can
    be compared against precomputed reference values, with and without
    src_key_padding_mask, in both time-major and batch-first layouts.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0
    bsz = 2
    for batch_first in (False, True):
        def perm_fn(x):
            # Literals below are written time-major; transpose to
            # batch-first when the layer is configured that way.
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           batch_first=batch_first)
        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)
        # deterministic input
        encoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        result = model(encoder_input)
        ref_output = torch.tensor([[[2.258703, 0.127985, -0.697881, 0.170862]]])
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # 0 values are NOT masked. This shouldn't mask anything.
        mask = torch.tensor([[0]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Masking the only token leaves nothing to attend to -> all-NaN output.
        mask = torch.tensor([[1]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertTrue(np.isnan(result).all())
        # Two time steps, one batch element.
        encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.272644, 0.119035, -0.691669, 0.153486]],
                                           [[2.272644, 0.119035, -0.691669, 0.153486]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        mask = torch.tensor([[0, 0]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Masking the first position changes the expected output.
        mask = torch.tensor([[1, 0]]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.301516, 0.092249, -0.679101, 0.103088]],
                                           [[2.301516, 0.092249, -0.679101, 0.103088]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Five time steps, batch of two.
        encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                               [0.5387, 0.1655, 0.3565, 0.0471]],
                                              [[0.8335, 0.2799, 0.5031, 0.2947],
                                               [0.1402, 0.0318, 0.7636, 0.1346]],
                                              [[0.6333, 0.9344, 0.1376, 0.9938],
                                               [0.8924, 0.2872, 0.6692, 0.2944]],
                                              [[0.9897, 0.6915, 0.3154, 0.1733],
                                               [0.8645, 0.3513, 0.3064, 0.0767]],
                                              [[0.8117, 0.2366, 0.4838, 0.7881],
                                               [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
                                            [2.427987, 0.021213, -0.602496, -0.084103]],
                                           [[2.424689, 0.019155, -0.604793, -0.085672],
                                            [2.413863, 0.022211, -0.612486, -0.072490]],
                                           [[2.433774, 0.021598, -0.598343, -0.087548],
                                            [2.425104, 0.019748, -0.604515, -0.084839]],
                                           [[2.436185, 0.022682, -0.596625, -0.087261],
                                            [2.433556, 0.021891, -0.598509, -0.086832]],
                                           [[2.416246, 0.017512, -0.610712, -0.082961],
                                            [2.422901, 0.024187, -0.606178, -0.074929]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # All-False mask: output must be unchanged.
        mask = torch.zeros([2, 5]) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Mask a few positions and compare against updated references.
        mask[0, 1] = 1
        mask[1, 3] = 1
        mask[1, 4] = 1
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
                                            [2.428811, 0.021445, -0.601912, -0.084252]],
                                           [[2.425009, 0.019155, -0.604566, -0.085899],
                                            [2.415408, 0.02249 , -0.611415, -0.073]],
                                           [[2.434199, 0.021682, -0.598039, -0.087699],
                                            [2.42598, 0.019941, -0.603896, -0.085091]],
                                           [[2.436457, 0.022736, -0.59643 , -0.08736],
                                            [2.434021, 0.022093, -0.598179, -0.08679]],
                                           [[2.416531, 0.017498, -0.610513, -0.083181],
                                            [2.4242, 0.024653, -0.605266, -0.074959]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
def test_transformerencoderlayer_gelu(self):
    """Deterministic TransformerEncoderLayer test with GELU activation.

    Same fixed-cosine-weights scheme as test_transformerencoderlayer, run for
    every accepted spelling of the activation ('gelu' string, F.gelu function,
    nn.GELU module) in both layouts.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0
    bsz = 2
    for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
        def perm_fn(x):
            # Reference literals are time-major; transpose for batch-first.
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           activation, batch_first=batch_first)
        # Overwrite all parameters with a deterministic cosine pattern.
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)
        encoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        result = model(encoder_input)
        ref_output = torch.tensor([[[2.249815, 0.131006, -0.702199, 0.177868]]])
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.264103, 0.121417, -0.696012, 0.159724]],
                                           [[2.264103, 0.121417, -0.696012, 0.159724]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                               [0.5387, 0.1655, 0.3565, 0.0471]],
                                              [[0.8335, 0.2799, 0.5031, 0.2947],
                                               [0.1402, 0.0318, 0.7636, 0.1346]],
                                              [[0.6333, 0.9344, 0.1376, 0.9938],
                                               [0.8924, 0.2872, 0.6692, 0.2944]],
                                              [[0.9897, 0.6915, 0.3154, 0.1733],
                                               [0.8645, 0.3513, 0.3064, 0.0767]],
                                              [[0.8117, 0.2366, 0.4838, 0.7881],
                                               [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.42163188, 0.03227153, -0.60714219, -0.05908082],
                                            [2.42151276, 0.03302179, -0.60722523, -0.05762651]],
                                           [[2.41926761, 0.02974034, -0.60879519, -0.0621269],
                                            [2.41626395, 0.03539356, -0.61087842, -0.04978623]],
                                           [[2.42382808, 0.03218872, -0.6055963, -0.06073591],
                                            [2.41983477, 0.03085259, -0.60840145, -0.06046414]],
                                           [[2.42500749, 0.03328855, -0.60476388, -0.0595334],
                                            [2.4237977, 0.03290575, -0.60561789, -0.05940082]],
                                           [[2.41383916, 0.02686345, -0.61256377, -0.06380707],
                                            [2.42000277, 0.03800944, -0.60824798, -0.04754947]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
def test_transformerdecoderlayer(self):
    """Deterministic regression test for TransformerDecoderLayer.

    Parameters are overwritten with a fixed cosine pattern; decoder outputs
    are compared against precomputed references for several input shapes, and
    for tgt/memory key-padding masks, in both layouts.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0
    bsz = 2
    seq_length = 5
    tgt_length = 3
    for batch_first in (False, True):
        def perm_fn(x):
            # Reference literals are time-major; transpose for batch-first.
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           batch_first=batch_first)
        # Overwrite all parameters with a deterministic cosine pattern.
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)
        # Single target step, single memory step.
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        memory_input = torch.tensor([[[60., 70., 80., 90.]]])
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor([[[2.314351, 0.094805, -0.671322, 0.101977]]])
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Two target steps against a one-step memory.
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]]))
        memory_input = torch.tensor([[[1., 2., 3., 4.]]])
        result = model(decoder_input, memory_input)
        result = result.detach().numpy()
        ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
                                           [[2.422245, 0.051716, -0.606338, -0.024756]]]))
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Two target steps against a two-step memory.
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
                                           [[2.343536, 0.085561, -0.654954, 0.074991]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Three target steps, batch of two, five-step memory.
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]))
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # All-False tgt key padding mask: output unchanged.
        key_padding_mask = torch.zeros(2, 3) == 1
        result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Mask some target positions and compare with updated references.
        key_padding_mask[0, 2] = 1
        key_padding_mask[1, 1] = 1
        key_padding_mask[1, 2] = 1
        result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
                                            [2.4323, 0.029375, -0.599553, -0.071881]],
                                           [[2.428523, 0.026838, -0.602226, -0.07391],
                                            [2.432634, 0.029842, -0.599318, -0.071253]],
                                           [[2.432278, 0.028152, -0.599555, -0.074139],
                                            [2.432659, 0.029244, -0.599294, -0.072382]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # All-False memory key padding mask: output unchanged.
        key_padding_mask = torch.zeros(2, 5) == 1
        result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # Mask some memory positions and compare with updated references.
        key_padding_mask[0, 4] = 1
        key_padding_mask[1, 3] = 1
        key_padding_mask[1, 4] = 1
        result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
                                            [2.432692, 0.028583, -0.599263, -0.073634]],
                                           [[2.428247, 0.02662, -0.602419, -0.074123],
                                            [2.432657, 0.029055, -0.599293, -0.072732]],
                                           [[2.431515, 0.027687, -0.600096, -0.074459],
                                            [2.433075, 0.028543, -0.598987, -0.073985]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
def test_transformerdecoderlayer_gelu(self):
    """Deterministic TransformerDecoderLayer test with GELU activation.

    Same fixed-cosine-weights scheme as test_transformerdecoderlayer, run for
    every accepted spelling of the activation ('gelu' string, F.gelu function,
    nn.GELU module) in both layouts.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0
    bsz = 2
    seq_length = 5
    tgt_length = 3
    for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
        def perm_fn(x):
            # Reference literals are time-major; transpose for batch-first.
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           activation, batch_first=batch_first)
        # Overwrite all parameters with a deterministic cosine pattern.
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        memory_input = torch.tensor([[[60., 70., 80., 90.]]])
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]])
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]]))
        memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
                                           [[2.415448, 0.054389, -0.610932, -0.0156613]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
                                           [[2.338531, 0.087709, -0.65776, 0.080646]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]))
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
                                            [2.42210631, 0.03546578, -0.60679895, -0.05357488]],
                                           [[2.41907674, 0.0336104, -0.60892977, -0.05490462],
                                            [2.42216881, 0.03586554, -0.6067524, -0.05289126]],
                                           [[2.42205716, 0.03488046, -0.60683681, -0.05460596],
                                            [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
    def test_transformerencoder(self):
        """Check nn.TransformerEncoder against precomputed reference outputs.

        Covers 1-, 2- and 6-layer stacks, an optional final LayerNorm, a
        src_key_padding_mask (all-false, then partially true), and both
        batch_first layouts.  Layer parameters are overwritten with a
        deterministic cosine ramp so the hard-coded reference tensors are
        reproducible.
        """
        def get_a_test_layer(use_cuda, activation, batch_first=False):
            # Tiny encoder layer (d_model=4, 2 heads, dropout disabled) so
            # outputs are fully deterministic and small enough to hard-code.
            d_model = 4
            nhead = 2
            dim_feedforward = 16
            dropout = 0.0
            device = torch.device("cuda" if use_cuda else "cpu")
            layer = nn.TransformerEncoderLayer(
                d_model,
                nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                activation=activation,
                batch_first=batch_first).to(device)
            with torch.no_grad():
                # Replace every parameter with cos(0..n-1) reshaped to the
                # parameter's shape — removes dependence on random init.
                for idx, p in enumerate(layer.parameters()):
                    x = p.data
                    sz = x.view(-1).size(0)
                    shape = x.shape
                    x = torch.cos(torch.arange(0, sz).float().view(shape))
                    p.data.copy_(x)
            return layer
        activation = F.relu
        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda" if use_cuda else "cpu")
        for batch_first in (True, False):
            def perm_fn(x):
                # Reference tensors below are written seq-first; transpose
                # them when exercising the batch_first layout.
                return x.transpose(1, 0) if batch_first else x
            encoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                             batch_first=batch_first)
            # Single-layer encoder, no mask.
            model = nn.TransformerEncoder(encoder_layer, 1).to(device)
            encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                   [0.5387, 0.1655, 0.3565, 0.0471]],
                                                  [[0.8335, 0.2799, 0.5031, 0.2947],
                                                   [0.1402, 0.0318, 0.7636, 0.1346]],
                                                  [[0.6333, 0.9344, 0.1376, 0.9938],
                                                   [0.8924, 0.2872, 0.6692, 0.2944]],
                                                  [[0.9897, 0.6915, 0.3154, 0.1733],
                                                   [0.8645, 0.3513, 0.3064, 0.0767]],
                                                  [[0.8117, 0.2366, 0.4838, 0.7881],
                                                   [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                 )).to(device)
            result = model(encoder_input)
            ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
                                                [2.427987, 0.021213, -0.602496, -0.084103]],
                                               [[2.424689, 0.019155, -0.604793, -0.085672],
                                                [2.413863, 0.022211, -0.612486, -0.072490]],
                                               [[2.433774, 0.021598, -0.598343, -0.087548],
                                                [2.425104, 0.019748, -0.604515, -0.084839]],
                                               [[2.436185, 0.022682, -0.596625, -0.087261],
                                                [2.433556, 0.021891, -0.598509, -0.086832]],
                                               [[2.416246, 0.017512, -0.610712, -0.082961],
                                                [2.422901, 0.024187, -0.606178, -0.074929]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # An all-False key-padding mask must be a no-op (same reference).
            mask = torch.zeros([2, 5]).to(device) == 1
            result = model(encoder_input, src_key_padding_mask=mask)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Masking out some positions changes the expected output.
            mask[0, 1] = 1
            mask[1, 3] = 1
            mask[1, 4] = 1
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
                                                [2.428811, 0.021445, -0.601912, -0.084252]],
                                               [[2.425009, 0.019155, -0.604566, -0.085899],
                                                [2.415408, 0.02249, -0.611415, -0.073]],
                                               [[2.434199, 0.021682, -0.598039, -0.087699],
                                                [2.42598, 0.019941, -0.603896, -0.085091]],
                                               [[2.436457, 0.022736, -0.59643, -0.08736],
                                                [2.434021, 0.022093, -0.598179, -0.08679]],
                                               [[2.416531, 0.017498, -0.610513, -0.083181],
                                                [2.4242, 0.024653, -0.605266, -0.074959]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Two stacked layers, same mask.
            model = nn.TransformerEncoder(encoder_layer, 2).to(device)
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[2.419051, 0.017446, -0.608738, -0.085003],
                                                [2.419102, 0.017452, -0.608703, -0.085026]],
                                               [[2.419043, 0.017445, -0.608744, -0.084999],
                                                [2.419052, 0.017446, -0.608738, -0.085004]],
                                               [[2.419067, 0.017448, -0.608727, -0.085010],
                                                [2.419098, 0.017452, -0.608706, -0.085024]],
                                               [[2.419072, 0.017449, -0.608724, -0.085012],
                                                [2.419119, 0.017455, -0.608691, -0.085034]],
                                               [[2.419019, 0.017442, -0.608761, -0.084989],
                                                [2.419075, 0.017449, -0.608722, -0.085014]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Six stacked layers: outputs converge to a near-constant value.
            model = nn.TransformerEncoder(encoder_layer, 6).to(device)
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Final LayerNorm applied on top of a 2-layer stack.
            norm = nn.LayerNorm(4)
            model = nn.TransformerEncoder(encoder_layer, 2, norm=norm).to(device)
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[1.695949, -0.357635, -0.893077, -0.445238],
                                                [1.695955, -0.357639, -0.893050, -0.445266]],
                                               [[1.695948, -0.357634, -0.893082, -0.445233],
                                                [1.695950, -0.357635, -0.893077, -0.445238]],
                                               [[1.695951, -0.357636, -0.893069, -0.445246],
                                                [1.695955, -0.357639, -0.893052, -0.445264]],
                                               [[1.695952, -0.357636, -0.893066, -0.445249],
                                                [1.695957, -0.357641, -0.893041, -0.445276]],
                                               [[1.695946, -0.357632, -0.893095, -0.445220],
                                                [1.695952, -0.357637, -0.893065, -0.445251]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Final LayerNorm on top of a 6-layer stack.
            model = nn.TransformerEncoder(encoder_layer, 6, norm=norm).to(device)
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
    def test_transformerdecoder(self):
        """Check nn.TransformerDecoder against precomputed reference outputs.

        Covers 1-, 2- and 6-layer stacks, an optional final LayerNorm,
        tgt/memory key-padding masks (all-false, then partially true), relu
        and gelu activations, and both batch_first layouts.  Layer
        parameters are overwritten with a deterministic cosine ramp so the
        hard-coded reference tensors are reproducible.
        """
        def get_a_test_layer(use_cuda, activation, batch_first=False):
            # Tiny decoder layer (d_model=4, 2 heads, dropout disabled) so
            # outputs are fully deterministic and small enough to hard-code.
            d_model = 4
            nhead = 2
            dim_feedforward = 16
            dropout = 0.0
            device = torch.device("cuda" if use_cuda else "cpu")
            layer = nn.TransformerDecoderLayer(
                d_model,
                nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                activation=activation,
                batch_first=batch_first).to(device)
            with torch.no_grad():
                # Replace every parameter with cos(0..n-1) reshaped to the
                # parameter's shape — removes dependence on random init.
                for idx, p in enumerate(layer.parameters()):
                    x = p.data
                    sz = x.view(-1).size(0)
                    shape = x.shape
                    x = torch.cos(torch.arange(0, sz).float().view(shape))
                    p.data.copy_(x)
            return layer
        for batch_first in (False, True):
            def perm_fn(x):
                # Reference tensors below are written seq-first; transpose
                # them when exercising the batch_first layout.
                return x.transpose(1, 0) if batch_first else x
            activation = F.relu
            use_cuda = torch.cuda.is_available()
            device = torch.device("cuda" if use_cuda else "cpu")
            decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                             batch_first=batch_first)
            # Single-layer decoder on a deterministic one-element input.
            model = nn.TransformerDecoder(decoder_layer, 1).to(device)
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor(
                [[[2.314351, 0.094805, -0.671322, 0.101977]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
            # Two target positions attending to a single memory element.
            decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                  [[11., 12., 13., 14.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
                                               [[2.422245, 0.051716, -0.606338, -0.024756]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
            # Two target positions, two memory elements.
            decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                                  [[5., 6., 7., 8.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                 [[11., 12., 13., 14.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
                                               [[2.343536, 0.085561, -0.654954, 0.074991]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
            # Multi-position batch: tgt is (3, 2, 4), memory is (5, 2, 4).
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # All-False tgt key-padding mask must be a no-op (same reference).
            key_padding_mask = torch.zeros(2, 3).to(device) == 1
            result = model(decoder_input, memory_input,
                           tgt_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Masking out some target positions changes the expected output.
            key_padding_mask[0, 2] = 1
            key_padding_mask[1, 1] = 1
            key_padding_mask[1, 2] = 1
            result = model(decoder_input, memory_input,
                           tgt_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
                                                [2.4323, 0.029375, -0.599553, -0.071881]],
                                               [[2.428523, 0.026838, -0.602226, -0.07391],
                                                [2.432634, 0.029842, -0.599318, -0.071253]],
                                               [[2.432278, 0.028152, -0.599555, -0.074139],
                                                [2.432659, 0.029244, -0.599294, -0.072382]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # All-False memory key-padding mask must also be a no-op.
            key_padding_mask = torch.zeros(2, 5).to(device) == 1
            result = model(decoder_input, memory_input,
                           memory_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Masking out some memory positions changes the expected output.
            key_padding_mask[0, 4] = 1
            key_padding_mask[1, 3] = 1
            key_padding_mask[1, 4] = 1
            result = model(decoder_input,
                           memory_input,
                           memory_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
                                                [2.432692, 0.028583, -0.599263, -0.073634]],
                                               [[2.428247, 0.02662, -0.602419, -0.074123],
                                                [2.432657, 0.029055, -0.599293, -0.072732]],
                                               [[2.431515, 0.027687, -0.600096, -0.074459],
                                                [2.433075, 0.028543, -0.598987, -0.073985]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Two stacked layers, deterministic one-element input.
            model = nn.TransformerDecoder(decoder_layer, 2).to(device)
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor(
                [[[2.31316, 0.0950293, -0.671995, 0.102802]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
            # Six stacked layers on the multi-position batch.
            model = nn.TransformerDecoder(decoder_layer, 6).to(device)
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.42794, 0.026164, -0.60263, -0.0747591],
                                                [2.43113, 0.0279516, -0.600376, -0.0736896]],
                                               [[2.42794, 0.026164, -0.60263, -0.0747591],
                                                [2.43113, 0.0279516, -0.600376, -0.0736896]],
                                               [[2.42794, 0.026164, -0.60263, -0.0747591],
                                                [2.43113, 0.0279516, -0.600376, -0.0736896]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Final LayerNorm, 2-layer stack, deterministic input.
            norm = nn.LayerNorm(4)
            model = nn.TransformerDecoder(decoder_layer, 2, norm=norm).to(device)
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor(
                [[[1.66166, -0.326986, -1.01466, -0.320017]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
            # Final LayerNorm, 6-layer stack, multi-position batch.
            model = nn.TransformerDecoder(decoder_layer, 6, norm=norm).to(device)
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[1.69559, -0.357291, -0.894741, -0.443553],
                                                [1.69571, -0.357363, -0.894154, -0.444196]],
                                               [[1.69559, -0.357291, -0.894741, -0.443553],
                                                [1.69571, -0.357363, -0.894154, -0.444196]],
                                               [[1.69559, -0.357291, -0.894741, -0.443553],
                                                [1.69571, -0.357363, -0.894154, -0.444196]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Repeat a subset of the scenarios with the gelu activation.
            activation = "gelu"
            use_cuda = torch.cuda.is_available()
            device = torch.device("cuda" if use_cuda else "cpu")
            decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                             batch_first=batch_first)
            model = nn.TransformerDecoder(decoder_layer, 1).to(device)
            # gelu, deterministic one-element input.
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
            memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]]).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
            # gelu, two target positions over one memory element.
            decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                  [[11., 12., 13., 14.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
                                               [[2.415448, 0.054389, -0.610932, -0.0156613]]])).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
            # gelu, two target positions over two memory elements.
            decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                                  [[5., 6., 7., 8.]]])).to(device)
            memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                 [[11., 12., 13., 14.]]])).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
                                               [[2.338531, 0.087709, -0.65776, 0.080646]]])).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
            # gelu, multi-position batch.
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]
                                                 )).to(device)
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                )).to(device)
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
                                                [2.42210631, 0.03546578, -0.60679895, -0.05357488]],
                                               [[2.41907674, 0.0336104, -0.60892977, -0.05490462],
                                                [2.42216881, 0.03586554, -0.6067524, -0.05289126]],
                                               [[2.42205716, 0.03488046, -0.60683681, -0.05460596],
                                                [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
@unittest.skipIf(not (TEST_CUDNN and TEST_MULTIGPU), 'CUDNN or multi-gpu not available')
def test_cudnn_rnn_dropout_states_device(self):
rnn = nn.RNN(10, 20, num_layers=2, dropout=.5)
device = 1
input = torch.randn(5, 4, 10).cuda(device)
rnn.cuda(device)
hx = torch.randn(2, 4, 20).cuda(device)
output = rnn(input, hx)
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    @skipIfRocm
    def test_cudnn_weight_format(self):
        """cuDNN RNNs whose flat weight buffer has been made non-contiguous
        must warn exactly once and still produce the same outputs/gradients
        as the contiguous run.
        """
        rnns = [
            nn.LSTM(10, 20, batch_first=True),
            nn.LSTM(10, 20, batch_first=True, proj_size=10),
            nn.GRU(10, 20, batch_first=True),
            nn.RNN(10, 20, batch_first=True)
        ]
        first_warn = True
        for rnn in rnns:
            rnn.cuda()
            input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
            hx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
            all_vars = [input, hx] + list(rnn.parameters())
            if isinstance(rnn, nn.LSTM):
                # LSTM state is an (h, c) pair; with projections h carries
                # proj_size features instead of hidden_size.
                if rnn.proj_size > 0:
                    hx = torch.randn(1, 5, 10, requires_grad=True, device="cuda")
                    all_vars[1] = hx
                cx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
                all_vars[2:2] = [cx]
                hx = (hx, cx)
            # Reference forward/backward with the weights still contiguous.
            output = rnn(input, hx)
            output[0].sum().backward()
            grads = [v.grad.data.clone() for v in all_vars]
            for v in all_vars:
                v.grad.data.zero_()
            # Re-point one weight at a freshly cloned storage: the module's
            # parameters no longer form a single contiguous cuDNN chunk.
            weight = all_vars[4]
            weight_data = weight.data.clone()
            with torch.no_grad():
                weight.set_(weight_data)
            for _ in range(2):
                with warnings.catch_warnings(record=True) as w:
                    output_noncontig = rnn(input, hx)
                # The non-contiguous warning is expected once per process.
                if first_warn:
                    self.assertEqual(len(w), 1)
                    self.assertIn('weights are not part of single contiguous chunk of memory', w[0].message.args[0])
                    first_warn = False
                    warnings.resetwarnings()
                output_noncontig[0].sum().backward()
                grads_noncontig = [v.grad.data.clone() for v in all_vars]
                for v in all_vars:
                    v.grad.data.zero_()
                # Non-contiguous weights must not change results.
                self.assertEqual(output, output_noncontig)
                self.assertEqual(grads_noncontig, grads)
            # After set_(), the parameter aliases weight_data's storage, so
            # writing through weight_data must be visible in the module.
            weight_data[:] = 4
            self.assertEqual(weight_data, all_vars[4].data)
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    def test_cudnn_weight_tying(self):
        """RNNs with tied parameters (reverse input bias aliased to the
        forward one) must give the same results on cuDNN/GPU as on CPU,
        including after an optimizer step.
        """
        rnns = [
            nn.LSTM(10, 20, batch_first=True, bidirectional=True),
            nn.LSTM(10, 20, batch_first=True, bidirectional=True, proj_size=10),
            nn.GRU(10, 20, batch_first=True, bidirectional=True),
            nn.RNN(10, 20, batch_first=True, bidirectional=True)
        ]
        for rnn in rnns:
            # Tie the reverse-direction input bias to the forward one.
            rnn.bias_ih_l0_reverse = rnn.bias_ih_l0
            rnn.cuda()
            input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
            hx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
            all_vars = [input, hx] + list(rnn.parameters())
            opt = torch.optim.SGD(rnn.parameters(), lr=0.1)
            opt.zero_grad()
            if isinstance(rnn, nn.LSTM):
                # With projections h carries proj_size features, not hidden_size.
                if rnn.proj_size > 0:
                    hx = torch.randn(2, 5, 10, requires_grad=True, device="cuda")
                    all_vars[1] = hx
                cx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
                all_vars[2:2] = [cx]
                hx = (hx, cx)
            # Forward/backward and an SGD step with the tied weights; the
            # warning context absorbs the non-contiguous-weights warning.
            with warnings.catch_warnings(record=True) as w:
                output = rnn(input, hx)
            output[0].sum().backward()
            opt.step()
            # CUDA and CPU runs of the updated, tied model must agree.
            with warnings.catch_warnings(record=True) as w:
                output_cuda = rnn(input, hx)
            rnn.cpu()
            hx = (hx[0].cpu(), hx[1].cpu()) if isinstance(rnn, nn.LSTM) else hx.cpu()
            output_cpu = rnn(input.cpu(), hx)
            self.assertEqual(output_cuda, output_cpu)
    def test_transformer_args_check(self):
        """nn.Transformer must reject shape-mismatched inputs and masks.

        Each scenario builds a model and calls it with exactly one argument
        made inconsistent (batch size, d_model, mask lengths, key-padding
        mask sizes), expecting a RuntimeError from forward, or an
        AssertionError where the failure comes from an internal assert.
        """
        model_name = 'Transformer'
        d_model = 128
        nhead = 4
        num_encoder_layers = 2
        num_decoder_layers = 3
        dim_feedforward = 65
        dropout = 0.3
        bsz = 3
        seq_len = 35
        tgt_len = 15
        activations = [F.relu, F.gelu]
        wrong_bsz = 7
        wrong_d_model = 63
        wrong_nhead = 5
        wrong_activation = "abc"
        def test(encoder_input_shape, decoder_input_shape,
                 src_mask_len=None, tgt_mask_len=None, memory_mask_size=None,
                 src_key_padding_mask_size=None, tgt_key_padding_mask_size=None,
                 memory_key_padding_mask_size=None):
            # Build inputs/masks per the given sizes (None means "omit") and
            # assert that the forward pass raises RuntimeError.
            encoder_input = torch.randn(encoder_input_shape)
            decoder_input = torch.randn(decoder_input_shape)
            model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers,
                                            num_decoder_layers, dim_feedforward, dropout)
            if src_mask_len is not None:
                src_mask = model.generate_square_subsequent_mask(src_mask_len)
            else:
                src_mask = None
            if tgt_mask_len is not None:
                tgt_mask = model.generate_square_subsequent_mask(tgt_mask_len)
            else:
                tgt_mask = None
            if memory_mask_size is not None:
                # NOTE(review): `memory_task` is presumably a typo for
                # `memory_mask`; it is used consistently, so behavior is fine.
                memory_task = torch.rand(memory_mask_size)
            else:
                memory_task = None
            if src_key_padding_mask_size is not None:
                src_key_padding_mask = torch.rand(src_key_padding_mask_size) >= 0.5
            else:
                src_key_padding_mask = None
            if tgt_key_padding_mask_size is not None:
                tgt_key_padding_mask = torch.rand(tgt_key_padding_mask_size) >= 0.5
            else:
                tgt_key_padding_mask = None
            if memory_key_padding_mask_size is not None:
                memory_key_padding_mask = torch.rand(memory_key_padding_mask_size) >= 0.5
            else:
                memory_key_padding_mask = None
            with self.assertRaises(RuntimeError):
                model(encoder_input, decoder_input,
                      src_mask=src_mask,
                      tgt_mask=tgt_mask,
                      memory_mask=memory_task,
                      src_key_padding_mask=src_key_padding_mask,
                      tgt_key_padding_mask=tgt_key_padding_mask,
                      memory_key_padding_mask=memory_key_padding_mask)
        correct_encoder_input_shape = (seq_len, bsz, d_model)
        correct_decoder_input_shape = (tgt_len, bsz, d_model)
        def update_shape(shape, dim, new_dim_size):
            # Copy of `shape` with one dimension replaced.
            new_shape = list(shape)
            new_shape[dim] = new_dim_size
            return tuple(new_shape)
        # Wrong encoder batch size.
        encoder_input_shape = update_shape(correct_encoder_input_shape, 1, wrong_bsz)
        decoder_input_shape = correct_decoder_input_shape
        test(encoder_input_shape, decoder_input_shape)
        # Wrong decoder batch size.
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = update_shape(correct_decoder_input_shape, 1, wrong_bsz)
        test(encoder_input_shape, decoder_input_shape)
        # Wrong encoder feature size (d_model).
        encoder_input_shape = update_shape(correct_encoder_input_shape, 2, wrong_d_model)
        decoder_input_shape = correct_decoder_input_shape
        test(encoder_input_shape, decoder_input_shape)
        # Wrong decoder feature size (d_model).
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = update_shape(correct_decoder_input_shape, 2, wrong_d_model)
        test(encoder_input_shape, decoder_input_shape)
        # nhead that does not divide d_model fails at construction time.
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        with self.assertRaises(AssertionError):
            model = getattr(nn, model_name)(d_model, wrong_nhead, num_encoder_layers,
                                            num_decoder_layers, dim_feedforward, dropout)
        # Oversized src attention mask.
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        wrong_src_mask_size = seq_len + 1
        test(encoder_input_shape, decoder_input_shape, src_mask_len=wrong_src_mask_size)
        # Oversized tgt attention mask.
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        wrong_tgt_mask_size = tgt_len + 1
        test(encoder_input_shape, decoder_input_shape, tgt_mask_len=wrong_tgt_mask_size)
        # Oversized memory mask (both dimensions wrong).
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        wrong_tgt_mask_size = tgt_len + 1
        test(encoder_input_shape, decoder_input_shape,
             memory_mask_size=(wrong_tgt_mask_size, wrong_src_mask_size))
        # Key-padding masks with wrong batch size: the failure surfaces as an
        # AssertionError — NOTE(review): presumably from the model's internal
        # assert rather than the RuntimeError the helper expects; confirm.
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        with self.assertRaises(AssertionError):
            test(encoder_input_shape, decoder_input_shape,
                 src_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        with self.assertRaises(AssertionError):
            test(encoder_input_shape, decoder_input_shape,
                 tgt_key_padding_mask_size=(wrong_bsz, wrong_tgt_mask_size))
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        with self.assertRaises(AssertionError):
            test(encoder_input_shape, decoder_input_shape,
                 memory_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
        # Callable activations must construct; an unknown string must not.
        for activation in activations:
            model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
                                            dim_feedforward, dropout, activation)
        with self.assertRaises(RuntimeError):
            model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
                                            dim_feedforward, dropout, wrong_activation)
def test_transformer_layer_args_check(self):
model_names = ['TransformerEncoderLayer', 'TransformerDecoderLayer']
d_model = 128
nhead = 4
dim_feedforward = 65
dropout = 0.3
bsz = 3
seq_len = 35
tgt_len = 15
activations = [F.relu, F.gelu]
wrong_activation = "abc"
encoder_input_shape = (seq_len, bsz, d_model)
decoder_input_shape = (tgt_len, bsz, d_model)
encoder_input = torch.randn(encoder_input_shape)
decoder_input = torch.randn(decoder_input_shape)
for model_name in model_names:
for activation in activations:
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, activation)
for model_name in model_names:
with self.assertRaises(RuntimeError):
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, wrong_activation)
def test_rnn_args_check(self):
input_size = 3
hidden_size = 5
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
bad_size = 7
def test(input_shape, hidden_shape, mode):
for input, hidden in get_inputs(input_shape, hidden_shape, mode):
model = getattr(nn, mode)(input_size, hidden_size, num_layers)
self.assertRaises(RuntimeError, lambda: model(input, hidden))
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
def get_inputs(input_shape, hidden_shape, mode):
input = torch.randn(input_shape)
hidden = torch.randn(hidden_shape)
if mode != 'LSTM':
return [(input, hidden)]
if hidden_shape == correct_hidden_shape:
return [(input, (hidden, hidden))]
good_hidden = torch.randn(correct_hidden_shape)
return [
(input, (hidden, good_hidden)),
(input, (good_hidden, hidden)),
]
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
input_shape = update_shape(correct_input_shape, 1, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 1, bad_size)
test(input_shape, hidden_shape, mode)
input_shape = update_shape(correct_input_shape, 2, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 2, bad_size)
test(input_shape, hidden_shape, mode)
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 0, bad_size)
test(input_shape, hidden_shape, mode)
    def test_projections_lstm_args_check(self):
        """Projected LSTM (proj_size > 0) must reject shape-mismatched
        input/h/c tensors with RuntimeError; h uses proj_size features,
        c uses hidden_size.
        """
        input_size = 3
        hidden_size = 5
        proj_size = 2
        num_layers = 2
        batch_size = 4
        seq_len = 6
        num_directions = 1
        bad_size = 7
        def test(input_shape, hidden_h_shape, hidden_c_shape):
            # Every candidate (input, (h, c)) triple must make forward raise.
            for input, hidden in get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
                model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
                self.assertRaises(RuntimeError, lambda: model(input, hidden))
        correct_input_shape = (seq_len, batch_size, input_size)
        correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
        correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)
        def update_shape(shape, dim, new_dim_size):
            # Copy of `shape` with a single dimension replaced.
            new_shape = list(shape)
            new_shape[dim] = new_dim_size
            return tuple(new_shape)
        def get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
            input = torch.randn(input_shape)
            hidden_h = torch.randn(hidden_h_shape)
            hidden_c = torch.randn(hidden_c_shape)
            return [(input, (hidden_h, hidden_c))]
        # Wrong input batch size.
        input_shape = update_shape(correct_input_shape, 1, bad_size)
        test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
        # Wrong batch size on both state tensors.
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 1, bad_size)
        hidden_c_shape = update_shape(correct_hidden_c_shape, 1, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)
        # Wrong input feature size.
        input_shape = update_shape(correct_input_shape, 2, bad_size)
        test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
        # Wrong feature size on both state tensors.
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 2, bad_size)
        hidden_c_shape = update_shape(correct_hidden_c_shape, 2, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)
        # Wrong layer count on both state tensors.
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
        hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)
        # h with hidden_size (5) in dim 0 instead of the expected layer count.
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 0, hidden_size)
        hidden_c_shape = correct_hidden_c_shape
        test(input_shape, hidden_h_shape, hidden_c_shape)
        # Only h has the wrong layer count.
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
        hidden_c_shape = correct_hidden_c_shape
        test(input_shape, hidden_h_shape, hidden_c_shape)
        # Only c has the wrong layer count.
        input_shape = correct_input_shape
        hidden_h_shape = correct_hidden_h_shape
        hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_rnn_check_device(self):
input_size = 3
hidden_size = 5
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
model = getattr(nn, mode)(input_size, hidden_size, num_layers)
input = torch.randn(correct_input_shape)
hidden = torch.randn(correct_hidden_shape)
with self.assertRaisesRegex(RuntimeError,
"Input and parameter tensors are not at the same device"):
model(input.to('cuda:0'))
with self.assertRaisesRegex(RuntimeError,
r"Input and hidden tensors are not at the same device"):
if mode == 'LSTM':
model(input, (hidden.to('cuda:0'), hidden.to('cuda:0')))
else:
model(input, (hidden.to('cuda:0')))
if mode == 'LSTM':
with self.assertRaisesRegex(RuntimeError,
"Input and hidden tensors are not at the same device"):
model(input.to('cuda:0'), (hidden.to('cuda:0'), hidden.to('cuda:1')))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_projections_lstm_check_device(self):
    """Projected LSTM must raise when input/hidden tensors are on a
    different device than its (CPU-resident) parameters."""
    input_size = 3
    hidden_size = 5
    proj_size = 2
    num_layers = 2
    batch_size = 4
    seq_len = 6
    num_directions = 1
    correct_input_shape = (seq_len, batch_size, input_size)
    # with projections, h carries proj_size features while c keeps hidden_size
    correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
    correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)

    model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
    input = torch.randn(correct_input_shape)
    hidden_h = torch.randn(correct_hidden_h_shape)
    hidden_c = torch.randn(correct_hidden_c_shape)

    # input on GPU, parameters on CPU
    with self.assertRaisesRegex(RuntimeError,
                                "Input and parameter tensors are not at the same device"):
        model(input.to('cuda:0'))

    # hidden state on GPU, input on CPU
    with self.assertRaisesRegex(RuntimeError,
                                r"Input and hidden tensors are not at the same device"):
        model(input, (hidden_h.to('cuda:0'), hidden_c.to('cuda:0')))

    # h and c split across two different GPUs
    with self.assertRaisesRegex(RuntimeError,
                                "Input and hidden tensors are not at the same device"):
        model(input.to('cuda:0'), (hidden_h.to('cuda:0'), hidden_c.to('cuda:1')))
def test_rnn_initial_hidden_state(self):
    """Omitting the initial hidden state must be equivalent to passing zeros."""
    for mode in ('RNN', 'GRU', 'LSTM'):
        module = getattr(nn, mode)(30, 20, 2)
        inp = torch.randn(10, 32, 30)
        zero_state = torch.zeros(2, 32, 20)
        if mode == 'LSTM':
            # LSTM takes an (h, c) pair
            zero_state = (zero_state, zero_state)
        out_explicit, state_explicit = module(inp, zero_state)
        out_default, state_default = module(inp)
        self.assertEqual(out_explicit, out_default)
        self.assertEqual(state_explicit, state_default)
def test_projections_lstm_initial_hidden_state(self):
    """Projected LSTM: an omitted hidden state equals an explicit all-zeros one."""
    for bidirectional in (False, True):
        lstm = nn.LSTM(30, 20, 2, bidirectional=bidirectional, proj_size=10)
        directions = 2 if bidirectional else 1
        inp = torch.randn(10, 32, 30)
        # h has proj_size features, c keeps the full hidden_size
        zeros = (torch.zeros(2 * directions, 32, 10),
                 torch.zeros(2 * directions, 32, 20))
        out_explicit, state_explicit = lstm(inp, zeros)
        out_default, state_default = lstm(inp)
        self.assertEqual(out_explicit, out_default)
        self.assertEqual(state_explicit, state_default)
def test_projections_errors_on_gru_and_rnn(self):
    """proj_size is an LSTM-only feature; RNN and GRU must reject it."""
    expected = "proj_size argument is only supported for LSTM, not RNN or GRU"
    for cls_name in ('RNN', 'GRU'):
        with self.assertRaisesRegex(ValueError, expected):
            getattr(nn, cls_name)(30, 20, 2, proj_size=10)
def _test_RNN_cpu_vs_cudnn(self, dropout, dtype=torch.double):
    """Run identical RNN/GRU/LSTM forward+backward passes on CPU and on
    cuDNN (same weights, same inputs) and require matching outputs, hidden
    states and gradients — across packed/padded, (non-)contiguous,
    (bi)directional, batch_first and projected-LSTM variants."""
    def forward_backward(cuda, rnn, input_val, grad_output, weights_val, hx_val, grad_hy,
                         cx_val=None, grad_cy=None):
        # Runs one forward+backward on `rnn` (moved to CUDA if `cuda`) after
        # copying the shared reference weights in, and returns a dict of all
        # tensors to be compared between the CPU and GPU runs.
        is_lstm = isinstance(rnn, nn.LSTM)

        # copy the shared reference weights into this module
        for x_layer, y_layer in zip(rnn.all_weights, weights_val):
            for x, y in zip(x_layer, y_layer):
                x.data.copy_(y.data)

        if isinstance(input_val, rnn_utils.PackedSequence):
            input = rnn_utils.PackedSequence(
                input_val.data.data.requires_grad_(True), input_val.batch_sizes)
            input_var = input.data
        else:
            input = input_val.clone().requires_grad_(True)
            input_var = input
        if is_lstm:
            if cx_val is None:
                # derive c0 from h0 when no explicit cell state is given
                hx = (hx_val.clone().requires_grad_(True),
                      hx_val.add(1).requires_grad_(True))
            else:
                hx = (hx_val.clone().requires_grad_(True),
                      cx_val.add(1).requires_grad_(True))
        else:
            hx = hx_val.clone().requires_grad_(True)

        if cuda:
            rnn.cuda()
            input_var.data = input_var.data.cuda()
            if is_lstm:
                hx[0].data = hx[0].data.cuda()
                hx[1].data = hx[1].data.cuda()
            else:
                hx.data = hx.data.cuda()
            grad_hy = grad_hy.cuda()
            if grad_cy is not None:
                grad_cy = grad_cy.cuda()
            grad_output = grad_output.cuda()

        output, hy = rnn(input, hx)

        if isinstance(output, rnn_utils.PackedSequence):
            output = output.data

        # backprop through output and final hidden (and cell) states at once
        if is_lstm:
            if grad_cy is None:
                torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_hy + 1])
            else:
                torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_cy + 1])
        else:
            torch.autograd.backward([output, hy], [grad_output, grad_hy])

        return {'output': output.data,
                'hy': hy[0].data if is_lstm else hy.data,
                'weights': rnn.all_weights,
                'grad_input': input_var.grad.data,
                'grad_hx': hx[0].grad.data if is_lstm else hx.grad.data,
                'cy': hy[1].data if is_lstm else None,
                'grad_cx': hx[1].grad.data if is_lstm else None}

    input_size = 10
    hidden_size = 6
    proj_size = 3
    num_layers = 2
    seq_length = 7
    batch = 6

    def make_noncontig(tensor):
        # stack with a zeroed twin along a new trailing dim, then select the
        # original back out -> same values, non-contiguous layout
        ndim = tensor.dim()
        return torch.stack([tensor.clone().zero_(), tensor], ndim).select(ndim, 1)

    def compare_cpu_gpu(outputs_cpu, outputs_gpu):
        # Compare everything forward_backward returned, then the weight grads.
        self.assertEqual(list(outputs_cpu.keys()), list(outputs_gpu.keys()))
        for key in outputs_cpu.keys():
            if key != 'weights':
                self.assertEqual(outputs_cpu[key], outputs_gpu[key], atol=5e-5, rtol=0, msg=key)

        # weight gradients compared separately (nested per-layer lists)
        for cpu_layer_weight, gpu_layer_weight in zip(outputs_cpu['weights'], outputs_gpu['weights']):
            for (cpu_weight, gpu_weight) in zip(cpu_layer_weight, gpu_layer_weight):
                self.assertEqual(cpu_weight.grad.data, gpu_weight.grad.data, atol=5e-5, rtol=0)

    for module in (nn.RNN, nn.LSTM, nn.GRU):
        for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                in product((True, False), repeat=6):

            num_directions = 2 if bidirectional else 1
            if batch_first:
                input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
                grad_output = torch.randn(batch, seq_length, hidden_size * num_directions, dtype=dtype)
            else:
                input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
                grad_output = torch.randn(seq_length, batch, hidden_size * num_directions, dtype=dtype)

            hx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
            grad_hy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

            if not contig:
                grad_output = make_noncontig(grad_output)
                grad_hy = make_noncontig(grad_hy)
                # NOTE(review): this binds `input_var`, but `input_val` is what
                # is used below — the input itself stays contiguous here.
                input_var = make_noncontig(input_val)
                hx_val = make_noncontig(hx_val)

            if variable_len:
                lengths = [7, 5, 5, 2, 1, 1]
                if lens_as_tensor:
                    lengths = torch.tensor(lengths, dtype=torch.long)
                input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

            rnn = module(input_size,
                         hidden_size,
                         num_layers,
                         bias=bias,
                         dropout=dropout,
                         bidirectional=bidirectional,
                         batch_first=batch_first).to(dtype)
            outputs_cpu = forward_backward(
                False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

            # second module instance, seeded with the CPU module's weights
            rnn_gpu = module(input_size,
                             hidden_size,
                             num_layers,
                             bias=bias,
                             dropout=dropout,
                             bidirectional=bidirectional,
                             batch_first=batch_first).to(dtype)
            outputs_gpu = forward_backward(
                True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

            compare_cpu_gpu(outputs_cpu, outputs_gpu)

    # plain RNN with both nonlinearities
    # (num_directions / bias leak out of the loop above)
    for nonlinearity in ('tanh', 'relu'):
        hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
        input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
        grad_output = torch.randn(
            seq_length, batch, hidden_size * num_directions, dtype=dtype)
        grad_hy = torch.randn(
            num_layers * num_directions, batch, hidden_size, dtype=dtype)

        rnn = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
        outputs_cpu = forward_backward(False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

        rnn_gpu = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
        outputs_gpu = forward_backward(True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

        compare_cpu_gpu(outputs_cpu, outputs_gpu)

    # LSTM with projections: h uses proj_size, c uses hidden_size, and the
    # cell-state gradient is supplied explicitly
    for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
            in product((True, False), repeat=6):
        num_directions = 2 if bidirectional else 1
        if batch_first:
            input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
            grad_output = torch.randn(batch, seq_length, proj_size * num_directions, dtype=dtype)
        else:
            input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
            grad_output = torch.randn(seq_length, batch, proj_size * num_directions, dtype=dtype)

        hx_val = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
        cx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
        grad_hy = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
        grad_cy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

        if not contig:
            grad_output = make_noncontig(grad_output)
            grad_hy = make_noncontig(grad_hy)
            grad_cy = make_noncontig(grad_cy)
            # NOTE(review): same apparently-dead `input_var` binding as above
            input_var = make_noncontig(input_val)
            hx_val = make_noncontig(hx_val)
            cx_val = make_noncontig(cx_val)

        if variable_len:
            lengths = [7, 5, 5, 2, 1, 1]
            if lens_as_tensor:
                lengths = torch.tensor(lengths, dtype=torch.long)
            input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
            grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

        rnn = nn.LSTM(input_size,
                      hidden_size,
                      num_layers,
                      bias=bias,
                      dropout=dropout,
                      bidirectional=bidirectional,
                      batch_first=batch_first,
                      proj_size=proj_size).to(dtype)
        outputs_cpu = forward_backward(
            False, rnn, input_val, grad_output, rnn.all_weights,
            hx_val, grad_hy, cx_val, grad_cy)

        rnn_gpu = nn.LSTM(input_size,
                          hidden_size,
                          num_layers,
                          bias=bias,
                          dropout=dropout,
                          bidirectional=bidirectional,
                          batch_first=batch_first,
                          proj_size=proj_size).to(dtype)
        outputs_gpu = forward_backward(
            True, rnn_gpu, input_val, grad_output, rnn.all_weights,
            hx_val, grad_hy, cx_val, grad_cy)
        compare_cpu_gpu(outputs_cpu, outputs_gpu)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cpu_vs_cudnn_no_dropout(self):
    """CPU and cuDNN RNN implementations must agree with dropout disabled."""
    self._test_RNN_cpu_vs_cudnn(0, torch.double)
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_cpu_vs_cudnn_with_dropout(self):
    """CPU and cuDNN must agree with dropout on.

    Only p=1 gives deterministic, comparable results (everything between
    layers is dropped), so that is the probability exercised here.
    """
    self._test_RNN_cpu_vs_cudnn(1)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cudnn_weight_norm(self):
    """weight_norm on an LSTM weight must survive the move to CUDA (which
    re-flattens cuDNN weights) and leave the module's output unchanged,
    both after applying and after removing the reparameterization."""
    input_size = 10
    hidden_size = 6
    num_layers = 2
    seq_length = 7
    batch = 6

    def check_weight_norm(m, name):
        # reference output computed on CPU, before weight_norm is applied
        input = torch.randn(seq_length, batch, input_size)
        expected_output = m(input)

        # adding the reparameterization must not change the result
        m = torch.nn.utils.weight_norm(m, name=name)
        m = m.cuda()
        input = input.cuda()

        # don't suppress warnings raised during the CUDA forward
        warnings.simplefilter("always")
        self.assertEqual(m(input), expected_output)

        # removing the reparameterization must not change the result either
        m = torch.nn.utils.remove_weight_norm(m, name=name)
        self.assertEqual(m(input), expected_output)

    check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers), 'weight_hh_l0')
    # projected LSTM exercises the extra weight_hr projection matrix
    check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers, proj_size=3), 'weight_hr_l0')
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_partial_flat_weights(self):
    """An LSTM with one weight attribute deleted must still tolerate .cuda()
    (which flattens weights for cuDNN); restoring the weight afterwards
    must give the original output."""
    input_size = 10
    hidden_size = 6
    num_layers = 2
    m = nn.LSTM(input_size, hidden_size, num_layers)
    inp = torch.randn(3, 2, 10)
    out_expected = m(inp)
    # deletion leaves the module with only a partial set of weights
    weight_orig = m.weight_hh_l0
    del m.weight_hh_l0
    self.assertFalse(hasattr(m, "weight_hh_l0"))
    # moving to CUDA must work despite the missing weight
    m.cuda()
    # restore the weight (now on GPU); the module should be usable again
    m.weight_hh_l0 = weight_orig.cuda()
    inp = inp.cuda()
    # don't suppress warnings raised during the CUDA forward
    warnings.simplefilter("always")
    self.assertEqual(m(inp)[0].cpu(), out_expected[0])
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout(self):
    """Check inter-layer dropout in a 2-layer ReLU RNN with all-ones weights:
    with countable activations, the second layer's output reveals exactly
    how many first-layer units survived dropout."""
    # dropout probabilities: edge cases 0 and 1 plus two interior values
    for p in (0, 0.276, 0.731, 1):
        for train in (True, False):
            for cuda in (True, False):
                rnn = nn.RNN(10, 1000, 2, bias=False, dropout=p, nonlinearity='relu')
                if cuda:
                    rnn.cuda()

                if train:
                    rnn.train()
                else:
                    rnn.eval()
                # all-ones weights make every activation exactly countable
                rnn.weight_ih_l0.data.fill_(1)
                rnn.weight_hh_l0.data.fill_(1)
                rnn.weight_ih_l1.data.fill_(1)
                rnn.weight_hh_l1.data.fill_(1)
                input = torch.ones(1, 1, 10)
                hx = torch.zeros(2, 1, 1000)
                if cuda:
                    input = input.cuda()
                    hx = hx.cuda()

                output, hy = rnn(input, hx)
                # every layer-2 unit sums the same surviving layer-1 units
                self.assertEqual(output.data.min(), output.data.max())
                output_val = output.data[0][0][0]
                if p == 0 or not train:
                    # no dropout: each layer-1 unit outputs 10, layer 2 sums 1000 of them
                    self.assertEqual(output_val, 10000)
                elif p == 1:
                    # everything between the layers is dropped
                    self.assertEqual(output_val, 0)
                else:
                    self.assertGreater(output_val, 8000)
                    self.assertLess(output_val, 12000)
                    # survivors are scaled by 1/(1-p); undoing the scale must
                    # leave (10 * number of kept units), i.e. a multiple of 10
                    denorm_mod = (output_val * (1 - p)) % 10
                    self.assertLess(min(denorm_mod, 10 - denorm_mod), 1e-2)

                # hidden state: dropout applies only *between* layers, so
                # layer 0's final hidden value is always 10
                self.assertEqual(hy[0].data.min(), hy[0].data.max())
                self.assertEqual(hy[1].data.min(), hy[1].data.max())
                self.assertEqual(hy.data[0][0][0], 10)
                self.assertEqual(hy.data[1][0][0], output_val)
def test_error_RNN_seq_len_zero(self):
    """Feeding a zero-length sequence into RNN/LSTM/GRU must raise."""
    for rnn_cls in (nn.RNN, nn.LSTM, nn.GRU):
        for bidirectional in (True, False):
            for device in get_all_device_types():
                empty_seq = torch.ones(0, 10, 5)
                layer = rnn_cls(5, 6, bidirectional=bidirectional)
                if device == 'cuda':
                    layer.cuda()
                    empty_seq = empty_seq.cuda()
                with self.assertRaisesRegex(RuntimeError, "Expected sequence length to be larger than 0 in RNN"):
                    layer(empty_seq)
def test_RNN_input_size_zero(self):
    """A zero-sized batch must flow through RNN/LSTM/GRU forward and backward."""
    for rnn_cls in (nn.RNN, nn.LSTM, nn.GRU):
        for device in get_all_device_types():
            empty_batch = torch.zeros((5, 0, 3))
            layer = rnn_cls(input_size=3, hidden_size=4)
            if device == 'cuda':
                layer.cuda()
                empty_batch = empty_batch.cuda()
            result = layer(empty_batch)
            self.assertEqual(result[0].shape, torch.Size([5, 0, 4]))
            # backward through the empty output must not blow up
            result[0].sum().backward()
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout_state(self):
    """Dropout must resample its mask on every call, and a torch.save /
    torch.load round trip must leave the module runnable with dropout
    still active."""
    for p in (0, 0.1234):
        for train in (True, False):
            for cuda in (True, False):
                rnn = nn.RNN(100, 100, 2, bias=False, dropout=p, nonlinearity='relu')
                if cuda:
                    rnn.cuda()

                if train:
                    rnn.train()
                else:
                    rnn.eval()
                input = torch.rand(1, 1, 100)
                hx = torch.rand(2, 1, 100)
                if cuda:
                    input = input.cuda()
                    hx = hx.cuda()

                output1, hy1 = rnn(input, hx)
                output2, hy2 = rnn(input, hx)

                # round-trip through torch.save / torch.load
                buf = io.BytesIO()
                # NOTE(review): torch.save returns None, so `rnn_pickle` is
                # always None — the binding looks vestigial.
                rnn_pickle = torch.save(rnn, buf)
                buf.seek(0)
                rnn2 = torch.load(buf)
                rnn2.flatten_parameters()
                output3, hy3 = rnn2(input, hx)

                if p == 0 or not train:
                    # dropout inactive: all three runs must be identical
                    self.assertEqual(output1, output2)
                    self.assertEqual(output1, output3)
                    self.assertEqual(hy1, hy2)
                    self.assertEqual(hy1, hy3)
                else:
                    # dropout active: each call draws a fresh mask
                    self.assertNotEqual(output1, output2)
                    self.assertNotEqual(output1, output3)
                    self.assertNotEqual(hy1, hy2)
                    self.assertNotEqual(hy1, hy3)
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_change_dropout(self):
    """Mutating rnn.dropout between calls must take effect immediately,
    including revisiting previously used probabilities."""
    for train, cuda in product((True, False), repeat=2):
        rnn = nn.RNN(100, 100, 2, dropout=0, nonlinearity='relu')
        input = torch.rand(3, 2, 100)
        if cuda:
            input.data = input.data.cuda()
            rnn.cuda()

        if train:
            rnn.train()
        else:
            rnn.eval()

        prev_output = None
        # sequence deliberately revisits values (0 and 0.2 appear twice)
        for p in (0, 0.5, 0, 0.7, 0.2, 1, 0.2, 0):
            rnn.dropout = p
            output1, hy1 = rnn(input)
            output2, hy2 = rnn(input)

            if p == 0 or p == 1 or not train:
                # deterministic: p=0 drops nothing, p=1 drops everything,
                # and eval mode disables dropout entirely
                self.assertEqual(output1, output2)
                self.assertEqual(hy1, hy2)
            else:
                self.assertNotEqual(output1, output2)
                self.assertNotEqual(hy1, hy2)

            if prev_output is not None:
                if not train:
                    # eval mode ignores dropout, so output is stable
                    # across different p values
                    self.assertEqual(output1.data, prev_output)
                    self.assertEqual(output2.data, prev_output)
                else:
                    self.assertNotEqual(output1.data, prev_output)
                    self.assertNotEqual(output2.data, prev_output)
            prev_output = output1.data
def test_inplace_thnn(self):
    """Backward through in-place activations must not mutate the upstream grad."""
    for activation_cls in [nn.ReLU, nn.ELU, nn.SELU, nn.CELU, nn.RReLU]:
        act = activation_cls(inplace=True)
        x = torch.randn(5, 5, requires_grad=True)
        # `x + 0` yields a fresh tensor so the in-place op never touches a leaf
        y = act(x + 0)
        grad_out = torch.randn(5, 5)
        untouched = grad_out.clone()
        y.backward(grad_out)
        self.assertEqual(grad_out, untouched)
def test_pixel_shuffle_unshuffle(self):
    """PixelShuffle/PixelUnshuffle: brute-force element mapping, round-trip
    inverse property, gradients, and error cases for invalid shapes and
    non-positive factors, over 1D..5D inputs."""
    def _test_pixel_shuffle_unshuffle_helper(num_input_dims, valid_channels_dim=True,
                                             upscale_factor=None):
        # Brute-force check of the pixel-shuffle index mapping.
        def _verify_pixel_shuffle(input, output, upscale_factor):
            for c in range(output.size(-3)):
                for h in range(output.size(-2)):
                    for w in range(output.size(-1)):
                        height_idx = h // upscale_factor
                        weight_idx = w // upscale_factor
                        channel_idx = (upscale_factor * (h % upscale_factor)) + (w % upscale_factor) + \
                                      (c * upscale_factor ** 2)
                        self.assertEqual(output[..., c, h, w], input[..., channel_idx, height_idx, weight_idx])

        upscale_factor = random.randint(2, 5) if upscale_factor is None else upscale_factor
        # the +1 makes the channel count indivisible by upscale_factor**2
        channels = random.randint(1, 4) * upscale_factor ** 2 + (0 if valid_channels_dim else 1)
        height = random.randint(5, 10)
        width = random.randint(5, 10)

        if num_input_dims == 1:
            input = torch.rand(channels, requires_grad=True)
        elif num_input_dims == 2:
            input = torch.rand(height, width, requires_grad=True)
        else:
            batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
            input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)
        ps = nn.PixelShuffle(upscale_factor)
        pus = nn.PixelUnshuffle(downscale_factor=upscale_factor)

        if num_input_dims >= 3 and valid_channels_dim and upscale_factor > 0:
            output = ps(input)
            _verify_pixel_shuffle(input, output, upscale_factor)
            # shuffle is a pure permutation, so backprop of the output itself
            # must permute straight back to the input values
            output.backward(output.data)
            self.assertEqual(input.data, input.grad.data)

            # unshuffle must be the exact inverse of shuffle
            unshuffle_output = pus(output)
            self.assertEqual(input, unshuffle_output)
        else:
            # too few dims, bad channel count, or non-positive factor
            self.assertRaises(RuntimeError, lambda: ps(input))

    def _test_pixel_unshuffle_error_case_helper(num_input_dims, valid_height_dim=True, valid_width_dim=True,
                                                downscale_factor=None):
        # Build inputs that must make PixelUnshuffle raise.
        downscale_factor = random.randint(2, 5) if downscale_factor is None else downscale_factor
        channels = random.randint(1, 4)
        # the +1 makes height/width indivisible by the downscale factor
        height = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_height_dim else 1)
        width = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_width_dim else 1)

        if num_input_dims == 1:
            input = torch.rand(channels, requires_grad=True)
        elif num_input_dims == 2:
            input = torch.rand(height, width, requires_grad=True)
        else:
            batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
            input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)

        pus = nn.PixelUnshuffle(downscale_factor)
        self.assertRaises(RuntimeError, lambda: pus(input))

    def _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims):
        # the valid case plus every individual invalid condition
        _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims)
        _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, valid_channels_dim=False)
        _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=0)
        _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=-2)
        _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_height_dim=False)
        _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_width_dim=False)
        _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=0)
        _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=-2)

    def test_pixel_shuffle_unshuffle_1D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=1)

    def test_pixel_shuffle_unshuffle_2D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=2)

    def test_pixel_shuffle_unshuffle_3D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=3)

    def test_pixel_shuffle_unshuffle_4D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=4)

    def test_pixel_shuffle_unshuffle_5D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=5)

    test_pixel_shuffle_unshuffle_1D()
    test_pixel_shuffle_unshuffle_2D()
    test_pixel_shuffle_unshuffle_3D()
    test_pixel_shuffle_unshuffle_4D()
    test_pixel_shuffle_unshuffle_5D()
def test_elu_inplace_on_view(self):
    """In-place ELU on a narrow()'d view must write through to the base tensor."""
    base = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)

    def apply_on_view(root):
        clone = root.clone()
        window = clone.narrow(0, 1, 2)
        result = F.elu(window, inplace=True)
        # inplace=True must hand back the very same view object
        self.assertIs(result, window)
        return clone

    gradcheck(apply_on_view, [base])
    gradgradcheck(apply_on_view, [base])
def test_elu_inplace_gradgrad(self):
    """First and second derivatives through in-place ELU must check out."""
    point = torch.randn(8, requires_grad=True)

    def inplace_elu(root):
        # clone first so the in-place op never touches the leaf
        return F.elu(root.clone(), inplace=True)

    gradcheck(inplace_elu, [point])
    gradgradcheck(inplace_elu, [point])
def test_relu_inplace_on_view(self):
    """In-place ReLU on a narrow()'d view must write through to the base tensor."""
    base = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)

    def apply_on_view(root):
        clone = root.clone()
        window = clone.narrow(0, 1, 2)
        result = F.relu(window, inplace=True)
        # inplace=True must hand back the very same view object
        self.assertIs(result, window)
        return clone

    gradcheck(apply_on_view, [base])
    gradgradcheck(apply_on_view, [base])
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_PReLU_backward_requires_grad_false(self):
    """Backward through PReLU must leave .grad unset on a no-grad input."""
    module = nn.PReLU().to('cuda')
    data = torch.randn(2, 3, 4, 5, requires_grad=False, device='cuda')
    module(data).mean().backward()
    self.assertEqual(data.grad, None)
@unittest.skipIf(
    not TEST_NUMPY or not TEST_SCIPY, "Numpy or Scipy not found")
def test_gelu(self):
    """Compare F.gelu against the exact definition x * Phi(x) (scipy normal
    CDF) across dtypes, devices, contiguity and a multithreaded run."""
    def _test_gelu(n, m, dtype, contiguous, atol=None, rtol=None):
        # the reference is evaluated at float or double precision
        numpy_dtype = {
            torch.bfloat16: torch.float, torch.float: torch.float, torch.double: torch.double
        }[dtype]
        devices = ['cpu']
        devices += ['cuda'] if TEST_CUDA else []

        def _gelu_ref(X):
            # exact GELU: x * standard-normal CDF(x)
            return X * stats.norm.cdf(X)

        for d in devices:
            if contiguous:
                X = torch.rand(n, m, dtype=dtype, requires_grad=True, device=d)
            else:
                # strided column slice produces a non-contiguous input
                X = torch.rand(n, m, dtype=dtype, requires_grad=True, device=d)[:, ::2]
            res = F.gelu(X)
            ref = _gelu_ref(X.to(numpy_dtype).cpu().detach().numpy())
            self.assertEqual(res, ref, rtol=rtol, atol=atol, exact_dtype=False)
            # gradcheck only in double precision, where finite differences are sound
            if dtype == torch.float64:
                gradcheck(F.gelu, [X], eps=1e-4)

    for n in range(1, 10):
        for m in range(1, 10):
            # bfloat16 needs a loose absolute tolerance
            _test_gelu(n, m, torch.bfloat16, True, 1e-2, 0)
            _test_gelu(n, m, torch.bfloat16, False, 1e-2, 0)
            _test_gelu(n, m, torch.float32, True)
            _test_gelu(n, m, torch.float32, False)
            _test_gelu(n, m, torch.float64, True)
            _test_gelu(n, m, torch.float64, False)

    # repeat one case with intra-op parallelism enabled
    num_threads = torch.get_num_threads()
    torch.set_num_threads(4)
    try:
        _test_gelu(32, 32, torch.float32, False)
    finally:
        # always restore the global thread count
        torch.set_num_threads(num_threads)
def test_bce_loss_always_nonnegative(self):
    """BCE loss stays >= 0 even for perfectly confident predictions."""
    for make in (torch.ones, torch.zeros):
        target = make(5)
        prediction = make(5)
        negative_count = (nn.BCELoss()(prediction, target) < 0).sum()
        self.assertEqual(negative_count, 0)
def test_bce_with_logits_raises_if_target_and_input_are_different_size(self):
    """Mismatched input/target shapes must be rejected, in both directions."""
    for target_shape, logits_shape in (((5,), (5, 1)), ((5, 1), (5,))):
        target = torch.rand(*target_shape)
        logits = torch.rand(*logits_shape)
        with self.assertRaises(ValueError):
            nn.BCEWithLogitsLoss()(logits, target)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
    """BCEWithLogitsLoss must equal Sigmoid followed by BCELoss — with and
    without weights, and for strongly negative (saturating) logits."""
    sigmoid = nn.Sigmoid()

    target = torch.rand(64, 4)
    output = torch.rand(64, 4) - 0.5

    self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

    weight = torch.rand(4)
    self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))

    # logits of -100 drive sigmoid into saturation near 0
    target = torch.zeros(4, 1, dtype=torch.float)
    output = torch.empty(4, 1, dtype=torch.float).fill_(-100)

    self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

    self.assertEqual(nn.BCEWithLogitsLoss(reduction='none')(output, target),
                     nn.BCELoss(reduction='none')(sigmoid(output), target))

    weight = torch.rand(1, dtype=torch.float)
    self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
def test_bce_loss_input_range(self):
    """BCELoss accepts probabilities in [0, 1] and rejects anything outside."""
    criterion = nn.BCELoss()
    target = torch.rand(25, 25)
    in_range = torch.rand(25, 25)

    criterion(in_range, target)  # must not raise

    # shifting below 0 or above 1 must be rejected
    for out_of_range in (in_range - 1.0, in_range + 1.0):
        with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
            criterion(out_of_range, target)
def test_bce_loss_size_mismatch(self):
    """A (25,) input against a (25, 1) target must be rejected."""
    criterion = nn.BCELoss()
    flat = torch.rand(25)
    column = torch.rand(25, 1)
    with self.assertRaisesRegex(ValueError, r'Using a target size \('):
        criterion(flat, column)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss_large_tensors_with_grad(self):
    """Forward AND backward of BCEWithLogitsLoss must match sigmoid+BCELoss
    on large tensors, for every reduction mode."""
    x_size = 1024
    y_size = 256
    target = torch.rand(x_size, y_size)

    for reduction in ['none', 'mean', 'sum']:
        # two independent leaves holding identical values
        output_sig = torch.rand(x_size, y_size) - 0.5
        output_logits = output_sig.clone().detach()

        output_sig.requires_grad = True
        output_logits.requires_grad = True
        weight = torch.rand(y_size)

        loss_sig = nn.BCELoss(weight, reduction=reduction)(
            torch.sigmoid(output_sig), target
        )
        loss_logits = nn.BCEWithLogitsLoss(weight, reduction=reduction)(
            output_logits, target
        )

        self.assertEqual(loss_logits, loss_sig)

        if reduction == 'none':
            # an unreduced loss needs an explicit upstream gradient
            grad = torch.rand(x_size, y_size)
            loss_sig.backward(grad)
            loss_logits.backward(grad)
        else:
            loss_sig.backward()
            loss_logits.backward()

        self.assertEqual(output_sig.grad, output_logits.grad)
def test_bce_with_logits_has_correct_forward_grad(self):
    """Forward-mode AD must agree with numerical gradients for every reduction."""
    logits = torch.randn(3, 5, requires_grad=True)
    target = torch.randn(3, 5)
    for reduction in ('sum', 'mean', 'none'):
        loss_fn = nn.BCEWithLogitsLoss(reduction=reduction)
        gradcheck(lambda inp, tgt: loss_fn(inp, tgt),
                  (logits, target), check_forward_ad=True)
def test_bce_with_logits_has_correct_grad_at_zero(self):
    """At logits == 0 with zero targets, d(sum-loss)/d(logit) is sigmoid(0) = 0.5."""
    logits = torch.zeros(3, 1, requires_grad=True)
    target = torch.zeros(3, 1)
    nn.BCEWithLogitsLoss(reduction='sum')(logits, target).backward()
    self.assertEqual(logits.grad, torch.full((3, 1), 0.5))
def test_bce_with_logits_broadcasts_weights(self):
    """A (4,) or (16, 1) weight must match its explicitly expanded (16, 4) form."""
    target = torch.rand(16, 4)
    logits = torch.rand(16, 4) - 0.5
    for weight in (torch.rand(4), torch.rand(16, 1)):
        broadcast_loss = nn.BCEWithLogitsLoss(weight)(logits, target)
        expanded = weight.expand(16, 4).contiguous()
        explicit_loss = nn.BCEWithLogitsLoss(expanded)(logits, target)
        self.assertEqual(broadcast_loss, explicit_loss)
def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
    """An all-ones pos_weight must be a no-op."""
    target = torch.rand(64, 4)
    logits = torch.rand(64, 4) - 0.5
    unweighted = nn.BCEWithLogitsLoss()(logits, target)
    weighted = nn.BCEWithLogitsLoss(pos_weight=torch.ones(64, 4))(logits, target)
    self.assertEqual(unweighted, weighted)
def test_bce_with_logits_broadcasts_pos_weights(self):
    """pos_weight of shape (4,), (1, 4) and (64, 4) must all give the same loss."""
    target = torch.rand(64, 4)
    logits = torch.rand(64, 4) - 0.5
    base_weight = torch.rand(4)
    reference = nn.BCEWithLogitsLoss(pos_weight=base_weight)(logits, target)
    for shape in ((1, 4), (64, 4)):
        expanded = base_weight.expand(*shape)
        result = nn.BCEWithLogitsLoss(pos_weight=expanded)(logits, target)
        self.assertEqual(reference, result)
def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
    """An all-ones pos_weight must leave the gradient at the zero point unchanged."""
    logits = torch.zeros(3, 1, requires_grad=True)
    target = torch.zeros(3, 1)
    loss = nn.BCEWithLogitsLoss(pos_weight=torch.ones(3, 1), reduction='sum')(logits, target)
    loss.backward()
    # same 0.5 gradient as the unweighted case
    self.assertEqual(logits.grad, torch.full((3, 1), 0.5))
def test_bce_with_logits_stability(self):
    """Extreme logits (-120) must not produce inf/nan, with or without pos_weight."""
    logits = torch.tensor([0., -120.])
    target = torch.tensor([0., 1.])
    for kwargs in ({}, {'pos_weight': torch.tensor([1., 1.])}):
        loss = nn.BCEWithLogitsLoss(**kwargs)(logits, target)
        self.assertTrue(torch.isfinite(loss).all().item())
def test_bce_loss_broadcasts_weights(self):
    """BCELoss weight broadcasting must match the explicitly expanded weight."""
    sigmoid = nn.Sigmoid()
    target = torch.rand(16, 4)
    logits = torch.rand(16, 4) - 0.5
    for weight in (torch.rand(4), torch.rand(16, 1)):
        implicit = nn.BCELoss(weight)(sigmoid(logits), target)
        expanded = weight.expand(16, 4).contiguous()
        explicit = nn.BCELoss(expanded)(sigmoid(logits), target)
        self.assertEqual(implicit, explicit)
def test_hardtanh_inplace_gradgrad(self):
    """First and second derivatives through in-place hardtanh must check out."""
    point = torch.randn(8, requires_grad=True)

    def inplace_hardtanh(root):
        # clone first so the in-place op never touches the leaf
        return F.hardtanh(root.clone(), inplace=True)

    gradcheck(inplace_hardtanh, [point])
    gradgradcheck(inplace_hardtanh, [point])
# test hardtanh backward for a large tensor
def test_hardtanh_backward(self):
    """hardtanh's gradient is the upstream grad inside (-1, 1), zero outside."""
    x = torch.randn(128, 10000, requires_grad=True)
    upstream = torch.randn(128, 10000)
    F.hardtanh(x).backward(upstream)
    # reference backward: mask the upstream gradient by the linear region
    inside = (x > -1) & (x < 1)
    expected = torch.where(inside, upstream, torch.zeros(128, 10000))
    self.assertEqual(x.grad, expected)
def test_batchnorm_nhwc_cpu(self):
    """channels_last BatchNorm2d on CPU must match the contiguous reference
    in output and all gradients, and keep the NHWC layout."""
    def helper(self, size):
        channels = size[1]
        input = torch.randn(size, dtype=torch.float32, device='cpu', requires_grad=True)
        input = input.contiguous(memory_format=torch.channels_last)
        input.retain_grad()  # no longer a leaf after .contiguous()
        grad = torch.randn(size, dtype=torch.float32, device='cpu')
        grad = grad.contiguous(memory_format=torch.channels_last)
        bn = nn.BatchNorm2d(channels).cpu().float()
        # randomize affine parameters so the comparison is non-trivial
        bn.weight.data.uniform_()
        bn.bias.data.uniform_()

        # contiguous clone driven through an identical BatchNorm as reference
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_bn = nn.BatchNorm2d(channels).cpu().float()
        ref_bn.load_state_dict(bn.state_dict())

        out = bn(input)
        out.backward(grad)
        ref_out = ref_bn(ref_input)
        ref_out.backward(ref_grad)

        # layouts preserved on both paths
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
        self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
        self.assertEqual(input.grad, ref_input.grad)

    helper(self, (4, 8, 10, 10))
    # degenerate channel / spatial dims exercise layout edge cases
    helper(self, (4, 1, 9, 9))
    helper(self, (4, 9, 1, 1))
def test_batchnorm_non_contig_cpu(self):
    """Eval-mode BatchNorm2d on a permuted (non-contiguous) CPU input must
    match the contiguous reference."""
    input = torch.arange(6, dtype=torch.float).reshape(1, 3, 2, 1).cpu()
    # permute yields a non-contiguous (1, 2, 3, 1) view
    input = input.permute(0, 2, 1, 3)

    bn = torch.nn.BatchNorm2d(2).cpu().float().eval()
    bn.weight.data.uniform_()
    bn.bias.data.uniform_()

    ref_input = input.detach().clone().contiguous()
    ref_bn = nn.BatchNorm2d(2).cpu().float().eval()
    ref_bn.load_state_dict(bn.state_dict())

    out = bn(input)
    ref_out = ref_bn(ref_input)

    self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
    self.assertTrue(ref_out.is_contiguous())
    self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
@skipIfRocm
def test_batchnorm_cudnn_nhwc(self):
    """cuDNN BatchNorm on channels_last inputs must match the contiguous
    reference in output and all gradients."""
    def run_test(input, grad_output):
        c = input.size(1)
        mod = nn.BatchNorm2d(c).cuda().float()
        mod.weight.data.uniform_()
        mod.bias.data.uniform_()
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        # NOTE(review): reads the enclosing `grad`, not the `grad_output`
        # parameter — equivalent only because callers pass the same tensor.
        ref_grad = grad.detach().clone().contiguous()
        ref_mod = nn.BatchNorm2d(c).cuda().float()
        ref_mod.load_state_dict(mod.state_dict())
        out = mod(input)
        out.backward(grad_output)
        ref_out = ref_mod(ref_input)
        ref_out.backward(ref_grad)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(mod.weight.grad, ref_mod.weight.grad)
        self.assertEqual(mod.bias.grad, ref_mod.bias.grad)
        self.assertEqual(input.grad, ref_input.grad)

    input = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
    input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()

    grad = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
    grad = grad.contiguous(memory_format=torch.channels_last)
    run_test(input, grad)
    # see #42588, grad is channels_last contiguous, but grad.suggest_memory_format (rightly) return "contiguous"
    # not channels_last
    input = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
    input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()
    grad = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
    grad = grad.permute(0, 2, 1, 3)
    run_test(input, grad)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_cudnn_half(self):
    """Half-precision BatchNorm: the native (THNN) path with half parameters
    and the cuDNN path with float parameters must agree on output dtype,
    values, and input gradient."""
    # THNN
    input = torch.randint(1, 10, (2, 3, 2, 2), dtype=torch.half, device="cuda", requires_grad=True)
    m = nn.BatchNorm2d(3).half().cuda()
    thnn_output = m(input)
    thnn_output.sum().backward()
    thnn_input_grad = input.grad.data.clone()
    self.assertEqualTypeString(thnn_output, input)
    # cuDNN
    if TEST_CUDNN:
        input.grad = None
        m = m.float()  # float params + half input selects the mixed-precision path
        cudnn_output = m(input)
        cudnn_output.sum().backward()
        cudnn_input_grad = input.grad.data.clone()
        self.assertEqualTypeString(cudnn_output, input)
        self.assertEqual(cudnn_output, thnn_output)
        # looser tolerance: gradients computed at different precisions
        self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_nonaffine_cuda_half_input(self):
    """A half input through a float, non-affine BatchNorm keeps its dtype."""
    half_input = torch.randn(16, 3, 24, 24, dtype=torch.half, device="cuda")
    bn = nn.BatchNorm2d(3, affine=False).cuda().float()  # keep running stats in FP32
    self.assertEqualTypeString(bn(half_input), half_input)
    # repeat in eval mode
    bn.eval()
    self.assertEqualTypeString(bn(half_input), half_input)
def test_batchnorm_raises_error_if_less_than_one_value_per_channel(self):
    """BatchNorm1d in training mode needs more than one value per channel."""
    one_value_per_channel = torch.rand(10)[None, :, None]
    with self.assertRaises(ValueError):
        torch.nn.BatchNorm1d(10)(one_value_per_channel)
def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, torch.rand(size), running_var)
def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, torch.rand(size))
def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, weight=Parameter(torch.rand(size)))
def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, bias=Parameter(torch.rand(size)))
def test_batchnorm_buffer_update_when_stats_are_not_tracked(self):
input_size = (32, 4)
# Instantiate BN with buffers that are not None
bn = nn.BatchNorm1d(input_size[1], track_running_stats=True)
# Use buffers for normalization but don't update them
bn.track_running_stats = False
num_batches = bn.num_batches_tracked.clone()
running_mean = bn.running_mean.clone()
running_var = bn.running_var.clone()
_ = bn(torch.rand(input_size))
self.assertTrue(torch.equal(num_batches, bn.num_batches_tracked))
self.assertTrue(torch.equal(running_mean, bn.running_mean))
self.assertTrue(torch.equal(running_var, bn.running_var))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_batchnorm_nhwc_cuda(self):
for dtype in (torch.half, torch.float):
(N, C, H, W) = 2, 64, 50, 50
model = torch.nn.BatchNorm2d(C, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
model = model.eval().cuda().to(dtype)
inp1 = torch.randn(N, C, H, W, device=torch.device('cuda'), dtype=dtype)
inp2 = inp1.contiguous(memory_format=torch.channels_last)
out1 = model(inp1)
out2 = model(inp2)
self.assertTrue(torch.equal(out1, out2))
def test_pairwise_distance(self):
input1 = torch.randn(4, 4, requires_grad=True)
input2 = torch.randn(4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
def test_pdist(self):
for device, trans in itertools.product(device_(), [False, True]):
inp = torch.randn(4, 5, dtype=torch.double, device=device, requires_grad=True)
if trans:
inp = inp.transpose(0, 1)
for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))
def test_pdist_zeros(self):
for device in device_():
inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True).repeat([2, 1])
for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))
def test_pdist_empty_row(self):
for device in device_():
inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True)
self.assertTrue(gradcheck(F.pdist, (inp,)))
def test_pdist_empty_col(self):
for device in device_():
inp = torch.randn(4, 0, dtype=torch.double, device=device, requires_grad=True)
self.assertTrue(gradcheck(F.pdist, (inp,)))
    # pdist does not implement double backward on CPU; expectedFailure documents the
    # gap and will flip to an alert once support lands.
    @unittest.expectedFailure
    def test_pdist_cpu_gradgrad_unimplemented(self):
        inp = torch.randn(4, 5, requires_grad=True)
        gradgradcheck(F.pdist, (inp,))
    # Same as the CPU case above: double backward for pdist is unimplemented on CUDA.
    @unittest.expectedFailure
    def test_pdist_cuda_gradgrad_unimplemented(self):
        inp = torch.randn(4, 5, device='cuda', requires_grad=True)
        gradgradcheck(F.pdist, (inp,))
def test_pdist_large(self):
for device in device_():
def func(x):
return torch.pdist(x, p=2)
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
output.sum().backward()
def test_binary_cross_entropy_grads(self):
import torch.nn.functional as F
for device in device_():
input = torch.rand(3, 3, dtype=torch.double, device=device, requires_grad=True)
target = torch.rand(3, 3, dtype=torch.double, device=device)
gradcheck(F.binary_cross_entropy, [input, target])
gradgradcheck(F.binary_cross_entropy, [input, target])
target.requires_grad_(True)
gradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)
with self.assertRaisesRegex(RuntimeError, "not implemented"):
gradgradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)
    def test_cosine_embedding_loss_with_diff_type(self):
        """cosine_embedding_loss must yield (approximately) the same value for every
        real math-dtype combination of the two inputs and the target."""
        for device in device_():
            input1 = torch.tensor([[2, 3, 4], [6, 2, 4]], dtype=torch.double, device=device)
            input2 = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
            target = torch.tensor([1, -1], dtype=torch.int, device=device)
            expected = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
            for dt1 in get_all_math_dtypes(device):
                for dt2 in get_all_math_dtypes(device):
                    for dt3 in get_all_math_dtypes(device):
                        # uint8 cannot represent the -1 target label.
                        if dt3 == torch.uint8:
                            continue
                        # complex dtypes are not supported by this loss.
                        if dt1.is_complex or dt2.is_complex or dt3.is_complex:
                            continue
                        # NOTE(review): tensors are rebound cumulatively across
                        # iterations; the small integer values survive every cast,
                        # so this matches converting from the double originals.
                        input1 = input1.to(dt1)
                        input2 = input2.to(dt2)
                        target = target.to(dt3)
                        result = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
                        self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
    def test_kl_div_with_diff_type(self):
        """kl_div must agree (within tolerance) across mixed input/target dtypes."""
        for device in device_():
            input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
            target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device)
            expected = torch.nn.functional.kl_div(input, target)
            for input_dtype in get_all_math_dtypes(device):
                # complex kl_div is unsupported
                if input_dtype.is_complex:
                    continue
                for target_dtype in [torch.float32, torch.float64, torch.float16]:
                    # half-precision kl_div is not available on CPU
                    if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):
                        continue
                    # NOTE(review): tensors are rebound cumulatively; the small
                    # integer values are exact in every dtype visited, so results
                    # match converting from the double originals.
                    input = input.to(input_dtype)
                    target = target.to(target_dtype)
                    result = torch.nn.functional.kl_div(input, target)
                    self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
    def test_kl_div_with_diff_type_log_target(self):
        """kl_div with log_target=True must agree across mixed input/target dtypes."""
        for device in device_():
            input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
            target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device).log()
            expected = torch.nn.functional.kl_div(input, target, log_target=True)
            for input_dtype in get_all_math_dtypes(device):
                # complex kl_div is unsupported
                if input_dtype.is_complex:
                    continue
                for target_dtype in [torch.float32, torch.float64, torch.float16]:
                    # half-precision kl_div is not available on CPU
                    if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):
                        continue
                    # NOTE(review): `target` is rebound cumulatively, so once it
                    # passes through float16 the later casts keep float16 precision;
                    # the atol=0.001 tolerance absorbs that rounding.
                    input = input.to(input_dtype)
                    target = target.to(target_dtype)
                    result = torch.nn.functional.kl_div(input, target, log_target=True)
                    self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_log_softmax_target(self):
for device in device_():
a = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
b = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
self.assertEqual(
F.kl_div(F.log_softmax(a, 1), F.log_softmax(b, 1), reduction='none', log_target=True),
torch.zeros_like(a)
)
def test_cosine_embedding_loss_no_reduce(self):
input1 = torch.randn(15, 10, requires_grad=True)
input2 = torch.randn(15, 10, requires_grad=True)
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(
x, y, z, reduction='none'), (input1, input2, target)))
self.assertEqual(F.cosine_embedding_loss(input1, input2, target, reduction='none'),
loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target, reduction='none'))
def test_cosine_embedding_loss_margin_no_reduce(self):
input1 = torch.randn(15, 10, requires_grad=True)
input2 = torch.randn(15, 10, requires_grad=True)
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(
x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))
self.assertEqual(F.cosine_embedding_loss(input1, input2, target, margin=0.5, reduction='none'),
loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target,
margin=0.5, reduction='none'))
def test_cosine_embedding_loss_invalid_shape(self):
input1 = torch.randn(15, 10)
input2 = torch.randn(15, 10)
target = torch.randn(15, 1).sign()
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.cosine_embedding_loss(input1, input2, target)
with self.assertRaisesRegex(RuntimeError, "1D target tensor expects 2D input tensors"):
F.cosine_embedding_loss(torch.randn(10), torch.randn(10), torch.randn(10))
with self.assertRaisesRegex(RuntimeError, "0D target tensor expects 1D input tensors"):
F.cosine_embedding_loss(torch.randn(2, 5), torch.randn(2, 5), torch.randn(()))
def test_margin_ranking_loss_no_reduce(self):
input1 = torch.randn(15).mul_(10).requires_grad_()
input2 = torch.randn(15).mul_(10).requires_grad_()
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
x, y, z, reduction='none'), (input1, input2, target)))
self.assertEqual(F.margin_ranking_loss(input1, input2, target, reduction='none'),
loss_reference_fns['MarginRankingLoss'](input1, input2, target, reduction='none'))
def test_margin_ranking_loss_margin_no_reduce(self):
input1 = torch.randn(15).mul_(10).requires_grad_()
input2 = torch.randn(15).mul_(10).requires_grad_()
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))
self.assertEqual(F.margin_ranking_loss(input1, input2, target, margin=0.5, reduction='none'),
loss_reference_fns['MarginRankingLoss'](input1, input2, target, margin=0.5, reduction='none'))
def test_triplet_margin_loss(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3))
def test_triplet_margin_loss_swap(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, swap=True), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True))
def test_triplet_margin_loss_no_reduce(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, reduction='none'), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, reduction='none'),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, reduction='none'))
def test_triplet_margin_loss_swap_no_reduce(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, swap=True, reduction='none'), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True, reduction='none'),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True, reduction='none'))
def test_triplet_margin_loss_invalid(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
input_1d = torch.randn(10, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input1, input2, input_1d)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input1, input_1d, input3)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input_1d, input2, input3)
def test_pointwise_loss_target_grad_none_reduction(self):
i = torch.randn(5, 10)
t = torch.randn(5, 10, requires_grad=True)
self.assertEqual(F.mse_loss(i, t, reduction='none').size(), t.size())
self.assertEqual(F.l1_loss(i, t, reduction='none').size(), t.size())
def test_pointwise_loss_broadcast(self):
losses = {
'mse_loss': lambda x, y, r: F.mse_loss(x, y, reduction=r),
'l1_loss': lambda x, y, r: F.l1_loss(x, y, reduction=r),
'smooth_l1_loss': lambda x, y, r: F.smooth_l1_loss(x, y, reduction=r),
'huber_loss': lambda x, y, r: F.huber_loss(x, y, reduction=r),
}
input = torch.randn(2, 1, requires_grad=True)
for _name, fn in losses.items():
for requires_grad in [True, False]:
# When target.requires_grad=True, its impl is in Python, while the other is in TH.
target = torch.randn(2, 10, requires_grad=requires_grad)
for reduction in ['none', 'mean', 'sum']:
l = fn(input, target, reduction)
if reduction == 'none':
self.assertEqual(l.size(), target.size())
self.assertTrue(gradcheck(fn, (input, target, reduction)))
# https://github.com/pytorch/pytorch/issues/27692 reports
# that l1_loss get a wrong result for big batch size
def test_l1_loss_correct(self):
for dtype in [torch.float, torch.cfloat]:
for N in range(1, 50, 10):
input = torch.rand(N, 3, 1024, 1024, dtype=dtype)
self.assertEqual(
torch.nn.L1Loss()(input, torch.zeros_like(input)),
input.abs().mean())
def test_smoothl1loss_intergral_target(self):
def _input_grad(input, target, reduction):
output = F.smooth_l1_loss(input, target, reduction=reduction, beta=0.5)
output.sum().backward()
return input.grad
for device, dtype, reduction in product(device_(),
integral_types(),
('none', 'sum', 'mean')):
input = torch.randn(2, 2, device=device, requires_grad=True)
target = torch.randint(0, 9, (2, 2), device=device, dtype=dtype)
input_grad_with_float_target = _input_grad(input, target.float(), reduction)
input_grad = _input_grad(input.detach().clone().requires_grad_(True),
target,
reduction)
self.assertEqual(input_grad, input_grad_with_float_target)
    def test_smoothl1loss_negative_beta_not_supported(self):
        """smooth_l1_loss must reject a negative beta."""
        with self.assertRaises(RuntimeError):
            F.smooth_l1_loss(torch.randn(2, 2), torch.randn(2, 2), beta=-1.0)
def test_huber_loss_invalid_delta(self):
def _test_huber_loss_delta_error_helper(delta):
input, target = torch.randn(2, 2), torch.randn(2, 2)
loss = torch.nn.HuberLoss(delta=delta)
with self.assertRaises(RuntimeError):
loss(input, target)
def test_huber_loss_negative_delta():
_test_huber_loss_delta_error_helper(delta=-0.5)
def test_huber_loss_zero_delta():
_test_huber_loss_delta_error_helper(delta=0.0)
test_huber_loss_negative_delta()
test_huber_loss_zero_delta()
    def test_cosine_similarity(self):
        """Spot-checks for cosine_similarity: output shape, numerical precision,
        division-by-zero gradients, and integer/float type promotion."""
        # Check cosine_similarity input/output shapes
        input_size = (1, 3, 2, 1)
        expected_size = (1, 2, 1)
        input1 = torch.randn(input_size, requires_grad=True)
        input2 = torch.randn(input_size, requires_grad=True)
        self.assertEqual(F.cosine_similarity(input1, input2, dim=1).size(), expected_size)
        # Check numerical precision, issue #18057
        # (identical vectors must not exceed similarity 1.0 due to rounding)
        vv1 = torch.tensor(list([float(i) for i in range(84)])).unsqueeze(0)
        vv2 = torch.tensor(list([float(i) for i in range(84)])).unsqueeze(0)
        out = F.cosine_similarity(vv1, vv2)
        self.assertLessEqual(out, 1.0)
        # Check dividing by 0.
        # NOTE(review): the expected input2.grad of input1 * 1e8 presumably comes
        # from the eps clamp on the zero-norm denominator — confirm against the
        # cosine_similarity backward formula if this tolerance ever changes.
        input1 = torch.randn(10).requires_grad_()
        input2 = torch.zeros_like(input1).requires_grad_()
        torch.cosine_similarity(input1, input2, 0).sum().backward()
        self.assertEqual(input1.grad, torch.zeros_like(input1))
        self.assertEqual(input2.grad, input1 * 1e8)
        # Check type promotion, issue #61454
        input = torch.tensor(12.)
        out = F.cosine_similarity(input.to(torch.int8), input, dim=-1)
        self.assertEqual(out, 1.)
    def test_grid_sample_error_checking(self):
        """grid_sample must validate mode/padding_mode strings, tensor ranks, batch
        sizes, the grid's last dimension, spatial emptiness, bicubic's 4D-only
        restriction, and (on CUDA) device placement."""
        input = torch.empty(1, 1, 2, 2)
        grid = torch.empty(1, 1, 1, 2)
        # assert no error
        F.grid_sample(input, grid, align_corners=False)
        with self.assertRaisesRegex(ValueError, "but got: 'garbage'"):
            F.grid_sample(input, grid, mode='garbage', align_corners=False)
        with self.assertRaisesRegex(ValueError, "but got: 'garbage'"):
            F.grid_sample(input, grid, padding_mode='garbage', align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected 4D or 5D input"):
            F.grid_sample(input[0], grid, align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "grid with same number of dimensions"):
            F.grid_sample(input, torch.empty(1, 1, 1, 1, 3), align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected grid and input to have same batch size"):
            F.grid_sample(input, torch.empty(2, 1, 1, 2), align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected grid to have size 2 in last dimension"):
            F.grid_sample(input, torch.empty(1, 1, 1, 3), align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "expected input to have non-empty spatial dimensions"):
            F.grid_sample(torch.empty(1, 1, 0, 2), grid, align_corners=False)
        with self.assertRaisesRegex(RuntimeError, "bicubic interpolation only supports 4D input"):
            F.grid_sample(torch.empty(1, 1, 2, 2, 2), torch.empty(1, 1, 1, 1, 3), mode='bicubic')
        if TEST_CUDA:
            with self.assertRaisesRegex(RuntimeError, "expected input and grid to be on same device"):
                F.grid_sample(input.cuda(), grid, align_corners=False)
    def test_affine_grid_error_checking(self):
        """affine_grid must validate theta's dtype and shape for the 2D (Nx2x3) and
        3D (Nx3x4) cases, warn about empty spans only with align_corners=True, and
        reject sizes that are not 4D or 5D."""
        # 2D affine
        theta = torch.empty(1, 2, 3, dtype=torch.double)
        size = torch.Size([1, 1, 2, 2])
        # assert no error
        F.affine_grid(theta, size, align_corners=False)
        # check for warning for empty span along dimension
        with warnings.catch_warnings(record=True) as w:
            # Ensure warnings are being shown
            warnings.simplefilter("always")
            # Should not trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=False)
            # Check no warning occurs
            self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
            # Should trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=True)
            # Check warning occurs
            self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
        with self.assertRaisesRegex(ValueError, "Expected theta to have floating point type"):
            F.affine_grid(theta.int(), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta[0], size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
            F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
        # 3D affine
        theta = torch.empty(1, 3, 4, dtype=torch.double)
        size = torch.Size([1, 1, 2, 2, 2])
        # assert no error
        F.affine_grid(theta, size, align_corners=False)
        # check for warning for empty span along dimension
        with warnings.catch_warnings(record=True) as w:
            # Ensure warnings are being shown
            warnings.simplefilter("always")
            # Should not trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=False)
            # Check no warning occurs
            self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
            # Should trigger warning
            F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=True)
            # Check warning occurs
            self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta[0], size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
        with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
            F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
        with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
            F.affine_grid(theta, torch.Size([1, 2, 2]), align_corners=False)
        with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
            F.affine_grid(theta, torch.Size([1, 1, 2, 2, 2, 2]), align_corners=False)
    @skipIfRocm
    def test_grid_sample(self):
        """Exhaustive 2D grid_sample checks for every (mode, padding_mode,
        align_corners) combination:

        - shape handling for same-size, upsampled, downsampled, degenerate and
          empty inputs, with the grid made contiguous along different dimension
          orders, compared across CPU, the unvectorized CPU fallback, and CUDA;
        - hard-coded groundtruth outputs and grid gradients;
        - gradcheck on random inputs and grids.
        """
        # Backward pass of native C++ and CUDA kernels branch depending on whether input requires gradient,
        # so we test both cases.
        def test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad):
            # Runs shape/consistency checks for a variety of input/output sizes.
            def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners):
                for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]:
                    # grid_dim_contig_order specifies the dimension order that can
                    # make grid to be contiguous.
                    # i.e., grid.permute(grid_dim_contig_order) is contiguous.
                    # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be
                    # initialized with contiguous tensor of shape [N, 2, H, W]
                    # and permuted to [N, H, W, 2] afterwards.
                    grid_shape = [N, H, W, 2]
                    grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order]
                    grid_fwd_permute = [None, None, None, None]
                    for i, d in enumerate(grid_dim_contig_order):
                        grid_fwd_permute[d] = i
                    def get_grid(device='cpu', data=None):
                        # Builds a [N, H, W, 2] grid that is contiguous only under
                        # grid_dim_contig_order, optionally reusing existing data.
                        if data is not None:
                            assert list(data.shape) == grid_shape
                            data = data.permute(grid_dim_contig_order).to(device)
                        else:
                            data = torch.randn(grid_init_shape, device=device)
                        grid = data.permute(grid_fwd_permute)
                        assert grid.permute(grid_dim_contig_order).is_contiguous()
                        return grid
                    # non-contiguous batch dimension via transpose
                    input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad)
                    grid_cpu = get_grid().requires_grad_()
                    out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                            align_corners=align_corners)
                    self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W]))
                    gradients = torch.randn_like(out_cpu)
                    out_cpu.backward(gradients)
                    # Compare against unvectorized CPU fallback
                    # NOTE [ grid_sample CPU fallback ]
                    # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for
                    # 32-bit floats. So we also have a fallback that is used only for float tensors
                    # requiring 64-bit indexing. That requires too much memory to run on CI, so we
                    # also export the fallback and test it here to ensure feature parity with
                    # the vectorized version.
                    input_fallback = input_cpu.float().detach_().requires_grad_()
                    grid_fallback = grid_cpu.float().detach_().requires_grad_()
                    out_fallback = torch._grid_sampler_2d_cpu_fallback(
                        input_fallback, grid_fallback,
                        F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                        F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                        align_corners)
                    self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5)
                    out_fallback.backward(gradients.float())
                    if input_requires_grad:
                        self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5)
                    self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5)
                    if TEST_CUDA:
                        # same data on CUDA must match the CPU result and gradients
                        input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_(input_requires_grad)
                        grid_cuda = get_grid('cuda', grid_cpu.detach()).requires_grad_()
                        out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                                 align_corners=align_corners)
                        self.assertEqual(out_cpu, out_cuda)
                        out_cuda.backward(gradients.cuda())
                        if input_requires_grad:
                            self.assertEqual(input_cpu.grad, input_cuda.grad)
                        self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)
                        # check that zero-dimensional input strides don't error out
                        base_input = torch.randn(N, C, 1, IW)
                        input_cpu = base_input.expand_as(input_cuda).requires_grad_(input_requires_grad)
                        out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                                align_corners=align_corners)
                        input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_(input_requires_grad)
                        out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                                 align_corners=align_corners)
                        self.assertEqual(out_cpu, out_cuda)
            # test same size output
            test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners)
            # test larger output (upsampling)
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(IH + 1, 12)
            W = random.randint(IW + 1, 12)
            test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
            # test smaller output (downsampling)
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(2, IH)
            W = random.randint(2, IW)
            test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
            # test 1x1 input
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = 1
            IW = 1
            H = random.randint(2, 5)
            W = random.randint(2, 5)
            test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
            # test empty output (H == 0)
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            W = random.randint(3, IW + 2)
            test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners)
            # test empty channel dimension (C == 0)
            N = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners)
            # test empty batch dimension (N == 0)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)
        for mode in ('bilinear', 'nearest', 'bicubic'):
            for padding_mode in ('zeros', 'border', 'reflection'):
                for align_corners in (True, False):
                    # Hand-computed forward groundtruth for a fixed input/grid pair.
                    input = torch.arange(1., 11).view(1, 1, 2, 5)
                    grid = torch.tensor(
                        [[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]],
                         [[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]]).view(1, 2, 5, 2)
                    if mode == 'bilinear':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                     [2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250],
                                     [0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]]).view(1, 1, 2, 5)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                     [2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
                                     [1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]]).view(1, 1, 2, 5)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                     [2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
                                     [1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]]).view(1, 1, 2, 5)
                        else:
                            raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                    elif mode == 'nearest':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[0., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0., 8., 5., 7., 0.],
                                     [1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[1., 8., 5., 7., 9.],
                                     [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
                        else:
                            raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                    elif mode == 'bicubic':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000],
                                     [2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264],
                                     [0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]]).view(1, 1, 2, 5)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000],
                                     [2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781],
                                     [0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]]).view(1, 1, 2, 5)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000],
                                     [1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]]).view(1, 1, 2, 5)
                            else:
                                groundtruth = torch.tensor(
                                    [[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],
                                     [0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]]).view(1, 1, 2, 5)
                        else:
                            raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                    else:
                        raise AssertionError("missing groundtruth test for interpolation mode '{}'".format(mode))
                    output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                           align_corners=align_corners)
                    self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
                                     msg="groundtruth comparison failed for mode={}, "
                                     "padding_mode={}".format(mode, padding_mode))
                    # The unvectorized CPU fallback must match the same groundtruth.
                    output = torch._grid_sampler_2d_cpu_fallback(
                        input.float(), grid.float(),
                        F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                        F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                        align_corners)
                    self.assertEqual(output, groundtruth.float(), atol=1e-5, rtol=0)
                    # Hand-computed grid-gradient groundtruth for a second fixed pair.
                    input = torch.arange(0., 5).expand((1, 1, 5, 5))
                    grid = torch.tensor(
                        [[[1.0, 1.0], [1.0, -1.0], [0.8, 0.8], [0.8, -0.8]],
                         [[-1.0, -1.0], [-1.0, 1.0], [-0.8, -0.8], [-0.8, 0.8]]]).view(1, 2, 4, 2).requires_grad_()
                    if mode == 'bilinear':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-8., -8.], [-8., 0.], [2., 0.], [2., 0.]],
                                      [[2., 0.], [2., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-5., -5.], [-5., 5.], [-10., -10.], [-10., 10.]],
                                      [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
                                      [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                                      [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
                                      [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                                      [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                        else:
                            raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
                    elif mode == 'nearest':
                        groundtruth = torch.tensor(
                            [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                              [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                    elif mode == 'bicubic':
                        if padding_mode == 'zeros':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[-4.5, -6.], [-4.5, 6.], [2.725679, 0.740878], [2.725679, -0.740878]],
                                      [[1.5, 0.], [1.5, 0.], [1.927921, -0.05688], [1.927921, 0.05688]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-5.859375, -5.888672], [-5.859375, 5.888672], [-5.6250, -7.5000], [-5.6250, 7.5000]],
                                      [[-0.234375, -0.263672], [-0.234375, 0.263672], [1.8750, 0.], [1.8750, 0.]]]]
                                ).view(1, 2, 4, 2)
                        elif padding_mode == 'border':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]],
                                      [[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]],
                                      [[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]]]]).view(1, 2, 4, 2)
                        elif padding_mode == 'reflection':
                            if align_corners:
                                groundtruth = torch.tensor(
                                    [[[[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]],
                                      [[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]]]]).view(1, 2, 4, 2)
                            else:
                                groundtruth = torch.tensor(
                                    [[[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]],
                                      [[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]]]]).view(1, 2, 4, 2)
                        else:
                            raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
                    else:
                        raise AssertionError("missing gradient groundtruth test for interpolation mode '{}'".format(mode))
                    for input_requires_grad in [False, True]:
                        input = input.requires_grad_(input_requires_grad)
                        F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                      align_corners=align_corners).sum().backward()
                        self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,
                                         msg="gradient groundtruth comparison failed for mode={}, "
                                         "padding_mode={}, input_requires_grad={}".format(mode, padding_mode, input_requires_grad))
                        grid.grad.zero_()
                    # Fallback must reproduce the same grid gradient.
                    torch._grid_sampler_2d_cpu_fallback(
                        input.float(), grid.float(),
                        F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                        F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                        align_corners).sum().backward()
                    self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0)
                    # do gradcheck
                    N = random.randint(2, 8)
                    C = random.randint(2, 6)
                    H = random.randint(2, 8)
                    W = random.randint(2, 8)
                    input = torch.randn(N, C, H, W, requires_grad=True)
                    grid = torch.randn(N, H, W, 2, requires_grad=True)
                    self.assertTrue(gradcheck(
                        lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,
                                                        align_corners=align_corners),
                        (input, grid)))
                    input = input.requires_grad_(False)
                    self.assertTrue(gradcheck(
                        lambda grid: F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                                   align_corners=align_corners),
                        (grid,)))
                    for input_requires_grad in [False, True]:
                        test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)
                        if TEST_CUDNN:
                            with cudnn.flags(enabled=False):
                                test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)
    def test_grid_sample_3d(self):
        """Exercise F.grid_sample on 5-D (volumetric) inputs.

        For every (mode, padding_mode, align_corners) combination this checks
        output shapes for same/larger/smaller/1x1/empty sampling grids, CPU vs
        CUDA forward/backward parity (when CUDA is available), and gradcheck.
        """
        def test(N, C, D, H, W, mode, padding_mode, align_corners):
            # Sample an (ID, IH, IW) volume onto a (D, H, W) grid; verify the
            # output size and CPU/CUDA agreement for outputs and gradients.
            def test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners):
                # Built via transpose so the batch dimension has a non-standard stride.
                input_cpu = torch.randn(C, N, ID, IH, IW).transpose(0, 1).requires_grad_()
                grid_cpu = torch.randn(D, N, H, W, 3).transpose(0, 1).requires_grad_()
                out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                        align_corners=align_corners)
                self.assertTrue(out_cpu.size() == torch.Size([N, C, D, H, W]))

                gradients = torch.randn_like(out_cpu)
                out_cpu.backward(gradients)

                if TEST_CUDA:
                    input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                    grid_cuda = grid_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                    out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                             align_corners=align_corners)
                    self.assertEqual(out_cpu, out_cuda)

                    out_cuda.backward(gradients.cuda())
                    self.assertEqual(input_cpu.grad, input_cuda.grad)
                    # grid gradients accumulate in a different order on GPU; small abs tolerance
                    self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)

                    # Repeat with a broadcast (expanded, zero-stride depth) input on both devices.
                    base_input = torch.randn(N, C, 1, IH, IW)
                    input_cpu = base_input.expand_as(input_cuda).requires_grad_()
                    grid_cpu = torch.randn(N, D, H, W, 3, requires_grad=True)
                    out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                            align_corners=align_corners)

                    input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_()
                    grid_cuda = grid_cpu.detach().cuda().requires_grad_()
                    out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                             align_corners=align_corners)
                    self.assertEqual(out_cpu, out_cuda)

            # test same size output
            test_shape(N, C, D, H, W, D, H, W, mode, padding_mode, align_corners)

            # test larger output
            N = random.randint(2, 7)
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(ID + 1, 10)
            H = random.randint(IH + 1, 10)
            W = random.randint(IW + 1, 10)
            test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)

            # test smaller output
            N = random.randint(2, 7)
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(2, ID)
            H = random.randint(2, IH)
            W = random.randint(2, IW)
            test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)

            # test 1x1 input
            # NOTE(review): D is not re-randomized here (it still holds the value
            # from the "smaller output" case above) — looks unintentional; confirm.
            N = random.randint(2, 7)
            C = random.randint(2, 7)
            ID = 1
            IH = 1
            IW = 1
            H = random.randint(2, 5)
            W = random.randint(2, 5)
            test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)

            # testing empty grid (H == 0)
            N = random.randint(2, 7)
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(3, ID + 2)
            W = random.randint(3, IW + 2)
            test_shape(N, C, ID, IH, IW, D, 0, W, mode, padding_mode, align_corners)

            # testing empty channel
            N = random.randint(2, 7)
            ID = random.randint(2, 5)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(3, ID + 2)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(N, 0, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)

            # testing empty batch
            C = random.randint(2, 5)
            ID = random.randint(2, 7)
            IH = random.randint(2, 7)
            IW = random.randint(2, 7)
            D = random.randint(3, ID + 2)
            H = random.randint(3, IH + 2)
            W = random.randint(3, IW + 2)
            test_shape(0, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)

        for mode in ('bilinear', 'nearest'):
            for padding_mode in ('zeros', 'border', 'reflection'):
                for align_corners in (True, False):
                    # do gradcheck
                    N = random.randint(2, 5)
                    C = random.randint(2, 4)
                    D = random.randint(2, 5)
                    H = random.randint(2, 5)
                    W = random.randint(2, 5)
                    input = torch.randn(N, C, D, H, W, requires_grad=True)
                    grid = torch.randn(N, D, H, W, 3, requires_grad=True)
                    self.assertTrue(gradcheck(
                        lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,
                                                        align_corners=align_corners),
                        (input, grid)))

                    test(N, C, D, H, W, mode, padding_mode, align_corners)
def test_affine_grid(self):
# test known input on CPU
input = torch.arange(1., 7).view(1, 2, 3)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=True)
groundtruth = torch.tensor(
[[[0., -3.], [2., 5.]], [[4., 7.], [6., 15.]]]).view(1, 2, 2, 2)
self.assertEqual(output, groundtruth)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=False)
groundtruth = torch.tensor(
[[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]).view(1, 2, 2, 2)
self.assertEqual(output, groundtruth)
for align_corners in (True, False):
# do gradcheck
N = random.randint(1, 8)
C = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, H, W])
inp = torch.randn(N, 2, 3, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # python2 requires this so other tests can trigger
self.assertTrue(gradcheck(
lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
(inp,)))
# test CPU against CUDA
if TEST_CUDA:
N = random.randint(1, 8)
C = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, H, W])
for align_corners in (True, False):
input_cpu = torch.randn(N, 2, 3, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # python2 requires this so other tests can trigger
out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
gradients = torch.randn(out_cpu.size())
out_cpu.backward(gradients)
input_gpu = input_cpu.detach().cuda().requires_grad_()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # python2 requires this so other tests can trigger
out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
out_cuda.backward(gradients.cuda())
self.assertEqual(out_cpu, out_cuda)
self.assertEqual(input_cpu.grad, input_gpu.grad)
def test_affine_grid_3d(self):
# test known input on CPU
input = torch.arange(1., 13).view(1, 3, 4)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=True)
groundtruth = torch.tensor(
[[[[[-2., -10., -18.], [0., 0., 0.]], [[2., 2., 2.], [4., 12., 20.]]],
[[[4., 4., 4.], [6., 14., 22.]], [[8., 16., 24.], [10., 26., 42.]]]]]).view(1, 2, 2, 2, 3)
self.assertEqual(output, groundtruth)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=False)
groundtruth = torch.tensor(
[[[[[1., -1., -3.], [2., 4., 6.]], [[3., 5., 7.], [4., 10., 16.]]],
[[[4., 6., 8.], [5., 11., 17.]], [[6., 12., 18.], [7., 17., 27.]]]]]).view(1, 2, 2, 2, 3)
self.assertEqual(output, groundtruth)
for align_corners in (True, False):
# do gradcheck
N = random.randint(1, 8)
C = random.randint(1, 8)
D = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, D, H, W])
inp = torch.randn(N, 3, 4, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # python2 requires this so other tests can trigger
self.assertTrue(gradcheck(
lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
(inp,)))
# test CPU against CUDA
if TEST_CUDA:
N = random.randint(1, 8)
C = random.randint(1, 8)
D = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, D, H, W])
for align_corners in (True, False):
input_cpu = torch.randn(N, 3, 4, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # python2 requires this so other tests can trigger
out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
gradients = torch.randn(out_cpu.size())
out_cpu.backward(gradients)
input_gpu = input_cpu.detach().cuda().requires_grad_()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # python2 requires this so other tests can trigger
out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
out_cuda.backward(gradients.cuda())
self.assertEqual(out_cpu, out_cuda)
self.assertEqual(input_cpu.grad, input_gpu.grad)
def test_channel_shuffle(self):
# 3D tensor
x = torch.tensor(
[[[1, 2],
[5, 6],
[9, 10],
[13, 14],
]]
)
y_ref = torch.tensor(
[[[1, 2],
[9, 10],
[5, 6],
[13, 14],
]]
)
# ChannelsFirst
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast not supported for 3dim
# 4D tensor
x = torch.tensor(
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
)
y_ref = torch.tensor(
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
)
# ChannelsFirst NCHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last), 2)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
# 5D tensor
x = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[5, 6],
[7, 8]]],
[[[9, 10],
[11, 12]]],
[[[13, 14],
[15, 16]]],
]]
)
y_ref = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[9, 10],
[11, 12]]],
[[[5, 6],
[7, 8]]],
[[[13, 14],
[15, 16]]],
]]
)
# ChannelsFirst NCHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last_3d), 2)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
def test_upsamplingLinear1d(self):
for align_corners in [True, False]:
for recompute_scale_factor in [True, False]:
kwargs = dict(
mode='linear', align_corners=align_corners, recompute_scale_factor=recompute_scale_factor
)
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
m = nn.Upsample(scale_factor=scale_factor, **kwargs)
in_t = torch.ones(1, 1, 2)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
with warnings.catch_warnings(record=True) as w:
out_t = m(in_t)
self.assertEqual(torch.ones(1, 1, out_size), out_t.data)
input = torch.randn(1, 1, 2, requires_grad=True)
if not recompute_scale_factor:
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), (input,))
else:
gradcheck(lambda x: F.interpolate(x, scale_factor=scale_factor, **kwargs), (input,))
def test_upsamplingLinear1d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='linear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9)
in_t_9[:, :, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5])
self.assertEqual(out_t_9[:, :, :15], out_t_5)
    def test_upsampling_not_recompute_scale_factor(self):
        """Bicubic interpolate with recompute_scale_factor=False: known values and shapes.

        The reference values below were produced with OpenCV's resize; PPC
        builds get a slightly different (but self-consistent) reference.
        """
        # test output against known input: result must match opencv
        in_t = torch.arange(8.).view(1, 2, 2, 2)
        expected_out_t = torch.tensor(
            [[[[-0.32725, -0.08843, 0.37933, 0.79744],
               [0.15039, 0.38921, 0.85697, 1.27508],
               [1.08591, 1.32473, 1.79249, 2.21060],
               [1.92213, 2.16095, 2.62871, 3.04682]],
              [[3.67275, 3.91157, 4.37933, 4.79744],
               [4.15039, 4.38921, 4.85697, 5.27508],
               [5.08591, 5.32473, 5.79249, 6.21060],
               [5.92213, 6.16095, 6.62871, 7.04682]]]])
        if IS_PPC:
            # Both OpenCV and PyTorch give a slightly different result on PPC
            expected_out_t = torch.tensor(
                [[[[-0.32725, -0.08843, 0.37933, 0.79744],
                   [0.15039, 0.38921, 0.85697, 1.27508],
                   [1.08591, 1.32473, 1.79249, 2.21060],
                   [1.92212, 2.16094, 2.62870, 3.04681]],
                  [[3.67275, 3.91157, 4.37933, 4.79743],
                   [4.15039, 4.38921, 4.85697, 5.27508],
                   [5.08591, 5.32473, 5.79249, 6.21059],
                   [5.92212, 6.16094, 6.62870, 7.04680]]]])
        out_t = F.interpolate(in_t, scale_factor=2.3, mode='bicubic', align_corners=False, recompute_scale_factor=False)
        # NOTE(review): this changes the process-wide print options and is never
        # restored — looks like leftover debugging; confirm before removing.
        torch.set_printoptions(precision=5)
        self.assertEqual(out_t, expected_out_t, atol=1e-4, rtol=0)

        # shape check + gradcheck for float scale factor up & downsampling
        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')

        for align_corners in [True, False]:
            kwargs = dict(mode='bicubic', align_corners=align_corners)
            # test float scale factor up & downsampling
            for device in device_list:
                for scale_factor in [0.6, 1.6, 2.3]:
                    in_t = torch.ones(2, 2, 2, 2).to(device)
                    out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
                    out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                    self.assertEqual(torch.ones(2, 2, out_size, out_size), out_t.data, atol=1e-5, rtol=0)

                    input = torch.randn(2, 2, 2, 2, requires_grad=True)
                    gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingBilinear2d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='bilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9)
in_t_9[:, :, :4, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15], out_t_5)
def test_upsamplingTrilinear3d(self):
for align_corners in [True, False]:
kwargs = dict(mode='trilinear', align_corners=align_corners)
for memory_format in [torch.contiguous_format, torch.channels_last_3d]:
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
m = nn.Upsample(scale_factor=scale_factor, **kwargs)
in_t = torch.ones(1, 2, 2, 2, 2).contiguous(memory_format=memory_format)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
with warnings.catch_warnings(record=True) as w:
out_t = m(in_t)
self.assertEqual(torch.ones(1, 2, out_size, out_size, out_size), out_t.data)
# Assert that memory format is carried through to the output
self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
input = torch.randn(1, 2, 2, 2, 2, requires_grad=True)
self.assertEqual(
F.interpolate(input, (out_size, out_size, out_size), **kwargs),
F.interpolate(input, scale_factor=scale_factor, **kwargs))
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingTrilinear3d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='trilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9, 9)
in_t_9[:, :, :4, :4, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15, :15], out_t_5)
def test_upsampling_small_scale(self):
m = torch.nn.Upsample(scale_factor=0.5, mode="bilinear")
in_t = torch.arange(1, 5, dtype=torch.float64).reshape(1, 1, 2, 2)
out_t = m(in_t)
expected_out_t = torch.tensor([[[[2.5]]]])
self.assertEqual(expected_out_t, out_t)
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_interpolate_illegal_memory_access(self):
        """Regression test: nearest-interpolation backward must not access out of bounds.

        grad_output is a narrowed view of a larger allocation, so any
        out-of-bounds read/write would show up as a difference from the CPU
        reference rather than crashing silently.
        """
        in_s = 45
        out_s = 14

        input = torch.ones((1, 1, in_s), device='cuda', requires_grad=True)
        # note we allocated grad_output to be larger so out of bound access
        # would be visible in grad_input
        grad = torch.ones((1, 1, out_s * 2), device='cuda', requires_grad=True)
        grad = grad[:, :, :out_s]

        input_ref = input.detach().cpu().requires_grad_()
        grad_ref = grad.cpu()

        out = F.interpolate(input, size=(out_s,), mode='nearest')
        out.backward(grad)

        out_ref = F.interpolate(input_ref, size=(out_s,), mode='nearest')
        out_ref.backward(grad_ref)

        self.assertEqual(out_ref, out)
        self.assertEqual(input_ref.grad, input.grad)
    def test_interpolate(self):
        """F.interpolate / nn.Upsample across modes, spatial dims and devices.

        Covers 'nearest'/'area' on 1-3 spatial dims and the linear family
        ('linear'/'bilinear'/'bicubic'/'trilinear') on its matching dim,
        checking output values, size/scale consistency, and (grad)gradcheck.
        """
        def _test_interpolate_helper(in_t, scale_factor, layer):
            # NOTE: reads `kwargs` from the enclosing loop via closure, so the
            # size-based F.interpolate calls below use the mode under test.
            out_size = int(math.floor(in_t.shape[-1] * scale_factor))
            dim = len(in_t.shape) - 2
            out_shape = [1, 1] + [out_size] * dim
            with warnings.catch_warnings(record=True) as w:
                out_t = layer(in_t)
            self.assertEqual(torch.ones(out_shape), out_t)

            # size-based and scale-based calls must agree
            self.assertEqual(
                F.interpolate(in_t, (out_size,) * dim, **kwargs),
                F.interpolate(in_t, scale_factor=scale_factor, **kwargs))
            gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)
            gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)

        def _make_input(dim, device):
            # ones of shape [1, 1] + [2] * dim (i.e. `dim` spatial dimensions)
            size = [1, 1]
            size += [2] * dim
            return torch.ones(size, requires_grad=True, device=device)

        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')

        for device in device_list:
            for scale_factor in [0.5, 1.5, 2]:
                for mode in ['nearest', 'area']:
                    kwargs = dict(mode=mode)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    for input in [_make_input(1, device), _make_input(2, device), _make_input(3, device)]:
                        _test_interpolate_helper(input, scale_factor, m)

                for align_corners in [True, False]:
                    kwargs = dict(mode='linear', align_corners=align_corners)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(1, device), scale_factor, m)

                    kwargs = dict(mode='bilinear', align_corners=align_corners)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(2, device), scale_factor, m)

                    # bicubic has no nn.Upsample shortcut for this path; wrap F.interpolate
                    kwargs = dict(mode='bicubic', align_corners=align_corners)

                    def m(t):
                        return F.interpolate(t, scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(2, device), scale_factor, m)

                    kwargs = dict(mode='trilinear', align_corners=align_corners)
                    m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                    _test_interpolate_helper(_make_input(3, device), scale_factor, m)
def test_linear_broadcasting(self):
m = nn.Linear(5, 8)
inp = torch.randn(2, 3, 5)
expected = m(inp.view(6, 5)).view(2, 3, 8)
self.assertEqual(expected, m(inp))
def test_bilinear(self):
module = nn.Bilinear(10, 10, 8)
input1 = torch.randn(4, 10, requires_grad=True)
input2 = torch.randn(4, 10, requires_grad=True)
grad_output = torch.randn(4, 8)
res = module(input1, input2)
expected = (torch.einsum("bi,kij,bj->bk", input1, module.weight, input2) +
module.bias)
self.assertEqual(res, expected)
grads = torch.autograd.grad(res, [module.weight, module.bias, input1, input2], grad_output)
grads_expected = torch.autograd.grad(expected, [module.weight, module.bias, input1, input2], grad_output)
for g, ge in zip(grads, grads_expected):
self.assertEqual(g, ge)
def test_bilinear_non_contiguous(self):
module = nn.Bilinear(7, 7, 5)
input1 = torch.randn(4, 7, 10, requires_grad=True)
input2 = torch.randn(4, 7, 10, requires_grad=True)
input1_tp = input1.transpose(1, 2)
input2_tp = input2.transpose(1, 2)
grad_output = torch.randn(4, 10, 5)
def run(input1_tp, input2_tp):
input1.grad = input2.grad = None
output = module(input1_tp, input2_tp)
output.backward(grad_output)
return output.data, input1.grad.data, input2.grad.data
out_nc, g1_nc, g2_nc = run(input1_tp, input2_tp)
input1_tp = input1_tp.contiguous()
input2_tp = input2_tp.contiguous()
out, g1, g2 = run(input1_tp, input2_tp)
self.assertEqual(out, out_nc)
self.assertEqual(g1, g1_nc)
self.assertEqual(g2, g2_nc)
def test_bilinear_no_bias(self):
module = nn.Bilinear(10, 10, 8)
module_no_bias = nn.Bilinear(10, 10, 8, False)
module.bias.data.zero_()
module.weight.data.copy_(module_no_bias.weight)
input1 = torch.randn(4, 10, requires_grad=True)
input2 = torch.randn(4, 10, requires_grad=True)
grad_output = torch.randn(4, 8)
def run(net):
input1.grad = input2.grad = None
output = net(input1, input2)
output.backward(grad_output)
return output.data, input1.grad.data, input2.grad.data
out, g1, g2 = run(module)
out_nb, g1_nb, g2_nb = run(module_no_bias)
self.assertEqual(out, out_nb)
self.assertEqual(g1, g1_nb)
self.assertEqual(g2, g2_nb)
_assertGradAndGradgradChecks(self,
lambda x1, x2: F.bilinear(x1, x2, module_no_bias.weight, module_no_bias.bias),
(input1, input2))
def test_bilinear_broadcasting(self):
m = nn.Bilinear(5, 6, 8)
input1 = torch.randn(2, 3, 5)
input2 = torch.randn(2, 3, 6)
expected = m(input1.view(6, 5), input2.view(6, 6)).view(2, 3, 8)
self.assertEqual(expected, m(input1, input2))
def test_conv_tbc(self):
inp = torch.randn(9, 4, 5, requires_grad=True)
weight = torch.randn(3, 5, 6, requires_grad=True)
bias = torch.randn(6, requires_grad=True)
gradcheck(lambda i, w, b, pad: F.conv_tbc(i, w, b, pad), (inp, weight, bias, 3))
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @unittest.skipIf(not TEST_CUDNN, "needs cudnn")
    @skipIfRocmVersionLessThan((4, 3))
    @skipIfNotMiopenSuggestNHWC
    def test_grouped_conv_cudnn_nhwc_support(self):
        """Smoke test: grouped (groups=4) conv and transposed conv run without
        error in channels-last (NHWC) half precision on cudnn/miopen."""
        # in order to catch the holes in grouped convolution in nhwc support for earlier cudnn version
        input = torch.randn((16, 16, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
        weight = torch.randn((8, 4, 3, 3), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
        out = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), False, (0, 0), 4)
        input = torch.randn((16, 8, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
        out_transpose = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), True, (0, 0), 4)
    @unittest.expectedFailure
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @unittest.skipIf(not TEST_CUDNN, "needs cudnn")
    def test_conv_cudnn_memory_layout_dominance(self):
        """Documents the *desired* (not yet implemented) behavior — hence
        expectedFailure: conv.weight's memory format should dominate the
        memory format of the output."""
        # desired behavior here is to have the memory_layout of conv.weight to
        # dominate the layout of output.
        # which is not the same as current behavior; we'll fix this in a future
        # change (the test stays expectedFailure until then).
        input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device="cuda", requires_grad=True)
        conv = nn.Conv2d(8, 4, 3).cuda().float()

        # contiguous input + contiguous weight -> contiguous output
        out = conv(input)
        self.assertTrue(out.is_contiguous())

        # channels-last input, contiguous weight -> still contiguous output
        input = input.contiguous(memory_format=torch.channels_last)
        out = conv(input)
        self.assertTrue(out.is_contiguous())

        # channels-last weight should force channels-last output...
        conv.weight.data = conv.weight.contiguous(memory_format=torch.channels_last)
        out = conv(input)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))

        # ...even when the input goes back to contiguous
        input = input.contiguous()
        out = conv(input)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_cudnn_noncontiguous_weight(self):
input = torch.tensor([1, 1, 1], dtype=torch.double, device="cuda").view(1, 1, 3)
weights1 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2)
weights2 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2).contiguous()
self.assertEqual(F.conv1d(input, weights1, bias=None, stride=2, dilation=2),
F.conv1d(input, weights2, bias=None, stride=2, dilation=2))
def run_grad_conv_test(self, func_forward, func_backward, dim=1, gradient='input'):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out, dilation in \
product([1, 2], [1, 2], [0, 1, 2], [2], [3], [1]):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(dim):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True)
weight = torch.randn(weight_shape, requires_grad=True)
if has_bias:
bias = torch.randn([chan_out], requires_grad=True)
output = func_forward(input, weight, stride=stride, padding=padding, dilation=dilation, bias=bias)
gradient_o = torch.randn(output.shape)
gradient_w = torch.autograd.grad(output, input if (gradient == 'input') else weight, gradient_o)
self.assertEqual(gradient_w[0],
func_backward(
input_shape if (gradient == 'input') else input,
weight_shape if (gradient == 'weight') else weight,
gradient_o,
stride=stride,
padding=padding,
dilation=dilation))
    def test_grad_conv1d_input(self):
        # F.grad.conv1d_input must reproduce autograd's input gradient for conv1d.
        self.run_grad_conv_test(F.conv1d, F.grad.conv1d_input, 1, 'input')
    def test_grad_conv1d_weight(self):
        # F.grad.conv1d_weight must reproduce autograd's weight gradient for conv1d.
        self.run_grad_conv_test(F.conv1d, F.grad.conv1d_weight, 1, 'weight')
    def test_grad_conv2d_input(self):
        # F.grad.conv2d_input must reproduce autograd's input gradient for conv2d.
        self.run_grad_conv_test(F.conv2d, F.grad.conv2d_input, 2, 'input')
    def test_grad_conv2d_weight(self):
        # F.grad.conv2d_weight must reproduce autograd's weight gradient for conv2d.
        self.run_grad_conv_test(F.conv2d, F.grad.conv2d_weight, 2, 'weight')
    def test_grad_conv3d_input(self):
        # F.grad.conv3d_input must reproduce autograd's input gradient for conv3d.
        self.run_grad_conv_test(F.conv3d, F.grad.conv3d_input, 3, 'input')
    def test_grad_conv3d_weight(self):
        # F.grad.conv3d_weight must reproduce autograd's weight gradient for conv3d.
        self.run_grad_conv_test(F.conv3d, F.grad.conv3d_weight, 3, 'weight')
@unittest.skipIf(not torch._nnpack_available(), "NNPACK unavailable")
def test_nnpack_conv(self):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out in \
product([1, 2, 3, 4], [1, 2], [0, 1, 2], [2], [3]):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(2):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True, dtype=torch.float)
weight = torch.randn(weight_shape, requires_grad=True, dtype=torch.float)
if has_bias:
bias = torch.randn([chan_out], requires_grad=True, dtype=torch.float)
output = torch._nnpack_spatial_convolution(input, weight, stride=stride, padding=padding, bias=bias)
output_expected = torch.nn.functional.conv2d(input, weight, stride=stride, padding=padding, bias=bias)
self.assertEqual(output, output_expected, atol=3e-4, rtol=0)
gradient_o = torch.randn(output.shape, dtype=torch.float)
grads = torch.autograd.grad(output, [input, weight], gradient_o)
grads_expected = torch.autograd.grad(output_expected, [input, weight], gradient_o)
for gr, gr_expected in zip(grads, grads_expected):
self.assertEqual(gr, gr_expected, atol=3e-4, rtol=0)
def test_fold_invalid_arg(self):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
fold(torch.randn(1, 5, 9))
with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
fold(torch.randn(1, 19, 9))
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
fold(torch.randn(1, 6, 10))
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2))
fold(torch.randn(1, 6, 5))
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2), dilation=(1, 2), padding=(2, 0))
fold(torch.randn(1, 6, 5))
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2), stride=1, dilation=8, padding=0)
with self.assertRaisesRegex(RuntimeError, r"calculated shape of the array of sliding blocks as"):
fold(torch.randn(1, 12, 12))
def test_unfold_invalid_arg(self):
unfold = nn.Unfold(kernel_size=(2, 3))
with self.assertRaisesRegex(NotImplementedError, r"Only 4D input Tensors are supported"):
unfold(torch.randn(1, 5, 2))
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(2, 3))
unfold(torch.randn(1, 2, 2, 2))
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(5, 3), padding=(1, 1))
unfold(torch.randn(1, 2, 2, 3))
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(1, 3), padding=(1, 1), dilation=(1, 2))
unfold(torch.randn(1, 2, 2, 2))
def test_conv_padding_mode(self):
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode="xyz")
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode=3)
with self.assertRaisesRegex(ValueError, "Only \"zeros\" "):
nn.ConvTranspose2d(3, 3, 3, padding_mode="reflect")
def test_softmin(self):
x = torch.randn(2, 16)
self.assertEqual(F.softmin(x, 1), F.softmax(-x, 1))
self.assertEqual(F.softmin(x, 0), F.softmax(-x, 0))
def test_log_softmax_cpu(self, dtype=torch.bfloat16):
inputf = torch.rand(32, 100, device="cpu", dtype=torch.float, requires_grad=True)
input = inputf.to(dtype).detach().requires_grad_(True)
outf = F.log_softmax(inputf, dim=-1)
out = F.log_softmax(input, dim=-1)
self.assertEqual(out.dtype, dtype)
0)
out.sum().backward()
outf.sum().backward()
self.assertEqual(input.grad.dtype, dtype)
self.assertEqual(input.grad, inputf.grad.to(dtype), atol=0.1, rtol=0)
def test_softmax_cpu(self, dtype=torch.bfloat16):
inputf = torch.rand(32, 100, device="cpu", dtype=torch.float, requires_grad=True)
input = inputf.to(dtype).detach().requires_grad_(True)
outf = F.softmax(inputf, dim=-1)
out = F.softmax(input, dim=-1)
self.assertEqual(out.dtype, dtype)
self.assertEqualIgnoreType(out, outf, atol=1e-3, rtol=0)
out.sum().backward()
outf.sum().backward()
self.assertEqual(input.grad.dtype, dtype)
self.assertEqual(input.grad, inputf.grad.to(dtype), atol=1e-3, rtol=0)
    def test_adaptive_log_softmax(self):
        """nn.AdaptiveLogSoftmaxWithLoss: cutoff validation, cluster shapes,
        no-batch-dim support, log_prob/forward/predict consistency, and
        agreement with F.nll_loss."""
        # args validation: cutoffs must be unique, increasing, and < n_classes
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 15], div_value=2.)

        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 10], div_value=2.)

        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 25], div_value=2.)

        with self.assertRaisesRegex(ValueError, "cutoffs should be a sequence of unique,"):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 20], div_value=2.)

        # a valid cutoff sequence must not raise
        _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 19], div_value=2.)

        # input/target batch sizes must match
        with self.assertRaisesRegex(RuntimeError, r"Input and target should have the same size"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 5, 10])
            asfm(x, y)

        # out-of-range target values are rejected
        with self.assertRaisesRegex(RuntimeError, r"Target values should be in"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 20])
            asfm(x, y)

        # cluster shapes: head = 5 shortlist targets + 3 cluster slots; each tail
        # cluster halves its projection size (div_value=2): 8, 4, 2
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(2, 16)
        y = torch.tensor([0, 17])

        self.assertEqual(asfm.head.weight.size(), (5 + 3, 16))
        self.assertEqual(asfm.tail[0][1].weight.size(), (5, 8))
        self.assertEqual(asfm.tail[1][1].weight.size(), (5, 4))
        self.assertEqual(asfm.tail[2][1].weight.size(), (5, 2))

        self.assertEqual(asfm(x, y).output.size(), (2, ))

        # no-batch-dim input must behave like a batch of one
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(1, 16)
        y = torch.tensor([17])
        x2 = x.squeeze(0)
        y2 = y.squeeze(0)
        self.assertEqual(asfm(x, y).output.squeeze(0), asfm(x2, y2).output)

        # log_prob rows are proper log-distributions over all 4 classes
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 4, [2], div_value=2.)
        x = torch.randn(4, 8)
        logprob_out = asfm.log_prob(x)
        self.assertEqual(torch.exp(logprob_out).data.sum(1), torch.ones(4))

        # forward output/loss must agree with gathering from log_prob + nll_loss
        for v in [0, 1, 2, 3]:
            y = torch.full((4,), v, dtype=torch.long)
            out, loss = asfm(x, y)
            self.assertEqual(out, logprob_out.gather(1, y.unsqueeze(1)).squeeze())
            self.assertEqual(loss, F.nll_loss(logprob_out, y))

        # predict must equal argmax of log_prob: first with every argmax in the
        # shortlist (tail rows of the head zeroed)...
        x = torch.randn(64, 8).abs_()

        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[asfm.shortlist_size:, :].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))

        # ...then with every argmax outside the shortlist...
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[:asfm.shortlist_size, :].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))

        # ...and finally with half the batch resolved in the shortlist and half not
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        x[:32, :asfm.shortlist_size].zero_()
        x[32:, asfm.shortlist_size:].zero_()
        asfm.head.weight.data[:asfm.shortlist_size, asfm.shortlist_size:].zero_()
        asfm.head.weight.data[asfm.shortlist_size:, :asfm.shortlist_size].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
def test_cross_entropy_loss(self, dtype=torch.bfloat16):
loss_cpu = nn.CrossEntropyLoss().cpu()
inputf = torch.randn(15, 10, device="cpu", dtype=torch.float, requires_grad=True)
input = inputf.to(dtype).detach().requires_grad_(True)
target = torch.empty(15, dtype=torch.long).random_(10)
outf = loss_cpu(inputf, target)
out = loss_cpu(input, target)
self.assertEqual(out.dtype, dtype)
=0)
outf.backward()
out.backward()
self.assertEqual(input.grad.dtype, dtype)
tol=1e-1, rtol=0)
    def test_cross_entropy_loss_precision(self):
        """CrossEntropyLoss on a large float32 input must agree with float64.

        Guards against precision loss accumulating in the reduction over many
        elements; the shapes are intentionally large (hundreds of MB) — do not
        shrink them without checking which regression this covers.
        """
        loss_cpu = nn.CrossEntropyLoss().cpu()
        inputf = torch.randn(128, 2, 768, 768, device="cpu", dtype=torch.float)
        inputd = inputf.double()
        target = torch.randint(2, (128, 768, 768), dtype=torch.long)

        outf = loss_cpu(inputf, target)
        outd = loss_cpu(inputd, target)
        self.assertEqual(outf, outd, exact_dtype=False)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_convert_sync_batchnorm(self):
    """convert_sync_batchnorm swaps BatchNorm1d for SyncBatchNorm, leaves
    InstanceNorm1d untouched, and preserves every state_dict entry
    (values and device)."""
    module = torch.nn.Sequential(
        torch.nn.BatchNorm1d(100),
        torch.nn.InstanceNorm1d(100)
    ).cuda()

    # Keep an identical, unconverted copy as the comparison anchor.
    comp_module = torch.nn.Sequential(
        torch.nn.BatchNorm1d(100),
        torch.nn.InstanceNorm1d(100)
    ).cuda()
    comp_module.load_state_dict(module.state_dict())

    sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
    children = list(sync_bn_module.children())
    # Only the BatchNorm child is converted; the InstanceNorm child is kept.
    self.assertEqual(children[0].__class__, torch.nn.SyncBatchNorm)
    self.assertEqual(children[1].__class__, torch.nn.InstanceNorm1d)

    # Conversion must carry over all parameters/buffers, on the same device.
    for layer, converted_layer in zip(comp_module.children(), sync_bn_module.children()):
        for key in layer.state_dict().keys():
            self.assertEqual(layer.state_dict()[key].device, converted_layer.state_dict()[key].device)
            self.assertEqual(layer.state_dict()[key], converted_layer.state_dict()[key])
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_sync_batchnorm_backward_elemt(self):
    """batch_norm_backward_elemt must return the same grad-input regardless
    of the memory format (contiguous vs channels_last) of its grad_output
    and input tensors."""
    device = 'cuda'
    saved_input = torch.rand(2, 3, 2, 1, device=device)
    grad_output = torch.rand(2, 3, 2, 1, device=device)
    mean = torch.rand(3, device=device)
    invstd = torch.rand(3, device=device)
    weight = torch.rand(3, device=device)
    sum_dy = torch.rand(3, device=device)
    sum_dy_xmu = torch.rand(3, device=device)
    count_tensor = torch.tensor([5, 5, 5], dtype=torch.int32, device=device)

    # Reference result with both tensors in the default contiguous format.
    gI_contiguous = torch.batch_norm_backward_elemt(
        grad_output,
        saved_input,
        mean,
        invstd,
        weight,
        sum_dy,
        sum_dy_xmu,
        count_tensor
    )

    # All combinations of mixed memory formats must agree with the reference.
    for a, b in [
            (torch.channels_last, torch.contiguous_format),
            (torch.contiguous_format, torch.channels_last),
            (torch.channels_last, torch.channels_last),
    ]:
        gI_actual = torch.batch_norm_backward_elemt(
            grad_output.contiguous(memory_format=a),
            saved_input.contiguous(memory_format=b),
            mean,
            invstd,
            weight,
            sum_dy,
            sum_dy_xmu,
            count_tensor
        )
        self.assertEqual(gI_actual, gI_contiguous)
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_sync_batchnorm_accuracy_cuda(self):
    """batch_norm_stats must compute the same per-channel mean as a plain
    torch.mean reduction, for both contiguous and channels_last inputs."""
    def _batch_norm_stats(data):
        mean1, _ = torch.batch_norm_stats(data, 1e-5)
        mean2, _ = torch.batch_norm_stats(data.to(memory_format=torch.channels_last), 1e-5)
        # Reference: reduce over all dims except the channel dim (dim 1).
        mean_ref = torch.mean(data, (0, 2, 3), keepdim=False)

        self.assertEqual(mean_ref, mean1)
        self.assertEqual(mean_ref, mean2)

    data = torch.randn(1, 96, 112, 112, dtype=torch.float, device='cuda')
    _batch_norm_stats(data)
def test_functional_grad_conv(self):
    """torch.nn.grad.convNd_input must match the autograd-computed input
    gradient for conv1d/2d/3d (with dilation), and the deprecated
    _grad_input_padding helper must emit exactly one warning."""
    # Conv 1D
    input = torch.randn(1, 1, 5, requires_grad=True)
    weight = torch.randn(1, 1, 3, requires_grad=True)
    output = F.conv1d(input, weight, dilation=2)
    grad_output = torch.randn(output.shape)

    grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
    grad_input_functional = torch.nn.grad.conv1d_input(input.shape, weight, grad_output, dilation=2)
    self.assertEqual(grad_input_functional, grad_input_autograd)

    # Conv 2D
    input = torch.randn(1, 1, 5, 5, requires_grad=True)
    weight = torch.randn(1, 1, 3, 3, requires_grad=True)
    output = F.conv2d(input, weight, dilation=2)
    grad_output = torch.randn(output.shape)

    grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
    grad_input_functional = torch.nn.grad.conv2d_input(input.shape, weight, grad_output, dilation=2)
    self.assertEqual(grad_input_functional, grad_input_autograd)

    # Conv 3D
    input = torch.randn(1, 1, 5, 5, 5, requires_grad=True)
    weight = torch.randn(1, 1, 3, 3, 3, requires_grad=True)
    output = F.conv3d(input, weight, dilation=2)
    grad_output = torch.randn(output.shape)

    grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
    grad_input_functional = torch.nn.grad.conv3d_input(input.shape, weight, grad_output, dilation=2)
    self.assertEqual(grad_input_functional, grad_input_autograd)

    # The private helper is expected to warn exactly once when called.
    with warnings.catch_warnings(record=True) as w:
        torch.nn.grad._grad_input_padding(torch.rand(1, 2, 3), [1, 2, 5], (1,), (0,), (3,))
    self.assertEqual(len(w), 1)
def test_flatten(self):
    """nn.Flatten collapses every dim after start_dim into a single dim."""
    inp = torch.randn(2, 1, 2, 3)
    module = nn.Flatten(start_dim=1, end_dim=-1)
    flattened = module(inp)
    self.assertEqual(flattened.size(), torch.Size([2, 6]))
def test_unflatten(self):
    """nn.Unflatten expands one dim, accepting a tuple or list of ints for
    an integer dim, and a ((name, size), ...) spec for a named dim."""
    inp = torch.randn(2, 50)

    # Integer dim: unflattened_size as tuple and as list behave identically.
    for shape in ((2, 5, 5), [2, 5, 5]):
        module = nn.Unflatten(dim=1, unflattened_size=shape)
        self.assertEqual(module(inp).size(), torch.Size([2, 2, 5, 5]))

    # Named dim: sizes paired with new dim names.
    module = nn.Unflatten(dim='features', unflattened_size=(('C', 2), ('H', 5), ('W', 5)))
    named_inp = inp.refine_names('N', 'features')
    self.assertEqual(module(named_inp).size(), torch.Size([2, 2, 5, 5]))
def test_unflatten_invalid_arg(self):
    """nn.Unflatten rejects malformed unflattened_size arguments with
    specific TypeError messages."""
    # Non-int element in an int-dim spec.
    with self.assertRaisesRegex(
            TypeError,
            r"unflattened_size must be tuple of ints, but found element of type float at pos 2"):
        nn.Unflatten(dim=1, unflattened_size=(2, 5, 5.0))

    # Named-dim spec must be a tuple, not a list (of either lists or tuples).
    for us in ([['C', 2], ['W', 5], ['H', 5]], [('C', 2), ('W', 5), ('H', 5)]):
        with self.assertRaisesRegex(
                TypeError,
                r"unflattened_size must be a tuple of tuples, but found type list"):
            nn.Unflatten(dim='features', unflattened_size=us)

    # Named-dim spec elements must themselves be tuples.
    with self.assertRaisesRegex(
            TypeError,
            r"unflattened_size must be tuple of tuples, but found element of type list at pos 0"):
        nn.Unflatten(dim='features', unflattened_size=(['C', 2], ['W', 5], ['H', 5]))

    with self.assertRaisesRegex(
            TypeError,
            r"unflattened_size must be tuple of tuples, but found element of type dict at pos 0"):
        nn.Unflatten(dim='features', unflattened_size=({'C': 2}, {'W': 5}, {'H': 5}))
def test_layer_norm_grads_with_create_graph_flag(self):
    """LayerNorm input gradients must be identical whether or not
    create_graph=True is passed to autograd.grad (on CPU and, when
    available, CUDA)."""
    atol = 1e-5
    rtol = 1e-3

    x = torch.randn((4, 4, 16), requires_grad=True)
    layer_norm = nn.LayerNorm((16,), 1e-5, True)
    with torch.no_grad():
        # Non-default weight so the affine path is actually exercised.
        layer_norm.weight = torch.nn.Parameter(0.1 * torch.ones_like(layer_norm.weight))

    grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
    grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]

    self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)

    if TEST_CUDA:
        x = x.to('cuda')
        layer_norm = layer_norm.to('cuda')

        grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
        grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]

        self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)
def test_padding_list(self):
    """ConvTranspose2d accepts padding given as a list as well as a tuple.

    Smoke test: construction and forward must not raise for either form.
    """
    inp = torch.randn(4, 8, 32, 32)
    for pad in ([3, 3], (3, 3)):
        layer = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=pad)
        layer(inp)
class TestNNInit(TestCase):
    """Tests for torch.nn.init.

    Statistical initializers (uniform/normal/xavier/kaiming/sparse) are
    validated with a Kolmogorov-Smirnov goodness-of-fit test against the
    expected distribution; deterministic initializers (eye, dirac,
    orthogonal, constant) are checked exactly.
    """

    def setUp(self):
        # Fixed seed so the randomly chosen tensor sizes / parameters are
        # reproducible across runs.
        super(TestNNInit, self).setUp()
        random.seed(123)

    def _is_normal(self, tensor, mean, std):
        """KS-test the flattened tensor against N(mean, std); p > 1e-4 passes."""
        samples = tensor.view(-1).tolist()
        p_value = stats.kstest(samples, 'norm', args=(mean, std))[1]
        return p_value > 0.0001

    def _is_trunc_normal(self, tensor, mean, std, a, b):
        """KS-test against a truncated normal on [a, b].

        scipy's truncnorm is parameterized in standard-normal coordinates,
        """
        # so we need to transform our data to test it using scipy.
        z_samples = (tensor.view(-1) - mean) / std
        z_samples = z_samples.tolist()
        a0 = (a - mean) / std
        b0 = (b - mean) / std
        p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
        return p_value > 0.0001

    def _is_uniform(self, tensor, a, b):
        """KS-test the flattened tensor against Uniform(a, b); p > 1e-4 passes."""
        samples = tensor.view(-1).tolist()
        p_value = stats.kstest(samples, 'uniform', args=(a, (b - a)))[1]
        return p_value > 0.0001

    def _create_random_nd_tensor(self, dims, size_min, size_max):
        """Zero tensor with `dims` dims, each of random size in [size_min, size_max]."""
        size = [random.randint(size_min, size_max) for _ in range(dims)]
        tensor = torch.zeros(size)
        return tensor

    def _random_float(self, a, b):
        """Uniform random float in [a, b)."""
        return (b - a) * random.random() + a

    def test_calculate_gain_linear(self):
        """All linear-ish nonlinearities have gain 1."""
        # NOTE(review): 'conv_transpose2d' appears twice and 'conv_transpose1d'
        # is absent — looks like a typo in the test inputs; verify upstream.
        for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose2d', 'conv_transpose2d', 'conv_transpose3d']:
            gain = init.calculate_gain(fn)
            self.assertEqual(gain, 1)

    def test_calculate_gain_nonlinear(self):
        """Spot-check the documented gains for the common nonlinearities."""
        for fn in ['sigmoid', 'tanh', 'relu', 'leaky_relu']:
            gain = init.calculate_gain(fn)
            if fn == 'sigmoid':
                self.assertEqual(gain, 1)
            elif fn == 'tanh':  # 5 / 3
                self.assertEqual(gain, 1.6666666666666667)
            elif fn == 'relu':  # sqrt(2)
                self.assertEqual(gain, 1.4142135623730951)
            elif fn == 'leaky_relu':  # sqrt(2 / 1 + slope^2))
                self.assertEqual(gain, 1.4141428569978354)
            elif fn == 'selu':
                # NOTE(review): unreachable — 'selu' is not in the loop list above.
                self.assertEqual(gain, 0.75)

    def test_calculate_gain_leaky_relu(self):
        """leaky_relu gain sqrt(2 / (1 + slope^2)) for several slopes."""
        for param in [None, 0, 0.01, 10]:
            gain = init.calculate_gain('leaky_relu', param)
            if param is None:  # Default slope is 0.01
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 0:  # No slope = same gain as normal ReLU
                self.assertEqual(gain, 1.4142135623730951)
            elif param == 0.01:
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 10:
                self.assertEqual(gain, 0.14071950894605836)

    def test_calculate_gain_leaky_relu_only_accepts_numbers(self):
        """Non-numeric slope parameters must raise ValueError."""
        for param in [True, [1], {'a': 'b'}]:
            with self.assertRaises(ValueError):
                init.calculate_gain('leaky_relu', param)

    def test_calculate_gain_only_accepts_valid_nonlinearities(self):
        """Unknown nonlinearity names must raise ValueError."""
        for n in [2, 5, 25]:
            # Generate random strings of lengths that definitely aren't supported
            random_string = ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
            with self.assertRaises(ValueError):
                init.calculate_gain(random_string)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_uniform(self):
        """init.uniform_ produces samples KS-consistent with Uniform(a, b)."""
        for dims in [1, 2, 4]:
            input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
            a = self._random_float(-3, 3)
            b = a + self._random_float(1, 5)
            init.uniform_(input_tensor, a=a, b=b)
            assert self._is_uniform(input_tensor, a, b)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_normal(self):
        """init.normal_ produces samples KS-consistent with N(mean, std)."""
        for dims in [1, 2, 4]:
            input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
            mean = self._random_float(-3, 3)
            std = self._random_float(1, 5)
            init.normal_(input_tensor, mean=mean, std=std)

            assert self._is_normal(input_tensor, mean, std)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_trunc_normal(self):
        """init.trunc_normal_ matches a normal truncated to [a, b]."""
        for dims in [1, 2, 4]:
            input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
            mean = self._random_float(-3, 3)
            std = self._random_float(.01, 1)
            a = self._random_float(mean - 2 * std, mean)
            b = self._random_float(mean, mean + 2 * std)
            init.trunc_normal_(input_tensor, mean=mean, std=std, a=a, b=b)

            assert self._is_trunc_normal(input_tensor, mean, std, a, b)

    def test_constant(self):
        """init.constant_ fills every element with the given value."""
        for dims in [1, 2, 4]:
            input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
            val = self._random_float(1, 10)
            init.constant_(input_tensor, val)

            self.assertEqual(input_tensor, input_tensor.clone().fill_(val))

    def test_ones_and_zeros(self):
        """init.ones_ / init.zeros_ fill with 1 / 0 respectively."""
        for init_fn_, val in zip([init.ones_, init.zeros_], [1, 0]):
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
                init_fn_(input_tensor)

                self.assertEqual(input_tensor, input_tensor.clone().fill_(val))

    def test_eye(self):
        """init.eye_ writes an identity pattern (works for non-square too)."""
        input_tensor = self._create_random_nd_tensor(2, size_min=1, size_max=5)
        init.eye_(input_tensor)

        for i in range(input_tensor.size(0)):
            for j in range(input_tensor.size(1)):
                if i == j:
                    assert input_tensor[i][j] == 1
                else:
                    assert input_tensor[i][j] == 0

    def test_eye_only_works_on_2d_inputs(self):
        """init.eye_ must reject 1d and 3d tensors."""
        for dims in [1, 3]:
            with self.assertRaises(ValueError):
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
                init.eye_(tensor)

    def test_max_unpool(self):
        """F.max_unpoolNd: default stride equals kernel size, output_size may
        be a torch.Size or a shape, and forward-mode AD gradchecks pass."""
        # 1D: omitted stride defaults to the kernel size.
        output, indices = F.max_pool1d(torch.randn([1, 1, 4]), 2, stride=2, return_indices=True)
        self.assertEqual(F.max_unpool1d(output, indices, 2), F.max_unpool1d(output, indices, 2, stride=2))

        # output_size given as .shape and as .size() must behave identically.
        input = torch.randn([1, 1, 5], requires_grad=True)
        output, indices = F.max_pool1d(input, 2, stride=2, return_indices=True)
        self.assertEqual(F.max_unpool1d(output, indices, 2, stride=2, output_size=input.shape),
                         F.max_unpool1d(output, indices, 2, stride=2, output_size=input.size()))
        gradcheck(F.max_unpool1d, (output, indices, 2), check_forward_ad=True)

        # 2D
        output, indices = F.max_pool2d(torch.randn([1, 1, 4, 4], requires_grad=True), 2, stride=2, return_indices=True)
        self.assertEqual(F.max_unpool2d(output, indices, 2), F.max_unpool2d(output, indices, 2, stride=2))
        gradcheck(F.max_unpool2d, (output, indices, 2), check_forward_ad=True)

        # 3D
        output, indices = F.max_pool3d(torch.randn([4, 4, 4, 4, 4], requires_grad=True), 2, stride=2, return_indices=True)
        self.assertEqual(F.max_unpool3d(output, indices, 2), F.max_unpool3d(output, indices, 2, stride=2))
        gradcheck(F.max_unpool3d, (output, indices, 2), check_forward_ad=True)

    def test_dirac_properties(self):
        """init.dirac_ leaves exactly min(c_out/groups, c_in) * groups ones
        (per the non-zero count and sum checks below)."""
        for dims in [3, 4, 5]:
            for groups in [1, 2, 3]:
                a, c, d, e = (random.randint(1, 5) for _ in range(4))
                b = random.randint(1, 5 * groups)
                input_tensor = torch.randn((a * groups, b, c, d, e)[:dims])

                init.dirac_(input_tensor, groups)

                c_out, c_in = input_tensor.size(0) // groups, input_tensor.size(1)
                min_d = min(c_out, c_in)
                assert torch.nonzero(input_tensor).size(0) == min_d * groups
                self.assertEqual(input_tensor.sum(), min_d * groups)

    def test_dirac_identity(self):
        """A dirac_-initialized conv kernel acts as (grouped) identity:
        each group passes its input channels through unchanged and zeros
        any extra output channels."""
        for groups in [1, 3]:
            batch, in_c, out_c, size, kernel_size = 8, 3, 9, 5, 3
            eff_out_c = out_c // groups

            # Test 1D
            input_var = torch.randn(batch, in_c, size)
            filter_var = torch.zeros(eff_out_c, in_c, kernel_size)
            filter_var = torch.cat([filter_var] * groups)
            init.dirac_(filter_var, groups)
            output_var = F.conv1d(input_var, filter_var)
            input_tensor, output_tensor = input_var.data, output_var.data
            for g in range(groups):
                # Valid conv trims one element from each end, hence 1:-1.
                self.assertEqual(input_tensor[:, :, 1:-1],
                                 output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :])
                assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :]).numel() == 0

            # Test 2D
            input_var = torch.randn(batch, in_c, size, size)
            filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size)
            filter_var = torch.cat([filter_var] * groups)
            init.dirac_(filter_var, groups)
            output_var = F.conv2d(input_var, filter_var)
            input_tensor, output_tensor = input_var.data, output_var.data

            for g in range(groups):
                self.assertEqual(input_tensor[:, :, 1:-1, 1:-1],
                                 output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :])
                assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :]).numel() == 0

            # Test 3D
            input_var = torch.randn(batch, in_c, size, size, size)
            filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size, kernel_size)
            filter_var = torch.cat([filter_var] * groups)
            init.dirac_(filter_var, groups)
            output_var = F.conv3d(input_var, filter_var)
            input_tensor, output_tensor = input_var.data, output_var.data
            for g in range(groups):
                self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1],
                                 output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :, :])
                assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :, :]).numel() == 0

    def test_dirac_only_works_on_3_4_5d_inputs(self):
        """init.dirac_ must reject tensors that are not 3d, 4d, or 5d."""
        for dims in [1, 2, 6]:
            with self.assertRaises(ValueError):
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
                init.dirac_(tensor)

    def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
        for dims in [0, 1]:
            tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
            with self.assertRaises(ValueError):
                init.xavier_uniform_(tensor)

    def test_xavier_normal_errors_on_inputs_smaller_than_2d(self):
        for dims in [0, 1]:
            tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
            with self.assertRaises(ValueError):
                init.xavier_normal_(tensor)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_xavier_uniform(self):
        """xavier_uniform_ samples are KS-uniform on the Glorot bound
        ±gain * sqrt(6 / (fan_in + fan_out))."""
        for use_gain in [True, False]:
            for dims in [2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
                gain = 1

                if use_gain:
                    gain = self._random_float(0.1, 2)
                    init.xavier_uniform_(input_tensor, gain=gain)
                else:
                    init.xavier_uniform_(input_tensor)

                fan_in = input_tensor.size(1)
                fan_out = input_tensor.size(0)
                if input_tensor.dim() > 2:
                    # Receptive-field size multiplies both fans for conv weights.
                    fan_in *= input_tensor[0, 0].numel()
                    fan_out *= input_tensor[0, 0].numel()

                expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
                bounds = expected_std * math.sqrt(3)
                assert self._is_uniform(input_tensor, -bounds, bounds)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_xavier_normal(self):
        """xavier_normal_ samples are KS-normal with the Glorot std."""
        for use_gain in [True, False]:
            for dims in [2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
                gain = 1

                if use_gain:
                    gain = self._random_float(0.1, 2)
                    init.xavier_normal_(input_tensor, gain=gain)
                else:
                    init.xavier_normal_(input_tensor)

                fan_in = input_tensor.size(1)
                fan_out = input_tensor.size(0)
                if input_tensor.dim() > 2:
                    fan_in *= input_tensor[0, 0].numel()
                    fan_out *= input_tensor[0, 0].numel()

                expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
                assert self._is_normal(input_tensor, 0, expected_std)

    def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
        for dims in [0, 1]:
            with self.assertRaises(ValueError):
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
                init.kaiming_uniform_(tensor)

    def test_kaiming_normal_errors_on_inputs_smaller_than_2d(self):
        for dims in [0, 1]:
            with self.assertRaises(ValueError):
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
                init.kaiming_normal_(tensor)

    def test_kaiming_uniform_warning_on_0element_tensor(self):
        tensor = torch.empty(0, 1)
        with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
            _ = init.kaiming_uniform_(tensor)

    def test_kaiming_normal_warning_on_0element_tensor(self):
        tensor = torch.empty(0, 1)
        with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
            _ = init.kaiming_normal_(tensor)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_kaiming_uniform(self):
        """kaiming_uniform_ samples are KS-uniform on the He bound for both
        fan_in and fan_out modes, with and without a leaky-relu slope."""
        for use_a in [True, False]:
            for dims in [2, 4]:
                for mode in ['fan_in', 'fan_out']:
                    input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
                    if use_a:
                        a = self._random_float(0.1, 2)
                        init.kaiming_uniform_(input_tensor, a=a, mode=mode)
                    else:
                        a = 0
                        init.kaiming_uniform_(input_tensor, mode=mode)

                    fan_in = input_tensor.size(1)
                    fan_out = input_tensor.size(0)
                    if input_tensor.dim() > 2:
                        fan_in *= input_tensor[0, 0].numel()
                        fan_out *= input_tensor[0, 0].numel()

                    if mode == 'fan_in':
                        n = fan_in
                    else:
                        n = fan_out

                    expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
                    bounds = expected_std * math.sqrt(3.0)
                    assert self._is_uniform(input_tensor, -bounds, bounds)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_kaiming_normal(self):
        """kaiming_normal_ samples are KS-normal with the He std."""
        for use_a in [True, False]:
            for dims in [2, 4]:
                for mode in ['fan_in', 'fan_out']:
                    input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
                    if use_a:
                        a = self._random_float(0.1, 2)
                        init.kaiming_normal_(input_tensor, a=a, mode=mode)
                    else:
                        a = 0
                        init.kaiming_normal_(input_tensor, mode=mode)

                    fan_in = input_tensor.size(1)
                    fan_out = input_tensor.size(0)
                    if input_tensor.dim() > 2:
                        fan_in *= input_tensor[0, 0].numel()
                        fan_out *= input_tensor[0, 0].numel()

                    if mode == 'fan_in':
                        n = fan_in
                    else:
                        n = fan_out

                    expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
                    assert self._is_normal(input_tensor, 0, expected_std)

    def test_sparse_only_works_on_2d_inputs(self):
        for dims in [1, 3]:
            with self.assertRaises(ValueError):
                sparsity = self._random_float(0.1, 0.9)
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
                init.sparse_(tensor, sparsity)

    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_sparse_default_std(self):
        """init.sparse_ zeroes at least `sparsity` of each column and draws
        the remaining entries from N(0, std) (default std 0.01)."""
        for use_random_std in [True, False]:
            input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35)
            rows, cols = input_tensor.size(0), input_tensor.size(1)
            sparsity = self._random_float(0.1, 0.2)

            std = 0.01  # default std
            if use_random_std:
                std = self._random_float(0.01, 0.2)
                init.sparse_(input_tensor, sparsity=sparsity, std=std)
            else:
                init.sparse_(input_tensor, sparsity=sparsity)

            for col_idx in range(input_tensor.size(1)):
                column = input_tensor[:, col_idx]
                assert column[column == 0].nelement() >= math.ceil(sparsity * rows)

            assert self._is_normal(input_tensor[input_tensor != 0], 0, std)

    @skipIfNoLapack
    def test_orthogonal(self):
        """init.orthogonal_ yields (semi-)orthogonal rows/cols scaled by gain:
        Q^T Q = gain^2 * I (tall) or Q Q^T = gain^2 * I (wide)."""
        for use_gain in [True, False]:
            for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
                input_tensor = torch.zeros(tensor_size)
                gain = 1.0

                if use_gain:
                    gain = self._random_float(0.1, 2)
                    init.orthogonal_(input_tensor, gain=gain)
                else:
                    init.orthogonal_(input_tensor)

                rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
                flattened_tensor = input_tensor.view(rows, cols)
                if rows > cols:
                    self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
                                     torch.eye(cols) * gain ** 2, atol=1e-6, rtol=0)
                else:
                    self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
                                     torch.eye(rows) * gain ** 2, atol=1e-6, rtol=0)

    def test_deprecation(self):
        """The un-suffixed init.normal (vs normal_) must emit a deprecation warning."""
        x = torch.randn(3, 3)

        def fn():
            init.normal(x)
        with self.assertWarnsRegex(UserWarning, 'deprecated', msg='methods not suffixed with underscore should be deprecated'):
            fn()
class TestFusionEval(TestCase):
    """Numerical checks for conv+batchnorm fusion in eval mode."""

    # Inputs are drawn by hypothesis: a (5, 3, 5, 5) activation plus
    # per-channel running stats for the 6 output channels.
    @given(X=hu.tensor(shapes=((5, 3, 5, 5),)),
           running_mean=hu.tensor(shapes=(6,)),
           running_var=hu.tensor(shapes=(6,)))
    def test_fuse_module_eval_numerics(self, X, running_mean, running_var):
        """fuse_conv_bn_eval(conv, bn) must reproduce bn(conv(x)) exactly,
        for both affine and non-affine BatchNorm."""
        inputs, _ = X
        iC, oC = inputs.shape[1], len(running_mean[0])
        inputs = torch.from_numpy(inputs).to(torch.double)
        kernel_size = (3, 3)

        conv_ref = torch.nn.Conv2d(iC, oC, bias=True, kernel_size=kernel_size)
        bn_ref = torch.nn.BatchNorm2d(oC)
        bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
        conv_ref.eval()
        bn_ref.eval()

        Y_ref = bn_ref(conv_ref(inputs))
        conv_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                bn_ref)
        Y_hat = conv_bn_fused(inputs)

        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN fusion results are off")

        # Same check with a non-affine BatchNorm (no learned weight/bias).
        na_bn_ref = torch.nn.BatchNorm2d(oC, affine=False)
        na_bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        na_bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
        na_bn_ref.eval()

        Y_ref = na_bn_ref(conv_ref(inputs))
        conv_na_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                   na_bn_ref)
        Y_hat = conv_na_bn_fused(inputs)

        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN(non-affine) fusion results are off")
class TestConstantPadNd(TestCase):
def test_constant_pad_nd(self):
a = torch.tensor([[1, 2], [3, 4]])
res = torch.constant_pad_nd(a, [1, 2, 1, 0], 9)
expected = torch.tensor([
[9, 9, 9, 9, 9],
[9, 1, 2, 9, 9],
[9, 3, 4, 9, 9]
])
self.assertEqual(res, expected)
def test_preserves_memory_format(self):
nchw_tensor = torch.rand((1, 2, 5, 3))
nchw_padded = torch.constant_pad_nd(nchw_tensor, [1, 2], 0.5)
self.assertTrue(nchw_padded.is_contiguous(memory_format=torch.contiguous_format))
nhwc_tensor = nchw_tensor.contiguous(memory_format=torch.channels_last)
nhwc_padded = torch.constant_pad_nd(nhwc_tensor, [1, 2], 0.5)
self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))
class TestAddRelu(TestCase):
def test_add_relu(self):
a = torch.rand((7, 11))
b = torch.rand((7, 11))
a = a.float()
b = b.float()
a = a * -10
a = a + 5
add_res = a + b
relu_res = torch.relu(add_res)
add_relu_res = torch._VF._add_relu(a, b)
self.assertEqual(add_relu_res, relu_res)
def test_add_relu_broadcasting(self):
a = torch.rand((1, 32))
b = 1
b_scalar = torch.ones(1, 32)
res = torch._VF._add_relu(a, b)
broadcasted_res = torch._VF._add_relu(a, b_scalar)
self.assertEqual(broadcasted_res, res)
def add_test(test, decorator=None):
    """Register a generated module/criterion test onto the TestNN class.

    Adds a CPU variant plus CUDA variants per dtype; on Ampere-class GPUs
    (where TF32 differs from fp32) float tests are split into explicit
    _fp32/_tf32 variants. Raises RuntimeError on duplicate test names.
    """
    def add(test_name, fn):
        # Guard against two generated tests colliding on the same attribute.
        if hasattr(TestNN, test_name):
            raise RuntimeError('Found two tests with the same name: ' + test_name)
        if decorator is not None:
            fn = decorator(fn)
        setattr(TestNN, test_name, fn)

    test_name = test.get_name()
    if not hasattr(test, 'test_cpu') or test.test_cpu:
        add(test_name, lambda self, test=test: test(self))
    cuda_test_name = test_name + '_cuda'
    # With dtype enable tolerance comparison via extra kwargs when supported.
    kwargs = {}
    if 'extra_args' in get_function_arglist(test.test_cuda):
        kwargs['extra_args'] = test.extra_args

    if 'dtype' in get_function_arglist(test.test_cuda):
        if tf32_is_not_fp32() and test.with_tf32:

            def with_tf32_off(self, test=test, kwargs=kwargs):
                with tf32_off():
                    test.test_cuda(self, dtype=torch.float, **kwargs)

            add(cuda_test_name + '_fp32', with_tf32_off)

            def with_tf32_on(self, test=test, kwargs=kwargs):
                with tf32_on(self, test.tf32_precision):
                    test.test_cuda(self, dtype=torch.float, **kwargs)

            add(cuda_test_name + '_tf32', with_tf32_on)
        else:
            add(cuda_test_name + '_float', lambda self,
                test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs))
        add(cuda_test_name + '_double', lambda self,
            test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs))

        def test_half(self, test=test, kwargs=kwargs):
            test.test_cuda(self, dtype=torch.half, **kwargs)
        if getattr(test, 'check_half', True):
            add(cuda_test_name + '_half', test_half)

        def test_bfloat16(self, test=test, kwargs=kwargs):
            test.test_cuda(self, dtype=torch.bfloat16, **kwargs)
        if getattr(test, 'check_bfloat16', True):
            add(cuda_test_name + '_bfloat16', test_bfloat16)

        def test_cfloat(self, test=test, kwargs=kwargs):
            test.test_cuda(self, dtype=torch.cfloat, **kwargs)

        def test_cdouble(self, test=test, kwargs=kwargs):
            test.test_cuda(self, dtype=torch.cdouble, **kwargs)
        if getattr(test, 'check_complex', False):
            add(cuda_test_name + '_cfloat', test_cfloat)
            add(cuda_test_name + '_cdouble', test_cdouble)

    else:
        # No dtype parameter: a single CUDA test, still split for TF32 GPUs.
        def with_tf32_off(self, test=test, kwargs=kwargs):
            with tf32_off():
                test.test_cuda(self, **kwargs)

        if tf32_is_not_fp32() and test.with_tf32:
            add(cuda_test_name + '_fp32', with_tf32_off)

            def with_tf32_on(self, test=test, kwargs=kwargs):
                with tf32_on(self, test.tf32_precision):
                    test.test_cuda(self, **kwargs)

            add(cuda_test_name + '_tf32', with_tf32_on)
        else:
            add(cuda_test_name, with_tf32_off)
# Instantiate and register every declarative module test. For entries that
# opt in, additional variants are generated: an eval-mode variant
# ('check_eval') and a LongTensor-input variant ('check_with_long_tensor').
for test_params in module_tests + new_module_tests:
    # TODO: CUDA is not implemented yet
    if 'constructor' not in test_params:
        name = test_params.pop('module_name')
        test_params['constructor'] = getattr(nn, name)
    decorator = test_params.pop('decorator', None)
    test = NewModuleTest(**test_params)
    add_test(test, decorator)
    if 'check_eval' in test_params:
        # create a new test that is identical but that sets module.training to False
        desc = test_params.get('desc', None)
        test_params['desc'] = 'eval' if desc is None else desc + '_eval'

        def gen_eval_constructor(constructor):
            # Wrap the constructor so the fresh module starts in eval mode.
            def eval_constructor(*args, **kwargs):
                cons = constructor(*args, **kwargs)
                cons.training = False
                return cons
            eval_constructor.__name__ = constructor.__name__
            return eval_constructor

        test_params['constructor'] = gen_eval_constructor(test_params['constructor'])
        test = NewModuleTest(**test_params)
        add_test(test, decorator)
    if 'check_with_long_tensor' in test_params:
        fullname = test_params.get('fullname', None)
        if fullname:
            test_params['fullname'] = fullname + '_with_long_tensor'
        else:
            desc = test_params.get('desc', None)
            test_params['desc'] = 'with_long_tensor' if desc is None else desc + '_with_long_tensor'

        def double_equivalent_of_long_tensor(size):
            # Integer-valued doubles stand in for longs so autograd still works.
            return torch.randint(-1000, 1000, size=size).double()

        def apply_to_cons(t):
            # Replace floating-point params/tensors with integer-valued doubles.
            if t.is_floating_point():
                if isinstance(t, Parameter):
                    return Parameter(double_equivalent_of_long_tensor(t.size()))
                elif isinstance(t, torch.Tensor):
                    return double_equivalent_of_long_tensor(t.size())
            else:
                return t

        def gen_long_tensor_constructor(constructor):
            def long_tensor_constructor(*args, **kwargs):
                cons = constructor(*args, **kwargs)
                cons._apply(apply_to_cons)
                return cons
            long_tensor_constructor.__name__ = constructor.__name__
            return long_tensor_constructor

        def gen_long_tensor_input(input_size):
            def input_func():
                return double_equivalent_of_long_tensor(input_size)
            return input_func

        def reference_fn(i, p, m):
            # For bad reasons this would create LongTensors that requires gradients
            # Remove requires_grad to avoid this
            for p in m.parameters():
                p.requires_grad_(False)
            m._apply(lambda t: t.long())
            input = i.long()
            out = m.forward(input)
            return out

        test_params['constructor'] = gen_long_tensor_constructor(test_params['constructor'])
        test_params['input_fn'] = gen_long_tensor_input(test_params['input_size'])
        test_params['reference_fn'] = reference_fn
        test_params['check_forward_only'] = True
        # Currently we don't support conv2d/conv3d for LongTensor in CUDA
        test_params['test_cuda'] = False
        test = NewModuleTest(**test_params)

        add_test(test, decorator)
# Instantiate and register every declarative criterion (loss) test; entries
# with 'check_sum_reduction' also get a reduction='sum' variant.
for test_params in criterion_tests:
    if 'constructor' not in test_params:
        name = test_params.pop('module_name')
        test_params['constructor'] = getattr(nn, name)
    test = CriterionTest(**test_params)
    decorator = test_params.pop('decorator', None)
    add_test(test, decorator)
    if 'check_sum_reduction' in test_params:
        desc = test_params.get('desc', None)
        test_params['desc'] = 'sum_reduction' if desc is None else desc + '_sum_reduction'

        def gen_sum_reduction_constructor(constructor):
            # Wrap the constructor to force reduction='sum'.
            def sum_reduction_constructor(*args, **kwargs):
                cons = constructor(*args, reduction='sum', **kwargs)
                return cons
            sum_reduction_constructor.__name__ = constructor.__name__
            return sum_reduction_constructor

        test_params['constructor'] = gen_sum_reduction_constructor(test_params['constructor'])
        test = CriterionTest(**test_params)
        add_test(test, decorator)
class UnpoolingNet(nn.Module):
    """Runs a pooling module, then feeds its full result tuple (output,
    indices for return_indices=True pools) into the matching unpool module."""

    def __init__(self, pool, unpool):
        super(UnpoolingNet, self).__init__()
        self.pool = pool
        self.unpool = unpool

    def forward(self, input):
        pooled = self.pool(input)
        return self.unpool(*pooled)
# Register pool->unpool round-trip module tests for 1d/2d/3d, each with a
# batched variant and a no-batch-dim variant (checked against the
# single-batch reference).
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool1d(2, return_indices=True),
        nn.MaxUnpool1d(2)),
    input_size=(1, 1, 4),
    fullname='MaxUnpool1d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool2d(2, return_indices=True),
        nn.MaxUnpool2d(2)),
    input_size=(1, 1, 2, 4),
    fullname='MaxUnpool2d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool3d(2, return_indices=True),
        nn.MaxUnpool3d(2)),
    input_size=(1, 1, 2, 4, 6),
    fullname='MaxUnpool3d_net',
    check_gradgrad=False,))

add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool1d(2, return_indices=True),
        nn.MaxUnpool1d(2)),
    input_size=(1, 4),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool1d_net_no_batch_dim',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool2d(2, return_indices=True),
        nn.MaxUnpool2d(2)),
    input_size=(1, 2, 4),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool2d_net_no_batch_dim',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool3d(2, return_indices=True),
        nn.MaxUnpool3d(2)),
    input_size=(1, 2, 4, 6),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool3d_net_no_batch_dim',
    check_gradgrad=False))
class _AdaptiveLogSoftmaxWithLoss(nn.AdaptiveLogSoftmaxWithLoss):
def __call__(self, input):
t = torch.tensor([0, 1, 4, 8]).to(input.device)
return nn.AdaptiveLogSoftmaxWithLoss.__call__(self, input, t).output
# Register the adaptive log-softmax module test (looser precision under TF32).
add_test(NewModuleTest(
    constructor=lambda: _AdaptiveLogSoftmaxWithLoss(16, 10, [2, 6]),
    input_size=(4, 16),
    fullname='AdaptiveLogSoftmax',
    with_tf32=True,
    tf32_precision=0.005))
# Device list for parameterized grid/affine tests below; the CUDA check is
# evaluated once at import time.
if torch.cuda.is_available():
    def device_():
        return ['cpu', 'cuda']
else:
    def device_():
        return ['cpu']
def angle_rad_():
    """Rotation angles in radians: 0, pi, pi/2, pi/4, plus one random angle."""
    fractions = [0.0, 0.5, 0.25, 0.125, random.random()]
    return [f * math.pi * 2 for f in fractions]
def axis_vector_():
    """Rotation axes: the three unit axes plus one random unit vector."""
    rnd = (random.random(), random.random(), random.random())
    norm = sum(x ** 2 for x in rnd) ** 0.5
    axes = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    axes.append(tuple(x / norm for x in rnd))
    return axes
def input_size2d_():
    """NCHW input shapes (square and non-square) for 2d resampling tests."""
    shapes = [[1, 1, 3, 5], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 3, 4]]
    return shapes
def output_size2d_():
    """NCHW output shapes (square and non-square) for 2d resampling tests."""
    shapes = [[1, 1, 5, 3], [1, 1, 3, 5], [1, 1, 4, 3], [1, 1, 5, 5], [1, 1, 6, 6]]
    return shapes
def input_size2dsq_():
    """Square-only NCHW input shapes for 2d resampling tests."""
    shapes = [[1, 1, 2, 2], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 6, 6]]
    return shapes
def output_size2dsq_():
    """Square-only NCHW output shapes for 2d resampling tests."""
    shapes = [[1, 1, 2, 2], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 5, 5], [1, 1, 6, 6]]
    return shapes
def input_size3d_():
    """NCDHW input shapes (cubic and non-cubic) for 3d resampling tests."""
    shapes = [[1, 1, 2, 2, 2], [1, 1, 2, 3, 4], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 3, 4, 5]]
    return shapes
def input_size3dsq_():
    """Cubic-only NCDHW input shapes for 3d resampling tests."""
    shapes = [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 6, 6, 6]]
    return shapes
def output_size3dsq_():
    """Cubic-only NCDHW output shapes for 3d resampling tests."""
    shapes = [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 5, 5, 5], [1, 1, 6, 6, 6]]
    return shapes
def output_size3d_():
    """NCDHW output shapes (cubic and non-cubic) for 3d resampling tests."""
    shapes = [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 3, 4, 5], [1, 1, 4, 3, 2], [1, 1, 5, 5, 5], [1, 1, 6, 6, 6]]
    return shapes
def _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad):
input_center = [(x - 1) / 2.0 for x in input_size]
output_center = [(x - 1) / 2.0 for x in output_size]
s = math.sin(angle_rad)
c = math.cos(angle_rad)
intrans_ary = np.array([
[1, 0, input_center[2]],
[0, 1, input_center[3]],
[0, 0, 1],
], dtype=np.float64)
inscale_ary = np.array([
[input_center[2], 0, 0],
[0, input_center[3], 0],
[0, 0, 1],
], dtype=np.float64)
rotation_ary = np.array([
[c, -s, 0],
[s, c, 0],
[0, 0, 1],
], dtype=np.float64)
outscale_ary = np.array([
[1.0 / output_center[2], 0, 0],
[0, 1.0 / output_center[3], 0],
[0, 0, 1],
], dtype=np.float64)
outtrans_ary = np.array([
[1, 0, -output_center[2]],
[0, 1, -output_center[3]],
[0, 0, 1],
], dtype=np.float64)
reorder_ary = np.array([
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
], dtype=np.float64)
transform_ary = np.dot(np.dot(np.dot(np.dot(
intrans_ary,
inscale_ary),
rotation_ary.T),
outscale_ary),
outtrans_ary)
grid_ary = np.dot(np.dot(np.dot(reorder_ary, rotation_ary.T), outscale_ary), outtrans_ary)
transform_tensor = torch.from_numpy((rotation_ary)).to(device, torch.float32)
transform_tensor = transform_tensor[:2].unsqueeze(0)
return transform_tensor, transform_ary, grid_ary
def _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector):
input_center = [(x - 1) / 2.0 for x in input_size]
output_center = [(x - 1) / 2.0 for x in output_size]
s = math.sin(angle_rad)
c = math.cos(angle_rad)
c1 = 1 - c
intrans_ary = np.array([
[1, 0, 0, input_center[2]],
[0, 1, 0, input_center[3]],
[0, 0, 1, input_center[4]],
[0, 0, 0, 1],
], dtype=np.float64)
inscale_ary = np.array([
[input_center[2], 0, 0, 0],
[0, input_center[3], 0, 0],
[0, 0, input_center[4], 0],
[0, 0, 0, 1],
], dtype=np.float64)
l, m, n = axis_vector
scipyRotation_ary = np.array([
[l * l * c1 + c, m * l * c1 - n * s, n * l * c1 + m * s, 0],
[l * m * c1 + n * s, m * m * c1 + c, n * m * c1 - l * s, 0],
[l * n * c1 - m * s, m * n * c1 + l * s, n * n * c1 + c, 0],
[0, 0, 0, 1],
], dtype=np.float64)
z, y, x = axis_vector
torchRotation_ary = np.array([
[x * x * c1 + c, y * x * c1 - z * s, z * x * c1 + y * s, 0],
[x * y * c1 + z * s, y * y * c1 + c, z * y * c1 - x * s, 0],
[x * z * c1 - y * s, y * z * c1 + x * s, z * z * c1 + c, 0],
[0, 0, 0, 1],
], dtype=np.float64)
outscale_ary = np.array([
[1.0 / output_center[2], 0, 0, 0],
[0, 1.0 / output_center[3], 0, 0],
[0, 0, 1.0 / output_center[4], 0],
[0, 0, 0, 1],
], dtype=np.float64)
outtrans_ary = np.array([
[1, 0, 0, -output_center[2]],
[0, 1, 0, -output_center[3]],
[0, 0, 1, -output_center[4]],
[0, 0, 0, 1],
], dtype=np.float64)
reorder_ary = np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
], dtype=np.float64)
transform_ary = np.dot(np.dot(np.dot(np.dot(
intrans_ary,
inscale_ary),
np.linalg.inv(scipyRotation_ary)),
outscale_ary),
outtrans_ary)
grid_ary = np.dot(np.dot(np.dot(reorder_ary, np.linalg.inv(scipyRotation_ary)), outscale_ary), outtrans_ary)
transform_tensor = torch.from_numpy((torchRotation_ary)).to(device, torch.float32)
transform_tensor = transform_tensor[:3].unsqueeze(0)
return transform_tensor, transform_ary, grid_ary
class TestNNDeviceType(NNTestCase):
def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out, batch_size,
inp_size, dilation, no_weight, groups=1, use_cuda=False,
use_bias=True, dtype=torch.double):
if use_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
x = torch.randn(batch_size, chan_in, inp_size, inp_size, device=device,
dtype=dtype, requires_grad=True)
weight = torch.randn(chan_out, chan_in // groups, kern, kern, device=device,
dtype=dtype, requires_grad=not no_weight)
if use_bias:
bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
else:
bias = None
def func(*inputs):
if use_bias:
lx, lweight, lbias = inputs
else:
lx, lweight = inputs
lbias = None
with cudnn.flags(enabled=False):
out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)
return out
if use_bias:
inputs = x, weight, bias
else:
inputs = x, weight
dummy_out = func(*inputs)
grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)
:
g, = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
return g.requires_grad
return gradgradcheck(func, inputs, (grad_y,))
    def _test_dropout(self, cls, device, input, memory_format=torch.contiguous_format):
        """Check a dropout module: with input filled with 1 - p, the output
        and gradient means stay near 1 - p, the requested memory format is
        preserved through forward/backward (also for the inplace variant),
        and eval() is the identity."""
        p = 0.2
        input = input.to(device).fill_(1 - p)
        module = cls(p)
        input_var = input.clone(memory_format=memory_format).requires_grad_()
        output = module(input_var)
        self.assertTrue(output.is_contiguous(memory_format=memory_format))
        self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
        output.backward(input)
        self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
        self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)
        # inplace variant: "+ 0" creates a fresh tensor so the original
        # input is not modified in place
        module = cls(p, True)
        input_var = input.clone(memory_format=memory_format).requires_grad_()
        output = module(input_var + 0)
        self.assertTrue(output.is_contiguous(memory_format=memory_format))
        self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
        output.backward(input)
        self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
        self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)
        # check eval mode doesn't change anything
        for inplace in [True, False]:
            module = cls(p, inplace).eval()
            self.assertEqual(input, module(input))
        # the module must be printable without raising
        module.__repr__()
        str(module)
    def _test_dropout_discontiguous(self, cls, device, memory_format=torch.contiguous_format):
        # Verify that dropout preserves layout and data across memory formats:
        # with p == 0 (or very close to 0) the output must equal the input.
        # Reference: https://github.com/pytorch/pytorch/issues/47176
        close_to_zero_p = 1e-10 # Should be almost zero but not zero, as for p=0 different path is taken
        for p in [0, close_to_zero_p]:
            inp = torch.ones(2, 3, 3, 3, device=device)
            inp_discontiguous = torch.empty(2, 3, 3, 6, device=device, memory_format=memory_format)[..., ::2]
            inp_discontiguous.copy_(inp)
            mod = cls(p=p)
            out = mod(inp_discontiguous)
            if p != 0: # Zero will keep strides as is based on input.
                # When prob == 0, input stride (54, 18, 6, 2) -> output stride (54, 18, 6, 2)
                # When prob != 0, input stride (54, 18, 6, 2) -> output stride (27, 9, 3, 1)
                self.assertTrue(out.is_contiguous(memory_format=memory_format))
            self.assertEqual(inp_discontiguous, out)
    def _test_dropout_stride_mean_preserve(self, cls, device):
        """For every permutation/offset layout of the input: the dropout
        output restored to the permuted order stays contiguous, the mean is
        roughly preserved, and a near-zero p leaves the data untouched."""
        def invert_perm(p):
            # Inverse of a 4-element permutation tuple.
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2], d[3])
        inp = torch.ones(2, 3, 4, 5, device=device)
        shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
        for perm in itertools.permutations((0, 1, 2, 3), r=4):
            for shift in shifts:
                for p in [1e-10, 0.3, 0.5, 0.7]:
                    mod = cls(p=p)
                    permuted_inp = inp.permute(perm).contiguous().permute(invert_perm(perm))
                    permuted_inp = permuted_inp[shift[0]:, shift[1]:, :, :]
                    out = mod(permuted_inp)
                    self.assertTrue(out.permute(perm).is_contiguous())
                    self.assertEqual(inp.mean(), out.mean(), rtol=0.5, atol=0.5)
                    if p == 1e-10:
                        self.assertEqual(permuted_inp, out)
                    else:
                        self.assertNotEqual(permuted_inp, out)
    def _test_InstanceNorm_general(self, cls, input, device, dtype=torch.float):
        """InstanceNorm sanity checks: per-instance output statistics are
        normalized, train/eval behave identically without running stats, and
        with momentum=1 the running stats equal the batch statistics, which
        eval mode then applies per channel."""
        # default case track_running_stats=False
        b, c = input.size(0), input.size(1)
        input_var = input.to(device=device, dtype=dtype).requires_grad_()
        IN = cls(c, eps=0).to(device, dtype)
        output = IN(input_var)
        out_reshaped = output.view(b * c, -1)
        mean = out_reshaped.mean(1)
        var = out_reshaped.var(1, unbiased=False)
        self.assertEqual(torch.abs(mean.data).mean(), 0, atol=1e-5, rtol=0)
        self.assertEqual(torch.abs(var.data).mean(), 1, atol=1e-5, rtol=0)
        # check that eval mode doesn't change behavior
        grad_out = torch.randn_like(output)
        res1 = output.data.clone()
        output.backward(grad_out)
        grad1 = input_var.grad.data.clone()
        IN.eval()
        output = IN(input_var)
        input_var.grad = None
        output.backward(grad_out)
        res2 = output.data
        grad2 = input_var.grad.data
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
        # with momentum=1 the running stats equal the last batch's statistics
        IN = cls(c, momentum=1, eps=0, track_running_stats=True).to(device, dtype)
        output = IN(input_var)
        input_reshaped = input_var.transpose(1, 0).reshape(c, -1)
        mean = input_reshaped.mean(1)
        input_reshaped = input_var.transpose(1, 0).reshape(c, b, -1)
        var = input_reshaped.var(2, unbiased=True)[:, :]
        self.assertEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, atol=1e-5, rtol=0)
        self.assertEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, atol=1e-5, rtol=0)
        # in eval mode, per-channel shifts scaled by the running std must
        # come out of the normalization as exactly arange(c)
        IN.eval()
        delta = IN.running_var.sqrt() * torch.arange(c, device=device, dtype=dtype)
        delta = delta.view(-1, *[1 for _ in range(2, input.dim())])
        output = IN(input_var + delta)
        self.assertEqual(output.transpose(0, 1).reshape(c, -1).mean(1), torch.arange(c, dtype=dtype))
    def _test_InstanceNorm_cuda_half(self, cls, input, device):
        """Half-precision InstanceNorm keeps the input's type string through
        forward and backward; when cuDNN is available, its result must agree
        with the THNN path within loose half-precision tolerances."""
        input = input.to(device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
        m = cls(input.size(1), affine=True, track_running_stats=True).to(device, torch.half)
        thnn_output = m(input)
        thnn_output.sum().backward()
        thnn_input_grad = input.grad.data.clone()
        self.assertEqualTypeString(thnn_output, input)
        if TEST_CUDNN:
            input.grad = None
            m = m.float()
            cudnn_output = m(input)
            cudnn_output.sum().backward()
            cudnn_input_grad = input.grad.data.clone()
            self.assertEqualTypeString(cudnn_output, input)
            self.assertEqual(cudnn_output, thnn_output, atol=1e-4, rtol=0)
            self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
    def _test_LayerNorm_general(self, device, dtype=torch.float):
        """LayerNorm over random shapes: unit weight / zero bias yields zero
        mean and unit variance over the normalized dims; affine weight/bias
        rescale those statistics accordingly; mismatched normalized_shape
        combinations raise RuntimeError."""
        for i in range(2, 6):
            shape = torch.randint(3, 6, (i,), dtype=torch.long).tolist()
            x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
            normalized_ndim = random.randint(1, i - 1)
            normalized_shape = shape[-normalized_ndim:]
            unnormalized_shape = shape[:-normalized_ndim]
            ln = nn.LayerNorm(normalized_shape, eps=0).to(device, dtype)
            ln.weight.data.fill_(1)
            ln.bias.data.fill_(0)
            output = ln(x)
            out_reshaped = output.view(*(unnormalized_shape + [-1]))
            mean = out_reshaped.mean(-1)
            var = out_reshaped.var(-1, unbiased=False)
            # bfloat16 needs a much looser tolerance
            delta = 1e-1 if dtype == torch.bfloat16 else 1e-5
            self.assertEqual(torch.abs(mean.data).mean(), 0, atol=delta, rtol=0)
            self.assertEqual(torch.abs(var.data).mean(), 1, atol=delta, rtol=0)
            scale, bias = torch.empty(2).uniform_(0.2, 2).tolist()
            ln.weight.data.fill_(scale)
            ln.bias.data.fill_(bias)
            output = ln(x)
            out_reshaped = output.view(*(unnormalized_shape + [-1]))
            mean = out_reshaped.mean(-1)
            var = out_reshaped.var(-1, unbiased=False)
            self.assertEqual(torch.abs(mean.data).mean(), bias, atol=delta, rtol=0)
            self.assertEqual(torch.abs(var.data).mean(), scale ** 2, atol=delta, rtol=0)
        # normalized_shape incompatible with the input shape must raise
        bad_norm_shape_input_shape = {
            (): (),
            (2, 3): (3,),
            (2,): (1, 2, 3),
            (10,): (2, 3),
            10: (2, 3),
        }
        for norm_shape, input_shape in bad_norm_shape_input_shape.items():
            ln = nn.LayerNorm(norm_shape)
            input = torch.empty(input_shape, device=device, dtype=dtype).uniform_(0, 10)
            self.assertRaises(RuntimeError, lambda: ln(input))
    def _test_LayerNorm_cuda_half(self, device):
        """Half-precision LayerNorm must run forward/backward and keep the
        input's type string."""
        input = torch.empty(2, 3, 3, 2, device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
        m = nn.LayerNorm([3, 2]).to(device, torch.half)
        output = m(input)
        output.sum().backward()
        self.assertEqualTypeString(output, input)
    def _test_GroupNorm_general(self, device, dtype=torch.float):
        """GroupNorm: per-group statistics are normalized for valid
        (shape, groups) pairs — also after undoing an affine rescale — and
        channel counts not divisible by the group count raise RuntimeError."""
        good_shape_g = {
            (1, 2, 3, 4): 2,
            (2, 3, 10): 3,
            (3, 1, 1, 1, 2): 1,
            (2, 6, 4, 2, 2): 3,
            (1, 256, 1, 1): 32,
        }
        for shape_g, grad in product(good_shape_g.items(), [True, False]):
            shape, g = shape_g
            x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
            x.requires_grad_(grad)
            b = shape[0]
            c = shape[1]
            gn = nn.GroupNorm(g, c, eps=0).to(device, dtype)
            gn.weight.data.fill_(1)
            gn.bias.data.fill_(0)
            output = gn(x)
            out_reshaped = output.view(b, g, -1)
            mean = out_reshaped.mean(-1)
            var = out_reshaped.var(-1, unbiased=False)
            self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
            self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)
            output.backward(torch.randn_like(output))
            if output.is_cuda:
                torch.cuda.synchronize()
            # undo the affine transform before re-checking the statistics
            scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
            bias = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
            gn.weight.data.copy_(scale)
            gn.bias.data.copy_(bias)
            output = gn(x)
            out_reshaped = output.view(b, c, -1)
            out_normed = (out_reshaped - bias.view(c, 1)) / scale.view(c, 1)
            out_normed_reshaped = out_normed.view(b, g, -1)
            mean = out_normed_reshaped.mean(-1)
            var = out_normed_reshaped.var(-1, unbiased=False)
            self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
            self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)
        # channel counts that do not divide into the requested group count
        bad_shape_g = {
            (1, 2, 3, 4): 3,
            (2, 3, 10): 2,
            (3, 1, 1, 1, 2): 10,
            (2, 6, 4, 2, 2): 4,
        }
        for shape, g in bad_shape_g.items():
            gn = nn.GroupNorm(g, shape[1])
            input = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
            self.assertRaises(RuntimeError, lambda: gn(input))
    def _test_GroupNorm_cuda_half(self):
        """Half-precision GroupNorm on CUDA must run forward/backward and
        keep the input's type string."""
        input = torch.zeros(2, 4, 3, 2, requires_grad=True).cuda().half().random_(1, 10)
        m = nn.GroupNorm(2, 4).to("cuda", torch.half)
        output = m(input)
        output.sum().backward()
        self.assertEqualTypeString(output, input)
    def _test_module_empty_input(self, module, inp, check_size=True):
        """Run forward/backward on an empty input: all parameter and input
        gradients must come back as zeros (and the output size must match
        the input's when check_size is set)."""
        inp.requires_grad_(True)
        out = module(inp)
        gO = torch.rand_like(out)
        out.backward(gO)
        if check_size:
            self.assertEqual(out.size(), inp.size())
        for p in module.parameters():
            if p.requires_grad:
                self.assertEqual(p.grad, torch.zeros_like(p.grad))
        self.assertEqual(inp.grad, torch.zeros_like(inp))
    def _test_module_empty_inputs(self, module, inputs):
        """Multi-input variant of _test_module_empty_input: after backward on
        empty inputs, every parameter and input gradient must be zeros."""
        for _inp in inputs:
            _inp.requires_grad_(True)
        out = module(*inputs)
        gO = torch.rand_like(out)
        out.backward(gO)
        for p in module.parameters():
            if p.requires_grad:
                self.assertEqual(p.grad, torch.zeros_like(p.grad))
        for _inp in inputs:
            self.assertEqual(_inp.grad, torch.zeros_like(_inp))
    @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                     "Scipy v1.0 and/or numpy not found")
    @tf32_on_and_off()
    def test_affine_2d_rotate0(self, device):
        """A zero-angle affine_grid + grid_sample must reproduce
        scipy.ndimage.affine_transform for the equivalent transform."""
        input_size = [1, 1, 3, 3]
        input_ary = np.array(np.random.random(input_size), dtype=np.float32)
        output_size = [1, 1, 5, 5]
        angle_rad = 0.
        transform_tensor, transform_ary, offset = \
            _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            offset=offset,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=False))
        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )
        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')
        self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
    @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                     "Scipy v1.0 and/or numpy not found")
    @tf32_on_and_off(0.001)
    def test_affine_2d_rotate90(self, device):
        """Quarter-turn rotations over all square input/output size pairs:
        grid_sample must match scipy, and for equal sizes scipy itself must
        map the corners the way a 90-degree rotation does."""
        for input_size2dsq, output_size2dsq in \
                itertools.product(input_size2dsq_(), output_size2dsq_()):
            input_size = input_size2dsq
            input_ary = np.array(np.random.random(input_size), dtype=np.float32)
            output_size = output_size2dsq
            angle_rad = 0.25 * math.pi * 2
            transform_tensor, transform_ary, offset = \
                _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
            scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
                input_ary[0, 0],
                transform_ary,
                offset=offset,
                output_shape=output_size[2:],
                order=1,
                mode='nearest',
                prefilter=True))
            if input_size2dsq == output_size2dsq:
                self.assertEqual(scipy_ary.mean(), input_ary.mean())
                self.assertEqual(scipy_ary[0, 0], input_ary[0, 0, 0, -1])
                self.assertEqual(scipy_ary[0, -1], input_ary[0, 0, -1, -1])
                self.assertEqual(scipy_ary[-1, -1], input_ary[0, 0, -1, 0])
                self.assertEqual(scipy_ary[-1, 0], input_ary[0, 0, 0, 0])
            affine_tensor = torch.nn.functional.affine_grid(
                transform_tensor,
                torch.Size(output_size),
                align_corners=True
            )
            gridsample_ary = torch.nn.functional.grid_sample(
                torch.tensor(input_ary, device=device).to(device),
                affine_tensor,
                padding_mode='border',
                align_corners=True
            ).to('cpu')
            self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
            self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
    @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                     "Scipy v1.0 and/or numpy not found")
    @tf32_on_and_off(0.005)
    def test_affine_2d_rotate45(self, device):
        """Eighth-turn rotation of a hand-built asymmetric pattern must match
        the scipy reference."""
        input_size = [1, 1, 3, 3]
        input_ary = np.array(np.zeros(input_size), dtype=np.float32)
        # asymmetric marker pattern: top row 0.5, bottom-right corner 1.0
        input_ary[0, 0, 0, :] = 0.5
        input_ary[0, 0, 2, 2] = 1.0
        output_size = [1, 1, 3, 3]
        angle_rad = 0.125 * math.pi * 2
        transform_tensor, transform_ary, offset = \
            _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            offset=offset,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=False))
        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )
        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')
        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
    @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                     "Scipy v1.0 and/or numpy not found")
    @tf32_on_and_off(0.005)
    def test_affine_2d_rotateRandom(self, device):
        """Random angles and size combinations: grid_sample must match scipy,
        and every affine_grid point must equal the reference grid matrix
        applied to its output index."""
        for angle_rad, input_size2d, output_size2d in \
                itertools.product(angle_rad_(), input_size2d_(), output_size2d_()):
            input_size = input_size2d
            input_ary = np.array(np.random.random(input_size), dtype=np.float32).round(3)
            output_size = output_size2d
            # distinct corner markers so orientation errors are visible
            input_ary[0, 0, 0, 0] = 2
            input_ary[0, 0, 0, -1] = 4
            input_ary[0, 0, -1, 0] = 6
            input_ary[0, 0, -1, -1] = 8
            transform_tensor, transform_ary, grid_ary = \
                _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
            scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
                input_ary[0, 0],
                transform_ary,
                output_shape=output_size[2:],
                order=1,
                mode='nearest',
                prefilter=False))
            affine_tensor = torch.nn.functional.affine_grid(
                transform_tensor,
                torch.Size(output_size),
                align_corners=True
            )
            gridsample_ary = torch.nn.functional.grid_sample(
                torch.tensor(input_ary, device=device).to(device),
                affine_tensor,
                padding_mode='border',
                align_corners=True
            ).to('cpu')
            affine_tensor = affine_tensor.to('cpu')
            for r in range(affine_tensor.size(1)):
                for c in range(affine_tensor.size(2)):
                    grid_out = np.dot(grid_ary, [r, c, 1])
                    self.assertEqual(affine_tensor[0, r, c], grid_out[:2], exact_dtype=False)
            self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
    @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                     "Scipy v1.0 and/or numpy not found")
    @tf32_on_and_off(0.005)
    def test_affine_3d_rotateRandom(self, device):
        """3d analogue of test_affine_2d_rotateRandom: random angles and axes
        over all 3d size combinations, checking grid_sample against scipy and
        each affine_grid point against the reference grid matrix."""
        for angle_rad, axis_vector, input_size3d, output_size3d in \
                itertools.product(angle_rad_(), axis_vector_(), input_size3d_(), output_size3d_()):
            input_size = input_size3d
            input_ary = np.array(np.random.random(input_size), dtype=np.float32)
            output_size = output_size3d
            # distinct corner markers so orientation errors are visible
            input_ary[0, 0, 0, 0, 0] = 2
            input_ary[0, 0, 0, 0, -1] = 3
            input_ary[0, 0, 0, -1, 0] = 4
            input_ary[0, 0, 0, -1, -1] = 5
            input_ary[0, 0, -1, 0, 0] = 6
            input_ary[0, 0, -1, 0, -1] = 7
            input_ary[0, 0, -1, -1, 0] = 8
            input_ary[0, 0, -1, -1, -1] = 9
            transform_tensor, transform_ary, grid_ary = \
                _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector)
            scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
                input_ary[0, 0],
                transform_ary,
                output_shape=output_size[2:],
                order=1,
                mode='nearest',
                prefilter=False))
            affine_tensor = torch.nn.functional.affine_grid(
                transform_tensor,
                torch.Size(output_size),
                align_corners=True
            )
            gridsample_ary = torch.nn.functional.grid_sample(
                torch.tensor(input_ary, device=device).to(device),
                affine_tensor,
                padding_mode='border',
                align_corners=True
            ).to('cpu')
            affine_tensor = affine_tensor.to('cpu')
            for i in range(affine_tensor.size(1)):
                for r in range(affine_tensor.size(2)):
                    for c in range(affine_tensor.size(3)):
                        grid_out = np.dot(grid_ary, [i, r, c, 1])
                        self.assertEqual(affine_tensor[0, i, r, c], grid_out[:3], exact_dtype=False)
            self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
    @onlyCUDA
    @skipCUDAIfNoCudnn
    @dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
    def test_Conv2d_deterministic_cudnn(self, device, dtype):
        """With cudnn deterministic mode on, two identically-initialized
        convs must produce bit-identical outputs and gradients (atol=0)."""
        inputs = torch.randn(2, 3, 5, 5, device=device, dtype=dtype, requires_grad=True)
        with cudnn.flags(enabled=True, benchmark=True, deterministic=True):
            conv1 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)
            conv2 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)
            conv2.bias.data.copy_(conv1.bias.data)
            conv2.weight.data.copy_(conv1.weight.data)
            out1 = conv1(inputs)
            out2 = conv2(inputs)
            self.assertEqual(out1, out2, atol=0.0, rtol=0)
            y = torch.randn(out1.size(), device=device, dtype=dtype)
            out1.backward(y)
            out2.backward(y)
            self.assertEqual(conv1.bias.grad.data, conv2.bias.grad.data, atol=0.0, rtol=0)
            self.assertEqual(conv1.weight.grad.data, conv2.weight.grad.data, atol=0.0, rtol=0)
    @onlyCUDA
    @dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
    def test_Conv2d_large_workspace(self, device, dtype):
        """Smoke-test Conv2d forward/backward over sizes that (per the test
        name) need a large cuDNN workspace, with benchmark mode both off and
        on; only checks that nothing raises."""
        sizes = [
            (1, 256, 109, 175),
            (1, 256, 80, 128),
            (1, 256, 120, 192),
        ]
        def run_test(benchmark):
            # benchmark toggles cuDNN's algorithm auto-selection path
            with torch.backends.cudnn.flags(benchmark=benchmark):
                conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype)
                for size in sizes:
                    x = torch.randn(size, device=device, dtype=dtype)
                    out = conv(x.detach().clone().requires_grad_())
                    out.backward(torch.ones_like(out))
        run_test(benchmark=False)
        run_test(benchmark=True)
    @onlyCUDA
    @dtypes(torch.half, torch.float)
    def test_ConvTranspose2d_large_output_padding(self, device, dtype):
        """Smoke-test a stack of stride-2 ConvTranspose2d layers with
        output_padding=1 through forward and backward; only checks that
        nothing raises."""
        net1 = torch.nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)\
            .to(device=device, dtype=dtype)
        net2 = torch.nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1)\
            .to(device=device, dtype=dtype)
        net3 = torch.nn.ConvTranspose2d(32, 3, kernel_size=3, stride=2, padding=1, output_padding=1)\
            .to(device=device, dtype=dtype)
        x = torch.rand(1, 128, 6, 6, device=device, dtype=dtype, requires_grad=True)
        x = net1(x)
        x = net2(x)
        x = net3(x)
        x.backward(torch.randn_like(x))
        torch.cuda.synchronize()
@onlyCUDA
@tf32_on_and_off(0.01)
@dtypes(torch.float, torch.double, torch.half)
def test_Conv2d_depthwise_naive_groups(self, device, dtype):
for depth_multiplier in [1, 2]:
m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)
i = torch.randn(2, 2, 6, 6, device="cuda", dtype=dtype).div_(2).requires_grad_()
output = m(i)
grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, device=device, dtype=dtype) / 2
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data,
m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data,
m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(torch.float, torch.double, torch.half)
@tf32_on_and_off(0.005)
def test_Conv3d_depthwise_naive_groups(self, device, dtype):
for depth_multiplier in [1, 2]:
m = nn.Conv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)
i = torch.randn(2, 2, 6, 6, 6, device="cuda", dtype=dtype).div_(2).requires_grad_()
output = m(i)
grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, 4, device=device, dtype=dtype) / 2
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data,
m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data,
m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
    @onlyCUDA
    @dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
    def test_noncontig_conv_grad(self, device, dtype):
        """Conv2d backward with a non-contiguous grad_output must produce the
        same input gradient as backward with its contiguous copy."""
        module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to(device, dtype)
        input = torch.randn(2, 3, 10, 10, dtype=dtype, device=device, requires_grad=True)
        output = module(input)
        # selecting the middle index of a 5-d tensor yields a
        # non-contiguous gradient tensor
        grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device=device)[:, 1]
        assert not grad.is_contiguous()
        output.backward(grad, retain_graph=True)
        self.assertIsNotNone(input.grad)
        result = input.grad.data.clone()
        input.grad.data.zero_()
        output.backward(grad.contiguous())
        self.assertEqual(result, input.grad.data, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(torch.float, torch.half)
def test_batchnorm_large_batch(self, device, dtype):
bn = nn.BatchNorm2d(1).to(device, dtype)
data = torch.rand(880801, 1, 1, 1, device=device, dtype=dtype)
out = bn(data).sum().backward()
    @onlyCUDA
    @dtypes(torch.double)
    def test_conv_double_backward(self, device, dtype):
        """Run the Conv2d double-backward helper on CUDA across kernel and
        dilation combinations, with deterministic cuDNN so failures are
        reproducible."""
        with torch.backends.cudnn.flags(deterministic=True):
            batch_size = 1
            for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]:
                for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
                    # stride is always 1 here, so the weight grad is
                    # always checked (no_weight stays False)
                    no_weight = stride == 2
                    result = self.run_conv_double_back_test(kern, stride,
                                                            padding, chan_in, chan_out,
                                                            batch_size, inp_size, dilation,
                                                            no_weight, use_cuda=True, dtype=dtype)
                    self.assertTrue(result,
                                    "Conv double backward test failed with parameters:" +
                                    "\nkern: " + str(kern) +
                                    "\nstride: " + str(stride) +
                                    "\npadding: " + str(padding) +
                                    "\nchan_in: " + str(chan_in) +
                                    "\nchan_out: " + str(chan_out) +
                                    "\nbatch_size: " + str(batch_size) +
                                    "\ninp_size: " + str(inp_size) +
                                    "\ndilation: " + str(dilation))
def test_conv_double_backward_no_bias(self):
kern = 3
stride = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 5
padding = 1
dilation = 1
no_weight = False
use_bias = True
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight, use_bias=use_bias)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation))
    def test_conv_double_backward_groups(self):
        """Double-backward check for grouped Conv2d (groups=2); channel
        counts are multiplied by the group count when calling the helper."""
        kern = 3
        stride = 1
        padding = 2
        chan_in, chan_out = 2, 4
        batch_size = 2
        inp_size = 6
        dilation = 1
        no_weight = False
        groups = 2
        result = self.run_conv_double_back_test(kern, stride,
                                                padding, chan_in * groups, chan_out * groups,
                                                batch_size, inp_size, dilation,
                                                no_weight, groups=groups)
        self.assertTrue(result,
                        "Conv double backward test failed with parameters:" +
                        "\nkern: " + str(kern) +
                        "\nstride: " + str(stride) +
                        "\npadding: " + str(padding) +
                        "\nchan_in: " + str(chan_in) +
                        "\nchan_out: " + str(chan_out) +
                        "\nbatch_size: " + str(batch_size) +
                        "\ninp_size: " + str(inp_size) +
                        "\ndilation: " + str(dilation) +
                        "\ngroups: " + str(groups))
    def test_conv_double_backward_stride(self):
        """Run the double-backward helper with stride-2 convolutions.

        NOTE(review): the helper's return value is not asserted here, so
        this only checks that the computation completes without raising —
        confirm that is the intent.
        """
        batch_size = 2
        for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]:
            for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1], [1], [2], dilations):
                no_weight = False
                self.run_conv_double_back_test(kern, stride,
                                               padding, chan_in, chan_out,
                                               batch_size, inp_size, dilation,
                                               no_weight)
    def test_conv1d_same_padding(self, device):
        """conv1d with padding='same': the output length equals
        ceil(in_size / stride), and results match explicit padding (cropped
        where the implied padding is asymmetric), with and without dilation."""
        # in_size, kernel_size, dilation, stride combinations for the
        # output-length check
        test_args = [
            range(50, 55),
            [1, 2, 3, 8],
            range(1, 4),
            [1],
        ]
        for in_size, k_size, dilation, stride in itertools.product(*test_args):
            x = torch.rand(1, 1, in_size, device=device)
            y = torch.rand(1, 1, k_size, device=device)
            z = F.conv1d(x, y, padding='same', dilation=dilation, stride=stride)
            self.assertEqual(z.size(2), int(math.ceil(in_size / stride)))
        # symmetric case: 'same' equals explicit padding of 1
        x = torch.rand(1, 1, 12, device=device)
        y = torch.rand(1, 1, 3, device=device)
        expect = F.conv1d(x, y, padding=1)
        actual = F.conv1d(x, y, padding='same')
        self.assertEqual(expect, actual)
        x = torch.rand(1, 1, 12, device=device)
        y = torch.rand(1, 1, 4, device=device)
        expect = F.conv1d(x, y, padding=3, dilation=2)
        actual = F.conv1d(x, y, padding='same', dilation=2)
        self.assertEqual(expect, actual)
        # asymmetric case: explicit padding over-pads; crop the first element
        expect = F.conv1d(x, y, padding=5, dilation=3)[..., 1:]
        actual = F.conv1d(x, y, padding='same', dilation=3)
        self.assertEqual(expect, actual)
def test_conv2d_same_padding(self, device):
x = torch.rand(1, 1, 10, 11, device=device)
y = torch.rand(1, 1, 4, 5, device=device)
expect = F.conv2d(x, y, padding=(2, 2))[..., 1:, :]
actual = F.conv2d(x, y, padding='same')
self.assertEqual(expect, actual)
y = torch.rand(1, 1, 3, 4, device=device)
expect = F.conv2d(x, y, padding=(2, 3), dilation=2)
actual = F.conv2d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
y = torch.rand(1, 1, 4, 4, device=device)
expect = F.conv2d(x, y, padding=5, dilation=3)[..., 1:, 1:]
actual = F.conv2d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
def test_conv3d_same_padding(self, device):
x = torch.rand(1, 1, 10, 11, 12, device=device)
y = torch.rand(1, 1, 1, 2, 5, device=device)
expect = F.conv3d(x, y, padding=(0, 1, 2))[..., :, 1:, :]
actual = F.conv3d(x, y, padding='same')
self.assertEqual(expect, actual)
expect = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
actual = F.conv3d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
y = torch.rand(1, 1, 4, 4, 4, device=device)
expect = F.conv3d(x, y, padding=5, dilation=3)[..., 1:, 1:, 1:]
actual = F.conv3d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
def test_conv1d_valid_padding(self, device):
x = torch.rand(1, 1, 10, device=device)
y = torch.rand(1, 1, 4, device=device)
expect = F.conv1d(x, y)
actual = F.conv1d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv2d_valid_padding(self, device):
x = torch.rand(1, 1, 1, 10, device=device)
y = torch.rand(1, 1, 1, 4, device=device)
expect = F.conv2d(x, y)
actual = F.conv2d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv3d_valid_padding(self, device):
x = torch.rand(1, 1, 1, 1, 10, device=device)
y = torch.rand(1, 1, 1, 1, 4, device=device)
expect = F.conv3d(x, y)
actual = F.conv3d(x, y, padding='valid')
self.assertEqual(expect, actual)
    def test_conv1d_same_padding_backward(self, device):
        """Gradients of F.conv1d with padding='same' must match explicit padding.

        Two cases with an even (size-4) kernel: dilation=2 where explicit
        padding=3 matches exactly, and dilation=1 where 'same' equals explicit
        padding=2 with the first output element dropped.
        """
        x = torch.rand(1, 1, 12, device=device, requires_grad=True)
        y = torch.rand(1, 1, 4, device=device, requires_grad=True)
        # Dilated case: explicit padding=3 matches 'same' output exactly.
        z = F.conv1d(x, y, padding=3, dilation=2)
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None
        z = F.conv1d(x, y, padding='same', dilation=2)
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
        x.grad, y.grad = None, None
        # Undilated case: 'same' equals padding=2 with the leading element sliced off.
        z = F.conv1d(x, y, padding=2)[..., 1:]
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None
        z = F.conv1d(x, y, padding='same')
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
    def test_conv2d_same_padding_backward(self, device):
        """Gradients of F.conv2d with padding='same' must match explicit padding."""
        x = torch.rand(1, 1, 10, 11, device=device, requires_grad=True)
        y = torch.rand(1, 1, 4, 5, device=device, requires_grad=True)
        # Dilated case: explicit padding (3, 4) matches 'same' output exactly.
        z = F.conv2d(x, y, padding=(3, 4), dilation=2)
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None
        z = F.conv2d(x, y, padding='same', dilation=2)
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
        x.grad, y.grad = None, None
        # Even 4x4 kernel: 'same' equals padding=2 with the first row/col dropped.
        y = torch.rand(1, 1, 4, 4, device=device, requires_grad=True)
        z = F.conv2d(x, y, padding=2)[..., 1:, 1:]
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None
        z = F.conv2d(x, y, padding='same')
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
    def test_conv3d_same_padding_backward(self, device):
        """Gradients of F.conv3d with padding='same' must match explicit padding;
        also runs gradcheck (and, off-CUDA, fwd-over-rev gradgradcheck)."""
        # forward-mode AD checks are disabled on XLA here
        check_forward_ad = torch.device(device).type != 'xla'
        x = torch.rand(1, 1, 1, 11, 12, device=device, requires_grad=True)
        y = torch.rand(1, 1, 1, 2, 5, device=device, requires_grad=True)
        # Dilated case: explicit padding (0, 1, 4) matches 'same' output exactly.
        z = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None
        z = F.conv3d(x, y, padding='same', dilation=2)
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
        x.grad, y.grad = None, None
        gradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),
                  check_forward_ad=check_forward_ad, nondet_tol=1e-5)
        # NOTE(review): fwd-over-rev gradgradcheck is skipped on CUDA — presumably
        # due to kernel nondeterminism; confirm before relying on it there.
        if torch.device(device).type != 'cuda':
            gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),
                          check_fwd_over_rev=True)
        # Even 4x4 kernel on the last two dims: 'same' equals padding=2 with
        # the leading row/col of the output sliced off.
        y = torch.rand(1, 1, 1, 4, 4, device=device, requires_grad=True)
        z = F.conv3d(x, y, padding=2)[..., 1:, 1:]
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None
        z = F.conv3d(x, y, padding='same')
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
        gradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),
                  check_forward_ad=check_forward_ad, nondet_tol=1e-5)
        if torch.device(device).type != 'cuda':
            gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),
                          check_fwd_over_rev=True)
def test_conv1d_valid_padding_backward(self, device):
x = torch.rand(1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, device=device, requires_grad=True)
F.conv1d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv1d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
def test_conv2d_valid_padding_backward(self, device):
x = torch.rand(1, 1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 4, device=device, requires_grad=True)
F.conv2d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv2d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
    def test_conv3d_valid_padding_backward(self, device):
        """Gradients with padding='valid' equal those with padding=0 for conv3d;
        also runs gradcheck and gradgradcheck."""
        # forward-mode AD checks are disabled on XLA here
        check_forward_ad = torch.device(device).type != 'xla'
        x = torch.rand(1, 1, 1, 1, 10, device=device, requires_grad=True)
        y = torch.rand(1, 1, 1, 1, 4, device=device, requires_grad=True)
        F.conv3d(x, y, padding=0).sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None
        F.conv3d(x, y, padding='valid').sum().backward()
        gx_actual, gy_actual = x.grad, y.grad
        self.assertEqual(gx_expect, gx_actual)
        self.assertEqual(gy_expect, gy_actual)
        gradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_fwd_over_rev=check_forward_ad)
    @skipMeta
    @parametrize_test("input_shape,transposed,dilated,groups,layout,backend_expected", [
        # === slow (native fallback) ===
        subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d'),
        subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_transposed'),
        subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated'),
        subtest(((2, 6, 7), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated_transposed'),
        subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d'),
        subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_transposed'),
        subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated'),
        subtest(((2, 6, 7, 8), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated_transposed'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Slow3d),
                decorators=[onlyCPU, disableMkldnn], name='slow3d_cpu'),
        # CUDA doesn't have a slow 3d implementation, so it goes to the dilated 3d implementation.
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),
                decorators=[onlyCUDA, disablecuDNN], name='slow3d_cuda'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_transposed'),
        subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated'),
        subtest(((2, 6, 7, 8, 9), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),
                decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated_transposed'),
        # === empty inputs (zero batch and/or zero channels) ===
        subtest(((0, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch1d'),
        subtest(((2, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel1d'),
        subtest(((0, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel1d'),
        subtest(((0, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch2d'),
        subtest(((2, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel2d'),
        subtest(((0, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel2d'),
        subtest(((0, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch3d'),
        subtest(((2, 0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel3d'),
        subtest(((0, 0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
                decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel3d'),
        # === cuda ===
        # Note that disablecuDNN disables miopen as well.
        subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),
                decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise1d'),
        subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),
                decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise3d),
                decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise3d'),
        # === cudnn ===
        subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d'),
        subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d'),
        subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d_transposed'),
        subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d_transposed'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d_transposed'),
        # === miopen ===
        subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d'),
        subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d'),
        subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d_transposed'),
        subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d_transposed'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d_transposed'),
        subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise1d'),
        subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
                decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise3d'),
        # === mkldnn ===
        subtest(((2, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d'),
        subtest(((2, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d'),
        subtest(((2, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d'),
        # Transposed convolution is broken for mkldnn. See https://github.com/pytorch/pytorch/issues/68775.
        subtest(((2, 6, 7), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn1d_transposed'),
        subtest(((2, 6, 7, 8), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn2d_transposed'),
        subtest(((2, 6, 7, 8, 9), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn3d_transposed'),
        # mkldnn backend is also selected for dilated strided CPU inputs.
        subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d_cpu_input'),
        subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d_cpu_input'),
        subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d_cpu_input'),
        subtest(((0, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch1d'),
        subtest(((2, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel1d'),
        subtest(((0, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel1d'),
        subtest(((0, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch2d'),
        subtest(((2, 0, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel2d'),
        subtest(((0, 0, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel2d'),
        subtest(((0, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch3d'),
        subtest(((2, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel3d'),
        subtest(((0, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
                decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel3d'),
        # Note: Tests for mobile backends are not currently supported. This comprises
        # NnpackSpatial, Winograd3x3Depthwise, and Xnnpack2d backends. Testing these
        # requires the ability to gate tests by whether PyTorch is built with USE_MOBILE=1.
    ])
    # Test with both bias and no bias.
    @parametrize_test("has_bias", [False, True])
    # Test with both stride=1 and stride>1 cases.
    @parametrize_test("strided", [False, True])
    # Test with both contiguous and non-contiguous inputs.
    @parametrize_test("contiguous", [False, True])
    def test_conv_backend(
            self, device, input_shape, has_bias, strided, contiguous, transposed, dilated, groups,
            layout, backend_expected):
        """Verify convolution backend dispatch and differentiation.

        For each parametrized configuration: (1) assert that
        torch._C._select_conv_backend picks `backend_expected`, (2) run
        forward + backward through aten::convolution, and (3) for non-mkldnn
        layouts, run gradcheck/gradgradcheck in float64.
        """
        # Build up inputs.
        dtype = torch.float32
        C_in, C_out, dim, kernel_size = input_shape[1], 12, len(input_shape) - 2, 3
        x = torch.randn(*input_shape, device=device, dtype=dtype, requires_grad=True)
        # For transposed conv the weight layout is (C_in, C_out // groups, *k).
        weight = torch.randn(C_in if transposed else C_out,
                             C_out // groups if transposed else C_in // groups,
                             *[kernel_size for _ in range(dim)],
                             device=device, dtype=dtype, requires_grad=True)
        bias = torch.randn(C_out, device=device, dtype=dtype, requires_grad=True) if has_bias else None
        def _make_noncontiguous(inp):
            # Double the last dim then stride over it -> same values, non-contiguous.
            if inp is None:
                return None
            old_requires_grad = inp.requires_grad
            inp = torch.repeat_interleave(inp, 2, dim=-1)
            inp = inp[..., ::2].detach().requires_grad_(old_requires_grad)
            return inp
        if not contiguous:
            x = _make_noncontiguous(x)
            weight = _make_noncontiguous(weight)
            bias = _make_noncontiguous(bias)
        if layout is torch._mkldnn:
            x = x.to_mkldnn()
            # Note that weight and bias are not supported as mkldnn tensors during training.
        stride = (2,) * dim if strided else (1,) * dim
        padding = (0,) * dim
        dilation = (2,) * dim if dilated else (1,) * dim
        output_padding = (0,) * dim
        inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]
        # Ensure correct backend is selected.
        backend_actual = torch._C._select_conv_backend(*inputs)
        self.assertEqual(backend_actual, backend_expected)
        # Ensure backward call succeeds.
        convolution = torch.ops.aten.convolution
        output = convolution(*inputs)
        grad_output = torch.randn(output.shape, device=device, dtype=dtype)
        if not contiguous:
            grad_output = _make_noncontiguous(grad_output)
        if layout is torch._mkldnn:
            grad_output = grad_output.to_mkldnn()
        output.backward(grad_output)
        # mkldnn doesn't support gradcheck :(
        if layout is torch._mkldnn:
            return
        # gradcheck needs double precision inputs.
        x = x.to(torch.float64).detach().requires_grad_(True)
        weight = weight.to(torch.float64).detach().requires_grad_(True)
        if bias is not None:
            bias = bias.to(torch.float64).detach().requires_grad_(True)
        inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]
        # cuDNN kernels can be nondeterministic; relax gradcheck tolerance if present.
        gradcheck_nondet_tol = 0.0
        if torch.backends.cudnn.is_available():
            gradcheck_nondet_tol = GRADCHECK_NONDET_TOL
        self.assertTrue(gradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))
        if bias is not None:
            bias.requires_grad_(False)
        self.assertTrue(gradgradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))
def test_Dropout(self, device):
input = torch.empty(1000)
self._test_dropout(nn.Dropout, device, input)
self._test_dropout_discontiguous(nn.Dropout, device)
self._test_dropout_discontiguous(nn.Dropout, device, memory_format=torch.channels_last)
self._test_dropout_stride_mean_preserve(nn.Dropout, device)
if self.device_type == 'cuda' or self.device_type == 'cpu':
input = input.bfloat16()
self._test_dropout(nn.Dropout, device, input)
def _test_dropoutNd_no_batch(self, dropout, input):
input_clone = input.clone()
with freeze_rng_state():
res_no_batch = dropout(input)
with freeze_rng_state():
res_batched = dropout(input_clone.unsqueeze(0)).squeeze(0)
self.assertEqual(res_no_batch, res_batched)
def _test_dropoutNd_channel_zero(self, dropout, input):
# Verify the number of zeros in a channel is 0 or the number of elements in the channel
# for a fully positive input tensor
shape = input.shape
B = shape[0]
C = shape[1]
channel_numel = torch.tensor(shape[2:]).prod()
result = dropout(input)
for b, c in product(range(B), range(C)):
self.assertTrue(result[b, c].count_nonzero() in (0, channel_numel))
    @expectedFailureXLA  # seems like freeze_rng_state is not honoured by XLA
    def test_Dropout2d(self, device):
        """Dropout2d: shared dropout checks, dim-mismatch warnings,
        no-batch-dim support, and whole-channel zeroing."""
        b = random.randint(1, 5)
        w = random.randint(1, 5)
        h = random.randint(1, 5)
        num_features = 1000
        input = torch.empty(num_features, b, w, h)
        self._test_dropout(nn.Dropout2d, device, input)
        self._test_dropout(nn.Dropout2d, device, input, memory_format=torch.channels_last)
        self._test_dropout_discontiguous(nn.Dropout2d, device)
        self._test_dropout_discontiguous(nn.Dropout2d, device, memory_format=torch.channels_last)
        # Inputs that are not 3-D/4-D warn rather than error.
        with self.assertWarnsRegex(UserWarning, "Received a 5-D input to dropout2d"):
            nn.Dropout2d(p=0.5)(torch.rand(1, 2, 2, 2, 2, device=device))
        with self.assertWarnsRegex(UserWarning, "Received a 2-D input to dropout2d"):
            nn.Dropout2d(p=0.5)(torch.rand(1, 2, device=device))
        # no batch dims
        input = torch.rand(50, 2, 2, device=device)
        self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5), input)
        self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5, inplace=True), input)
        # check that complete channels are dropped
        input = torch.ones(10, 4, 2, 2, device=device)
        self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5), input)
        self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5, inplace=True), input)
    @expectedFailureXLA  # seems like freeze_rng_state is not honoured by XLA
    def test_Dropout3d(self, device):
        """Dropout3d: shared dropout checks, dim-mismatch warnings,
        no-batch-dim support, and whole-channel zeroing."""
        b = random.randint(1, 5)
        w = random.randint(1, 5)
        h = random.randint(1, 5)
        d = random.randint(1, 2)
        num_features = 1000
        input = torch.empty(num_features, b, d, w, h)
        self._test_dropout(nn.Dropout3d, device, input)
        self._test_dropout_discontiguous(nn.Dropout3d, device)
        self._test_dropout_discontiguous(nn.Dropout3d, device, memory_format=torch.channels_last)
        # Inputs that are not 4-D/5-D warn rather than error.
        with self.assertWarnsRegex(UserWarning, "Received a 6-D input to dropout3d"):
            nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, 2, 2, 2, device=device))
        with self.assertWarnsRegex(UserWarning, "Received a 3-D input to dropout3d"):
            nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, device=device))
        # no batch dims
        input = torch.rand(50, 2, 2, 2, device=device)
        self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5), input)
        self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5, inplace=True), input)
        # check that complete channels are dropped
        input = torch.ones(10, 4, 2, 2, 2, device=device)
        self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5), input)
        self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5, inplace=True), input)
def test_InstanceNorm1d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
d = random.randint(8, 10)
input = torch.rand(b, c, d)
self._test_InstanceNorm_general(nn.InstanceNorm1d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm1d, input, device)
def test_InstanceNorm2d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
w = random.randint(3, 6)
h = random.randint(6, 8)
input = torch.rand(b, c, h, w)
self._test_InstanceNorm_general(nn.InstanceNorm2d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm2d, input, device)
def test_InstanceNorm3d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
w = random.randint(2, 5)
h = random.randint(2, 5)
d = random.randint(2, 5)
input = torch.rand(b, c, h, w, d)
self._test_InstanceNorm_general(nn.InstanceNorm3d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm3d, input, device)
def test_instancenorm_raises_error_if_less_than_one_value_per_channel(self, device):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.InstanceNorm1d(10)(x).to(device)
def test_instancenorm_raises_error_for_single_spatial_element_during_training(self, device):
BATCH_SIZE = 10
NUM_CHANNELS = 3
norms = [torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d]
for i, norm in enumerate(norms):
m = norm(NUM_CHANNELS, track_running_stats=True)
m.to(device)
# Create an appropriately-sized input with a single spatial element.
input = torch.randn(BATCH_SIZE, NUM_CHANNELS, *[1 for _ in range(i + 1)],
device=device)
with self.assertRaises(ValueError):
m(input)
# Single spatial element should be fine in eval.
m.eval()
m(input)
def test_LayerNorm_general(self, device):
self._test_LayerNorm_general(device)
if self.device_type == 'cuda' or self.device_type == 'cpu':
self._test_LayerNorm_general(device, dtype=torch.bfloat16)
if self.device_type == 'cuda':
self._test_LayerNorm_cuda_half(device)
    @onlyNativeDeviceTypes
    def test_LayerNorm_numeric(self, device):
        """Compare nn.LayerNorm against a straightforward reference implementation;
        on CUDA, additionally cross-check against the CPU kernel."""
        def layer_norm_ref(X, gamma, beta, normalized_shape, eps):
            # Flatten the normalized dims, normalize each row, then affine-scale.
            feature_size = np.prod(normalized_shape)
            X_view = X.view(-1, feature_size)
            mean = X_view.mean(dim=-1, keepdim=True)
            var = X_view.var(dim=-1, unbiased=False, keepdim=True)
            Y = (X_view - mean) / torch.sqrt(var + eps)
            Y = Y * gamma.view(-1) + beta.view(-1)
            return Y.view(*X.size())
        normalized_shape = [256, 256, 144]
        layer_norm = nn.LayerNorm(normalized_shape).float().to(device)
        X = torch.rand(2, *normalized_shape, dtype=torch.float32,
                       device=device)
        Y = layer_norm(X)
        Y_ref = layer_norm_ref(X, layer_norm.weight.data, layer_norm.bias.data,
                               normalized_shape, layer_norm.eps)
        self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)
        if self.device_type == 'cuda':
            # Same module on CPU must agree with the CUDA result.
            layer_norm.cpu()
            Y_cpu = layer_norm(X.cpu())
            self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)
@onlyNativeDeviceTypes
def test_GroupNorm_general(self, device):
self._test_GroupNorm_general(device)
if self.device_type == 'cuda':
self._test_GroupNorm_cuda_half()
def test_GroupNorm_raises_error_if_one_value_per_group(self, device):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.GroupNorm(10, 10)(x).to(device)
def test_GroupNorm_empty(self, device):
mod = torch.nn.GroupNorm(2, 4).to(device)
inp = torch.randn(0, 4, 2, 2, device=device)
self._test_module_empty_input(mod, inp)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp)
    @onlyCPU
    @dtypes(torch.float, torch.double)
    def test_groupnorm_nhwc(self, device, dtype):
        """GroupNorm on channels_last input must match the contiguous path
        for output and all gradients, and keep the channels_last layout."""
        def helper(self, size, groups):
            # channels_last module vs. a contiguous reference with identical params.
            channels = size[1]
            input = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
            input = input.contiguous(memory_format=torch.channels_last)
            input.retain_grad()
            grad = torch.randn(size, dtype=dtype, device=device)
            grad = grad.contiguous(memory_format=torch.channels_last)
            gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
            gn.weight.data.uniform_()
            gn.bias.data.uniform_()
            # Reference pair: contiguous copies of input/grad, same state dict.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
            ref_gn.load_state_dict(gn.state_dict())
            out = gn(input)
            out.backward(grad)
            ref_out = ref_gn(ref_input)
            ref_out.backward(ref_grad)
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(gn.weight.grad, ref_gn.weight.grad)
            self.assertEqual(gn.bias.grad, ref_gn.bias.grad)
            self.assertEqual(input.grad, ref_input.grad)
        helper(self, (4, 8, 10, 10), 4)
        helper(self, (2, 30, 9, 9), 3)
    @onlyNativeDeviceTypes
    def test_GroupNorm_numeric(self, device):
        """Compare nn.GroupNorm against a straightforward reference implementation;
        on CUDA, additionally cross-check against the CPU kernel."""
        def group_norm_ref(X, gamma, beta, groups, channels, eps):
            # Normalize per (batch, group), then apply the per-channel affine.
            batch_size = X.size()[0]
            X_view = X.view(batch_size, groups, -1)
            mean = X_view.mean(dim=-1, keepdim=True)
            var = X_view.var(dim=-1, unbiased=False, keepdim=True)
            Y = ((X_view - mean) / torch.sqrt(var + eps)).view(
                batch_size, channels, -1)
            Y = Y * gamma.view(channels, 1) + beta.view(channels, 1)
            return Y.view(*X.size())
        batch_size = 1
        groups = 2
        channels = 8
        group_norm = nn.GroupNorm(groups, channels).float().to(device)
        X = torch.rand(batch_size, channels, 256, 256, 72,
                       dtype=torch.float32, device=device)
        Y = group_norm(X)
        Y_ref = group_norm_ref(
            X, group_norm.weight.data, group_norm.bias.data, groups,
            channels, group_norm.eps)
        self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)
        if self.device_type == 'cuda':
            # Same module on CPU must agree with the CUDA result.
            group_norm.cpu()
            Y_cpu = group_norm(X.cpu())
            self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)
    @onlyNativeDeviceTypes
    @dtypes(torch.float64, torch.complex128)
    def test_pad(self, device, dtype):
        """F.pad error conditions and copy semantics: invalid circular padding,
        oversized reflection padding, and that pad never aliases its input."""
        # Assert assertion errors are raised for invalid circular padding values
        inputs = torch.randn(1, 1, 4, device=device, dtype=dtype, requires_grad=True)
        # Should raise error when trying to wrap around more than once
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (5, 4), mode='circular'))
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (3, 6), mode='circular'))
        # Should raise error when negative padding results in negative output shape
        self.assertRaises(AssertionError, lambda: F.pad(inputs, (-3, -2), mode='circular'))
        # assert that reflection padding errors when pad >= input size
        expected_err_msg = r"Padding size should be less than the corresponding input dimension"
        inputs = torch.randn(1, 1, 2, 3, device=device, dtype=dtype)
        self.assertRaisesRegex(RuntimeError, expected_err_msg,
                               lambda: F.pad(inputs, (1, 1, 3, 0), mode='reflect'))
        inputs = torch.randn(1, 1, 2, device=device, dtype=dtype)
        self.assertRaisesRegex(RuntimeError, expected_err_msg,
                               lambda: F.pad(inputs, (2, 1), mode='reflect'))
        inputs = torch.rand(1, 3, 4, 4, device=device, dtype=dtype)
        # assert that pad doesn't return a view into the input tensor
        for mode in 'constant', 'reflect', 'replicate', 'circular':
            out = F.pad(inputs, (0, 0, 0, 0), mode=mode)
            out.fill_(4)
            self.assertTrue(torch.all(torch.abs(inputs) < 2))
            # negative padding (shrinking) must also copy
            out = F.pad(inputs, (0, 0, -1, -1), mode=mode)
            out.fill_(4)
            self.assertTrue(torch.all(torch.abs(inputs) < 2))
    @onlyNativeDeviceTypes
    @dtypes(torch.float64, torch.complex128)
    def test_ReplicationPad_empty(self, device, dtype):
        """ReplicationPad1d/2d/3d accept zero-batch inputs but must reject
        inputs with a zero-sized non-batch dimension."""
        for mod, inp in [
                (torch.nn.ReplicationPad1d(3), torch.randn(0, 3, 10, device=device, dtype=dtype)),
                (torch.nn.ReplicationPad2d(3), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
                (torch.nn.ReplicationPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:
            self._test_module_empty_input(mod, inp, check_size=False)
        # Zero channels (non-batch dim) is an error for each variant.
        with self.assertRaisesRegex(RuntimeError, 'Expected 2D or 3D'):
            mod = torch.nn.ReplicationPad1d(2)
            inp = torch.randn(3, 0, 10, device=device, dtype=dtype)
            mod(inp)
        with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
            mod = torch.nn.ReplicationPad2d((2, 2, 2, 2))
            inp = torch.randn(43, 0, 10, 10, device=device, dtype=dtype)
            mod(inp)
        with self.assertRaisesRegex(RuntimeError, 'Expected 4D or 5D'):
            mod = torch.nn.ReplicationPad3d((2, 2, 2, 2, 2, 2))
            inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)
            mod(inp)
    def test_ReplicationPad1d_large(self, device):
        """ReplicationPad1d on shapes with a >2**16 dimension: padded regions
        replicate the edge values, and gradients of edge elements accumulate
        the padded-region gradient."""
        shapes = ([2, 65736, 4], [65736, 2, 4])
        pl, pr = 3, 4
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            model = torch.nn.ReplicationPad1d((pl, pr))
            # forward center, left, right
            out = model(x)
            self.assertEqual(out[:, :, pl : -pr], x)
            left_padding = out[:, :, : pl]
            self.assertEqual(left_padding, x[:, :, :1].expand_as(left_padding))
            right_padding = out[:, :, -pr :]
            self.assertEqual(right_padding, x[:, :, -1:].expand_as(right_padding))
            # backward center, left, right
            g = torch.randn_like(out)
            out.backward(g)
            self.assertEqual(x.grad[:, :, 1 : -1], g[:, :, pl + 1 : -pr - 1])
            # Edge elements collect the grads of all their replicas.
            self.assertEqual(x.grad[:, :, 0], g[:, :, : pl + 1].sum(-1))
            self.assertEqual(x.grad[:, :, -1], g[:, :, -pr - 1:].sum(-1))
    def test_ReplicationPad2d_large(self, device):
        """ReplicationPad2d on shapes with a >2**16 dimension: all edge and
        corner regions replicate the boundary values, and edge/corner gradients
        accumulate the grads of their replicas."""
        shapes = ([2, 65736, 4, 4], [65736, 2, 4, 4])
        pl, pr, pt, pb = 3, 4, 5, 6
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            model = torch.nn.ReplicationPad2d((pl, pr, pt, pb))
            # forward center, edges
            out = model(x)
            self.assertEqual(out[:, :, pt : -pb, pl : -pr], x)
            left_padding = out[:, :, pt : -pb, : pl]
            self.assertEqual(left_padding, x[:, :, :, :1].expand_as(left_padding))
            right_padding = out[:, :, pt : -pb, -pr :]
            self.assertEqual(right_padding, x[:, :, :, -1:].expand_as(right_padding))
            top_padding = out[:, :, : pt, pl : -pr]
            self.assertEqual(top_padding, x[:, :, :1, :].expand_as(top_padding))
            bottom_padding = out[:, :, -pb : , pl : -pr]
            self.assertEqual(bottom_padding, x[:, :, -1:, :].expand_as(bottom_padding))
            # forward corners (each replicates a single corner element)
            tl_padding = out[:, :, : pt + 1, : pl + 1]
            self.assertEqual(tl_padding, x[:, :, :1, :1].expand_as(tl_padding))
            tr_padding = out[:, :, : pt + 1, -pr - 1:]
            self.assertEqual(tr_padding, x[:, :, :1, -1:].expand_as(tr_padding))
            bl_padding = out[:, :, -pb - 1:, : pl + 1]
            self.assertEqual(bl_padding, x[:, :, -1:, :1].expand_as(bl_padding))
            br_padding = out[:, :, -pb - 1:, -pr - 1:]
            self.assertEqual(br_padding, x[:, :, -1:, -1:].expand_as(br_padding))
            # backward center, edges, corners
            g = torch.randn_like(out)
            out.backward(g)
            self.assertEqual(x.grad[:, :, 1:-1, 1:-1], g[:, :, pt + 1 : -pb - 1, pl + 1 : -pr - 1])
            self.assertEqual(x.grad[:, :, 1:-1, 0], g[:, :, pt + 1 : -pb - 1, : pl + 1].sum(-1))
            self.assertEqual(x.grad[:, :, 1:-1, -1], g[:, :, pt + 1 : -pb - 1, -pr - 1 :].sum(-1))
            self.assertEqual(x.grad[:, :, 0, 1:-1], g[:, :, : pt + 1, pl + 1 : -pr - 1].sum(-2))
            self.assertEqual(x.grad[:, :, -1, 1:-1], g[:, :, -pb - 1 :, pl + 1 : -pr - 1].sum(-2))
            self.assertEqual(x.grad[:, :, 0, 0], g[:, :, : pt + 1, : pl + 1].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, 0, -1], g[:, :, : pt + 1, -pr - 1 :].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, -1, 0], g[:, :, -pb - 1 :, : pl + 1].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, -1, -1], g[:, :, -pb - 1 :, -pr - 1 :].sum((-2, -1)))
@largeTensorTest("6GB")
def test_ReplicationPad3d_large(self, device):
shapes = ([1, 65736, 2, 2, 2], [65736, 1, 2, 2, 2])
pl, pr, pt, pbt, pf, pbk = 3, 4, 5, 6, 7, 8
for shape in shapes:
x = torch.randn(shape, device=device, requires_grad=True)
model = torch.nn.ReplicationPad3d((pl, pr, pt, pbt, pf, pbk))
out = model(x)
self.assertEqual(out[:, :, pf : -pbk, pt : -pbt, pl : -pr], x)
g = torch.randn_like(out)
out.backward(g)
self.assertEqual(x.grad[:, :, 1:-1, 1:-1, 1:-1], g[:, :, pf + 1 : -pbk - 1, pt + 1 : -pbt - 1, pl + 1 : -pr - 1])
@onlyNativeDeviceTypes
def test_Bilinear_empty(self, device):
mod = torch.nn.Bilinear(20, 30, 40).to(device)
inp1 = torch.randn(0, 10, 20, requires_grad=True, device=device)
inp2 = torch.randn(0, 10, 30, requires_grad=True, device=device)
output = mod(inp1, inp2)
output.sum().backward()
self.assertEqual(inp1, torch.zeros_like(inp1))
self.assertEqual(inp2, torch.zeros_like(inp2))
self.assertEqual(inp1.grad, torch.zeros_like(inp1))
self.assertEqual(inp2.grad, torch.zeros_like(inp2))
@expectedFailureMeta
@onlyNativeDeviceTypes
def test_TransformerEncoderLayer_empty(self, device):
for batch_first, input_shape in [(True, (0, 10, 512)),
(False, (10, 0, 512))]:
input = torch.rand(*input_shape, device=device)
encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
self._test_module_empty_input(encoder_layer, input, check_size=False)
@expectedFailureMeta
@onlyNativeDeviceTypes
def test_TransformerEncoder_empty(self, device):
for batch_first, input_shape in [(True, (0, 10, 512)),
(False, (10, 0, 512))]:
input = torch.rand(*input_shape, device=device)
encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6).to(device)
self._test_module_empty_input(transformer_encoder, input, check_size=False)
@expectedFailureMeta
@onlyNativeDeviceTypes
def test_TransformerDecoderLayer_empty(self, device):
for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
(False, (10, 0, 512), (20, 0, 512))]:
memory = torch.rand(*memory_shape, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
self._test_module_empty_inputs(decoder_layer, [tgt, memory])
@expectedFailureMeta
@onlyNativeDeviceTypes
def test_TransformerDecoder_empty(self, device):
for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
(False, (10, 0, 512), (20, 0, 512))]:
memory = torch.rand(*memory_shape, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6).to(device)
self._test_module_empty_inputs(transformer_decoder, [tgt, memory])
    @expectedFailureMeta
    @onlyNativeDeviceTypes
    def test_Transformer_empty(self, device):
        """Full nn.Transformer forward/backward with a zero-sized batch dim."""
        # NOTE(review): `batch_first` is unpacked but never passed to
        # nn.Transformer; the shapes below are seq-first with batch size 0,
        # matching nn.Transformer's default layout. Confirm whether a
        # batch_first case was intended here, as in the encoder/decoder tests.
        for batch_first, src_shape, tgt_shape in [(True, (10, 0, 512), (20, 0, 512))]:
            transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12).to(device)
            src = torch.rand(*src_shape, requires_grad=True, device=device)
            tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
            self._test_module_empty_inputs(transformer_model, [src, tgt])
    @onlyNativeDeviceTypes
    @dtypes(torch.float32, torch.complex64)
    def test_ReflectionPad_empty(self, device, dtype):
        """ReflectionPad{1,2,3}d accept a zero-sized batch; a zero-sized
        non-batch dimension must raise with a rank-specific message."""
        for mod, inp in [
                (torch.nn.ReflectionPad1d(2), torch.randn(0, 3, 10, device=device, dtype=dtype)),
                (torch.nn.ReflectionPad2d(2), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
                (torch.nn.ReflectionPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:
            self._test_module_empty_input(mod, inp, check_size=False)

        # Zero-sized channel dimension is rejected for each rank.
        with self.assertRaisesRegex(RuntimeError, '2D or 3D'):
            mod = torch.nn.ReflectionPad1d(2)
            inp = torch.randn(3, 0, 10, device=device, dtype=dtype)
            mod(inp)

        with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
            mod = torch.nn.ReflectionPad2d(2)
            inp = torch.randn(3, 0, 10, 10, device=device, dtype=dtype)
            mod(inp)

        with self.assertRaisesRegex(RuntimeError, '4D or 5D'):
            mod = torch.nn.ReflectionPad3d(3)
            inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)
            mod(inp)
@onlyCUDA
def test_ReflectionPad2d_large(self, device):
shapes = ([2, 65736, 6, 6], [65736, 2, 6, 6])
pad = (1, 2, 3, 4)
for shape in shapes:
x = torch.randn(shape, device=device, requires_grad=True)
ref_x = x.detach().cpu().requires_grad_()
out = F.pad(x, pad, mode='reflect')
ref_out = F.pad(ref_x, pad, mode='reflect')
self.assertEqual(out, ref_out)
g = torch.randn_like(out)
ref_g = g.cpu()
out.backward(g)
ref_out.backward(ref_g)
self.assertEqual(x.grad, ref_x.grad)
@onlyNativeDeviceTypes
def test_LocalResponseNorm_empty(self, device):
mod = torch.nn.LocalResponseNorm(2).to(device)
inp = torch.ones(0, 5, 24, 24, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
    @onlyCUDA
    def test_ReflectionPad3d_large(self, device):
        """ReflectionPad3d on large CUDA tensors (channel dim 1000 in either
        position), checked against the CPU implementation fwd and bwd."""
        shapes = ([2, 1000, 7, 7, 7], [1000, 2, 7, 7, 7])
        pad = (1, 2, 3, 4, 5, 6)
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            # CPU copy of the same data as the reference implementation.
            ref_x = x.detach().cpu().requires_grad_()
            out = F.pad(x, pad, mode='reflect')
            ref_out = F.pad(ref_x, pad, mode='reflect')
            self.assertEqual(out, ref_out)
            # Same upstream gradient on both devices; grads must agree.
            g = torch.randn_like(out)
            ref_g = g.cpu()
            out.backward(g)
            ref_out.backward(ref_g)
            self.assertEqual(x.grad, ref_x.grad)
    @onlyNativeDeviceTypes
    @dtypes(torch.float, torch.double)
    def test_MarginLoss_empty(self, device, dtype):
        """Multi(-label) margin losses on zero-batch inputs; shape-mismatched
        input/target pairs must raise."""
        for mod, x, y in [
                (torch.nn.MultiMarginLoss().to(device),
                 torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
                 torch.ones(0, device=device).type(torch.long)),
                (torch.nn.MultiLabelMarginLoss().to(device),
                 torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
                 torch.ones(0, 10, device=device).type(torch.long))]:
            out = mod(x, y)
            out.sum().backward()
            # Empty batch: input untouched, gradient is an all-zero tensor.
            self.assertEqual(x, torch.zeros_like(x))
            self.assertEqual(x.grad, torch.zeros_like(x))
            # Input/target rank or size mismatches are rejected.
            with self.assertRaisesRegex(RuntimeError, 'Expected'):
                x = torch.randn(0, requires_grad=True, device=device, dtype=dtype)
                y = torch.ones(10, device=device).type(torch.long)
                mod(x, y)
            with self.assertRaisesRegex(RuntimeError, 'Expected'):
                x = torch.randn(10, 0, requires_grad=True, device=device, dtype=dtype)
                y = torch.ones(10, 0, device=device).type(torch.long)
                mod(x, y)
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_zero_batch(self, dtype, device):
inp = torch.ones(0, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool1d(5).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool2d((5, 5)).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool3d((5, 5, 5)).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
@onlyNativeDeviceTypes
def test_FractionalMaxPool2d_zero_batch(self, device):
mod = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
inp = torch.ones(0, 16, 50, 32, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_zero_batch(self, device):
mod = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5)).to(device)
inp = torch.ones(0, 16, 50, 32, 32, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, 32, device=device)
mod(inp)
@onlyNativeDeviceTypes
def test_Unfold_empty(self, device):
inp = torch.randn(0, 3, 3, 4, device=device)
unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
self._test_module_empty_input(unfold, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
inp = torch.randn(3, 0, 3, 4, device=device)
unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
unfold(inp)
    @onlyNativeDeviceTypes
    def test_MaxPool_zero_batch_dim(self, device):
        """MaxPool{1,2,3}d accept a zero-sized batch; zero channels raise.

        NOTE(review): only the 2d and 3d modules are checked for the
        zero-channel error; the 1d module has no such check here.
        """
        inp = torch.randn(0, 16, 50, device=device)
        mod = torch.nn.MaxPool1d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        inp = torch.randn(0, 16, 50, 32, device=device)
        mod = torch.nn.MaxPool2d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, 32, device=device)
            mod(inp)
        inp = torch.ones(0, 16, 50, 44, 31, device=device)
        mod = torch.nn.MaxPool3d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.ones(1, 0, 50, 44, 31, device=device)
            mod(inp)
@onlyNativeDeviceTypes
def test_MaxUnpool_zero_batch_dim(self, device):
pool = torch.nn.MaxPool1d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool1d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
output.requires_grad_(True)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
pool = torch.nn.MaxPool2d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool2d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
pool = torch.nn.MaxPool3d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool3d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
output.requires_grad_(True)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
    @onlyNativeDeviceTypes
    def test_AdaptiveMaxPool_zero_batch_dim(self, device):
        """AdaptiveMaxPool{1,2,3}d accept a zero batch; zero channels raise."""
        inp = torch.randn(0, 16, 50, device=device)
        mod = torch.nn.AdaptiveMaxPool1d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, device=device)
            mod(inp)
        inp = torch.randn(0, 16, 50, 32, device=device)
        mod = torch.nn.AdaptiveMaxPool2d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, 32, device=device)
            mod(inp)
        inp = torch.ones(0, 16, 50, 44, 31, device=device)
        mod = torch.nn.AdaptiveMaxPool3d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.ones(1, 0, 50, 44, 31, device=device)
            mod(inp)
    @onlyCUDA
    @dtypes(torch.float, torch.double)
    @tf32_on_and_off(0.005)
    def test_rnn_fused(self, device, dtype):
        """Compare the non-cuDNN fused GPU GRU/LSTM kernels against the CPU
        implementation: forward outputs, hidden states, and all gradients
        must match (weight grads within 5e-5 abs tolerance)."""
        def copy_rnn(rnn1, rnn2):
            # Copy weights tensor-by-tensor so both RNNs start identical.
            for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
                for x, y in zip(x_layer, y_layer):
                    x.data.copy_(y.data)
        def check_rnn_grads(rnn1, rnn2):
            # Allow small absolute divergence between the two kernels.
            for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
                for x, y in zip(x_layer, y_layer):
                    self.assertEqual(x.grad, y.grad, atol=5e-5, rtol=0)
        input_size = 10
        hidden_size = 6
        num_layers = 2
        seq_length = 7
        batch = 6
        input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
        grad_output = torch.randn(seq_length, batch, hidden_size, dtype=dtype)
        hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
        grad_hy = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
        # Disable cuDNN so the fused (native CUDA) kernels are exercised.
        with torch.backends.cudnn.flags(enabled=False, allow_tf32=None):
            for module in (nn.GRU, nn.LSTM):
                for bias in (True, False):
                    rnn = module(input_size, hidden_size, num_layers, bias=bias).to(dtype)
                    rnn_device = module(input_size, hidden_size, num_layers, bias=bias).to(device, dtype)
                    copy_rnn(rnn, rnn_device)
                    is_lstm = isinstance(rnn, nn.LSTM)
                    if is_lstm:
                        # LSTM state is an (h, c) pair; offset c by +1 so the
                        # two halves get distinguishable values/gradients.
                        hx = (hx_val.clone().requires_grad_(True),
                              hx_val.clone().add(1).requires_grad_(True))
                        hx_device = (hx_val.clone().to(device).requires_grad_(True),
                                     hx_val.clone().to(device).add(1).requires_grad_(True))
                    else:
                        hx = hx_val.clone().requires_grad_(True)
                        hx_device = hx_val.clone().to(device).requires_grad_(True)
                    inp = input_val.clone().requires_grad_(True)
                    inp_cu = input_val.clone().to(device).requires_grad_(True)
                    output1, hy1 = rnn(inp, hx)
                    output2, hy2 = rnn_device(inp_cu, hx_device)
                    if is_lstm:
                        torch.autograd.backward(
                            [output1, hy1[0], hy1[1]], [grad_output, grad_hy, grad_hy + 1]
                        )
                        torch.autograd.backward(
                            [output2, hy2[0], hy2[1]],
                            [grad_output.to(device), grad_hy.to(device), (grad_hy + 1).to(device)]
                        )
                    else:
                        torch.autograd.backward([output1, hy1], [grad_output, grad_hy])
                        torch.autograd.backward([output2, hy2], [grad_output.to(device), grad_hy.to(device)])
                    self.assertEqual(output1, output2)
                    self.assertEqual(hy1, hy2)
                    check_rnn_grads(rnn, rnn_device)
                    self.assertEqual(inp.grad, inp_cu.grad)
                    if is_lstm:
                        self.assertEqual(hx[0].grad, hx_device[0].grad)
                        self.assertEqual(hx[1].grad, hx_device[1].grad)
                    else:
                        self.assertEqual(hx.grad, hx_device.grad)
    def test_BatchNorm_empty(self, device):
        """BatchNorm2d on a zero-batch input: running stats keep their
        defaults and parameter gradients are all zero."""
        mod = torch.nn.BatchNorm2d(3).to(device)
        inp = torch.randn(0, 3, 2, 2, device=device)
        self._test_module_empty_input(mod, inp)
        # Also exercise the non-cuDNN path on CUDA builds.
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_module_empty_input(mod, inp)
        self.assertEqual(mod.running_mean, torch.tensor([0., 0, 0], device=device))
        self.assertEqual(mod.running_var, torch.tensor([1., 1, 1], device=device))
        self.assertEqual(mod.weight.grad, torch.tensor([0., 0, 0], device=device))
        self.assertEqual(mod.bias.grad, torch.tensor([0., 0, 0], device=device))
    def test_conv_empty_channel(self, device):
        """Conv{1,2,3}d with in_channels=0: a zero-channel input works, but a
        nonzero-channel input with an empty spatial dim must raise."""
        in_channels = 0
        mod = torch.nn.Conv1d(in_channels, 8, 2, stride=2).to(device)
        inp = torch.randn(2, 0, 15, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 0, device=device)
            mod(inp)
        mod = torch.nn.Conv2d(in_channels, 33, 3, stride=2).to(device)
        inp = torch.randn(2, 0, 50, 100, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 40, 0, device=device)
            mod(inp)
        mod = torch.nn.Conv3d(in_channels, 33, 3, stride=2).to(device)
        inp = torch.randn(2, 0, 50, 20, 40, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 50, 0, 40, device=device)
            mod(inp)
def test_group_conv_empty(self, device):
mod = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
def test_group_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
def test_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
    @onlyNativeDeviceTypes
    def test_AvgPool2d_empty(self, device):
        """AvgPool2d on zero-batch input (contiguous and channels_last);
        a zero-sized channel dim must raise."""
        avgpool = torch.nn.AvgPool2d(3, stride=2).to(device)
        inp = torch.randn(0, 16, 20, 32, device=device)
        self._test_module_empty_input(avgpool, inp, check_size=False)
        # Same check in channels_last memory format.
        clast_inp = torch.randn(0, 16, 20, 32, device=device).contiguous(memory_format=torch.channels_last)
        self._test_module_empty_input(avgpool, clast_inp, check_size=False)
        with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
            inp = torch.randn(16, 0, 20, 32, device=device)
            avgpool(inp)
@onlyCUDA
@largeTensorTest('16GB')
def test_prelu_backward_32bit_indexing(self, device):
m = torch.nn.PReLU().cuda().half()
input_ = torch.ones((1024, 1024, 1024, 2), dtype=torch.half, device=device)
output = m(input_)
output.backward(input_)
def test_linear_empty(self, device):
mod = torch.nn.Linear(7, 7).to(device)
inp = torch.randn(0, 7, device=device)
self._test_module_empty_input(mod, inp)
    def test_one_hot(self, device):
        """F.one_hot: inferred and explicit num_classes, multi-dim and scalar
        inputs, empty inputs, and error cases."""
        # NOTE(review): the value-error checks are skipped on CUDA —
        # presumably because validation there is not synchronous; confirm.
        if self.device_type != 'cuda':
            with self.assertRaises(RuntimeError):
                torch.nn.functional.one_hot(torch.tensor([3, 4, -1, 0], device=device), -1)
            with self.assertRaises(RuntimeError):
                torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 3)
        # Default: num_classes is inferred as max(input) + 1 (= 5 here).
        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device))
        expected = torch.tensor([[0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)
        # num_classes=-1 behaves like the default (inferred).
        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -1)
        expected = torch.tensor([[0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)
        # An explicit num_classes larger than max+1 widens the output.
        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 6)
        expected = torch.tensor([[0, 0, 0, 1, 0, 0],
                                 [0, 0, 0, 0, 1, 0],
                                 [0, 1, 0, 0, 0, 0],
                                 [1, 0, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)
        # Multi-dim input: a new trailing one-hot dimension is appended.
        t = torch.nn.functional.one_hot(torch.tensor([[3, 4], [1, 0]], device=device))
        expected = torch.tensor([[[0, 0, 0, 1, 0],
                                  [0, 0, 0, 0, 1]],
                                 [[0, 1, 0, 0, 0],
                                  [1, 0, 0, 0, 0]]], device=device)
        self.assertEqual(t, expected)
        # 0-dim (scalar) input yields a 1-D output.
        t = torch.nn.functional.one_hot(torch.tensor(4, device=device))
        expected = torch.tensor([0, 0, 0, 0, 1], device=device)
        self.assertEqual(t, expected)
        # Empty input works with an explicit num_classes...
        t = torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device), 100)
        expected = torch.empty([4, 0, 100], dtype=torch.long)
        self.assertEqual(t, expected)
        # ...but num_classes cannot be inferred from an empty input,
        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device))
        # and num_classes below -1 is invalid.
        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -2)
    def test_nn_scalars(self, device):
        """Elementwise activation modules: a 0-dim input yields a 0-dim
        output, and the input gradient has the input's shape."""
        def verify_scalars(input, output):
            if input.dim() == 0:
                self.assertEqual((), output.shape)
            else:
                self.assertNotEqual((), output.shape)
            output.sum().backward()
            self.assertEqual(input.shape, input.grad.shape)
        # Cover both a regular 2-D shape and a 0-dim (scalar) shape.
        for input_shape in [(5, 6), ()]:
            for module in [torch.nn.ELU, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.nn.LogSigmoid,
                           torch.nn.RReLU, torch.nn.Softshrink, torch.nn.Softplus, torch.nn.Sigmoid,
                           torch.nn.Tanh]:
                input = torch.randn(input_shape, device=device, requires_grad=True)
                m = module()
                output = m(input)
                verify_scalars(input, output)
def test_nn_scalars_reductions(self, device):
def verify_reduction_scalars(input, reduction, output):
if reduction != 'none' or input.dim() == 0:
self.assertEqual((), output.shape)
else:
self.assertNotEqual((), output.shape)
output.sum().backward()
self.assertEqual(input.shape, input.grad.shape)
for input_shape in [(5, 6), ()]:
for reduction in ['none', 'mean', 'sum']:
for module in [torch.nn.BCELoss, torch.nn.L1Loss, torch.nn.MSELoss,
torch.nn.SmoothL1Loss, torch.nn.SoftMarginLoss]:
input = torch.randn(input_shape, device=device, requires_grad=True)
target = torch.empty(input_shape, device=device).random_(2)
sigmoid = nn.Sigmoid()
input = torch.randn(input_shape, device=device, requires_grad=True)
m = module(reduction=reduction)
output = m(sigmoid(input), target)
verify_reduction_scalars(input, reduction, output)
    @onlyNativeDeviceTypes
    def test_invalid_reduction_strings(self, device):
        """Every loss functional must raise ValueError for an unrecognized
        `reduction` string, and must accept 'none' without error."""
        input = torch.randn(3, 5, requires_grad=True, device=device)
        cinput = torch.randn(3, 5, requires_grad=True, device=device, dtype=torch.cfloat)
        target = torch.tensor([1, 0, 4], device=device)
        var = torch.ones(size=input.size(), requires_grad=True, device=device)
        for reduction in ['none', 'invalid']:
            def v(fn):
                # 'invalid' must raise; 'none' must run cleanly.
                if reduction == 'invalid':
                    self.assertRaises(ValueError, lambda: fn())
                else:
                    fn()
            v(lambda: F.nll_loss(input, target, reduction=reduction))
            v(lambda: F.cross_entropy(input, target, reduction=reduction))
            v(lambda: F.multi_margin_loss(input, target, reduction=reduction))
            v(lambda: F.kl_div(input, input, reduction=reduction))
            v(lambda: F.huber_loss(input, input, reduction=reduction))
            v(lambda: F.smooth_l1_loss(input, input, reduction=reduction))
            v(lambda: F.l1_loss(input, input, reduction=reduction))
            v(lambda: F.l1_loss(cinput, cinput, reduction=reduction))
            v(lambda: F.mse_loss(input, input, reduction=reduction))
            v(lambda: F.hinge_embedding_loss(input, input, reduction=reduction))
            v(lambda: F.poisson_nll_loss(input, input, reduction=reduction))
            v(lambda: F.gaussian_nll_loss(input, input, var, reduction=reduction))
            v(lambda: F.binary_cross_entropy(torch.sigmoid(input), input, reduction=reduction))
            v(lambda: F.binary_cross_entropy_with_logits(input, input, reduction=reduction))
            zeros = torch.zeros_like(input).to(torch.int64)
            v(lambda: F.multilabel_soft_margin_loss(input, zeros, reduction=reduction))
            v(lambda: F.multilabel_margin_loss(input, zeros, reduction=reduction))
            v(lambda: F.triplet_margin_loss(input, input, input, reduction=reduction))
            v(lambda: F.triplet_margin_with_distance_loss(input, input, input, reduction=reduction))
            v(lambda: F.margin_ranking_loss(input, input, input.sign(), reduction=reduction))
            v(lambda: F.cosine_embedding_loss(input, input, input[:, 0].sign(), reduction=reduction))
            # CTC needs its own log-prob/target/length fixtures.
            log_probs = torch.randn(50, 16, 20, requires_grad=True, device=device).log_softmax(2)
            targets = torch.randint(1, 20, (16, 30), dtype=torch.long, device=device)
            input_lengths = torch.full((16,), 50, dtype=torch.long, device=device)
            target_lengths = torch.randint(10, 30, (16,), dtype=torch.long, device=device)
            v(lambda: F.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction=reduction))
            v(lambda: F.soft_margin_loss(input, input.sign().detach(), reduction=reduction))
    @onlyNativeDeviceTypes
    def test_smooth_l1_loss_vs_huber_loss(self, device):
        """SmoothL1Loss(beta) * beta must equal HuberLoss(delta=beta); the two
        coincide exactly when beta == 1."""
        def _make_test_tensor(shape, contiguous=True):
            if contiguous:
                test_tensor = torch.randn(shape, device=device)
            else:
                # Allocate double the last dim and stride through it to get a
                # non-contiguous view with the requested shape.
                doubled_shape = list(shape)
                doubled_shape[-1] *= 2
                test_tensor = torch.randn(doubled_shape, device=device)
                test_tensor = test_tensor[..., ::2]
            return test_tensor
        def _test_smooth_l1_loss_vs_huber_loss_helper(input, target, beta, require_equal):
            for reduction in ['mean', 'sum', 'none']:
                smooth_l1 = torch.nn.SmoothL1Loss(beta=beta, reduction=reduction)
                huber = torch.nn.HuberLoss(delta=beta, reduction=reduction)
                smooth_l1_loss = smooth_l1(input, target)
                huber_loss = huber(input, target)
                if require_equal:
                    self.assertEqual(smooth_l1_loss, huber_loss)
                else:
                    # Huber is a scaled-by-beta version of Smooth L1.
                    self.assertEqual(smooth_l1_loss * beta, huber_loss)
        def _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta, require_equal):
            # Small, large, and non-contiguous input shapes.
            shape = (2, 2)
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                      target=_make_test_tensor(shape),
                                                      beta=beta,
                                                      require_equal=require_equal)
            shape = (64, 64)
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                      target=_make_test_tensor(shape),
                                                      beta=beta,
                                                      require_equal=require_equal)
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape, contiguous=False),
                                                      target=_make_test_tensor(shape, contiguous=False),
                                                      beta=beta,
                                                      require_equal=require_equal)
        def test_equal_when_beta_is_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.0, require_equal=True)
        def test_unequal_when_beta_is_less_than_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=0.5, require_equal=False)
        def test_unequal_when_beta_is_greater_than_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.5, require_equal=False)
        test_equal_when_beta_is_one()
        test_unequal_when_beta_is_less_than_one()
        test_unequal_when_beta_is_greater_than_one()
    # We don't want to make NaN propagation a hard requirement on all ops; for
    # these easy ones, we should make them do so.
    def test_nonlinearity_propagate_nan(self, device):
        """Elementwise nonlinearities must map a NaN input to a NaN output."""
        def test(nonlinearity, *args, **kwargs):
            x = torch.tensor([nan], device=device)
            fn = getattr(F, nonlinearity)
            try:
                self.assertTrue(math.isnan(fn(x, *args, **kwargs).item()))
            except Exception as e:
                # Tolerate a missing implementation for this device/dtype;
                # anything else is a real failure.
                if 'not implemented' not in str(e):
                    raise
        test('relu')
        test('relu', inplace=True)
        test('relu6')
        test('elu')
        test('selu')
        test('celu')
        test('rrelu')
        test('rrelu', inplace=True)
        test('hardtanh')
        test('tanh')
        test('sigmoid')
        test('logsigmoid')
        test('hardshrink')
        test('tanhshrink')
        test('softsign')
        test('softmin', 0)
        test('softmax', 0)
        test('log_softmax', 0)
        test('leaky_relu', 0.2)
        test('threshold', 3, 2)
        test('threshold', 3, 2, inplace=True)
    def test_pooling_shape(self, device):
        """Output shapes of max/avg pooling in 1D/2D/3D, incl. ceil_mode."""
        # Checks output shape against expected for 1D, 2D and 3D
        def check(expected_out_shape, sizes, *args, **kwargs):
            for kernel in ['max', 'avg']:
                for i in [1, 2, 3]:
                    if hasattr(torch.nn.functional, f'{kernel}_pool{i}d'):
                        op = getattr(torch.nn.functional, f'{kernel}_pool{i}d')
                        # Truncate the 5-D size spec to the rank each op needs.
                        t = torch.randn(sizes[:i + 2], device=device)
                        self.assertEqual(op(t, *args, **kwargs).shape, expected_out_shape[:i + 2])
        check((1, 1, 3, 3, 4), (1, 1, 5, 6, 7), kernel_size=1, stride=2, padding=0, ceil_mode=True)
        check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=False)
        check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=True)
        # Test case from issue https://github.com/pytorch/pytorch/issues/45357
        x = torch.randn(1, 1, 6, 7, device=device)
        y = torch.nn.functional.max_pool2d(x, 1, stride=(2, 2), padding=0, ceil_mode=True)
        self.assertEqual(y.size(), (1, 1, 3, 4))
    @onlyNativeDeviceTypes  # TODO: fix on XLA
    def test_adaptive_avg_pool2d_output_size_one(self, device):
        """AdaptiveAvgPool2d to (1, 1) equals the mean over H and W, and the
        output stays contiguous in the input's memory format."""
        def helper(size, memory_format):
            x = torch.randint(1, 10, size, dtype=torch.float, device=device, requires_grad=True)
            if memory_format == 'non_contiguous':
                # Strided view: non-contiguous input without a named format.
                x = x[::2, ::2, ::2, ::2]
            else:
                x = x.to(memory_format=memory_format)
            net = torch.nn.AdaptiveAvgPool2d((1, 1))
            out = net(x)
            ref_out = x.contiguous().mean((-1, -2)).view((x.size(0), x.size(1), 1, 1))
            out.sum().backward()    # make sure it doesn't crash
            self.assertEqual(out, ref_out)
            if memory_format == torch.channels_last:
                self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
                c = out.size(1)
                self.assertEqual(out.stride(), [c, 1, c, c])
            else:
                self.assertTrue(out.is_contiguous())
                c = out.size(1)
                self.assertEqual(out.stride(), [c, 1, 1, 1])
        for mf in (torch.contiguous_format, torch.channels_last, 'non_contiguous'):
            helper((2, 3, 6, 6), mf)
@onlyNativeDeviceTypes
def test_adaptive_avg_pool3d_output_size_one(self, device):
x = torch.randn((2, 3, 6, 6, 6) , dtype=torch.float, device=device, requires_grad=True)
net = torch.nn.AdaptiveAvgPool3d(1)
out = net(x)
ref_out = x.contiguous().mean((-1, -2, -3)).view(out.shape)
out.sum().backward()
self.assertEqual(out, ref_out)
self.assertTrue(out.is_contiguous())
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, 1, 1, 1])
@expectedFailureMeta # Runtime Error not raised for meta
@onlyNativeDeviceTypes
@dtypes(torch.uint8, torch.int8, torch.short, torch.int, torch.long)
def test_adaptive_pooling_no_suppot_input(self, device, dtype):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * numel
module = module_cls(output_size)
input = torch.randn((4,) * (numel + 1), device=device).to(dtype)
with self.assertRaisesRegex(RuntimeError, "not implemented"):
output = module(input)
    @onlyNativeDeviceTypes
    @dtypes(torch.float, torch.double)
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    def test_avg_pool2d_nhwc(self, device, dtype):
        """AvgPool2d on channels_last input must match the contiguous path in
        forward and backward, and keep channels_last on the output."""
        def helper(n, c, h, w, kernel_size, stride=None,
                   count_include_pad=True, divisor_override=None, padding=0):
            if stride is None:
                stride = kernel_size
            input = torch.randn(n, c, h, w, dtype=dtype, device=device)
            input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
            grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,
                               dtype=dtype, device=device)
            pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,
                                      divisor_override=divisor_override).to(device)
            # Reference run on a contiguous clone of the same data.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,
                                          divisor_override=divisor_override).to(device)
            out = pool(input)
            out.backward(grad)
            ref_out = ref_pool(ref_input)
            ref_out.backward(ref_grad)
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(input.grad, ref_input.grad)
        helper(4, 8, 8, 8, 3)
        helper(4, 8, 8, 8, 3, count_include_pad=False, padding=1)
        helper(4, 8, 8, 8, 3, count_include_pad=False, padding=2, stride=2)
        helper(4, 8, 8, 8, 3, divisor_override=42)
        helper(4, 8, 8, 8, 7)
        # ROCm 16GB MI25 hits OOM error. Clear caching allocator prior to running large subtest.
        if TEST_WITH_ROCM and 'cuda' in device:
            torch.cuda.empty_cache()
        helper(200, 512, 28, 28, 2)
        helper(4, 8, 7, 7, 3, stride=1)
        helper(4, 8, 7, 7, 3, padding=2, stride=1)
        helper(10, 512, 31, 31, 3, stride=2)
        helper(1, 129, 8, 8, 3, stride=2)
    @onlyCPU
    @dtypes(torch.float)
    def test_max_pool1d_errors(self, device, dtype):
        """max_pool1d rejects bad input ranks and invalid pooling arguments
        with the expected error messages."""
        def check(x, args, message):
            model = torch.nn.MaxPool1d(*args)
            with self.assertRaisesRegex(RuntimeError, r'max_pool1d\(\) ' + message):
                model(torch.tensor(x, device=device, dtype=dtype))
        # Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
        check(0, (1,), "Expected 2D or 3D input tensor, but got")
        check([], (1,), "Expected 2D or 3D input tensor, but got")
        check([[]], (1, 0), "stride must be greater than zero, but got 0")
        check([[]], (1, 1, -1), "padding must be non-negative, but got -1")
        check([[]], (1, 1, 2), "padding should be at most half of kernel size, but got padding=2 and kernel_size=1")
        check([[]], (1, 1, 0, 0), "dilation must be greater than zero, but got 0")
        check([[]], (5, 1, 0, 1), "Invalid computed output size: -4")
    @onlyCPU
    @dtypes(torch.float, torch.double)
    def test_max_pool1d_corner_cases(self, device, dtype):
        """max_pool1d edge cases: empty inner dims, single elements, -inf
        padding, and ceil_mode behavior."""
        def check(x, args, expected):
            model = torch.nn.MaxPool1d(*args)
            if isinstance(x, list):
                x = torch.tensor(x, device=device, dtype=dtype)
                expected = torch.tensor(expected, device=device, dtype=dtype)
            self.assertEqual(model(x), expected)
        # Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
        check([[]], (1, None, 0, 1, False, False), [[]])
        check([[[]]], (1, None, 0, 1, False, False), [[[]]])
        check([[[]]], (2, 1, 1, 2, False, True), [[[]]])
        check([[1]], (1, None, 0, 1, False, False), [[1]])
        # Padding positions are treated as -inf for max pooling.
        check([[1]], (2, None, 1, 2, False, False), [[float('-inf')]])
        check([[1], [1]], (2, None, 1, 2, False, False), [[float('-inf')], [float('-inf')]])
        check([[1, 2]], (2, 1, 1, 2, False, False), [[2, 1]])
        check([[1, 2]], (2, 2, 1, 2, False, True), [[2, 2]])
        empty_tensor = torch.empty((2, 0, 1), device=device, dtype=dtype)
        check(empty_tensor, (1, None, 0, 1, False, False), empty_tensor)
    @onlyCPU
    @dtypes(torch.float, torch.double)
    def test_max_pool1d(self, device, dtype):
        """Randomized max_pool1d configurations, validated against the
        return_indices variant of the same pooling parameters."""
        # FIXME For now compare against max_pool1d with indices
        def check(x, *args, **kwargs):
            model = torch.nn.MaxPool1d(*args, **kwargs)
            ref_model = torch.nn.MaxPool1d(*args, **kwargs, return_indices=True)
            self.assertEqual(model(x), ref_model(x)[0])
        sizes = [random.sample(range(8, 128), 3) for _ in range(3)]
        kernel_sizes = random.sample(range(1, 5), 3)
        strides = random.sample(range(1, 5), 3)
        dilations = random.sample(range(1, 5), 3)
        ceil_modes = [True, False]
        for size, kernel_size, stride, dilation, ceil_mode in \
                itertools.product(sizes, kernel_sizes, strides, dilations, ceil_modes):
            # Padding is capped at half the kernel size, per the op's rules.
            padding = random.sample(range(0, math.floor(kernel_size / 2) + 1), 1)
            check(torch.randn(size, device=device, dtype=dtype),
                  kernel_size, stride, padding, dilation, ceil_mode=ceil_mode)
        # Non-contiguous test
        tensor = torch.randn(5, 151, 33, device=device, dtype=dtype)[::2, ::3, ::2]
        check(tensor, 3, 2, 1, 2, ceil_mode=True)
        check(tensor.transpose(1, 2), 3, 2, 1, 2, ceil_mode=True)
@onlyCUDA
def test_max_pool2d(self, device):
def helper(n, c, h, w, ks):
x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
ref_x = x.detach().clone().cpu().requires_grad_()
pool = torch.nn.MaxPool2d(kernel_size=ks)
y = pool(x)
ref_y = pool(ref_x)
y.sum().backward()
ref_y.sum().backward()
self.assertEqual(y, ref_y)
self.assertEqual(x.grad, ref_x.grad)
helper(2, 8, 4, 4, ks=2)
helper(1, 100000, 32, 32, ks=4)
helper(1, 100000, 1, 4, ks=(1, 4)) # test for max_pool1d
    @onlyNativeDeviceTypes
    @dtypes(torch.float, torch.double)
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    def test_max_pool2d_nhwc(self, device, dtype):
        """MaxPool2d on channels_last (NHWC) input matches the contiguous reference and
        keeps output and indices in channels_last layout."""
        def helper(n, c, h, w, kernel_size, stride=None):
            if stride is None:
                stride = kernel_size
            input = torch.randn(n, c, h, w, dtype=dtype, device=device)
            input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
            # Upstream gradient sized to the pooled output: (dim - k) // stride + 1 per axis.
            grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,
                               dtype=dtype, device=device)
            pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)

            # Contiguous reference path with identical values.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)

            out, ind = pool(input)
            out.backward(grad)
            ref_out, ref_ind = ref_pool(ref_input)
            ref_out.backward(ref_grad)

            # NHWC path must preserve channels_last; reference stays contiguous.
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_ind.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(ind, ref_ind)
            self.assertEqual(input.grad, ref_input.grad)

        helper(4, 8, 8, 8, 7)
        helper(200, 512, 28, 28, 2)
        helper(4, 8, 7, 7, 3, stride=1)
        helper(10, 512, 31, 31, 3, stride=2)
        # 129 channels: odd, just above 128.
        helper(1, 129, 8, 8, 3, stride=2)
    @onlyCPU
    def test_max_pool2d_bfloat16(self, device):
        """bfloat16 MaxPool2d matches the float32 computation (after casting back)
        for both contiguous and channels_last inputs."""
        def helper(n, c, h, w, kernel_size, stride, memory_format):
            input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()
            input = input.to(memory_format=memory_format).requires_grad_()
            pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)

            # float32 reference over the same values.
            input2 = input.detach().clone().float().requires_grad_(True)

            out, ind = pool(input)
            out.sum().backward()
            out2, ind2 = pool(input2)
            out2.sum().backward()

            self.assertTrue(out.is_contiguous(memory_format=memory_format))
            # Output and input grad must stay in bfloat16.
            self.assertEqual(out.dtype, torch.bfloat16)
            self.assertEqual(input.grad.dtype, torch.bfloat16)
            self.assertEqual(out, out2.bfloat16())
            self.assertEqual(ind, ind2)
            self.assertEqual(input.grad, input2.grad.bfloat16())

        helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)
        helper(4, 65, 8, 8, 7, 1, torch.channels_last)
        helper(1, 19, 20, 10, 8, 2, torch.contiguous_format)
        helper(1, 19, 20, 10, 8, 2, torch.channels_last)
@onlyCUDA
def test_max_pool2d_indices(self, device):
def helper(n, c, h, w, ks):
if n is None:
x = torch.randn(c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
else:
x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
ref_x = x.detach().clone().cpu().requires_grad_()
pool = torch.nn.MaxPool2d(kernel_size=ks, return_indices=True)
y, idx = pool(x)
ref_y, ref_idx = pool(ref_x)
y.sum().backward()
ref_y.sum().backward()
self.assertEqual(y, ref_y)
self.assertEqual(idx, ref_idx) # assertEqual implicitly compares shape for tensors
self.assertEqual(x.grad, ref_x.grad)
helper(2, 8, 4, 4, ks=2)
helper(None, 3, 50, 50, ks=5)
    @onlyCPU
    def test_avg_pool2d_bfloat16(self, device):
        """bfloat16 AvgPool2d matches the float32 computation (after casting back)
        for both contiguous and channels_last inputs."""
        def helper(n, c, h, w, kernel_size, stride, memory_format):
            input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()
            input = input.to(memory_format=memory_format).requires_grad_()
            pool = torch.nn.AvgPool2d(kernel_size, stride).to(device)

            # float32 reference over the same values.
            input2 = input.detach().clone().float().requires_grad_(True)

            out = pool(input)
            out.sum().backward()
            out2 = pool(input2)
            out2.sum().backward()

            self.assertTrue(out.is_contiguous(memory_format=memory_format))
            # Output and input grad must stay in bfloat16.
            self.assertEqual(out.dtype, torch.bfloat16)
            self.assertEqual(input.grad.dtype, torch.bfloat16)
            self.assertEqual(out, out2.bfloat16())
            self.assertEqual(input.grad, input2.grad.bfloat16())

        helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)
        helper(4, 65, 8, 8, 7, 1, torch.channels_last)
        helper(1, 19, 20, 10, 8, 2, torch.contiguous_format)
        helper(1, 19, 20, 10, 8, 2, torch.channels_last)
    def test_upsamplingNearest1d(self, device):
        """1d "nearest" / "nearest-exact" upsampling: forward values on a constant
        input, gradcheck/gradgradcheck, and CPU/CUDA parity."""
        # Forward AD does not support XLA because XLA tensors don't have storage
        check_forward_ad = torch.device(device).type != 'xla'

        def helper(mode):
            m = nn.Upsample(size=4, mode=mode)
            in_t = torch.ones(1, 1, 2, device=device)
            in_uint8_t = torch.ones(1, 1, 2, dtype=torch.uint8, device=device)
            with warnings.catch_warnings(record=True) as w:
                out_t = m(in_t)
                out_uint8_t = m(in_uint8_t)
            # A constant input must upsample to the same constant (float and uint8).
            self.assertEqual(torch.ones(1, 1, 4, device=device), out_t.data)
            self.assertEqual(torch.ones(1, 1, 4, dtype=torch.uint8, device=device), out_uint8_t.data)

            input = torch.randn(1, 1, 2, requires_grad=True, device=device)
            gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

            # Larger input, downsampling path.
            input = torch.randn(1, 1, 20, requires_grad=True, device=device)
            gradcheck(lambda x: F.interpolate(x, 11, mode=mode), [input], check_forward_ad=check_forward_ad)
            # NOTE(review): this gradgradcheck uses size 4 while the gradcheck above
            # uses 11 — possibly a copy-paste leftover; confirm whether 11 was intended.
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

            if torch.device(device).type == 'cuda':
                # CUDA result must match the CPU result for down- and up-sampling.
                input_cuda = torch.randn(1, 1, 20, device=device)
                input_cpu = input_cuda.cpu()
                output_cuda = F.interpolate(input_cuda, 4, mode=mode)
                output_cpu = F.interpolate(input_cpu, 4, mode=mode)
                self.assertEqual(output_cuda.cpu(), output_cpu)

                output_cuda = F.interpolate(input_cuda, 24, mode=mode)
                output_cpu = F.interpolate(input_cpu, 24, mode=mode)
                self.assertEqual(output_cuda.cpu(), output_cpu)

        helper("nearest")
        helper("nearest-exact")
def test_upsamplingNearest1d_correctness(self, device):
def helper(isize, osize):
in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
out_t = F.interpolate(
in_t, size=(osize, ), recompute_scale_factor=False, mode="nearest"
)
# compute expected output as OpenCV
expected_out = torch.zeros(osize, dtype=torch.float).unsqueeze(0).unsqueeze(0)
scale = 1.0 * isize / osize
for o in range(osize):
i_f32 = o * scale
i = int(i_f32)
expected_out[0, 0, o] = in_t[0, 0, i]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(20, 11)
helper(10, 15)
    def test_upsamplingNearestExact1d_rescale(self, device):
        """nearest-exact with a scale factor just above 1.0 (or 2.0) must behave
        like exact 1x (or 2x) resampling."""
        # Checks https://github.com/pytorch/pytorch/issues/62237
        isize = 20
        in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        # for s in [1.00001, 0.99999]:  # 0.9999 case is broken
        # See issue: https://github.com/pytorch/pytorch/issues/62396
        for s in [1.00001, ]:
            out_t = F.interpolate(
                in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
            )
            # Near-identity scale: output must equal the input exactly.
            expected_out = in_t
            self.assertEqual(out_t, expected_out, msg=f"scale: {s}")

        # checks data duplication if output_size == 2 * input_size
        # for s in [2.00001, 1.99999]:  # 1.99999 case is broken
        # See issue: https://github.com/pytorch/pytorch/issues/62396
        for s in [2.00001, ]:
            out_t = F.interpolate(
                in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
            )
            # input is [[[0, 1, 2, 3, ..., 9]]]
            # expected out is [[[0, 0, 1, 1, 2, 2, ..., 9, 9]]]
            expected_out = in_t.repeat_interleave(2, dim=-1)
            self.assertEqual(out_t, expected_out)
def test_upsamplingNearestExact1d_correctness(self, device):
# Here we check if output matches Scikit-Image/Scipy-like result
# Checks https://github.com/pytorch/pytorch/issues/34808
def helper(isize, osize):
in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
out_t = F.interpolate(
in_t, size=(osize, ), recompute_scale_factor=False, mode="nearest-exact"
)
# compute expected output as scikit-image/scipy
expected_out = torch.zeros(osize, dtype=torch.float).unsqueeze(0).unsqueeze(0)
scale = 1.0 * isize / osize
for o in range(osize):
i_f32 = (o + 0.5) * scale
i = int(i_f32)
expected_out[0, 0, o] = in_t[0, 0, i]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(20, 11)
helper(10, 15)
    def test_upsamplingNearest2d(self, device):
        """2d "nearest" / "nearest-exact" interpolation: forward values, memory-format
        preservation, gradcheck, and CPU/CUDA parity (contiguous and channels_last)."""
        # Forward AD does not support XLA because XLA tensors don't have storage
        check_forward_ad = torch.device(device).type != 'xla'

        def helper(memory_format, mode):
            in_t = torch.ones(1, 2, 2, 2, device=device).contiguous(memory_format=memory_format)
            in_uint8_t = torch.ones(1, 2, 2, 2, dtype=torch.uint8, device=device).contiguous(memory_format=memory_format)
            with warnings.catch_warnings(record=True) as w:
                out_t = F.interpolate(in_t, size=4, mode=mode)
                out_uint8_t = F.interpolate(in_uint8_t, size=4, mode=mode)
                # No warnings expected on this path.
                self.assertEqual(len(w), 0)
            # A constant input must upsample to the same constant (float and uint8).
            self.assertEqual(torch.ones(1, 2, 4, 4, device=device), out_t)
            self.assertEqual(torch.ones(1, 2, 4, 4, dtype=torch.uint8, device=device), out_uint8_t)
            # Output keeps the input's memory format.
            self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

            # Non-square case: height != width.
            in_t = torch.ones(1, 2, 2, 1, device=device).contiguous(memory_format=memory_format).requires_grad_()
            out_t = F.interpolate(in_t, size=(4, 2), mode=mode)
            self.assertEqual(torch.ones(1, 2, 4, 2, device=device), out_t)
            self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

            out_t.backward(torch.randn_like(out_t))
            self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))

            # test backward when input's height is not same as width
            input = torch.ones(1, 2, 2, 1, requires_grad=True, device=device).contiguous(memory_format=memory_format)
            gradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], check_fwd_over_rev=check_forward_ad)

            input = torch.randn(1, 2, 2, 2, requires_grad=True, device=device).contiguous(memory_format=memory_format)
            # size=4 and scale_factor=2 must produce the same output here.
            self.assertEqual(
                F.interpolate(input, 4, mode=mode),
                F.interpolate(input, scale_factor=2, mode=mode))
            gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

            if torch.device(device).type == 'cuda':
                # CUDA output and input grad must match the CPU reference for several
                # shapes and scale factors.
                for shapes, scale_factor in product([
                    (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
                ], [0.5, 1.5, 2]):
                    a_cuda = torch.randn(*shapes, device=device).contiguous(memory_format=memory_format).requires_grad_()
                    a_cpu = a_cuda.detach().cpu().requires_grad_()

                    out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, mode=mode)
                    out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, mode=mode)
                    self.assertEqual(out_cpu.cuda(), out_cuda)

                    g_cuda = torch.randn_like(out_cuda)
                    g_cpu = g_cuda.cpu()

                    out_cuda.backward(g_cuda)
                    out_cpu.backward(g_cpu)
                    self.assertEqual(a_cuda.grad, a_cpu.grad)

        helper(torch.contiguous_format, "nearest")
        helper(torch.channels_last, "nearest")
        helper(torch.contiguous_format, "nearest-exact")
        helper(torch.channels_last, "nearest-exact")
def test_upsamplingNearest2d_correctness(self, device):
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize), recompute_scale_factor=False, mode="nearest"
)
# compute expected output as OpenCV
expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = o1 * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = o2 * scale
i2 = int(i2_f32)
expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last, 10, 15)
def test_upsamplingNearestExact2d_correctness(self, device):
# Here we check if output matches Scikit-Image/Scipy-like result
# Checks https://github.com/pytorch/pytorch/issues/34808
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize), recompute_scale_factor=False, mode="nearest-exact"
)
# compute expected output as Scikit-Image/Scipy
expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = (o1 + 0.5) * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = (o2 + 0.5) * scale
i2 = int(i2_f32)
expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last, 10, 15)
    def test_upsamplingNearest3d(self, device):
        """3d "nearest" / "nearest-exact" upsampling: forward values, memory-format
        preservation, gradcheck, and CPU/CUDA parity (contiguous and channels_last_3d)."""
        # Forward AD does not support XLA because XLA tensors don't have storage
        check_forward_ad = torch.device(device).type != 'xla'

        def helper(memory_format, mode):
            m = nn.Upsample(size=4, mode=mode)
            in_t = torch.ones(1, 2, 2, 2, 2, device=device).contiguous(memory_format=memory_format)
            in_uint8_t = torch.ones(
                1, 2, 2, 2, 2, dtype=torch.uint8, device=device
            ).contiguous(memory_format=memory_format)
            with warnings.catch_warnings(record=True) as w:
                out_t = m(in_t)
                out_uint8_t = m(in_uint8_t)
            # A constant input must upsample to the same constant (float and uint8).
            expected_output = torch.ones(1, 2, 4, 4, 4, device=device)
            self.assertEqual(expected_output, out_t)
            self.assertEqual(expected_output.to(torch.uint8), out_uint8_t)
            # Output keeps the input's memory format.
            self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

            input = torch.randn(
                1, 2, 2, 2, 2, requires_grad=True, device=device
            ).contiguous(memory_format=memory_format)
            gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

            if torch.device(device).type == 'cuda':
                # CPU and CUDA must agree on a channels_last_3d input.
                a = torch.ones(
                    2, 2, 2, 3, 4, device=device, requires_grad=True
                ).contiguous(memory_format=torch.channels_last_3d)
                # Zero two entries so the comparison is not trivially all-ones.
                a[1][1][1][2][2] = a[1][1][1][2][3] = 0

                out_cuda = torch.nn.functional.interpolate(a, scale_factor=2, mode=mode)
                out_cpu = torch.nn.functional.interpolate(a.to('cpu'), scale_factor=2, mode=mode)
                self.assertEqual(out_cpu, out_cuda.to('cpu'))

                gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_forward_ad=check_forward_ad)
                gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_fwd_over_rev=check_forward_ad)

                gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_forward_ad=check_forward_ad)
                gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_fwd_over_rev=check_forward_ad)

        helper(torch.contiguous_format, "nearest")
        helper(torch.channels_last_3d, "nearest")
        helper(torch.contiguous_format, "nearest-exact")
        helper(torch.channels_last_3d, "nearest-exact")
def test_upsamplingNearest3d_correctness(self, device):
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)
in_t = in_t.reshape(1, 1, isize, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode="nearest"
)
# compute expected output as OpenCV
expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = o1 * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = o2 * scale
i2 = int(i2_f32)
for o3 in range(osize):
i3_f32 = o3 * scale
i3 = int(i3_f32)
expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last_3d, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last_3d, 10, 15)
def test_upsamplingNearestExact3d_correctness(self, device):
# Here we check if output matches Scikit-Image/Scipy-like result
# Checks https://github.com/pytorch/pytorch/issues/34808
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)
in_t = in_t.reshape(1, 1, isize, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode="nearest-exact"
)
# compute expected output as Scikit-Image/Scipy
expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = (o1 + 0.5) * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = (o2 + 0.5) * scale
i2 = int(i2_f32)
for o3 in range(osize):
i3_f32 = (o3 + 0.5) * scale
i3 = int(i3_f32)
expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last_3d, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last_3d, 10, 15)
    @parametrize_test("antialias", [True, False])
    @parametrize_test("align_corners", [True, False])
    def test_upsamplingBilinear2d(self, device, antialias, align_corners):
        """Bilinear interpolation: constant input stays constant, memory format is
        preserved through forward/backward, gradcheck passes, CUDA matches CPU."""
        # Forward AD does not support XLA because XLA tensors don't have storage
        check_forward_ad = torch.device(device).type != 'xla'

        kwargs = dict(mode='bilinear', align_corners=align_corners, antialias=antialias)
        for memory_format in [torch.contiguous_format, torch.channels_last]:
            for scale_factor in [0.5, 1.5, 2]:
                in_t = torch.ones(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()
                out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                with warnings.catch_warnings(record=True) as w:
                    out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
                # A constant input must interpolate to the same constant.
                self.assertEqual(torch.ones(2, 3, out_size, out_size, device=device), out_t.data)
                # Memory format is preserved through forward and backward.
                self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

                out_t.backward(torch.randn_like(out_t))
                self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))

                # Allow gradcheck tolerance on CUDA, whose backward can be nondeterministic.
                if torch.device(device).type == 'cuda':
                    nondet_tol = 1e-5
                else:
                    nondet_tol = 0.0
                input = torch.randn(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()
                gradcheck(
                    lambda x: F.interpolate(x, out_size, **kwargs),
                    [input],
                    check_forward_ad=check_forward_ad, nondet_tol=nondet_tol
                )
                gradgradcheck(
                    lambda x: F.interpolate(x, out_size, **kwargs),
                    [input],
                    check_fwd_over_rev=check_forward_ad, nondet_tol=nondet_tol
                )

                if torch.device(device).type == 'cuda':
                    # CUDA output and input grad must match the CPU reference
                    # for several shapes.
                    for shapes in [
                        (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
                    ]:
                        a_cuda = torch.randn(
                            *shapes, device=device
                        ).contiguous(memory_format=memory_format).requires_grad_()
                        a_cpu = a_cuda.detach().cpu().requires_grad_()

                        with warnings.catch_warnings(record=True):
                            out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, **kwargs)
                            out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, **kwargs)

                        self.assertEqual(out_cpu, out_cuda.cpu())

                        g_cuda = torch.randn_like(out_cuda)
                        g_cpu = g_cuda.cpu()

                        out_cuda.backward(g_cuda)
                        out_cpu.backward(g_cpu)
                        self.assertEqual(a_cuda.grad, a_cpu.grad)
@parametrize_test("memory_format", [torch.contiguous_format, torch.channels_last])
def test_upsamplingBilinear2d_aa_correctness(self, device, memory_format):
t_in = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)
t_in = t_in.contiguous(memory_format=memory_format)
expected_out = torch.tensor([
17.035713, 20.25, 42.75, 45.964287, 81.03572, 84.25,
106.75, 109.96428, 145.0357, 148.25, 170.75, 173.9643
], device=device, dtype=t_in.dtype).reshape(1, 3, 2, 2)
t_out = F.interpolate(t_in, size=(2, 2), mode="bilinear", align_corners=False, antialias=True)
self.assertEqual(expected_out, t_out)
@parametrize_test("antialias", [True, False])
@parametrize_test("align_corners", [True, False])
def test_upsamplingBicubic2d(self, device, antialias, align_corners):
kwargs = dict(mode='bicubic', align_corners=align_corners, antialias=antialias)
for scale_factor in [2, ]:
in_t = torch.ones(2, 3, 8, 8, device=device)
print("dtype: ", in_t.dtype)
out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
print(out_t)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
expected_out = torch.ones(2, 3, out_size, out_size, device=device)
self.assertEqual(expected_out, out_t, atol=1e-5, rtol=0)
if torch.device(device).type == 'cuda':
nondet_tol = 1e-5
else:
nondet_tol = 0.0
inpt = torch.ones(2, 3, 8, 8, requires_grad=True, device=device)
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [inpt], nondet_tol=nondet_tol)
def test_upsamplingBicubic2d_correctness(self, device):
in_t = torch.arange(8., device=device).view(1, 2, 2, 2)
expected_out_t = torch.tensor(
[[[[-0.31641, 0.01562, 0.56250, 0.89453],
[0.34766, 0.67969, 1.22656, 1.55859],
[1.44141, 1.77344, 2.32031, 2.65234],
[2.10547, 2.43750, 2.98438, 3.31641]],
[[3.68359, 4.01562, 4.56250, 4.89453],
[4.34766, 4.67969, 5.22656, 5.55859],
[5.44141, 5.77344, 6.32031, 6.65234],
[6.10547, 6.43750, 6.98438, 7.31641]]]], device=device)
out_t = F.interpolate(in_t, scale_factor=2, mode='bicubic', align_corners=False)
torch.set_printoptions(precision=5)
self.assertEqual(out_t, expected_out_t, atol=1e-5, rtol=0)
@parametrize_test("memory_format", [torch.contiguous_format, torch.channels_last])
def test_upsamplingBicubic2d_aa_correctness(self, device, memory_format):
t_in = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)
t_in = t_in.contiguous(memory_format=memory_format)
expected_out = torch.tensor([
15.1205635, 18.760439, 44.23956, 47.879436, 79.12056, 82.76044,
108.23956, 111.87944, 143.12057, 146.76044, 172.23956, 175.87943
], device=device, dtype=t_in.dtype).reshape(1, 3, 2, 2)
t_out = F.interpolate(t_in, size=(2, 2), mode="bicubic", align_corners=False, antialias=True)
self.assertEqual(expected_out, t_out)
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_max_nhwc(self, device, dtype):
def helper(n, c, h, w, output_height, output_width, contig):
input = torch.randint(1, 10, (n, c, h, w), device=device, dtype=dtype)
input = input.contiguous(memory_format=torch.channels_last)
grad = torch.randint(1, 10, (4, 8, output_height, output_width), device=device, dtype=dtype)
grad = grad.contiguous(memory_format=torch.channels_last)
if not contig:
input = input[:, ::2, :, :]
grad = grad[:, ::2, :, :]
input.requires_grad_(True)
pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)
out, ind = pool(input)
out.backward(grad)
ref_out, ref_ind = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_ind.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(ind, ref_ind)
self.assertEqual(input.grad, ref_input.grad)
for contig in [True, False]:
helper(4, 8, 10, 10, 7, 7, contig)
helper(4, 8, 9, 14, 5, 8, contig)
helper(4, 8, 11, 11, 1, 1, contig)
def test_embedding_dense_grad(self, device):
embd = nn.Embedding(20, 20).to(device)
weight = embd.weight
def fn_wrapper(device):
def fn(weight):
inp = torch.tensor([[0, 1, 1, 2], [3, 5, 7, 11]], dtype=torch.long).to(device)
return torch.nn.functional.embedding(inp, weight)
return fn
fn = fn_wrapper(device)
_assertGradAndGradgradChecks(self, fn, (weight, ))
def test_embedding_scalar_weight_error(self, device):
indices = torch.rand(2, 2, device=device).long()
weights = [
torch.tensor(1.0, device=device),
torch.tensor(1.0, device=device).reshape(1, 1, 1),
]
for weight in weights:
with self.assertRaisesRegex(RuntimeError, "'weight' must be 2-D"):
torch.nn.functional.embedding(indices, weight)
    @dtypesIfCUDA(torch.float16, torch.float64)
    @dtypes(torch.float64)
    def test_embedding_backward(self, device, dtype):
        """Sparse embedding backward accumulates the expected sparse indices and
        values, including repeated backward passes and index mutation in between."""
        embedding = nn.Embedding(10, 3, sparse=True)
        tensor = torch.tensor([[7, 1, 3]])
        ones = torch.tensor(1., dtype=dtype).expand(3, 3)
        tensorTwice = tensor.repeat(1, 2)
        onesTwice = torch.cat((ones, ones))

        embedding = embedding.to(dtype=dtype).to(device)
        tensor = tensor.to(device)
        ones = ones.to(device)
        tensorTwice = tensorTwice.to(device)
        onesTwice = onesTwice.to(device)

        # Single backward: sparse grad carries one entry per looked-up index.
        embedding.zero_grad()
        embedding(tensor[0]).sum().backward()
        self.assertEqual(embedding.weight.grad._indices(), tensor)
        self.assertEqual(embedding.weight.grad._values(), ones)

        # Two backwards accumulate: indices/values are concatenated (uncoalesced).
        embedding.zero_grad()
        embedding(tensor[0]).sum().backward()
        embedding(tensor[0]).sum().backward()
        self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
        self.assertEqual(embedding.weight.grad._values(), onesTwice)

        # Mutating an index between passes shows up in the accumulated indices.
        embedding.zero_grad()
        embedding(tensor[0]).sum().backward()
        tensor[0, 0] = 8
        embedding(tensor[0]).sum().backward()
        tensorTwice[0, 3] = 8
        self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
        self.assertEqual(embedding.weight.grad._values(), onesTwice)
    @dtypesIfCUDA(*((torch.float, torch.double, torch.bfloat16, torch.half)
                    if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
    @dtypes(torch.float32)
    def test_embedding_padding_idx(self, device, dtype):
        """padding_idx semantics: padded positions embed to zero (dense and sparse),
        negative padding_idx works, out-of-range values are rejected, and the
        padding row receives no gradient (single and double backward)."""
        embedding = nn.Embedding(10, 20, padding_idx=0).to(device, dtype)
        input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
        output = embedding(input)
        # Positions holding the padding index embed to the zero vector.
        self.assertEqual(output[0][0].sum(), 0)
        self.assertEqual(output[1][2].sum(), 0)

        # Same check for the sparse embedding.
        embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True).to(device, dtype)
        input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
        output = embedding(input)
        self.assertEqual(output[0][0].sum(), 0)
        self.assertEqual(output[1][2].sum(), 0)

        # Negative padding_idx: -2 pads the same positions where index 8 appears.
        embedding = nn.Embedding(10, 20, padding_idx=-2).to(device, dtype)
        input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
        output = embedding(input)
        self.assertEqual(output[0][2].sum(), 0)
        self.assertEqual(output[1][1].sum(), 0)

        embedding = nn.Embedding(10, 20, padding_idx=-2, sparse=True).to(device, dtype)
        input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
        output = embedding(input)
        self.assertEqual(output[0][2].sum(), 0)
        self.assertEqual(output[1][1].sum(), 0)

        # The padding row can be overwritten manually and is then returned as-is.
        padding_vector = torch.ones(20, dtype=dtype, device=device)
        embedding = nn.Embedding(10, 20, padding_idx=2, sparse=True).to(device, dtype)
        with torch.no_grad():
            embedding.weight[2] = padding_vector
        input = torch.tensor([0, 2], dtype=torch.long).to(device)
        output = embedding(input)
        self.assertEqual(output[1], padding_vector)

        # Out-of-range padding_idx values are rejected at construction.
        self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=25)
        self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=-25)

        padding_idx = 0
        embedding = nn.Embedding(5, 2, padding_idx=padding_idx).to(device, dtype)
        for n in (1, 2, 1000):
            for other_indices in ([], [1, 3], [2]):
                indices = torch.tensor(other_indices + [padding_idx] * n, dtype=torch.long).to(device)
                pre = embedding.weight[padding_idx].clone()
                embedding(indices).sum().backward()
                after = (embedding.weight + embedding.weight.grad)[padding_idx]
                embedding.zero_grad()
                # The padding row must receive zero gradient.
                self.assertEqual(after, pre)

                # Double backward: grad-of-grad must also leave the padding row alone.
                emb_sum = embedding(indices).sum()
                emb_grad = torch.autograd.grad(outputs=emb_sum, inputs=list(embedding.parameters()), retain_graph=True)
                scalar = emb_grad[0].sum() + emb_sum
                scalar.backward()
                after = (embedding.weight + embedding.weight.grad)[padding_idx]
                embedding.zero_grad()
                self.assertEqual(after, pre)
    @onlyNativeDeviceTypes
    @dtypes(torch.float32, torch.float64)
    @dtypesIfCUDA(torch.half, torch.bfloat16)
    def test_embedding_bag_1D_padding_idx(self, device, dtype):
        """embedding_bag with a 1D ragged input and padding_idx must agree, forward
        and backward, with an equivalent dense 2D input padded to a fixed bag size."""
        num_features = 3
        max_indices_per_bag = 10
        num_bags = 10
        num_words = 100

        def gen_1D_indices_offsets(include_last_offset, allpad):
            # Build a random ragged batch: one guaranteed-full bag, one empty bag,
            # the rest random-sized. allpad=True fills every bag with index 1.
            indices = []
            offsets = []
            cur_offset = 0

            empty_bag = random.randint(0, num_bags - 1)
            full_bag = empty_bag
            while full_bag == empty_bag:
                full_bag = random.randint(0, num_bags - 1)

            for bag in range(num_bags):
                offsets.append(cur_offset)
                if bag == full_bag:
                    bag_size = max_indices_per_bag
                elif bag == empty_bag:
                    bag_size = 0
                else:
                    bag_size = random.randint(1, max_indices_per_bag - 1)
                indices += [1 if allpad else random.randint(0, num_words - 1) for _ in range(bag_size)]
                cur_offset += bag_size

            # embedding_bag requires offsets to start at 0.
            assert offsets[0] == 0

            indices = torch.tensor(indices, device=device)

            if include_last_offset:
                offsets.append(indices.size(0))

            offsets = torch.tensor(offsets, device=device)

            return indices, offsets

        def gen_2D_indices_from_1D(indices_1D, offsets, include_last_offset, padding_idx):
            # Convert the ragged 1D layout to a dense (num_bags, max_indices_per_bag)
            # matrix, filling the unused tail of each bag with padding_idx.
            assert offsets[0] == 0
            if include_last_offset:
                offsets = offsets[:-1]
            indices_2D = torch.empty(num_bags, max_indices_per_bag, device=device, dtype=torch.long)
            for bag in range(num_bags):
                start = offsets[bag]
                end = len(indices_1D) if bag + 1 == num_bags else offsets[bag + 1]
                end = min(len(indices_1D), end)

                # Fill the remaining space with padding indices
                indices_in_bag = []
                for item_pos in range(0, max_indices_per_bag):
                    if (start + item_pos) < end:
                        indices_in_bag.append(indices_1D[start + item_pos])
                    else:
                        indices_in_bag.append(padding_idx)
                indices_2D[bag] = torch.tensor(indices_in_bag, device=device)

            return indices_2D

        test_cases = product(['max', 'mean', 'sum'], [False, True], [False, True], [False, True])

        for mode, sparse, include_last_offset, allpad in test_cases:
            # Max sparse and bfloat16 are not supported
            if mode == 'max':
                if sparse or (dtype == torch.bfloat16):
                    continue
            indices_1D, offsets = gen_1D_indices_offsets(include_last_offset, allpad)
            # Try each distinct index as padding_idx, plus the no-padding case.
            for padding_idx_1D in list(set(indices_1D.tolist())) + [None]:
                msg = (
                    f"mode: '{mode}', sparse: {sparse}, include_last_offset: {include_last_offset}, "
                    f"padding_idx_1D: {padding_idx_1D}")

                # If 1D input does not use a padding index, we still need one for the 2D input,
                # so we can add one dummy word to the weights to act as the padded word
                padding_idx_2D = padding_idx_1D if padding_idx_1D is not None else num_words
                num_words_with_padding = num_words if padding_idx_1D is not None else num_words + 1

                indices_2D = gen_2D_indices_from_1D(
                    indices_1D,
                    offsets,
                    include_last_offset,
                    padding_idx_2D)

                weights = torch.randn(
                    num_words_with_padding,
                    num_features,
                    dtype=dtype,
                    device=device,
                    requires_grad=True)
                weights_check = weights.clone().detach().requires_grad_(True)

                bag = torch.nn.functional.embedding_bag(
                    indices_1D,
                    weights,
                    offsets,
                    padding_idx=padding_idx_1D,
                    mode=mode,
                    sparse=sparse,
                    include_last_offset=include_last_offset)

                bag_check = torch.nn.functional.embedding_bag(
                    indices_2D,
                    weights_check,
                    padding_idx=padding_idx_2D,
                    mode=mode,
                    sparse=sparse)
                self.assertEqual(bag, bag_check, msg=msg)

                bag.sum().backward()
                bag_check.sum().backward()

                # Sometimes, half dtype gradients mismatch by a greater amount
                # than other dtypes
                if dtype in [torch.half, torch.bfloat16]:
                    atol = 0.01
                    rtol = 0.01
                else:
                    atol = None
                    rtol = None
                self.assertEqual(weights.grad, weights_check.grad, msg=msg, atol=atol, rtol=rtol)
# Check correctness of torch.nn.functional.embedding_bag forward and
# backward functions with padding_idx, given a 2D indices input. Compare
# against torch.nn.functional.embedding followed by a reduction.
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.half, torch.bfloat16)
def test_embedding_bag_2D_padding_idx(self, device, dtype):
    """Forward/backward parity of embedding_bag (2D indices, padding_idx)
    against a pure-Python reference built on F.embedding + reduction."""
    # Use a Python implementation of embedding_bag with padding_idx support
    # to check torch.nn.functional.embedding_bag correctness
    def embedding_bag_check(indices, weights, mode, sparse, padding_idx):
        assert padding_idx is not None
        embedding = torch.nn.functional.embedding(
            indices,
            weights,
            padding_idx=padding_idx,
            sparse=sparse)
        # Reduce over the last (per-bag) dimension of the 2D indices.
        reduction_dim = indices.dim() - 1
        if mode == 'sum' or mode == 'mean':
            # We must avoid including elements at padding_idx in the
            # sum/mean, so multiply those elements by 0, and multiply
            # all other elements by 1
            per_sample_weights = indices.ne(padding_idx).to(dtype).unsqueeze(-1)
            res = embedding.mul(per_sample_weights).sum(dim=reduction_dim)
            if mode == 'mean':
                weights_sum = per_sample_weights.sum(dim=reduction_dim)
                res = res.div(weights_sum)
        elif mode == 'max':
            # We must avoid allowing elements at padding_idx to be chosen
            # as the max, so set those elements to negative infinity
            res = embedding.masked_fill(
                indices.unsqueeze(-1) == padding_idx, -float('inf')
            ).amax(dim=reduction_dim)
        else:
            raise RuntimeError(f"mode '{mode}' is not available")
        # If a row is all padding, set its corresponding result row to 0.
        # This is needed because the above mean and max mode
        # implementations set these elements to nan and -inf, respectively
        if mode in ['mean', 'max']:
            res = res.masked_fill(
                indices.eq(padding_idx).all(dim=-1).unsqueeze(-1),
                0)
        return res

    num_features = 3
    num_words = 10
    indices_dim1 = 10
    for mode, sparse, allpad, indices_dim0 in product(['max', 'mean', 'sum'], [False, True], [False, True], [1, 10]):
        # Max sparse and bfloat16 are not supported
        if mode == 'max':
            if sparse or (dtype == torch.bfloat16):
                continue
        if allpad:
            # Constant indices so the single distinct value (1) becomes a
            # padding_idx that pads every bag completely.
            indices = torch.empty(indices_dim0, indices_dim1, dtype=torch.long, device=device).fill_(1)
        else:
            indices = torch.randint(0, num_words, (indices_dim0, indices_dim1), device=device)
            if indices_dim0 > 1:
                # Fill one row with duplicate index so we can test with a fully
                # padded row
                duplicate_row = random.randint(0, indices_dim0 - 1)
                indices[duplicate_row] = indices[duplicate_row][0]
        # Every value that actually occurs is tried as the padding index.
        for padding_idx in list(set(indices.flatten(0, -1).tolist())):
            weights = torch.randn(num_words, num_features, dtype=dtype, device=device, requires_grad=True)
            weights_check = weights.clone().detach().requires_grad_(True)
            msg = (
                f"mode: '{mode}', sparse: {sparse}, padding_idx: {padding_idx}, "
                f"allpad: {allpad}, indices.size(): {indices.size()}")
            # Check forward with a Python implementation of padding_idx embedding_bag
            bag_check = embedding_bag_check(
                indices,
                weights_check,
                mode,
                sparse,
                padding_idx)
            bag = torch.nn.functional.embedding_bag(
                indices,
                weights,
                padding_idx=padding_idx,
                mode=mode,
                sparse=sparse)
            self.assertEqual(bag, bag_check, msg=msg)
            bag_check.sum().backward()
            grad_check = weights_check.grad
            bag.sum().backward()
            grad = weights.grad
            # Sometimes, half dtype gradients mismatch by a greater amount
            # than other dtypes
            if dtype in [torch.half, torch.bfloat16]:
                atol = 0.01
                rtol = 0.01
            else:
                atol = None
                rtol = None
            self.assertEqual(grad, grad_check, msg=msg, atol=atol, rtol=rtol)
def test_masked_softmax(self, device):
    """Compare torch._masked_softmax against a naive exp/mask/normalize
    reference over several (batch, heads, seq_len) shapes."""
    def reference_masked_softmax(x, m):
        # Zero masked positions after exponentiation, then normalize
        # over the last dimension.
        e = torch.exp(x) * m
        denom = e.sum(dim=3, keepdim=True).expand(e.size())
        return e / denom

    for batch, heads, seq_len in [(1, 1, 32), (3, 16, 310), (12, 4, 1024), (4, 2, 1200)]:
        x = torch.randn((batch, heads, seq_len, seq_len))
        m = torch.randint(0, 2, (batch, seq_len))
        if self.device_type == "cuda":
            x = x.cuda()
            m = m.cuda()
        m = m.reshape(batch, 1, 1, seq_len).expand(batch, heads, seq_len, seq_len).bool()
        fast = torch._masked_softmax(x, m)
        slow = reference_masked_softmax(x, m.float())
        self.assertEqual(slow, fast, exact_dtype=True)
@onlyCUDA
def test_masked_softmax_transformer_layout(self, device):
    """torch._masked_softmax with a (B, L) mask (transformer layout) must
    match the naive reference that expands the mask to (B, H, L, L)."""
    def reference_masked_softmax(x, m):
        e = torch.exp(x) * m
        denom = e.sum(dim=3, keepdim=True).expand(e.size())
        return e / denom

    batch, heads, seq_len = 211, 16, 42
    x = torch.randn((batch, heads, seq_len, seq_len))
    m = torch.randint(0, 2, (batch, seq_len))
    if self.device_type == "cuda":
        x = x.cuda()
        m = m.cuda()
    # Native op takes the compact (B, L) boolean mask directly.
    fast = torch._masked_softmax(x, m.bool())
    # The reference needs the mask broadcast to the full attention shape.
    expanded = m.bool().reshape(batch, 1, 1, seq_len).expand(batch, heads, seq_len, seq_len).float()
    slow = reference_masked_softmax(x, expanded)
    self.assertEqual(slow, fast, exact_dtype=True)
# Test fails on Vg20
@skipCUDAIfRocm
@dtypesIfCUDA(torch.half, torch.float)
@dtypes(torch.float)
def test_softmax_results(self, device, dtype):
    """Compare softmax/log_softmax forward, backward, and double-backward on
    `device` against the CPU reference, across both dims and over shifted
    (misaligned) slices of the input."""
    # Non-even sizes and non-zero shifts test fallback paths in vectorized kernel
    # Note: dim1 > 1024 is needed to exercise the vectorized (non-persistent) path, (16, 30576) is BERT-esque
    sizes = [(0, 10), (32, 20), (10, 0), (31, 20), (32, 21), (31, 23), (32, 1536), (31, 2048), (33, 2049), (16, 30576)]
    shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
    for fn in [F.softmax, F.log_softmax]:
        for size in sizes:
            for shift in shifts:
                input = torch.rand(size, device=device, dtype=dtype)
                # Note: With the largest tests we can hit upper limit of fp16 when we
                # sum, so scale the input down to stay in a nicer range.
                if dtype == torch.float16:
                    input = input / 100.
                input = input[shift[0]:, shift[1]:]
                # Note: Don't want to bprop back through slice op
                input = input.detach().requires_grad_(True)
                ref_input = input.clone().cpu().detach().requires_grad_(True)
                for dim in [0, 1]:
                    ref_output = fn(ref_input, dtype=torch.float, dim=dim)
                    output = fn(input, dtype=torch.float, dim=dim)
                    grad_output = torch.rand(size, device=device, dtype=dtype)
                    grad_output = grad_output[shift[0]:, shift[1]:]
                    ref_grad_output = grad_output.clone().cpu().detach()
                    # create_graph=True so we can also check double backward below.
                    grad_input, = torch.autograd.grad(output, input, grad_outputs=(grad_output), create_graph=True)
                    ref_grad_input, = torch.autograd.grad(ref_output, ref_input,
                                                          grad_outputs=(ref_grad_output), create_graph=True)
                    grad_input.sum().backward()
                    ref_grad_input.sum().backward()
                    self.assertEqual(output, ref_output)
                    self.assertEqual(grad_input, ref_grad_input)
                    self.assertEqual(input.grad, ref_input.grad)
@onlyCUDA
@dtypes(torch.float, torch.half)
@largeTensorTest("20GB")
@largeTensorTest("90GB", "cpu")
@precisionOverride({torch.half: 0.001})
def test_softmax_64bit_indexing(self, device, dtype):
    """log_softmax forward/backward on tensors whose element count exceeds
    the 32-bit index range must match a CPU reference run."""
    def check(*shape):
        gpu_in = torch.randn(shape, device="cuda", dtype=torch.float16, requires_grad=True)
        gpu_out = F.log_softmax(gpu_in, dim=-1, dtype=dtype)
        gpu_out.backward(gpu_out)
        with torch.no_grad():
            cpu_in = gpu_in.cpu().requires_grad_()
        cpu_out = F.log_softmax(cpu_in.float(), dim=-1).to(dtype)
        cpu_out.backward(cpu_out)
        self.assertEqual(gpu_out, cpu_out)
        self.assertEqual(gpu_in.grad, cpu_in.grad)

    check(1100000000, 2)
    check(2200000000, 1)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
def test_log_softmax_big(self, device, dtype):
    """log_softmax must be shift-invariant: adding a large constant to the
    logits must not change the result."""
    def check(shape):
        base = torch.randint(100, shape, dtype=dtype, device=device)
        # Half has a much smaller representable range, so use a smaller shift.
        shift = 1.5e3 if dtype == torch.half else 1e7
        shifted = base + shift
        self.assertEqual(F.log_softmax(base, -1), F.log_softmax(shifted, -1))

    check((16, 4))
    if self.device_type == 'cuda':
        # Exercise a wider row on CUDA.
        check((4, 1536))
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_large_nosplit(self, device):
    """Run Conv2d forward on very large inputs that must complete without
    being split into smaller chunks."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    wide_conv = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype)
    wide_conv(torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device))
    deep_conv = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype)
    deep_conv(torch.randn(1, 1, 2048, 1024, dtype=dtype, device=device))
def test_conv_noncontig_weights(self, device):
    """conv{1,2,3}d and conv_transpose{1,2,3}d must accept expanded
    (non-contiguous, zero-stride) weights in forward and backward."""
    for spatial_dims in (1, 2, 3):
        for grouped in (False, True):
            channels = 3
            groups = channels if grouped else 1
            # Build a weight whose leading channel dims are expanded views.
            weight = torch.randn([3] * spatial_dims, device=device)
            weight = weight.expand([channels, channels // groups] + list(weight.shape))
            weight = weight.detach().requires_grad_()
            x = torch.randn([1, channels] + [5] * spatial_dims, device=device, requires_grad=True)
            for prefix in ('conv', 'conv_transpose'):
                out = getattr(F, '{}{}d'.format(prefix, spatial_dims))(x, weight, groups=groups)
                out.sum().backward()
def test_conv_noncontig_weights_and_bias(self, device):
    """Conv2d must give identical results for non-contiguous versus
    contiguous input, weight, and bias tensors holding the same values."""
    # Non-contiguous tensors are produced by slicing a trailing dimension.
    for bias in [True, False]:
        conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                          bias=bias).to(device, torch.float)
        input_nc = torch.randn((1, 3, 224, 224, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
        input_c = input_nc.contiguous()
        weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
        conv1.weight = nn.Parameter(weight_nc)
        weight_c = conv1.weight.contiguous()
        if bias:
            bias_nc = torch.randn((64, 2), device=device, dtype=torch.float)[:, 1]
            conv1.bias = nn.Parameter(bias_nc)
            bias_c = conv1.bias.contiguous()
        # First run with non-contiguous parameters, then swap in the
        # contiguous copies and run again — outputs must match.
        out1 = conv1(input_nc)
        conv1.weight = nn.Parameter(weight_c)
        if bias:
            conv1.bias = nn.Parameter(bias_c)
        out2 = conv1(input_c)
        self.assertEqual(out1, out2)
def test_save_lstm_compatibility(self, device):
    """Restoring an LSTM state pickled before `proj_size` existed must still
    work and produce the original outputs."""
    model = nn.LSTM(2, 3)
    x = torch.randn(32, 5, 2)
    expected = model(x)

    # Simulate a state dict produced by an older PyTorch that predates the
    # proj_size attribute.
    assert model.proj_size == 0
    legacy_state = model.__dict__
    del legacy_state['proj_size']

    restored = nn.LSTM(2, 3)
    restored.__setstate__(legacy_state)
    self.assertEqual(restored(x), expected)
@onlyCUDA
@tf32_on_and_off(0.005)
def test_grid_sample_large(self, device):
    """Regression tests for grid_sample with extreme coordinates
    (gh-35202, gh-24823): out-of-range / inf coordinates must sample as
    zero padding, and backward must not crash."""
    def issue_35202():
        # Huge integer coordinates (gh-35202): result must be all zeros.
        input_tensor = torch.rand(1, 1, 480, 640, dtype=torch.float, device=device, requires_grad=True)
        coords = torch.tensor([[-10059144, 67680944], [67680944, 67680944]], dtype=torch.float, device=device)
        coords = coords.unsqueeze(0).unsqueeze(0).repeat(1, 1, 1, 1)
        result = torch.nn.functional.grid_sample(input_tensor, coords)
        self.assertEqual(result, torch.tensor([[[[0., 0.]]]], dtype=torch.float, device=device))
        result.backward(torch.ones_like(result))
        torch.cuda.synchronize()
    issue_35202()

    def issue_24823_1(dtype):
        # A single inf grid coordinate (gh-24823) must only zero out its own
        # sample, not poison the rest of the output.
        image = torch.arange(27, 0, -1, dtype=dtype, device=device).view(1, 1, 3, 3, 3)
        image.requires_grad_()
        grid = torch.nn.functional.affine_grid(
            torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]], dtype=dtype, device=device),
            (1, 1, 3, 3, 3))
        grid[:, 1, 1, 1, 0] = float('inf')
        result = torch.nn.functional.grid_sample(image, grid, padding_mode='zeros')
        self.assertEqual(result, torch.tensor([[[[[27., 26., 25.], [24., 23., 22.], [21., 20., 19.]],
                                                 [[18., 17., 16.], [15., 0., 13.], [12., 11., 10.]],
                                                 [[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]]]]],
                                              device=device, dtype=dtype))
        result.backward(torch.ones_like(result))
        expected_grad = torch.ones_like(image)
        # The element hit by the inf coordinate receives no gradient.
        expected_grad[0, 0, 1, 1, 1] = 0
        self.assertEqual(image.grad, expected_grad, atol=0.005, rtol=0)
    issue_24823_1(torch.half)
    issue_24823_1(torch.float)
    issue_24823_1(torch.double)

    def issue_24823_2():
        # Enormous negative affine parameters: every sample is out of range,
        # so both result and backward must be well-defined (zeros).
        param = torch.tensor([[[-1.0e+20, 0.0, 0.0], [0.0, -1.0e+20, 0.0]]], dtype=torch.float, device=device)
        img = torch.zeros((1, 1, 4, 4), dtype=torch.float, device=device, requires_grad=True)
        grid = torch.nn.functional.affine_grid(param, img.size())
        result = torch.nn.functional.grid_sample(img, grid)
        self.assertEqual(result, torch.zeros(1, 1, 4, 4, device=device, dtype=torch.float))
        result.backward(torch.ones_like(result))
        torch.cuda.synchronize()
    issue_24823_2()
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
                 # Compute sum of the large tensor sizes:
                 # (im.numel() + small_image.numel() + small_image.grad.numel() +
                 # large_view.grad.numel()) * sizeof(dtype)
                 32769 * (65536 + 3 * 65536 / 128) *
                 torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_2d(self, device, dtype):
    """64-bit indexing in 2-D grid_sample (gh-41656): sampling the extreme
    corners of a >2**31-element image must not segfault, and sampling a
    large-stride view must match the same op on a contiguous copy in both
    forward and backward."""
    # Test 64-bit indexing with grid_sample (gh-41656)
    # Try accessing the corners, there should be no segfault
    coords = torch.tensor([[[-1., -1.],
                            [+1., -1.]],
                           [[-1., +1.],
                            [+1., +1.]]], device=device, dtype=dtype)
    coords = coords.expand(1, 2, 2, 2)
    im = torch.zeros([1, 1, 32769, 65536], device=device, dtype=dtype)
    # Actually perform the corner sampling (mirrors the 3-D variant of this
    # test); previously `coords` was built but never used, so the "no
    # segfault" check above was dead code.
    result = F.grid_sample(im, coords, align_corners=False)
    self.assertEqual(result, torch.zeros((1, 1, 2, 2), device=device, dtype=dtype))

    # Compare sampling with large strides to the same op on a contiguous tensor
    coords = torch.rand(1, 4, 4, 2, device=device, dtype=dtype)
    large_view = im[..., 127::128]
    small_image = torch.rand_like(large_view)
    large_view[...] = small_image
    large_view.requires_grad, small_image.requires_grad = True, True
    self.assertTrue(
        sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
        msg="View must use 64-bit indexing")
    for mode, padding_mode, align_corners in itertools.product(
            ('nearest', 'bilinear', 'bicubic'), ('zeros', 'border', 'reflection'), (True, False)):
        a = F.grid_sample(
            small_image, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        a.sum().backward()
        b = F.grid_sample(
            large_view, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        b.sum().backward()
        self.assertEqual(a, b)
        self.assertEqual(small_image.grad, large_view.grad)
        small_image.grad.zero_()
        large_view.grad.zero_()
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
                 # Compute sum of the large tensor sizes:
                 # (im.numel() + small_image.numel() + small_image.grad.numel() +
                 # large_view.grad.numel()) * sizeof(dtype)
                 2 * 32769 * (32768 + 3 * 32768 / 128) *
                 torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_3d(self, device, dtype):
    """64-bit indexing in 3-D grid_sample (gh-41656): corner access must not
    segfault, and sampling a >2**31-stride view must match the same op on a
    contiguous copy, in forward and backward."""
    # Test 64-bit indexing with grid_sample (gh-41656)
    # Try accessing the corners, there should be no segfault
    coords = torch.full((1, 2, 2, 2, 3), 1., device=device, dtype=dtype)
    im = torch.zeros([1, 1, 2, 32769, 32768], device=device, dtype=dtype)
    result = F.grid_sample(im, coords, align_corners=False)
    self.assertEqual(result, torch.zeros((1, 1, 2, 2, 2), device=device, dtype=dtype))

    # Compare sampling with large strides to the same op on a contiguous tensor
    coords = torch.rand(1, 1, 4, 4, 3, device=device, dtype=dtype)
    large_view = im[..., 127::128]
    small_image = torch.rand_like(large_view)
    large_view[...] = small_image
    small_image.requires_grad, large_view.requires_grad = True, True
    self.assertTrue(
        sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
        msg="View must use 64-bit indexing")
    for mode, padding_mode, align_corners in itertools.product(
            ('nearest', 'bilinear'), ('zeros', 'border', 'reflection'), (True, False)):
        a = F.grid_sample(
            small_image, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        a.sum().backward()
        b = F.grid_sample(
            large_view, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        b.sum().backward()
        self.assertEqual(a, b)
        self.assertEqual(small_image.grad, large_view.grad)
        small_image.grad.zero_()
        large_view.grad.zero_()
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_transposed_large(self, device):
    """ConvTranspose2d on a very large batch: each quarter of the batched
    result must exactly match running that quarter on its own."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype)
    big_input = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device)
    # forward over the full batch
    whole = conv(big_input)
    # Compare quarter-by-quarter against independent runs.
    for start in (0, 1024, 2048, 3072):
        chunk = big_input.narrow(0, start, 1024)
        diff = (whole.narrow(0, start, 1024) - conv(chunk)).abs_().max().item()
        self.assertEqual(diff, 0)
@onlyCUDA
@skipCUDAIfRocm
@largeTensorTest('12GB')
def test_conv_large(self, device):
    """Conv2d forward/backward on a batch exceeding 2**31 elements must
    match running the same computation in smaller batch chunks."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype)
    conv.weight = torch.nn.Parameter(torch.randn(2, 2, 8, 8, device=device, dtype=dtype) / 64)
    input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device)
    # forward
    ret = conv(input_large)
    self.assertEqual(ret[:2048], conv(input_large[:2048]))
    self.assertEqual(ret[2048:4096], conv(input_large[2048:4096]))
    self.assertEqual(ret[4096:], conv(input_large[4096:]))
    # backward
    conv.zero_grad()
    # When computing the backward, we are using `max(dim=1)` to create
    # some sparsity. Without this sparsity, the rounding error would be
    # too large (as large as 1e-5) to satisfy the criterion (1e-6) of `assertEqual`
    ret.view(4097, -1).max(dim=1).values.sum().backward()
    del ret
    grad1 = conv.weight.grad.detach().clone()
    conv.zero_grad()
    conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward()
    conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward()
    conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward()
    grad2 = conv.weight.grad.detach().clone()
    # gradients are at the order of hundreds, we need to scale it to
    # the order of one so that we can compare
    scale = 1 / grad2.abs().mean()
    grad1 = grad1 * scale
    grad2 = grad2 * scale
    self.assertEqual(grad1, grad2, atol=5e-2, rtol=5e-3)
def _test_gumbel_softmax_st_shapes(self, device, dtype, shape, dim, count_expected):
    """Draw one straight-through Gumbel-softmax sample and sanity-check it:
    non-negative values, unchanged shape, and exactly one selected entry
    per slice along `dim` (total == count_expected)."""
    scores = torch.randn(shape, dtype=torch.float, device=device).to(dtype)
    draw = F.gumbel_softmax(scores, hard=True, dim=dim)
    # Hard samples are one-hot along `dim`.
    self.assertGreaterEqual(draw.min(), 0)
    self.assertTrue(draw.shape == scores.shape)
    self.assertEqual(draw.sum(), count_expected, atol=torch.finfo(draw.dtype).eps, rtol=0)
def _test_gumbel_softmax_straight_through(self, device, dtype):
    """Statistical check: hard Gumbel-softmax draws should follow the
    softmax distribution of the logits (lazy ~99% two-sided z-test)."""
    num_draws = 100
    logits = torch.tensor([[0.2, 0.8, 0.1]], device=device)
    logits = logits.reshape([1, 3])
    logits = logits.to(dtype).requires_grad_()
    probs = logits.softmax(dim=-1)
    counts = torch.zeros_like(logits)
    for _ in range(num_draws):
        y_draw = F.gumbel_softmax(logits, hard=True)
        counts = counts + y_draw
    # All values positive
    self.assertGreaterEqual(y_draw.min(), 0)
    # Each experiment should result in 1 draw.
    self.assertEqual(counts.sum(), num_draws, atol=torch.finfo(counts.dtype).eps, rtol=0)
    # check results is asymptotically as expected.
    expected = probs * num_draws
    # ~z is approximately N(0,1) for unbiased count
    z = (counts - expected) / (expected * (1 - probs)).sqrt()
    # A (lazy) approximate 99% two-sided test:
    # occurs with prob alpha~>=0.01 if unbiased
    self.assertLess(z.abs().max().item(), 2.58)
def _test_gumbel_softmax_grad(self, device, dtype):
    """hard=True and hard=False must propagate the same gradient — the
    straight-through estimator reuses the soft sample's gradient."""
    # "hard" and "not hard" should propagate same gradient.
    logits_soft = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)
    logits_hard = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)
    # Replay the identical RNG stream for both draws so the Gumbel noise matches.
    seed = torch.random.get_rng_state()
    y_soft = F.gumbel_softmax(logits_soft, hard=False)
    torch.random.set_rng_state(seed)
    y_hard = F.gumbel_softmax(logits_hard, hard=True)
    y_soft.sum().backward()
    y_hard.sum().backward()
    # 2eps = 1x addition + 1x subtraction.
    tol = 2 * torch.finfo(dtype).eps
    self.assertEqual(logits_soft.grad, logits_hard.grad, atol=tol, rtol=0)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_gumbel_softmax(self, device, dtype):
    """Exercise the Gumbel-softmax helpers across shapes, dims, and dtypes."""
    shape_cases = [
        ([5], 0, 1),
        ([5], -1, 1),
        ([5, 4], 1, 5),
        ([5, 4, 3], 1, 5 * 3),
        ([5, 4, 3], -1, 5 * 4),
    ]
    for case_shape, case_dim, expected_count in shape_cases:
        self._test_gumbel_softmax_st_shapes(
            device, dtype, shape=case_shape, dim=case_dim, count_expected=expected_count)
    self._test_gumbel_softmax_straight_through(device, dtype)
    self._test_gumbel_softmax_grad(device, dtype)
def _test_rnn_retain_variables(self, device, dtype):
    """backward(retain_graph=True) must yield identical gradients on every
    repeated call for LSTM, GRU, and vanilla RNN."""
    modules = [nn.LSTM(10, 20, num_layers=2).to(device, dtype),
               nn.GRU(10, 20, num_layers=2).to(device, dtype),
               nn.RNN(10, 20, num_layers=2).to(device, dtype)]
    for rnn in modules:
        inp = torch.randn(5, 6, 10, device=device, dtype=dtype, requires_grad=True)
        out = rnn(inp)
        out[0].sum().backward(retain_graph=True)
        reference = [inp.grad.data.clone()] + [p.grad.data.clone() for p in rnn.parameters()]
        for _ in range(4):
            # Re-run the identical backward; gradients must not drift.
            rnn.zero_grad()
            inp.grad.data.zero_()
            out[0].sum().backward(retain_graph=True)
            repeated = [inp.grad.data] + [p.grad.data for p in rnn.parameters()]
            self.assertEqual(reference, repeated)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.double)
def test_rnn_retain_variables(self, device, dtype):
    """Run the retain_graph gradient-stability check; on CUDA, repeat with
    cuDNN disabled to also cover the native implementation."""
    self._test_rnn_retain_variables(device, dtype)
    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            self._test_rnn_retain_variables(device, dtype)
@onlyCUDA
@dtypes(torch.double)
def test_lstmcell_backward_only_one_output_grad(self, device, dtype):
    """Backprop through only one of LSTMCell's two outputs (h or c): the
    undefined gradient for the other output must not break backward."""
    cell = torch.nn.LSTMCell(2, 3).to(device).to(dtype=dtype)
    x = torch.randn(1, 2, device=device, dtype=dtype, requires_grad=True)
    for output_index in range(2):
        cell(x)[output_index].sum().backward()
        # A real (non-zero) gradient must have reached the input.
        self.assertFalse(x.grad is None or x.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
    """gradcheck + gradgradcheck an RNN module (cuDNN disabled), then verify
    that the cuDNN-backed double backward raises its documented error."""
    def flatten_out(mod, inp):
        out = mod(inp)
        # Flatten (output, hidden-state tuple) into a flat tuple of tensors
        # so gradcheck can consume every output.
        return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
    gradcheckfunc = partial(flatten_out, mod)
    with torch.backends.cudnn.flags(enabled=False):
        gradcheck(gradcheckfunc, inp, check_batched_grad=False)
        gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
    if inp.is_cuda and not TEST_WITH_ROCM:
        # cuDNN RNNs do not support double backward; make sure the error
        # message tells the user how to work around it.
        with torch.backends.cudnn.flags(enabled=True):
            result = gradcheckfunc(inp)
            # Trigger double backward via backward(create_graph=True).
            result[0].sum().backward(create_graph=True)
            grad0 = next(mod.parameters()).grad
            with self.assertRaisesRegex(RuntimeError,
                                        "please disable the CuDNN backend temporarily"):
                grad0.sum().backward()
            # Manually clear grads to avoid the create_graph=True
            # reference-cycle / memory-leak warning.
            for param in mod.parameters():
                param.grad = None
            inp.grad = None
@skipMeta
@dtypes(torch.double)
def test_LSTM_grad_and_gradgrad(self, device, dtype):
    """gradcheck/gradgradcheck the LSTM module, with and without bias."""
    hidden = 4
    inp = torch.rand(1, 3, hidden, device=device, dtype=dtype, requires_grad=True)
    for has_bias in (True, False):
        lstm = torch.nn.LSTM(hidden, hidden, bias=has_bias).to(device).to(dtype)
        self._test_rnn_mod(lstm, inp)
@skipMeta
@dtypes(torch.double)
def test_GRU_grad_and_gradgrad(self, device, dtype):
    """gradcheck/gradgradcheck the GRU module, with and without bias."""
    hidden = 4
    inp = torch.rand(1, 3, hidden, device=device, dtype=dtype, requires_grad=True)
    for has_bias in (True, False):
        gru = torch.nn.GRU(hidden, hidden, bias=has_bias).to(device).to(dtype)
        self._test_rnn_mod(gru, inp)
@onlyCUDA
def test_upsamplingNearest1d_launch_config(self, device):
    """Nearest upsampling of a 1-D input with a huge (2**25) batch dim must
    match the CPU result."""
    upsample = nn.Upsample(scale_factor=2)
    x = torch.rand(2**25, 1, 1, device=device)
    cuda_out = upsample(x)
    cpu_out = upsample(x.cpu())
    self.assertEqual(cpu_out, cuda_out)
@onlyCUDA
def test_upsamplingNearest2d_launch_config(self, device):
    """Nearest upsampling of a 2-D input with a huge (2**25) batch dim must
    match the CPU result."""
    upsample = nn.Upsample(scale_factor=2)
    x = torch.rand(2**25, 1, 1, 1, device=device)
    cuda_out = upsample(x)
    cpu_out = upsample(x.cpu())
    self.assertEqual(cpu_out, cuda_out)
@onlyCUDA
def test_upsamplingNearest3d_launch_config(self, device):
    """Nearest upsampling of a 3-D input with a huge (2**25) batch dim must
    match the CPU result."""
    upsample = nn.Upsample(scale_factor=2)
    x = torch.rand(2**25, 1, 1, 1, 1, device=device)
    cuda_out = upsample(x)
    cpu_out = upsample(x.cpu())
    self.assertEqual(cpu_out, cuda_out)
@unittest.expectedFailure
@skipIfRocm
@onlyCUDA
def test_upsamplingNearest2d_launch_fail(self, device):
    """Marked expectedFailure on non-ROCm CUDA: this shape is presumed to
    exceed the kernel's launch-configuration limit (the ROCm sibling test
    runs the same shape successfully) — confirm against the kernel's
    launch bounds."""
    m = nn.Upsample(scale_factor=2)
    inp = torch.rand(1, 1, 2**15, 2**8, device=device)
    out = m(inp)
@onlyCUDA
@skipCUDAIfNotRocm
def test_upsamplingNearest2d_launch_rocm(self, device):
    """On ROCm, the same large shape that is expected to fail on CUDA
    (see test_upsamplingNearest2d_launch_fail) must run successfully."""
    upsample = nn.Upsample(scale_factor=2)
    upsample(torch.rand(1, 1, 2**15, 2**8, device=device))
@onlyCUDA
@skipCUDAIfCudnnVersionLessThan(7600)
def test_CTCLoss_cudnn(self, device):
    """CTC loss via cuDNN must match both the native (non-cuDNN) path and
    the Python reference, for the forward value and log_probs gradient."""
    def _helper(zero_infinity):
        target_lengths = [30, 25, 20]
        input_lengths = [50, 50, 50]
        # NOTE(review): int32 targets kept on CPU — presumably required to
        # select the cuDNN code path; confirm against the dispatch logic.
        targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
        log_probs = torch.randn(50, 3, 15, dtype=torch.float, device=device).log_softmax(2).requires_grad_()
        log_probs_ref = log_probs.detach().clone().requires_grad_()
        with torch.backends.cudnn.flags(enabled=True):
            res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, zero_infinity=zero_infinity)
            res.backward()
        expected = ctcloss_reference(log_probs, targets.cuda(), input_lengths, target_lengths).float()
        with torch.backends.cudnn.flags(enabled=False):
            res2 = torch.nn.functional.ctc_loss(log_probs_ref, targets.cuda().long(), input_lengths, target_lengths,
                                                zero_infinity=zero_infinity)
            res2.backward()
        self.assertEqual(res, expected)
        self.assertEqual(res2, res)
        self.assertEqual(log_probs.grad, log_probs_ref.grad)

    _helper(zero_infinity=True)
    _helper(zero_infinity=False)
def _CTCLoss_gen_losses(self, device, input_length, vocab_size, target_length, reduction, use_module_form):
    """Compute CTC losses for equivalent batched (N=1) and batch-dim-free
    inputs, run backward on each, and return
    (losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs)
    for shape/value/gradient comparison by the caller."""
    batch_size = 1
    log_probs = torch.randn(input_length, batch_size, vocab_size, dtype=torch.float, device=device) \
        .log_softmax(2).requires_grad_()
    targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length),
                            dtype=torch.int, device=device)
    input_lengths = batch_size * [input_length]
    target_lengths = batch_size * [target_length]
    # Squeezed copies: same values without the batch dimension.
    log_probs_no_bd = log_probs.squeeze(1).detach().clone().requires_grad_()
    targets_no_bd = targets.squeeze(0).detach().clone()
    input_lengths_no_bd = torch.tensor(input_length)
    target_lengths_no_bd = torch.tensor(target_length)
    # Independent leaf tensors so each loss accumulates its own gradient.
    log_probs_refs = [log_probs.detach().clone().requires_grad_() for _ in range(2)]
    log_probs_no_bd_refs = [log_probs_no_bd.detach().clone().requires_grad_() for _ in range(1)]
    losses = []
    losses_no_bd = []
    has_cuda = torch.cuda.is_available()
    has_cudnn = has_cuda and 'cuda' in device and self.has_cudnn()
    # NOTE(review): targets are moved to CPU when cuDNN is used —
    # presumably the cuDNN CTC path requires CPU targets; confirm.
    if has_cuda and has_cudnn:
        targets = targets.cpu()
        targets_no_bd = targets_no_bd.cpu()
    # Either the module form or the functional form, as requested.
    ctc_loss = (
        nn.CTCLoss(reduction=reduction, zero_infinity=True)
        if use_module_form
        else partial(torch.nn.functional.ctc_loss, reduction=reduction, zero_infinity=True)
    )
    with torch.backends.cudnn.flags(enabled=has_cudnn):
        # batched: log_probs (T, N, C) with targets (N, S)
        losses.append(ctc_loss(log_probs_refs[0], targets, input_lengths, target_lengths))
        # batched: log_probs (T, N, C) with 1-D targets (S,)
        losses.append(ctc_loss(log_probs_refs[1], targets_no_bd, input_lengths, target_lengths))
        # unbatched: log_probs (T, C) with targets (S,)
        losses_no_bd.append(ctc_loss(log_probs_no_bd_refs[0], targets_no_bd,
                                     input_lengths_no_bd, target_lengths_no_bd))
    for loss in losses + losses_no_bd:
        loss.backward()
    return losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs
def _assertEqual_list(self, expected, list_to_compare, atol=None, rtol=None):
    """Assert that every element of `list_to_compare` equals `expected`
    (within the given absolute/relative tolerances)."""
    for candidate in list_to_compare:
        self.assertEqual(expected, candidate, atol=atol, rtol=rtol)
@parametrize_test("reduction", ['none', 'mean', 'sum'])
@parametrize_test("use_module_form", [True, False])
def test_CTCLoss_no_batch_dim(self, device, reduction, use_module_form):
    """CTC loss without a batch dimension must match the batched (N=1)
    computation in value, gradient, and shape."""
    input_length = 40
    vocab_size = 3
    target_length = 12
    args = self._CTCLoss_gen_losses(device, input_length, vocab_size, target_length, reduction, use_module_form)
    losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs = args
    # loss values: batched variants agree with each other and (after
    # squeezing the batch dim) with the unbatched variant
    self._assertEqual_list(losses[0], losses[1:], atol=1e-4, rtol=0)
    self._assertEqual_list(losses[0].squeeze(0), losses_no_bd, atol=1e-4, rtol=0)
    # gradient values
    self._assertEqual_list(log_probs_refs[0].grad, [t.grad for t in log_probs_refs[1:]], atol=1e-4, rtol=0)
    self._assertEqual_list(
        log_probs_refs[0].grad.squeeze(1),
        [t.grad for t in log_probs_no_bd_refs],
        atol=1e-4,
        rtol=0,
    )
    # loss shapes:
    # batch dim case should be (N,). no batch dim case should be ()
    self._assertEqual_list((1,) if reduction == 'none' else (), [loss.shape for loss in losses])
    self._assertEqual_list((), [loss.shape for loss in losses_no_bd])
    # checking the gradient's shape
    self._assertEqual_list((input_length, 1, vocab_size), [t.grad.shape for t in log_probs_refs])
    self._assertEqual_list((input_length, vocab_size), [t.grad.shape for t in log_probs_no_bd_refs])
@onlyCUDA
@skipCUDAIfNoCudnn
def test_contig_wrong_stride_cudnn(self, device):
    """A tensor can be contiguous yet carry a non-standard stride on a
    size-1 dimension; conv / conv_transpose must still accept it."""
    x = torch.randn(1, 16, 5, 5, device=device)
    # Overwrite the batch stride with a bogus value — since size(0) == 1
    # the tensor remains contiguous.
    bogus_strides = list(x.stride())
    bogus_strides[0] = 20
    x.set_(x.storage(), 0, x.size(), bogus_strides)
    self.assertTrue(x.is_contiguous())
    F.conv_transpose2d(x, torch.randn(16, 1, 1, 1, device=device))
    F.conv2d(x, torch.randn(1, 16, 1, 1, device=device))
@onlyCUDA
def test_Conv2d_size_1_kernel(self, device):
    """1x1 Conv2d: the non-cuDNN CUDA path must agree with the CPU result
    for output, weight gradient, and bias gradient."""
    x_cpu = torch.randn(2, 3, 5, 5)
    conv_cpu = torch.nn.Conv2d(3, 3, kernel_size=1)
    y_cpu = conv_cpu(x_cpu)
    grad = torch.rand_like(y_cpu)
    y_cpu.backward(grad)

    with cudnn.flags(enabled=False):
        conv_cuda = torch.nn.Conv2d(3, 3, kernel_size=1).to(device)
        # Copy the CPU parameters so both runs are numerically comparable.
        conv_cuda.bias.data.copy_(conv_cpu.bias.data)
        conv_cuda.weight.data.copy_(conv_cpu.weight.data)
        y_cuda = conv_cuda(x_cpu.to(device))
        y_cuda.backward(grad.to(device))

    for cpu_t, cuda_t in ((y_cpu, y_cuda),
                          (conv_cpu.bias.grad.data, conv_cuda.bias.grad.data),
                          (conv_cpu.weight.grad.data, conv_cuda.weight.grad.data)):
        self.assertEqual(cpu_t, cuda_t, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose2d_size_1_kernel(self, device):
    """1x1 ConvTranspose2d: the non-cuDNN CUDA path must agree with the CPU
    result for output, weight gradient, and bias gradient."""
    x_cpu = torch.randn(2, 3, 5, 5)
    conv_cpu = torch.nn.ConvTranspose2d(3, 3, kernel_size=1)
    y_cpu = conv_cpu(x_cpu)
    grad = torch.rand_like(y_cpu)
    y_cpu.backward(grad)

    with cudnn.flags(enabled=False):
        conv_cuda = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device)
        # Copy the CPU parameters so both runs are numerically comparable.
        conv_cuda.bias.data.copy_(conv_cpu.bias.data)
        conv_cuda.weight.data.copy_(conv_cpu.weight.data)
        y_cuda = conv_cuda(x_cpu.to(device))
        y_cuda.backward(grad.to(device))

    for cpu_t, cuda_t in ((y_cpu, y_cuda),
                          (conv_cpu.bias.grad.data, conv_cuda.bias.grad.data),
                          (conv_cpu.weight.grad.data, conv_cuda.weight.grad.data)):
        self.assertEqual(cpu_t, cuda_t, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose3d_size_1_kernel(self, device):
    """1x1x1 ConvTranspose3d: the non-cuDNN CUDA path must agree with the
    CPU result for output, weight gradient, and bias gradient."""
    x_cpu = torch.randn(2, 3, 3, 5, 5)
    conv_cpu = torch.nn.ConvTranspose3d(3, 3, kernel_size=1)
    y_cpu = conv_cpu(x_cpu)
    grad = torch.rand_like(y_cpu)
    y_cpu.backward(grad)

    with cudnn.flags(enabled=False):
        conv_cuda = torch.nn.ConvTranspose3d(3, 3, kernel_size=1).to(device)
        # Copy the CPU parameters so both runs are numerically comparable.
        conv_cuda.bias.data.copy_(conv_cpu.bias.data)
        conv_cuda.weight.data.copy_(conv_cpu.weight.data)
        y_cuda = conv_cuda(x_cpu.to(device))
        y_cuda.backward(grad.to(device))

    for cpu_t, cuda_t in ((y_cpu, y_cuda),
                          (conv_cpu.bias.grad.data, conv_cuda.bias.grad.data),
                          (conv_cpu.weight.grad.data, conv_cuda.weight.grad.data)):
        self.assertEqual(cpu_t, cuda_t, atol=1e-5, rtol=0, exact_device=False)
def _ordered_sequence(self, device, dtype):
    """Return five random-length (1..6) random-valued 1-D tensors, sorted
    longest-first (the order pack_padded_sequence expects)."""
    tensors = [torch.empty(random.randint(1, 6), device=device, dtype=dtype)
               for _ in range(5)]
    filled = [t.random_(-128, 128) for t in tensors]
    return sorted(filled, key=len, reverse=True)
def _padded_sequence(self, device, dtype):
    """Pad the ordered random sequences into one batch tensor and return
    (padded_tensor, original_lengths)."""
    sequences = self._ordered_sequence(device, dtype)
    original_lengths = [len(s) for s in sequences]
    return rnn_utils.pad_sequence(sequences), original_lengths
@onlyCUDA
def test_device_mask(self, device):
    """Moving a PackedSequence to CUDA must move its data while keeping its
    dtype, for both sorted and unsorted packing."""
    for enforce_sorted in (True, False):
        batch, seq_lengths = self._padded_sequence('cpu', torch.float)
        seq = rnn_utils.pack_padded_sequence(
            batch, seq_lengths, enforce_sorted=enforce_sorted)
        self.assertFalse(seq.is_cuda)
        seq = seq.to(device)
        self.assertTrue(seq.is_cuda)
        padded_again, _ = rnn_utils.pad_packed_sequence(seq)
        self.assertTrue(padded_again.is_cuda)
        self.assertEqual(padded_again.dtype, torch.float)
@onlyCUDA
def test_overwrite_module_params_on_conversion_cpu_device(self, device):
    """Behavior of views into module parameters across .to(device), with
    overwrite_module_params_on_conversion both off (default) and on."""
    # With the default
    # `torch.__future__.get_overwrite_module_params_on_conversion() == False`,
    # a view of a parameter keeps pointing at the old CPU storage while the
    # view's base variable ends up on the new device.
    m = nn.Linear(20, 10)
    mw = m.weight[:]
    m.to(device)
    with torch.no_grad():
        # Without using `torch.no_grad()`, this will leak CUDA memory.
        # (Issue is filed at https://github.com/pytorch/pytorch/issues/21875)
        mw[0][0] = 5
    self.assertTrue(mw[0][0].device.type == "cpu")
    self.assertTrue(mw._base[0][0].device.type == "cuda")
    try:
        torch.__future__.set_overwrite_module_params_on_conversion(True)
        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # a view to a module's parameters is still pointing to the same storage as
        # its base variable after converting the module to a different device.
        m = nn.Linear(20, 10)
        mw = m.weight[:]
        m.to(device)
        with torch.no_grad():
            mw[0][0] = 5
        self.assertTrue(mw[0][0] == mw._base[0][0])
        # Also in overwrite mode, `m.to(device)` does not preserve previous
        # references to `m`'s parameters or gradients.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20)
        weight_ref = m.weight
        weight_grad_ref = m.weight.grad
        m.to(device)
        self.assertNotEqual(weight_ref.device, m.weight.device)
        self.assertNotEqual(weight_grad_ref.device, m.weight.grad.device)
    finally:
        # Restore the global default so later tests are unaffected.
        torch.__future__.set_overwrite_module_params_on_conversion(False)
@onlyCUDA
@dtypes(*((torch.float, torch.double, torch.bfloat16, torch.half)
          if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
def test_embedding_max_norm_device(self, device, dtype):
    """max_norm renorming on device: duplicate indices agree, norms clipped."""
    emb = nn.Embedding(22, 5, max_norm=1.0).to(device, dtype=dtype)
    idx = torch.tensor([2, 8, 8, 6], device=device, dtype=torch.long)
    out = emb(idx)
    # Both lookups of index 8 must yield identical rows.
    self.assertEqual(out[1], out[2])
    # Every looked-up row is renormed to at most max_norm (1.0).
    self.assertTrue(out.data.norm(p=2, dim=1).le(1).all())
@skipCUDAIfRocm
@onlyCUDA
@dtypes(torch.half, torch.float)
def test_softmax(self, device, dtype):
    """softmax with dtype=float matches a pure-float32 softmax bit-for-bit."""
    x = torch.rand(32, 100, device=device, dtype=dtype, requires_grad=True)
    x_fp32 = x.to(torch.float).detach().requires_grad_(True)
    y = F.softmax(x, dim=-1, dtype=torch.float)
    y_fp32 = F.softmax(x_fp32, dim=-1)
    self.assertEqual(y, y_fp32, atol=0, rtol=0)

    grad_out = torch.empty_like(y_fp32).uniform_()
    y.backward(grad_out)
    y_fp32.backward(grad_out)
    # Gradients must also agree exactly after casting back to the input dtype.
    self.assertEqual(x.grad, x_fp32.grad.to(dtype), atol=0, rtol=0)
@onlyCUDA
def test_pool3d_size_one_feature_dim(self, device):
    """Pooling over a size-1 feature dim matches CPU even with odd strides."""
    base = torch.randn(7, 1, 5, 3, 2, device=device)
    odd_strides = [30, 1234, 6, 2, 1]
    dev_input = base.as_strided(base.size(), odd_strides)
    cpu_input = base.cpu().as_strided(base.size(), odd_strides)
    pools = {
        'max_pool3d': lambda t: F.max_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
        'avg_pool3d': lambda t: F.avg_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
    }
    for name, pool in pools.items():
        self.assertEqual(pool(dev_input), pool(cpu_input).to(device), msg=name)
@onlyCUDA
@largeTensorTest('6GB')
def test_pool3d_large_size_int64(self, device):
    """max_pool3d on a tensor requiring 64-bit indexing matches CPU."""
    huge = torch.randn(70, 32, 100, 100, 100, dtype=torch.half, device=device)
    pooled = torch.nn.functional.max_pool3d(huge, 5)
    torch.cuda.synchronize()  # surface any async indexing error here

    ref = torch.nn.functional.max_pool3d(huge.cpu().float(), 5)
    self.assertEqual(pooled, ref, exact_dtype=False)
@onlyCUDA
def test_AvgPool3d_backward_after_cat_dim1_device(self, device):
    """Regression test: avg_pool3d backward must accept a grad tensor whose
    dim-0 stride has been doubled (the layout produced by cat along dim 1)."""
    x = torch.randn(1, 3, 4, 4, 4, device=device, requires_grad=True)
    y = F.avg_pool3d(x, kernel_size=3, padding=1, stride=2)

    grad = torch.randn(y.size(), device=device)
    # increase the stride in dimension 0. the tensor is still contiguous
    # because size[0] is 1
    stride = list(grad.stride())
    stride[0] = stride[0] * 2
    grad.set_(grad.storage(), 0, grad.size(), stride)
    assert grad.is_contiguous()

    # Must not crash even though grad's strides are non-standard.
    y.backward(grad)
def test_pooling_size_empty(self, device):
    """An empty output-size list is rejected by every adaptive pooling op."""
    t = torch.rand([1, 2, 3, 4], device=device)
    adaptive_pools = (
        F.adaptive_avg_pool1d, F.adaptive_avg_pool2d, F.adaptive_avg_pool3d,
        F.adaptive_max_pool1d, F.adaptive_max_pool2d, F.adaptive_max_pool3d,
    )
    for pool in adaptive_pools:
        self.assertRaises(RuntimeError, lambda pool=pool: pool(t, []))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_empty_input(self, device, dtypes):
    """An empty index tensor yields all-zero bags, dense and sparse alike."""
    num_embeddings, dim = 4, 3
    empty_idx = torch.tensor([], device=device, dtype=dtypes[0])
    for sparse in (True, False):
        bag = torch.nn.EmbeddingBag(num_embeddings, dim, sparse=sparse)
        bag.to(device)
        # Both a single empty bag and two empty bags must come out as zeros.
        for offset_values in ([0], [0, 0]):
            offs = torch.tensor(offset_values, device=device, dtype=dtypes[1])
            out = bag(input=empty_idx, offsets=offs)
            self.assertEqual(out, torch.zeros_like(out))
@skipCUDAIf(True, "cuda assert is not recovarable.")
@dtypes(*itertools.product((torch.float, torch.double), (torch.int, torch.long)))
@parametrize_test("padding_idx", [None, 0])
@parametrize_test("mode", ["sum", "mean", "max"])
def test_embedding_bag_out_of_bounds_idx(self, device, dtypes, padding_idx, mode):
    """Out-of-range indices (negative or >= num_embeddings) must raise.

    Bug fix: the body used to clobber the parametrized ``padding_idx`` with a
    hard-coded ``padding_idx = 0``, so the ``None`` case was never exercised.
    """
    w_dtype, idx_dtype = dtypes
    # idx1: negative out-of-bound; idx2: index >= num_embeddings (10).
    idx1 = torch.tensor([[-1, 1]], device=device, dtype=idx_dtype)
    idx2 = torch.tensor([[11, 8]], device=device, dtype=idx_dtype)
    weight = torch.randn(10, 2, device=device, dtype=w_dtype)
    if mode == 'sum':
        # Only 'sum' supports per_sample_weights; cover both with and without.
        per_sample_weights = (None, torch.randn_like(idx1, device=device, dtype=w_dtype))
    else:
        per_sample_weights = (None,)

    for p_s_weights, idx in itertools.product(per_sample_weights, (idx1, idx2)):
        msg = "Expected idx >= 0 && idx < num_embeddings"
        with self.assertRaisesRegex(RuntimeError, msg):
            torch.nn.functional.embedding_bag(idx, weight,
                                              per_sample_weights=p_s_weights,
                                              padding_idx=padding_idx,
                                              mode=mode)
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_EmbeddingBag_per_sample_weights_failures(self, device, dtypes):
    """Invalid per_sample_weights arguments raise the expected errors."""
    # Failure 1: per_sample_weights dtype differs from the weight dtype.
    # CPU and CUDA report this with different messages.
    es = nn.EmbeddingBag(5, 2, mode='sum').to(dtype=torch.float, device=device)
    input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
    offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
    per_sample_weights = torch.randn_like(input, dtype=torch.double, device=device)
    if device == 'cpu':
        with self.assertRaisesRegex(RuntimeError, 'have the same type as'):
            es(input, offsets, per_sample_weights)
    else:
        with self.assertRaisesRegex(RuntimeError, 'expected scalar type'):
            es(input, offsets, per_sample_weights)

    # Failure 2.1: input and per_sample_weights have different sizes (1-D input).
    input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
    offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
    per_sample_weights = torch.randn(5, dtype=torch.float, device=device)
    with self.assertRaisesRegex(ValueError, 'same shape as the input'):
        es(input, offsets, per_sample_weights)

    # Failure 2.2: shape mismatch again, this time with 2-D input / no offsets.
    input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
    offsets = None
    per_sample_weights = torch.randn(7 * 3, dtype=torch.float, device=device)
    with self.assertRaisesRegex(ValueError, 'same shape as the input'):
        es(input, offsets, per_sample_weights)

    # Failure 3: per_sample_weights is only implemented for mode='sum'.
    for unsupported_mode in ('max', 'mean'):
        es = nn.EmbeddingBag(5, 2, mode=unsupported_mode).to(
            dtype=torch.float, device=device)
        input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
        offsets = None
        per_sample_weights = torch.randn(7, 3, dtype=torch.float, device=device)
        with self.assertRaisesRegex(NotImplementedError,
                                    "only supported for mode='sum'"):
            es(input, offsets, per_sample_weights)
def _embedding_bag_reference_impl(self, input, weight, offsets=None, mode='sum',
per_sample_weights=None, include_last_offset=False):
assert mode == 'sum' or per_sample_weights is None
assert offsets is not None
if per_sample_weights is None:
per_sample_weights = torch.ones(input.size()).to(
dtype=weight.dtype, device=weight.device
)
assert input.numel() == per_sample_weights.numel()
bags = []
long_input = input.to(torch.long)
embeddings = weight.index_select(0, long_input) * per_sample_weights.unsqueeze(1)
if include_last_offset:
for index in range(len(offsets) - 1):
offset = offsets[index]
next_offset = offsets[index + 1]
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
else:
for index, offset in enumerate(offsets):
if index + 1 < len(offsets):
next_offset = offsets[index + 1]
else:
next_offset = len(long_input)
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
return torch.stack(bags)
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_empty_per_sample_weights_and_offsets(self, device, dtypes):
    """EmbeddingBag forward/backward with an entirely empty input: output
    matches the reference implementation and every gradient is zero/empty."""
    def test_per_sample_weights(mode, trainable_scale):
        es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
        # Deterministic weights 1..10 so failures are reproducible.
        es.weight.data.copy_(
            torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
        input = torch.tensor([], device=device, dtype=dtypes[0])
        offsets = torch.tensor([0, 0, 0, 0, 0], device=device, dtype=dtypes[1])
        per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
            .requires_grad_(trainable_scale)
        ref_per_sample_weights = \
            per_sample_weights.detach().requires_grad_(trainable_scale)
        reference_weights = es.weight.detach().requires_grad_()

        expected = self._embedding_bag_reference_impl(
            input, reference_weights, offsets, mode, ref_per_sample_weights)
        result = es(input, offsets, per_sample_weights)
        self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

        grad = torch.randn_like(expected)
        result.backward(grad)
        # Since all bags are empty, the weight gradient should
        # simply be a zero tensor
        ref_weights_grad = torch.zeros_like(es.weight)
        self.assertEqual(es.weight.grad, ref_weights_grad,
                         atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        if trainable_scale:
            # per_sample_weights is empty, so its grad is an empty tensor too.
            ref_per_sample_weights_grad = torch.empty_like(per_sample_weights)
            self.assertEqual(per_sample_weights.grad, ref_per_sample_weights_grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

    modes = ('sum',)
    trainable_scale = (True, False)
    for mode, trainable in itertools.product(modes, trainable_scale):
        test_per_sample_weights(mode, trainable)
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_per_sample_weights_and_offsets(self, device, dtypes):
    """EmbeddingBag with per_sample_weights: forward and backward must match
    the bag-by-bag reference implementation (weights and scale gradients)."""
    def test_per_sample_weights(mode, trainable_scale):
        es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
        # Deterministic weights 1..10 so failures are reproducible.
        es.weight.data.copy_(
            torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
        input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
        offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])
        per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
            .requires_grad_(trainable_scale)
        # Detached twins for the reference computation.
        ref_per_sample_weights = \
            per_sample_weights.detach().requires_grad_(trainable_scale)
        reference_weights = es.weight.detach().requires_grad_()

        expected = self._embedding_bag_reference_impl(
            input, reference_weights, offsets, mode, ref_per_sample_weights)
        result = es(input, offsets, per_sample_weights)
        self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

        grad = torch.randn_like(expected).to(dtype=dtypes[2], device=device)
        result.backward(grad)
        expected.backward(grad)
        self.assertEqual(es.weight.grad, reference_weights.grad,
                         atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        if trainable_scale:
            self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

    # per_sample_weights is only supported for mode='sum'.
    modes = ('sum',)
    trainable_scale = (True, False)
    for mode, trainable in itertools.product(modes, trainable_scale):
        test_per_sample_weights(mode, trainable)
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_per_sample_weights_and_new_offsets(self, device, dtypes):
    """Like the per_sample_weights test, but additionally exercises the
    include_last_offset layout (offsets end with the total input length)
    and the no-weight cases for max/mean modes."""
    def test_per_sample_weights_new_offsets(mode, trainable_scale, include_last_offset, has_weight=True):
        es = nn.EmbeddingBag(5, 2, mode=mode, include_last_offset=include_last_offset).to(dtype=dtypes[2], device=device)
        # Deterministic weights 1..10 so failures are reproducible.
        es.weight.data.copy_(
            torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
        input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
        offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])

        if include_last_offset:
            # Append the total length as the final (sentinel) offset.
            offsets = torch.cat((offsets, torch.tensor([input.size(0)], device=device, dtype=dtypes[1])), 0)

        if has_weight:
            per_sample_weights = torch.randn_like(input, device=device, dtype=dtypes[2]) \
                .requires_grad_(trainable_scale)
            ref_per_sample_weights = \
                per_sample_weights.detach().requires_grad_(trainable_scale)
        else:
            per_sample_weights = None
            ref_per_sample_weights = None

        reference_weights = es.weight.detach().requires_grad_()

        expected = self._embedding_bag_reference_impl(
            input, reference_weights, offsets, mode, ref_per_sample_weights, include_last_offset)
        result = es(input, offsets, per_sample_weights)
        self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

        grad = torch.randn_like(expected)
        result.backward(grad)
        expected.backward(grad)
        self.assertEqual(es.weight.grad, reference_weights.grad,
                         atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        if has_weight and trainable_scale:
            self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

    trainable_scale = (True, False)
    include_last_offset = (True, False)
    # (mode, has_weight): per_sample_weights only exists for 'sum'.
    modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))
    for (mode, has_weight), trainable, include_last_offset in itertools.product(
        modes, trainable_scale, include_last_offset
    ):
        test_per_sample_weights_new_offsets(
            mode, trainable, include_last_offset, has_weight
        )
def _test_EmbeddingBag_vs_Embedding(self, N, D, B, L, max_norm=None,
                                    mode='mean',
                                    device='cpu',
                                    wdtype=torch.float,
                                    dtype=torch.long,
                                    test_per_sample_weights=False,
                                    trainable_per_sample_weights=False,
                                    sparse=False,
                                    test_backward=True,
                                    backward_prec=None):
    """Cross-check nn.EmbeddingBag against nn.Embedding plus a manual
    reduction over the bag dimension.

    N: num embeddings, D: embedding dim, B: number of bags, L: fixed bag
    length. Both modules share the same weight; outputs and (optionally)
    weight / per-sample-weight gradients must agree within tolerance.
    """
    es = nn.EmbeddingBag(N, D, mode=mode, sparse=sparse, max_norm=max_norm).to(device, wdtype)
    e = nn.Embedding(N, D, max_norm=max_norm).to(device, wdtype)
    e.weight.data.copy_(es.weight)
    input = torch.randint(N, (B, L), device=device, dtype=dtype)
    # Fixed-length bags: offsets are 0, L, 2L, ...
    offsets = torch.arange(0, B, device=device, dtype=dtype).mul_(L)
    grad_output = torch.rand(B, D, device=device, dtype=wdtype)

    if test_per_sample_weights:
        # To prevent large gradients, weights should sum to 1 for each bag
        per_sample_weights = \
            torch.randn(B, L, device=device, dtype=wdtype).softmax(dim=-1)
        per_sample_weights_reference = \
            per_sample_weights.clone().requires_grad_(trainable_per_sample_weights)
        per_sample_weights.requires_grad_(trainable_per_sample_weights)
        output = es(input.view(-1), offsets, per_sample_weights.view(-1))
    else:
        output = es(input.view(-1), offsets)
        per_sample_weights = None
        per_sample_weights_reference = None

    # Reference: plain Embedding lookup followed by the matching reduction.
    if mode == 'sum':
        if test_per_sample_weights:
            ref_output = (e(input) * per_sample_weights_reference.unsqueeze(-1)).sum(1)
        else:
            ref_output = e(input).sum(1)
    elif mode == 'mean':
        assert not test_per_sample_weights
        ref_output = e(input).mean(1)
    elif mode == 'max':
        assert not test_per_sample_weights
        ref_output = e(input).max(1)[0]

    self.assertEqual(output, ref_output, atol=dtype2prec_DONTUSE[wdtype], rtol=0)

    if not test_backward:
        return

    output.backward(grad_output)
    ref_output.backward(grad_output)
    es_weight_grad = es.weight.grad.data
    if sparse:
        # Densify so the gradients are directly comparable.
        es_weight_grad = es.weight.grad.data.to_dense()

    # We have more floating point error here because we are dealing with larger numbers
    if backward_prec is None:
        needed_prec = dtype2prec_DONTUSE[wdtype] * 5
    else:
        needed_prec = backward_prec

    self.assertEqual(es_weight_grad, e.weight.grad, atol=needed_prec, rtol=0)

    if test_per_sample_weights and trainable_per_sample_weights:
        self.assertEqual(per_sample_weights.grad, per_sample_weights_reference.grad,
                         atol=dtype2prec_DONTUSE[wdtype], rtol=0)
@skipCUDAIf(True, "Temporarily disabled. See t54369166")
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.half, torch.float, torch.double)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_per_sample_weights_and_no_offsets(self, device, dtypes):
    """EmbeddingBag with 2-D input (no offsets) vs the Embedding reference,
    over dense/sparse gradients and trainable/fixed per-sample weights.

    Fix: a second CUDA-only loop over ``sparse=False`` re-ran a strict
    subset of the combinations already covered above (``dtypesIfCUDA``
    already includes ``torch.half``); the redundant duplicate was removed.
    """
    def run_tests(mode, sparse, trainable_per_sample_weights):
        kwargs = dict(test_per_sample_weights=True, device=device,
                      mode=mode, wdtype=dtypes[1], dtype=dtypes[0], sparse=sparse,
                      trainable_per_sample_weights=trainable_per_sample_weights)

        # Simple case
        self._test_EmbeddingBag_vs_Embedding(2, 3, 5, 7, **kwargs)

        # B * L > 1000
        self._test_EmbeddingBag_vs_Embedding(2, 5, 53, 23, **kwargs)

        # Large num_embedding
        self._test_EmbeddingBag_vs_Embedding(101, 5, 3, 7, **kwargs)

        # Large embedding_dim
        self._test_EmbeddingBag_vs_Embedding(2, 101, 3, 7, **kwargs)

    modes = ('sum',)
    sparsity = (True, False)
    trainable_scale = (True, False)
    for mode, sparse, trainable_per_sample_weights in \
            itertools.product(modes, sparsity, trainable_scale):
        run_tests(mode, sparse, trainable_per_sample_weights)
def _test_EmbeddingBag(
    self,
    device,
    mode,
    sparse,
    wdtype=torch.double,
    dtype=torch.long,
    odtype=torch.long,
    test_backward=True,
):
    """End-to-end EmbeddingBag checks for one (mode, sparse) combination:
    a hand-computed example (1-D with offsets, then 2-D), all-empty bags,
    randomized comparison against Embedding, and invalid-input errors.
    """
    # check a known test example
    es = nn.EmbeddingBag(5, 2, mode=mode, sparse=sparse).to(device, wdtype)
    es.weight.data.copy_(torch.arange(1, 11, device=device, dtype=wdtype).view_as(es.weight))
    input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtype)
    # Bags 0, 2 and 4 are empty; bags 1 and 3 each hold three indices.
    offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=odtype)

    grad_output = torch.tensor(
        [1, 2,
         3, 4], device=device, dtype=wdtype).view(2, 2)
    # 99s flow into the empty bags and must not reach the weight gradient.
    grad_output_with_empty = torch.tensor(
        [99, 99,
         1, 2,
         99, 99,
         3, 4,
         99, 99], device=device, dtype=wdtype).view(5, 2)

    if mode == "sum" or mode == "mean":
        denominator = 1 if mode == "sum" else 3
        expected_output = torch.tensor(
            [[13, 16],
             [13, 16]], device=device, dtype=wdtype) / denominator

        expected_output_with_empty = torch.tensor(
            [[0, 0],
             [13, 16],
             [0, 0],
             [13, 16],
             [0, 0]], device=device, dtype=wdtype) / denominator

        expected_grad_weight = torch.tensor(
            [[3, 4],
             [5, 8],
             [0, 0],
             [1, 2],
             [3, 4]], device=device, dtype=wdtype) / denominator
    elif mode == "max":
        expected_output = torch.tensor(
            [[7, 8],
             [9, 10]], device=device, dtype=wdtype)

        expected_output_with_empty = torch.tensor(
            [[0, 0],
             [7, 8],
             [0, 0],
             [9, 10],
             [0, 0]], device=device, dtype=wdtype)

        # Max routes the gradient only to the argmax rows.
        expected_grad_weight = torch.tensor(
            [[0, 0],
             [0, 0],
             [0, 0],
             [1, 2],
             [3, 4]], device=device, dtype=wdtype)
    output = es(input, offsets)
    output.backward(grad_output_with_empty)

    es_weight_grad = es.weight.grad.data
    if sparse:
        es_weight_grad = es.weight.grad.to_dense()
    self.assertEqual(output, expected_output_with_empty)
    self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)

    # check same example except as 2D (2 x 3)
    input = input.view(2, -1)
    es.zero_grad()
    output = es(input)
    output.backward(grad_output)

    es_weight_grad = es.weight.grad
    if sparse:
        es_weight_grad = es.weight.grad.to_dense()
    self.assertEqual(output, expected_output)
    self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)

    # test all empty bags
    es.zero_grad()
    inputs = torch.tensor([], dtype=dtype, device=device)
    offsets = torch.tensor([0, 0, 0, 0], dtype=odtype, device=device)
    es(inputs, offsets).sum().backward()
    dense_grad = es.weight.grad
    if dense_grad.is_sparse:
        dense_grad = dense_grad.to_dense()
    self.assertEqual(dense_grad, torch.zeros_like(es.weight))

    # now compare EmbeddingBag vs Embedding + Sum/Mean, for constant bag length
    N, D, B, L = random.randint(1, 100), random.randint(1, 100), random.randint(1, 50), random.randint(1, 50)
    kwargs = dict(mode=mode, sparse=sparse, device=device, wdtype=wdtype, dtype=dtype, test_backward=test_backward)
    self._test_EmbeddingBag_vs_Embedding(N, D, B, L, **kwargs)
    for max_norm in (None, 3):
        for p in itertools.product([1, 2], repeat=4):
            self._test_EmbeddingBag_vs_Embedding(*p, max_norm=max_norm, **kwargs)

    # check that giving illegal input combos raises error
    es = nn.EmbeddingBag(10, 20, mode=mode, sparse=sparse)
    input = torch.ones(3, 4, dtype=dtype)
    offset = torch.arange(0, 3, dtype=odtype)
    # 2-D input may not be combined with offsets; 1-D input requires them.
    self.assertRaises(ValueError, lambda: es(input, offset))
    self.assertRaises(ValueError, lambda: es(input.view(-1)))
    offset[0] = 1
    if self.device_type == "cpu":
        # First offset must be 0 (checked on CPU).
        self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
        offset[0] = 0
        offset[-1] = 100
        # Offsets must not exceed the input length.
        self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_embedding_bag_device(self, device, dtypes):
    """Run the dense EmbeddingBag checks for each mode, then the sparse
    ones, skipping backward where precision is known to be problematic."""
    self._test_EmbeddingBag(device, 'sum', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
    self._test_EmbeddingBag(device, 'mean', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
    self._test_EmbeddingBag(device, 'max', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])

    test_backward = False
    if self.device_type == 'cuda':
        # see 'todo' in test_embedding_bag.
        test_backward = dtypes[2] is not torch.float16
    elif self.device_type == 'cpu':
        # TODO: figure out why precision on sparse embeddings isn't the
        # same as for dense — NOTE(review): presumably accumulated rounding
        # in the sparse grad path; confirm before enabling float backward.
        test_backward = dtypes[2] is not torch.float

    self._test_EmbeddingBag(
        device,
        'sum',
        True,
        wdtype=dtypes[2],
        dtype=dtypes[0],
        odtype=dtypes[1],
        test_backward=test_backward,
    )
    self._test_EmbeddingBag(
        device,
        'mean',
        True,
        wdtype=dtypes[2],
        dtype=dtypes[0],
        odtype=dtypes[1],
        test_backward=test_backward,
    )
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_embedding_bag_non_contiguous_weight(self, device, dtypes):
    """A non-contiguous weight gives the same result as a contiguous clone."""
    base = torch.randn(3, 4, dtype=dtypes[2], device=device)
    noncontig_weight = base[:, :3]  # strided (non-contiguous) view
    contig_weight = noncontig_weight.clone().contiguous()
    idx = torch.tensor([0, 1, 2], dtype=dtypes[0], device=device)
    offs = torch.tensor([0, 2], dtype=dtypes[1], device=device)
    for mode in ('sum', 'mean', 'max'):
        out_noncontig = F.embedding_bag(
            input=idx,
            weight=noncontig_weight,
            offsets=offs,
            mode=mode,
        )
        out_contig = F.embedding_bag(
            input=idx,
            weight=contig_weight,
            offsets=offs,
            mode=mode,
        )
        self.assertEqual(out_noncontig, out_contig)
@onlyCUDA
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_bfloat16(self, device, dtypes):
    """bfloat16 sparse EmbeddingBag works for both sum and mean reductions."""
    for mode in ('sum', 'mean'):
        self._test_EmbeddingBag(device, mode, True, wdtype=torch.bfloat16,
                                dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
@onlyCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_multihead_attention_dtype(self, device, dtype):
    """MultiheadAttention preserves input shape and dtype on the test device.

    Fix: the module was moved with a hard-coded ``.cuda()``, which ignores
    the ``device`` passed by the harness and targets the wrong GPU on a
    non-default device index; use ``.to(device, dtype)`` instead.
    """
    embed_dim = 128
    num_heads = 8
    sl = 10  # sequence length
    bs = 8   # batch size
    model = nn.MultiheadAttention(embed_dim, num_heads).to(device, dtype)
    q = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
    k = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
    v = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
    out = model(q, k, v)
    # out is (attn_output, attn_weights); the output matches the query.
    self.assertEqual(q.size(), out[0].size())
    self.assertEqual(dtype, out[0].dtype)
@dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
@dtypes(torch.float)
def test_Conv2d_naive_groups(self, device, dtype):
    """A groups=2 convolution must equal two independent half-convolutions
    concatenated along the channel dim (outputs and all gradients)."""
    # Check that grouped convolutions matches two half convolutions
    m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)
    i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
    output = m(i)
    grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
    output.backward(grad_output)

    # First half: input/output channels [0, 2)
    m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
    m1.weight.data.copy_(m.weight.data[:2])
    m1.bias.data.copy_(m.bias.data[:2])
    i1 = i.data[:, :2].contiguous().requires_grad_(True)
    output1 = m1(i1)
    output1.backward(grad_output[:, :2].contiguous())

    # Second half: input/output channels [2, 4)
    m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
    m2.weight.data.copy_(m.weight.data[2:])
    m2.bias.data.copy_(m.bias.data[2:])
    i2 = i.data[:, 2:].contiguous().requires_grad_(True)
    output2 = m2(i2)
    output2.backward(grad_output[:, 2:].contiguous())

    self.assertEqual(output, torch.cat([output1, output2], 1))
    self.assertEqual(i.grad.data,
                     torch.cat([i1.grad.data, i2.grad.data], 1),
                     atol=dtype2prec_DONTUSE[dtype], rtol=0)
    self.assertEqual(m.bias.grad.data,
                     torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                     atol=dtype2prec_DONTUSE[dtype], rtol=0)
    self.assertEqual(m.weight.grad.data,
                     torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                     atol=dtype2prec_DONTUSE[dtype], rtol=0)
@dtypes(torch.double)
def test_Conv2d_backward_depthwise(self, device, dtype):
    """gradcheck a depthwise (groups == in_channels) conv2d, cuDNN on/off."""
    x = torch.randn(2, 2, 4, 20, device=device, dtype=dtype, requires_grad=True)
    weight = torch.randn(2, 1, 3, 5, device=device, dtype=dtype, requires_grad=True)

    def depthwise(inp, w):
        return torch.nn.functional.conv2d(inp, w, bias=None, stride=(1, 10), groups=2)

    for cudnn_on in (False, True):
        with torch.backends.cudnn.flags(enabled=cudnn_on):
            torch.autograd.gradcheck(depthwise, (x, weight))
def _test_batchnorm_grad(self, device, dtype=torch.double):
    """Run grad and gradgrad checks on F.batch_norm in eval and train mode."""
    batch, channels, feat = 4, 5, 6
    inp = torch.arange(batch * channels * feat, device=device,
                       requires_grad=True, dtype=dtype).view(batch, channels, feat)
    weight = torch.arange(1, channels + 1, device=device, requires_grad=True, dtype=dtype)
    bias = torch.arange(channels, device=device, requires_grad=True, dtype=dtype)
    running_mean = 1 - torch.arange(channels, device=device, dtype=dtype)
    running_var = 2 * torch.arange(channels, device=device, dtype=dtype)
    for training in (False, True):
        _assertGradAndGradgradChecks(
            self, F.batch_norm,
            (inp, running_mean, running_var, weight, bias, training, 0.1, 0.0001))
def test_batchnorm_grad(self, device):
    """Gradcheck batch_norm; on CUDA, repeat with cuDNN disabled."""
    self._test_batchnorm_grad(device)
    cudnn_available = self.device_type == 'cuda' and self.has_cudnn()
    if cudnn_available:
        with torch.backends.cudnn.flags(enabled=False):
            self._test_batchnorm_grad(device)
@onlyCUDA
def test_layernorm_half_precision(self):
    """fp16 layer_norm matches the fp32 computation exactly after casting."""
    width = 128
    inp = torch.rand(1, 5, width, device="cuda", dtype=torch.half) * 0.1
    shape = (width,)
    gamma = torch.ones(width, device="cuda", dtype=torch.half)
    beta = torch.zeros(width, device="cuda", dtype=torch.half)
    eps = 1e-5
    half_out = torch.layer_norm(inp, shape, gamma, beta, eps)
    full_out = torch.layer_norm(inp.float(), shape, gamma.float(), beta.float(), eps).half()
    self.assertEqual(half_out, full_out, atol=0, rtol=0)
@onlyCUDA
def test_layernorm_weight_bias(self):
    """layer_norm: weight=None behaves like all-ones, bias=None like zeros."""
    width = 128
    inp = torch.rand(1, 5, width, device="cuda", dtype=torch.float32) * 0.1
    shape = (width,)
    probe = torch.randn(width, device="cuda", dtype=torch.float32)
    ones = torch.ones(width, device="cuda", dtype=torch.float32)
    zeros = torch.zeros(width, device="cuda", dtype=torch.float32)
    eps = 1e-5

    # weight=None is equivalent to a weight of all ones (probe acts as bias).
    self.assertEqual(torch.layer_norm(inp, shape, None, probe, eps),
                     torch.layer_norm(inp, shape, ones, probe, eps))
    # bias=None is equivalent to a bias of all zeros (probe acts as weight).
    self.assertEqual(torch.layer_norm(inp, shape, probe, None, eps),
                     torch.layer_norm(inp, shape, probe, zeros, eps))
def test_hardsigmoid_grad(self, device):
    """gradcheck F.hardsigmoid on inputs spanning its kink points."""
    x = ((torch.randn(4, 16, 16, device=device) - 0.5) * 10).requires_grad_()
    self.assertTrue(gradcheck(F.hardsigmoid, (x,)))
@onlyNativeDeviceTypes
def test_hardswish_grad(self, device):
    """gradcheck F.hardswish on inputs spanning its kink points."""
    x = ((torch.randn(4, 16, 16, device=device) - 0.5) * 10).requires_grad_()
    self.assertTrue(gradcheck(F.hardswish, (x,)))
def _test_batchnorm_eval(self, ndim, device, dtype, module_dtype=None):
    """Eval-mode batchnorm must be deterministic: two identical
    forward/backward passes give identical outputs and input gradients.

    Checked first with running stats (standard eval), then with
    track_running_stats=False.
    """
    module_dtype = module_dtype or dtype
    module = nn.BatchNorm1d(3).to(device, module_dtype)
    module.eval()

    data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
    grad = torch.rand([3] * ndim, device=device, dtype=dtype)

    # 1st pass
    res1 = module(data)
    res1.backward(grad)
    grad1 = data.grad.clone()

    # 2nd pass (zero the accumulated grad first)
    if data.grad is not None:
        data.grad.data.zero_()

    res2 = module(data)
    res2.backward(grad)
    grad2 = data.grad.clone()
    self.assertEqual(res1, res2)
    self.assertEqual(grad1, grad2)

    # track_running_stats=False: train once, then eval must reproduce it.
    module = nn.BatchNorm1d(3, track_running_stats=False).to(device, module_dtype)

    data = torch.rand(4, 3, device=device, dtype=dtype, requires_grad=True)
    grad = torch.rand(4, 3, device=device, dtype=dtype)

    # 1st pass (training mode)
    res1 = module(data)
    res1.backward(grad)
    grad1 = data.grad.clone()

    # 2nd pass in eval: without running stats, batch stats are used, so the
    # result must match the training pass exactly.
    module.eval()
    if data.grad is not None:
        data.grad.data.zero_()

    res2 = module(data)
    res2.backward(grad)
    grad2 = data.grad.clone()
    self.assertEqual(res1, res2)
    self.assertEqual(grad1, grad2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_eval(self, device, dtype):
    """Eval-mode batchnorm determinism for 2D/3D inputs, cuDNN on and off."""
    for ndim in (2, 3):
        self._test_batchnorm_eval(ndim, device, dtype)

    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            for ndim in (2, 3):
                self._test_batchnorm_eval(ndim, device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_eval_mixed(self, device, dtype):
    """Eval batchnorm with low-precision data and a float32 module."""
    for ndim in (2, 3):
        self._test_batchnorm_eval(ndim, device, dtype, torch.float)

    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            for ndim in (2, 3):
                self._test_batchnorm_eval(ndim, device, dtype, torch.float)
def _test_batchnorm_affine(self, ndim, device, dtype, module_dtype=None):
    """Affine batchnorm with identity weight/bias must equal non-affine."""
    module_dtype = module_dtype or dtype
    plain = nn.BatchNorm1d(3, affine=False).to(device, module_dtype)
    affine = nn.BatchNorm1d(3, affine=True).to(device, module_dtype)
    with torch.no_grad():
        affine.weight.fill_(1.0)  # identity scale
        affine.bias.zero_()       # zero shift

    data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
    grad = torch.ones_like(data, requires_grad=False)

    out_affine = affine(data)
    out_affine.backward(grad)
    grad_affine = data.grad.clone()
    data.grad.zero_()

    out_plain = plain(data)
    out_plain.backward(grad)
    grad_plain = data.grad

    self.assertEqual(out_affine, out_plain)
    self.assertEqual(grad_affine, grad_plain)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_affine(self, device, dtype):
    """Affine-vs-non-affine equivalence for 2D/3D inputs, cuDNN on and off."""
    for ndim in (2, 3):
        self._test_batchnorm_affine(ndim, device, dtype)

    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            for ndim in (2, 3):
                self._test_batchnorm_affine(ndim, device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_affine_mixed(self, device, dtype):
    """Mixed-precision affine batchnorm: low-precision data, float32 module."""
    cudnn_enabled = [False]
    if self.device_type == 'cuda' and self.has_cudnn():
        # NOTE(review): the cuDNN-enabled configuration is deliberately not
        # added to cudnn_enabled here — presumably pending cuDNN support for
        # these low-precision dtypes; confirm before enabling.
        pass

    for enabled in cudnn_enabled:
        with torch.backends.cudnn.flags(enabled=enabled):
            self._test_batchnorm_affine(2, device, dtype, torch.float)
            self._test_batchnorm_affine(3, device, dtype, torch.float)
def _test_batchnorm_simple_average(self, device, dtype, module_dtype=None):
    """With momentum=None, running stats use a cumulative (simple) average:
    re-running the same batches after a reset reproduces the outputs, and
    the final stats equal the mean of the two per-batch stats."""
    module_dtype = module_dtype or dtype
    module = nn.BatchNorm1d(3, momentum=None).to(dtype=module_dtype, device=device)
    zeros = torch.zeros(3, dtype=module_dtype, device=device)
    ones = torch.ones(3, dtype=module_dtype, device=device)
    # Freshly constructed stats: mean 0, var 1.
    self.assertEqual(module.running_mean, zeros)
    self.assertEqual(module.running_var, ones)

    data1 = torch.rand(4, 3, dtype=dtype, device=device)
    data2 = torch.rand(4, 3, dtype=dtype, device=device)

    # 1st pass: stats move away from the initial values.
    res1 = module(data1)
    running_mean1 = module.running_mean.clone()
    running_var1 = module.running_var.clone()
    self.assertNotEqual(running_mean1, zeros)
    self.assertNotEqual(running_var1, ones)

    # reset stats
    module.reset_running_stats()
    self.assertEqual(module.running_mean, zeros)
    self.assertEqual(module.running_var, ones)

    # 2nd pass on the other batch.
    res2 = module(data2)
    running_mean2 = module.running_mean.clone()
    running_var2 = module.running_var.clone()
    self.assertNotEqual(running_mean2, zeros)
    self.assertNotEqual(running_var2, ones)

    # reset stats
    module.reset_running_stats()
    self.assertEqual(module.running_mean, zeros)
    self.assertEqual(module.running_var, ones)

    # 3rd pass: both batches in sequence. Outputs reproduce the single-batch
    # runs, and the cumulative-average stats equal the mean of both.
    res3 = module(data1)
    res4 = module(data2)
    self.assertEqual(res3, res1)
    self.assertEqual(res4, res2)
    self.assertEqual(module.running_mean, (running_mean1 + running_mean2) / 2)
    self.assertEqual(module.running_var, (running_var1 + running_var2) / 2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_simple_average(self, device, dtype):
self._test_batchnorm_simple_average(device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_simple_average(device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_simple_average_mixed(self, device, dtype):
self._test_batchnorm_simple_average(device, dtype, torch.float)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_simple_average(device, dtype, torch.float)
def _test_maxpool_indices(self, num_dim, adaptive=False, device="cpu", dtype=torch.float):
def expected_indices(dim):
if dim == 1:
return torch.tensor([1, 3], dtype=torch.double).repeat(2, 2, 1)
if dim == 2:
return torch.tensor([[5, 7], [13, 15]], dtype=torch.double).repeat(2, 2, 1, 1)
def expected_grad(dim):
if dim == 1:
return torch.tensor([0, 1, 0, 1], dtype=torch.double).repeat(2, 2, 1)
grad = expected_grad(dim - 1)
zero = torch.zeros(grad.size())
return torch.stack((zero, grad, zero, grad), 2)
def expected_output(dim):
if dim == 1:
return torch.arange(2, 17, 2).view(2, 2, 2)
if dim == 2:
col = torch.arange(6, 63, 8)
return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)
if adaptive:
cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
else:
cls_name = 'MaxPool{}d'.format(num_dim)
module_cls = getattr(nn, cls_name)
module = module_cls(2, return_indices=True).to(device, dtype=dtype)
numel = 4 ** (num_dim + 1)
input = torch.arange(1, numel + 1).view(2, 2, *repeat(4, num_dim)).to(device, dtype=dtype)
input_var = input.clone().detach().requires_grad_()
output, indices = module(input_var)
if num_dim != 3:
expected_indices = expected_indices(num_dim)
expected_output = expected_output(num_dim)
self.assertEqual(indices.dim(), input.dim())
, expected_indices)
expected_output)
self.assertTrue(output.requires_grad)
self.assertFalse(indices.requires_grad)
grad_output = torch.ones(output.size(), device=device, dtype=dtype)
output.backward(grad_output, retain_graph=True)
expected_grad = expected_grad(num_dim)
ted_grad.view_as(input))
indices.add_(1)
self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
t = torch.tensor([[[float("-inf")]]])
m = nn.MaxPool1d(kernel_size=1, return_indices=True)
output, indices = m(t)
self.assertEqual(output[0, 0, 0], float("-inf"))
self.assertEqual(indices[0, 0, 0], 0)
t = torch.tensor([[[float("-inf")]]])
m = nn.MaxPool2d(kernel_size=1, return_indices=True)
output, indices = m(t)
self.assertEqual(output[0, 0, 0], float("-inf"))
self.assertEqual(indices[0, 0, 0], 0)
t = torch.tensor([[[[float("-inf")]]]])
m = nn.MaxPool3d(kernel_size=1, return_indices=True)
output, indices = m(t)
self.assertEqual(output[0, 0, 0, 0], float("-inf"))
self.assertEqual(indices[0, 0, 0, 0], 0)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_MaxPool1d_indices(self, device, dtype):
self._test_maxpool_indices(1, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_MaxPool2d_indices(self, device, dtype):
self._test_maxpool_indices(2, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_MaxPool3d_indices(self, device, dtype):
self._test_maxpool_indices(3, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_AdaptiveMaxPool1d_indices(self, device, dtype):
self._test_maxpool_indices(1, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_AdaptiveMaxPool2d_indices(self, device, dtype):
self._test_maxpool_indices(2, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_AdaptiveMaxPool3d_indices(self, device, dtype):
self._test_maxpool_indices(3, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_maxpool_indices_no_batch_dim(self, device, dtype):
max_pool_cases = [
(nn.MaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.MaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.MaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype))]
for module, input in max_pool_cases:
_, indices_no_batch = module(input)
_, indicies_single_batch = module(input.unsqueeze(0))
self.assertEqual(indices_no_batch, indicies_single_batch.squeeze(0))
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@onlyNativeDeviceTypes
def test_max_pool_nan_inf(self, device, dtype):
for adaptive in ['', 'adaptive_']:
for num_dim in [1, 2, 3]:
fn_name = '{}max_pool{}d'.format(adaptive, num_dim)
fn = getattr(F, fn_name)
x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
res = fn(x, 1 if adaptive else 3)
res.backward(torch.randn_like(res))
self.assertTrue(math.isnan(res.item()))
x.requires_grad_(False)
res = fn(x, 1 if adaptive else 3)
self.assertTrue(math.isnan(res.item()))
x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
res2 = fn(x2, 1 if adaptive else 3)
res2.backward(torch.randn_like(res2))
self.assertTrue(math.isinf(res2.item()))
x2.requires_grad_(False)
res2 = fn(x2, 1 if adaptive else 3)
self.assertTrue(math.isinf(res2.item()))
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_grid_sample_nan_inf(self, device, dtype):
input = torch.zeros([1, 1, 3, 3], device=device, dtype=dtype)
grid = torch.tensor([[[[nan, 0], [0, inf]]]], device=device, dtype=dtype)
for padding_mode in ('reflection', 'border', 'zeros'):
sample = torch.nn.functional.grid_sample(input=input, grid=grid, mode='nearest',
padding_mode=padding_mode, align_corners=False)
self.assertEqual(sample, torch.zeros([1, 1, 1, 2], device=device, dtype=dtype))
    @expectedFailureMeta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool2d(self, device):
        """Shape, gradcheck, and argument validation for F.fractional_max_pool2d."""
        x = torch.randn(1, 2, 7, 7, requires_grad=True, device=device)
        samples = x.new(1, 2, 2).uniform_()
        def func(x):
            # Fixed _random_samples makes the op deterministic for gradcheck.
            return F.fractional_max_pool2d(
                x, (2, 2), output_size=(3, 3), _random_samples=samples)
        self.assertEqual(func(x).shape, (1, 2, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Unbatched (3-D) input.
        x = torch.randn(2, 7, 7, requires_grad=True, device=device)
        self.assertEqual(func(x).shape, (2, 3, 3))
        # NOTE(review): gradcheck of the unbatched case is skipped on CUDA --
        # presumably a known failure there; confirm before changing.
        if self.device_type != 'cuda':
            gradcheck(func, [x])
            gradgradcheck(func, [x])
        # Invalid kernel_size shapes must be rejected.
        for kernel_size in [(), (1,)]:
            with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                F.fractional_max_pool2d(x, kernel_size=kernel_size, output_size=(3, 3), _random_samples=samples)
        # Invalid / oversized output sizes must be rejected with specific messages.
        err_large_msg = "too large relative to input "
        err_out_size_msg = "output_size must either"
        for output_size, msg in [((9, 3), err_large_msg + "height"),
                                 ((3, 9), err_large_msg + "width"),
                                 ((3,), err_out_size_msg),
                                 ((), err_out_size_msg)]:
            with self.assertRaisesRegex(RuntimeError, msg):
                F.fractional_max_pool2d(x, (2, 2), output_size=output_size, _random_samples=samples)
    @expectedFailureMeta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool3d(self, device):
        """Shape, gradcheck, and argument validation for F.fractional_max_pool3d."""
        x = torch.randn(1, 2, 7, 7, 7, requires_grad=True, device=device)
        samples = x.new(1, 2, 3).uniform_()
        def func(x):
            # Fixed _random_samples makes the op deterministic for gradcheck.
            return F.fractional_max_pool3d(
                x, (2, 2, 2), output_size=(3, 3, 3), _random_samples=samples)
        self.assertEqual(func(x).shape, (1, 2, 3, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Unbatched (4-D) input.
        x = torch.randn(2, 7, 7, 7, requires_grad=True, device=device)
        self.assertEqual(func(x).shape, (2, 3, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Invalid kernel_size shapes must be rejected.
        for kernel_size in [(), (1,), (1, 1)]:
            with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                F.fractional_max_pool3d(x, kernel_size=kernel_size, output_size=(3, 3, 3), _random_samples=samples)
        # Invalid / oversized output sizes must be rejected with specific messages.
        err_large_msg = "too large relative to input "
        err_out_size_msg = "output_size must either"
        for output_size, msg in [((9, 3, 3), err_large_msg + "time"),
                                 ((3, 9, 3), err_large_msg + "height"),
                                 ((3, 3, 9), err_large_msg + "width"),
                                 ((3, 3), err_out_size_msg),
                                 ((3,), err_out_size_msg),
                                 ((), err_out_size_msg)]:
            with self.assertRaisesRegex(RuntimeError, msg):
                F.fractional_max_pool3d(x, (2, 2, 2), output_size=output_size, _random_samples=samples)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@onlyNativeDeviceTypes
def test_fractional_max_pool_nan_inf(self, device, dtype):
for num_dim in [2, 3]:
fn_name = 'FractionalMaxPool{}d'.format(num_dim)
fn = getattr(nn, fn_name)(kernel_size=2, output_size=1)
x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
res = fn(x)
res.backward(torch.randn_like(res))
self.assertTrue(math.isnan(res.item()))
x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
res2 = fn(x2)
res2.backward(torch.randn_like(res2))
self.assertTrue(math.isinf(res2.item()))
@onlyNativeDeviceTypes
def test_pooling_zero_stride(self, device):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
fn = getattr(F, fn_name)
x = torch.ones([1, 2] + num_dim * [4], device=device, dtype=torch.float)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn(x, kernel_size=2, stride=0))
fn_module_name = '{}Pool{}d'.format(op.title(), num_dim)
fn_module = getattr(nn, fn_module_name)(kernel_size=2, stride=0)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn_module(x))
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_pool_large_size(self, device, dtype):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
fn = getattr(F, fn_name)
x = torch.ones([1, 1, 16777217] + (num_dim - 1) * [1],
device=device, dtype=dtype)
res = fn(x, 1, stride=1, padding=0)
self.assertEqual(x.shape[2], res.shape[2])
    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float)
    def test_pool_invalid_size(self, device, dtype):
        """A pooling window that is too large for the input must raise."""
        for op in ('max', 'avg'):
            for num_dim in [1, 2, 3]:
                fn_name = '{}_pool{}d'.format(op, num_dim)
                if op == 'max':
                    # Use the *_with_indices functional variant for max pooling.
                    fn_name += '_with_indices'
                fn = getattr(F, fn_name)
                x = torch.ones([1, 1] + num_dim * [4],
                               device=device, dtype=dtype)
                with self.assertRaisesRegex(RuntimeError, r"too small|smaller than"):
                    try:
                        # Max pool accepts a dilation argument; with dilation=2 a
                        # kernel of 3 already exceeds the 4-wide input.
                        res = fn(x, 3, stride=2, padding=0, dilation=2)
                    except TypeError:
                        # avg pool takes no dilation -> retry with an outright
                        # oversized kernel instead.
                        res = fn(x, 6, stride=2, padding=0)
def test_CTCLoss_empty_target(self, device):
target_lengths = [0, 0, 0]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (0,), dtype=torch.long, device=device)
log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
self.assertTrue((loss >= 0).all().item())
self.assertEqual(-log_probs.sum(0)[:, 0], loss)
target_lengths = [0, 9, 0]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (9,), dtype=torch.long, device=device)
log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
self.assertTrue((loss >= 0).all().item())
self.assertEqual(-log_probs.sum(0)[[0, 2], 0], loss[[0, 2]])
    @skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
                      https://github.com/pytorch/pytorch/issues/34870""")
    def test_ctc_loss(self, device):
        """Gradcheck F.ctc_loss through a log_softmax head for several
        (input_length, varying-lengths, zero-target-mode) configurations."""
        batch_size = 64
        num_labels = 101
        target_length = 15
        gradcheck_input_size = 10
        # How many targets in the batch are zero-length.
        ZERO_NONE = 0
        ZERO_SOME = 1
        ZERO_ALL = 2
        # (input_length, vary_lengths, zero_mode)
        tests = [(150, False, ZERO_NONE),
                 (150, True, ZERO_NONE),
                 (50, True, ZERO_SOME),
                 (50, True, ZERO_ALL)]
        if 'cuda' in device:
            tests += [(50, False, ZERO_NONE),
                      (50, True, ZERO_NONE),
                      (150, True, ZERO_SOME),
                      (150, True, ZERO_ALL)]
        for input_length, vary_lengths, zero_mode in tests:
            targets = torch.randint(1, num_labels, (batch_size, target_length),
                                    device=device, dtype=torch.long)
            # Keep the gradcheck input tiny and expand it via tiling so the
            # finite-difference check stays tractable.
            x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
            tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                       device=device)
            input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                              if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
            if zero_mode == ZERO_ALL:
                target_lengths = [0 for _ in range(batch_size)]
            else:
                target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                                   if vary_lengths else target_length) for _ in range(batch_size)]
                if zero_mode == ZERO_SOME:
                    # Zero out ten randomly chosen targets.
                    idxes = torch.randint(0, batch_size, (10,))
                    for i in idxes:
                        target_lengths[i] = 0
            def ctc_after_softmax(x):
                # Tile the small input up to the full (T, N, C) log-prob tensor.
                x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                          .view(input_length, batch_size, num_labels))
                log_probs = torch.log_softmax(x_full, 2)
                return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
            gradcheck(ctc_after_softmax, [x])
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfCudnnVersionLessThan(7600)
    def test_ctc_loss_cudnn(self, device):
        """cuDNN CTC gradients must match the native CUDA implementation."""
        batch_size = 16
        input_length = 30
        num_labels = 101
        target_length = 15
        targets = torch.randint(1, num_labels, (batch_size * target_length,),
                                device='cuda', dtype=torch.long)
        log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
        log_probs.requires_grad_()
        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]
        grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
        # Native reference path with cuDNN disabled.
        with torch.backends.cudnn.flags(enabled=False):
            loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
            grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
        # int32 CPU targets route to the cuDNN implementation.
        loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                                  input_lengths, target_lengths, reduction='none')
        # Verify we actually took the cuDNN path.
        self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
        grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
        self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_empty_dropout(self, device):
x = torch.tensor([]).to(device)
out = torch.nn.functional.dropout(x)
self.assertEqual(out.size(), x.size())
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float)
    @tf32_on_and_off(0.005)
    def test_variable_sequence(self, device, dtype):
        """Packed-sequence LSTM must match running each sequence individually,
        in both forward outputs/hiddens and gradients."""
        def pad(var, length):
            # Zero-pad a (T, 1, H) sequence up to `length` time steps.
            if var.size(0) == length:
                return var
            return torch.cat([var, var.new_zeros(length - var.size(0), *var.size()[1:])])
        def maybe_index_tuple(maybe_tuple_of_tensors, index):
            # Select batch column `index` from an (h0, c0) tuple, if present.
            if maybe_tuple_of_tensors is None:
                return None
            return tuple(maybe_tuple_of_tensors[j][:, index:index + 1, :].contiguous()
                         for j in range(2))
        def check_lengths(lengths, enforce_sorted, use_default_hiddens, proj_size):
            input_size = 3
            hidden_size = 4
            num_layers = 2
            bidirectional = True
            max_length = max(lengths)
            x_leaf = torch.randn(max_length, len(lengths), input_size, device=device,
                                 dtype=dtype, requires_grad=True)
            num_directions = 2 if bidirectional else 1
            # lstm consumes the packed input; lstm2 is an identical copy used
            # for the per-sequence reference pass.
            lstm = nn.LSTM(input_size, hidden_size, bidirectional=bidirectional,
                           num_layers=num_layers, proj_size=proj_size).to(device, dtype)
            lstm2 = deepcopy(lstm).to(device, dtype)
            x = x_leaf
            hidden0 = None
            if not use_default_hiddens:
                # With projections the hidden state h has proj_size features.
                real_hidden_size = hidden_size if proj_size == 0 else proj_size
                hidden0 = (torch.randn(num_directions * num_layers, len(lengths), real_hidden_size,
                                       device=device, dtype=dtype),
                           torch.randn(num_directions * num_layers, len(lengths), hidden_size,
                                       device=device, dtype=dtype))
            # Reference: run every sequence on its own, then pad + concat.
            seq_outs = []
            seq_hiddens = []
            for i, l in enumerate(lengths):
                hidden_i = maybe_index_tuple(hidden0, i)
                out, hid = lstm2(x[:l, i:i + 1], hidden_i)
                out_pad = pad(out, max_length)
                seq_outs.append(out_pad)
                seq_hiddens.append(hid)
            seq_out = torch.cat(seq_outs, 1)
            seq_hidden = tuple(torch.cat(hids, 1) for hids in zip(*seq_hiddens))
            # Packed path.
            packed = rnn_utils.pack_padded_sequence(x, lengths, enforce_sorted=enforce_sorted)
            packed_out, packed_hidden = lstm(packed, hidden0)
            unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed_out)
            # Forward comparison.
            prec = dtype2prec_DONTUSE[dtype]
            self.assertEqual(packed_hidden, seq_hidden, atol=prec, rtol=0)
            self.assertEqual(unpacked, seq_out, atol=prec, rtol=0)
            self.assertEqual(unpacked_len, lengths, atol=prec, rtol=0)
            # Backward comparison: input gradients...
            seq_out.sum().backward()
            grad_x = x_leaf.grad.data.clone()
            x_leaf.grad.data.zero_()
            unpacked.sum().backward()
            self.assertEqual(x_leaf.grad, grad_x, atol=dtype2prec_DONTUSE[dtype], rtol=0)
            # ...and parameter gradients (looser tolerance for fp16).
            for p1, p2 in zip(lstm.parameters(), lstm2.parameters()):
                prec = dtype2prec_DONTUSE[dtype]
                if dtype == torch.float16:
                    prec = 4e-2
                self.assertEqual(p1.grad, p2.grad, atol=prec, rtol=0)
        # (enforce_sorted, lengths)
        tests = [
            [True, [5]],
            [False, [5]],
            [True, [10, 10, 6, 2, 2, 1, 1]],
            [False, [10, 10, 6, 2, 2, 1, 1]],
            [False, [2, 1, 3, 2, 10, 5, 3]],
        ]
        for enforce_sorted, seq_lens, in tests:
            for use_default_hiddens in (True, False):
                for proj_size in [0, 2]:
                    check_lengths(seq_lens, enforce_sorted, use_default_hiddens, proj_size)
def _test_batchnorm_update_stats(self, device, dtype=torch.float):
module = nn.BatchNorm1d(3).to(device, dtype)
data = torch.rand(4, 3, device=device, dtype=dtype)
old_running_mean = module.running_mean.clone()
old_running_var = module.running_var.clone()
old_num_batches_tracked = module.num_batches_tracked.clone()
module(data)
self.assertNotEqual(old_running_mean, module.running_mean)
self.assertNotEqual(old_running_var, module.running_var)
self.assertEqual(old_num_batches_tracked + 1, module.num_batches_tracked)
module.eval()
old_running_mean = module.running_mean.clone()
old_running_var = module.running_var.clone()
old_num_batches_tracked = module.num_batches_tracked.clone()
module(data)
self.assertEqual(old_running_mean, module.running_mean)
self.assertEqual(old_running_var, module.running_var)
self.assertEqual(old_num_batches_tracked, module.num_batches_tracked)
def test_batchnorm_update_stats(self, device):
self._test_batchnorm_update_stats(device)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_update_stats(device)
def test_multi_margin_loss_errors(self, device):
self.assertRaises(RuntimeError,
lambda: nn.functional.multi_margin_loss(torch.randn(5, device=device),
torch.zeros(3, device=device)))
    def _test_bfloat16_ops(self, op, device, inp_dims=(), prec=1e-2, scale_factor=None):
        """Compare `op` run in float32 against the same op run in bfloat16.

        Forward outputs and input gradients must agree within `prec`. When
        `scale_factor` is given, the input is drawn in bf16 and scaled up,
        exercising large-magnitude values.
        """
        # fp32 compute
        input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True)
        if scale_factor is not None:
            input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_()
        out1 = op(input1)
        grad_input1 = torch.randn_like(out1, device=device)
        out1.backward(grad_input1)
        # bfloat16 compute with the same input and upstream gradient
        op_bfp16 = op.bfloat16()
        input2 = input1.detach().bfloat16().requires_grad_()
        grad_input2 = grad_input1.bfloat16()
        out2 = op_bfp16(input2)
        out2.backward(grad_input2)
        self.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False)
        self.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False)
@onlyCUDA
def test_activations_bfloat16(self, device):
self._test_bfloat16_ops(torch.nn.ReLU(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Threshold(0.1, 20), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.ELU(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Softplus(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Hardshrink(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Softshrink(), device, inp_dims=(5), prec=1e-2)
self._test_bfloat16_ops(torch.nn.LeakyReLU(), device, inp_dims=(5), prec=1e-2)
@onlyCUDA
def test_pooling_bfloat16(self, device):
self._test_bfloat16_ops(torch.nn.AvgPool1d(3, stride=2), device, inp_dims=(8, 4, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AvgPool2d(3, stride=2), device, inp_dims=(8, 4, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AvgPool3d(3, stride=2), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool1d(3), device, inp_dims=(8, 4, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool2d((3, 5)), device, inp_dims=(8, 4, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool3d((3, 5, 7)), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)
@onlyNativeDeviceTypes
def test_softmax_bfloat16(self, device):
for dim in [0, 1, 2, 3]:
self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=0.05, scale_factor=1000.0)
    @onlyCUDA
    @skipCUDAIfRocmVersionLessThan((4, 3))
    @skipCUDAIfNotMiopenSuggestNHWC
    @skipCUDAIfCudnnVersionLessThan(7603)
    @dtypes(torch.half, torch.float)
    def test_conv_cudnn_nhwc(self, device, dtype):
        """channels_last Conv2d must match a contiguous fp64 reference, and all
        outputs/gradients must come back in channels_last."""
        def helper(n, c, h, w, out_channels, kernel_size, groups):
            # Small integer values keep low-precision and fp64 exactly comparable.
            input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device)\
                .to(memory_format=torch.channels_last)
            input.requires_grad_()
            conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)\
                .to(device='cuda', dtype=dtype, memory_format=torch.channels_last)
            for p in conv.parameters():
                p.data = torch.randint_like(p, -3, 3)
            # Contiguous double-precision reference sharing the same weights.
            ref_input = input.detach().clone().contiguous().double().requires_grad_()
            ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)
            ref_conv.load_state_dict(conv.state_dict())
            ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)
            out = conv(input)
            ref_out = ref_conv(ref_input)
            grad = torch.randint_like(out, -3, 3)
            ref_grad = grad.detach().clone().double().contiguous()
            out.backward(grad)
            ref_out.backward(ref_grad)
            # Memory-format expectations.
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertTrue(ref_input.grad.is_contiguous())
            self.assertTrue(ref_conv.weight.grad.is_contiguous())
            # Numerics (dtypes differ between paths, hence exact_dtype=False).
            self.assertEqual(out, ref_out, exact_dtype=False)
            self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
            self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
            self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
        helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1)
        helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8)
        helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1)
        helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfCudnnVersionLessThan(8005)
    @dtypes(torch.half, torch.float)
    def test_conv_cudnn_ndhwc(self, device, dtype):
        """channels_last_3d Conv3d vs a contiguous fp64 reference (3-D analogue
        of test_conv_cudnn_nhwc)."""
        def helper(n, c, d, h, w, out_channels, kernel_size, groups):
            # Small integer values keep low-precision and fp64 exactly comparable.
            input = torch.randint(-2, 2, (n, c, d, h, w), dtype=dtype, device=device)\
                .to(memory_format=torch.channels_last_3d)
            input.requires_grad_()
            conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)\
                .to(device='cuda', dtype=dtype, memory_format=torch.channels_last_3d)
            for p in conv.parameters():
                p.data = torch.randint_like(p, -2, 2)
            # Contiguous double-precision reference sharing the same weights.
            ref_input = input.detach().clone().contiguous().double().requires_grad_()
            ref_conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)
            ref_conv.load_state_dict(conv.state_dict())
            ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)
            out = conv(input)
            ref_out = ref_conv(ref_input)
            grad = torch.randint_like(out, -2, 2)
            ref_grad = grad.detach().clone().double().contiguous()
            out.backward(grad)
            ref_out.backward(ref_grad)
            # Memory-format expectations.
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
            self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last_3d))
            self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last_3d))
            self.assertTrue(ref_out.is_contiguous())
            self.assertTrue(ref_input.grad.is_contiguous())
            self.assertTrue(ref_conv.weight.grad.is_contiguous())
            # Numerics (dtypes differ between paths, hence exact_dtype=False).
            self.assertEqual(out, ref_out, exact_dtype=False)
            self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
            self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
            self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
        helper(2, 8, 4, 4, 4, out_channels=4, kernel_size=3, groups=1)
        helper(2, 8, 4, 4, 4, out_channels=8, kernel_size=3, groups=8)
        helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=1)
        helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=16)
    def _run_conv(self, layer, device, inp, grad, ref_conv, ref_input, ref_out,
                  input_format, weight_format, grad_format, output_format):
        """Clone `ref_conv` into a fresh `layer` instance, coerce weight/input/
        grad into the requested memory formats, and check that the forward
        output arrives in `output_format` with numerics matching the reference."""
        conv = layer(inp.size(1), grad.size(1),
                     ref_conv.weight.size(2)).float().to(device)
        # Share parameters with the reference convolution.
        conv.load_state_dict(ref_conv.state_dict())
        # resize_ with a memory_format pins the layout after the contiguous copy.
        weight_data = conv.weight.detach().clone().contiguous(memory_format=weight_format)
        conv.weight.data = weight_data.resize_(weight_data.size(), memory_format=weight_format)
        input = inp.clone().contiguous(memory_format=input_format)
        input.resize_(input.size(), memory_format=input_format)
        input = input.requires_grad_()
        grad = grad.contiguous(memory_format=grad_format)
        grad.resize_(grad.size(), memory_format=grad_format)
        out = conv(input)
        out.backward(grad)
        self.assertTrue(out.is_contiguous(memory_format=output_format))
        self.assertEqual(out, ref_out)
        self.assertEqual(conv.weight.grad, ref_conv.weight.grad)
        self.assertEqual(conv.bias.grad, ref_conv.bias.grad)
        self.assertEqual(input.grad, ref_input.grad)
    def _test_conv_cudnn_nhwc_nchw(self, layer, n, c, h, w, k, filter_size, device):
        """Run `layer` over every input/weight/grad memory-format combination,
        deriving the expected output format, and delegate to _run_conv."""
        data = torch.randint(1, 10, (n, c, h, w), dtype=torch.float32, device=device)
        ref_input = data.clone().contiguous().requires_grad_(True)
        ref_conv = layer(c, k, filter_size).float().to(device)
        ref_out = ref_conv(ref_input)
        grad = torch.randint(1, 10, ref_out.size(), dtype=torch.float32, device="cuda")
        ref_out.backward(grad)
        for w_f in [torch.contiguous_format, torch.channels_last]:
            for g_f in [torch.contiguous_format, torch.channels_last]:
                for input_format in [torch.contiguous_format, torch.channels_last]:
                    output_format = torch.contiguous_format
                    # channels_last outputs are only expected from cuDNN >= 7603
                    # (mirrors the skip decorators used elsewhere in this file).
                    if torch.backends.cudnn.version() >= 7603:
                        if input_format == torch.channels_last:
                            output_format = torch.channels_last
                        # A weight with a single element per output channel has
                        # an ambiguous layout, hence the `filter_size * c != 1`
                        # / `filter_size * k != 1` guards below.
                        if w_f == torch.channels_last:
                            if layer == nn.Conv2d and filter_size * c != 1:
                                output_format = torch.channels_last
                            if layer == nn.ConvTranspose2d and filter_size * k != 1:
                                output_format = torch.channels_last
                    self._run_conv(layer, device, data, grad, ref_conv, ref_input,
                                   ref_out, input_format, w_f, g_f, output_format)
@onlyCUDA
@skipCUDAIfRocmVersionLessThan((4, 3))
@skipCUDAIfNotMiopenSuggestNHWC
@skipCUDAIfCudnnVersionLessThan(7603)
@tf32_on_and_off(0.05)
def test_conv_cudnn_mismatch_memory_format(self, device):
configs = [
[4, 2, 8, 8, 4, 2],
[4, 1, 8, 8, 4, 2],
[1, 1, 8, 8, 4, 2],
[4, 2, 2, 8, 4, 1],
[4, 2, 1, 8, 4, 1],
[4, 2, 8, 8, 4, 1],
[4, 1, 8, 8, 4, 1],
]
for n, c, h, w, k, filter_size in configs:
self._test_conv_cudnn_nhwc_nchw(nn.Conv2d, n, c, h, w, k, filter_size, device)
self._test_conv_cudnn_nhwc_nchw(nn.ConvTranspose2d, n, c, h, w, k, filter_size, device)
lyCUDA
@skipCUDAIfNoCudnn
@dtypes(torch.float, torch.double)
def test_conv_cudnn_nhwc_support(self, device, dtype):
input = torch.randn((1, 16, 1, 1), dtype=dtype, device="cuda", requires_grad=True)
weight = torch.randn((8, 16, 3, 3), dtype=dtype, device="cuda", requires_grad=True)
weight = weight.to(memory_format=torch.channels_last)
o = torch.conv2d(input, weight, None, (2, 1), (1, 1), (1, 1), 1)
self.assertTrue(o.is_contiguous(memory_format=torch.channels_last))
o.sum().backward()
@onlyCPU
@dtypes(torch.float)
def test_conv2d_no_grad(self, device, dtype):
for batch in [1, 2, 3]:
for groups in [1, 2, 4]:
input = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device)
m = nn.Conv2d(groups, 8, kernel_size=(3, 3), groups=groups, dtype=dtype, device=device)
with torch.no_grad():
output_ng = m(input)
output = m(input)
self.assertEqual(output, output_ng, rtol=1e-2, atol=1e-5)
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfNoCudnn
    @dtypes(torch.float, torch.float16)
    @precisionOverride({torch.half: 0.002, torch.float: 1e-4})
    def test_cudnn_convolution_relu(self, device, dtype):
        """torch.cudnn_convolution_relu must equal conv2d followed by relu and
        preserve the requested memory format."""
        for batch, groups, image_size, kernel_size, memory_format in \
                product((1, 2, 3),
                        (1, 2, 4),
                        ((1, 1), (8, 8)),
                        ((1, 1), (3, 3)),
                        (torch.channels_last, torch.contiguous_format)):
            # Skip configurations where the kernel does not fit the image.
            if image_size[0] < kernel_size[0]:
                continue
            inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)
            w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)
            conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
            inp = inp.to(memory_format=memory_format)
            w = w.to(memory_format=memory_format)
            cudnn_out = torch.cudnn_convolution_relu(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
            self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))
            self.assertEqual(conv2d_out.relu(), cudnn_out)
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfNoCudnn
    @dtypes(torch.float, torch.float16)
    @precisionOverride({torch.half: 0.002, torch.float: 1e-4})
    def test_cudnn_convolution_add_relu(self, device, dtype):
        """torch.cudnn_convolution_add_relu must equal relu(conv2d(x) + alpha*z)
        and preserve the requested memory format."""
        for batch, groups, image_size, kernel_size, memory_format in \
                product((1, 2, 3),
                        (1, 2, 4),
                        ((1, 1), (8, 8)),
                        ((1, 1), (3, 3)),
                        (torch.channels_last, torch.contiguous_format)):
            # Skip configurations where the kernel does not fit the image.
            if image_size[0] < kernel_size[0]:
                continue
            inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)
            w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)
            conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
            alpha = 2.0
            z = torch.randn_like(conv2d_out)
            inp = inp.to(memory_format=memory_format)
            w = w.to(memory_format=memory_format)
            z = z.to(memory_format=memory_format)
            cudnn_out = torch.cudnn_convolution_add_relu(inp, w, z, alpha, None, (1, 1), (0, 0), (1, 1), 1)
            self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))
            self.assertEqual(F.relu(conv2d_out + alpha * z), cudnn_out)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7603)
def test_convert_conv2d_weight_memory_format(self, device):
input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device=device)
model = nn.Sequential(
nn.Conv2d(8, 4, 3),
nn.BatchNorm2d(4)).to(device).float()
for memory_format in [torch.channels_last, torch.contiguous_format]:
model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)
out = model(input)
self.assertTrue(out.is_contiguous(memory_format=memory_format))
model = nn.Sequential(
nn.ConvTranspose2d(8, 4, 3),
nn.BatchNorm2d(4)).to(device).float()
for memory_format in [torch.channels_last, torch.contiguous_format]:
model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)
out = model(input)
self.assertTrue(out.is_contiguous(memory_format=memory_format))
    def test_conv_double_backward_strided_with_3D_input_and_weight(self, device):
        """Smoke-test aten::_convolution_double_backward with 3-D (conv1d-style)
        input/weight and stride 2; only output shapes are validated."""
        input = torch.randn(2, 3, 6, device=device)
        weight = torch.randn(3, 3, 3, device=device)
        bias = torch.randn(3, device=device)
        stride = (2,)
        padding = (1,)
        dilation = (1,)
        transposed = False
        output_padding = (0,)
        groups = 1
        output = torch.ops.aten.convolution(input, weight, bias, stride, padding, dilation, transposed,
                                            output_padding, groups)
        # Random tangents for the double-backward inputs.
        ggI = torch.randn(input.shape, device=device)
        ggW = torch.randn(weight.shape, device=device)
        ggB = torch.randn(bias.shape, device=device)
        gO = torch.randn(output.shape, device=device)
        output_mask = [True, True, True]
        grad_grad_output, grad_input, grad_weight = torch.ops.aten._convolution_double_backward(
            ggI, ggW, ggB, gO, weight, input, stride, padding, dilation, transposed,
            output_padding, groups, output_mask)
        # Make sure the correct shapes are computed.
        self.assertEqual(grad_grad_output.shape, gO.shape)
        self.assertEqual(grad_input.shape, input.shape)
        self.assertEqual(grad_weight.shape, weight.shape)
def test_nll_loss_mismatched_batch(self, device):
x = torch.randn((10, 3), requires_grad=True, device=device)
t = torch.zeros((3,), dtype=torch.int64, device=device)
with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):
F.nll_loss(x, t)
def test_nll_loss_out_of_bounds_ignore_index(self, device):
x = torch.randn(6, 3, requires_grad=True, device=device)
t = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
for reduction in ['mean', 'none']:
F.nll_loss(x, t, ignore_index=255, reduction=reduction).sum().backward()
def test_nll_loss_invalid_target_dim(self, device):
x = torch.randn((10, 3), device=device)
t = torch.zeros((10, 2), dtype=torch.int64, device=device)
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.nll_loss(x, t)
def test_nll_loss_invalid_weights(self, device):
x = torch.randn((10, 3), device=device)
t = torch.empty(10, dtype=torch.int64, device=device).random_(0, 3)
invalid_weights = [
torch.randn(4, device=device),
torch.randn(1, 3, device=device),
]
msg = "weight tensor should be defined either for all 3 classes or no classes"
for weight in invalid_weights:
with self.assertRaisesRegex(RuntimeError, msg):
F.nll_loss(x, t, weight=weight)
def _nll_loss_helper(self, input_size, reduction, expected, device):
input = torch.rand(input_size, requires_grad=True, device=device)
num_channels = input_size[1]
target_size = (input_size[0], ) + tuple(input_size[2:])
target = torch.randint(num_channels, target_size, device=device)
output = F.nll_loss(input, target, reduction=reduction)
output.sum().backward()
self.assertEqual(input.grad.size(), input.size())
    def test_nll_loss_empty_tensor_reduction_none(self, device):
        """Empty batch or empty spatial dims with reduction='none' yield an
        empty output of the matching shape (checked via _nll_loss_helper)."""
        self._nll_loss_helper([0, 3], "none", torch.empty([0], device=device), device)
        self._nll_loss_helper([0, 3, 5, 7], "none", torch.empty([0, 5, 7], device=device), device)
        self._nll_loss_helper([2, 3, 0, 7], "none", torch.empty([2, 0, 7], device=device), device)
        self._nll_loss_helper([2, 3, 5, 0], "none", torch.empty([2, 5, 0], device=device), device)
        self._nll_loss_helper([2, 3, 5, 7, 0], "none", torch.empty([2, 5, 7, 0], device=device), device)
    @unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
    def test_nll_loss_empty_tensor_reduction_mean(self, device):
        """reduction='mean' over zero elements is 0/0 and must yield NaN."""
        nan = torch.tensor(float('nan'), device=device)
        self._nll_loss_helper([0, 3], "mean", nan, device)
        self._nll_loss_helper([0, 3, 5, 7], "mean", nan, device)
        self._nll_loss_helper([2, 3, 0, 7], "mean", nan, device)
        self._nll_loss_helper([2, 3, 5, 0], "mean", nan, device)
        self._nll_loss_helper([2, 3, 5, 7, 0], "mean", nan, device)
    def test_nll_loss_empty_tensor_reduction_sum(self, device):
        """reduction='sum' over zero elements must yield 0."""
        zero = torch.tensor(0, device=device)
        self._nll_loss_helper([0, 3], "sum", zero, device)
        self._nll_loss_helper([0, 3, 5, 7], "sum", zero, device)
        self._nll_loss_helper([2, 3, 0, 7], "sum", zero, device)
        self._nll_loss_helper([2, 3, 5, 0], "sum", zero, device)
        self._nll_loss_helper([2, 3, 5, 7, 0], "sum", zero, device)
    @unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
    def test_nll_loss_total_weight_is_zero(self, device):
        """If every selected class has weight 0: 'sum' -> 0, 'mean' -> NaN
        (0/0 weight normalization), 'none' -> all-zero per-element losses."""
        def helper(input_size):
            # All targets are class 0, whose weight is zero.
            input = torch.ones(input_size, requires_grad=True, device=device)
            num_channels = input_size[1]
            target_size = (input_size[0], ) + tuple(input_size[2:])
            target = torch.zeros(target_size, dtype=torch.long, device=device)
            weight = torch.zeros([num_channels], device=device)
            self.assertEqual(F.nll_loss(input, target, weight, reduction="sum").item(), 0.)
            self.assertEqual(F.nll_loss(input, target, weight, reduction="mean").item(), float("nan"))
            self.assertEqual(F.nll_loss(input, target, weight, reduction="none"), torch.zeros(target.shape, device=device))
        helper([2, 3])
        helper([2, 3, 5, 7])
        helper([2, 3, 5, 7, 9])
    @unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
    def test_nll_loss_all_ignored(self, device):
        """If every target equals ignore_index: 'sum' -> 0, 'mean' -> NaN
        (0/0), 'none' -> all-zero per-element losses."""
        def helper(input_size):
            # Every target is class 0, which is also the ignore_index below.
            input = torch.ones(input_size, device=device)
            num_channels = input_size[1]
            target_size = (input_size[0], ) + tuple(input_size[2:])
            target = torch.zeros(target_size, dtype=torch.long, device=device)
            self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction="sum").item(), 0)
            self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction="mean").item(), float("nan"))
            self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction="none"), torch.zeros(target.shape, device=device))
        helper([2, 3])
        helper([2, 3, 5, 7])
        helper([2, 3, 5, 7, 9])
    def test_nll_loss_byte_target_matches_long(self, device):
        """uint8 targets must give the same loss values and input gradients
        as the canonical int64 targets, for every reduction."""
        N, C = 10, 4
        input = torch.randn(N, C, device=device, requires_grad=True)
        target = torch.empty(N, dtype=torch.long, device=device).random_(0, C)
        def compute_result_and_gradient(reduction, target_dtype):
            # Work on a detached copy so each run accumulates its own grad.
            input_ = input.detach()
            input_.requires_grad_()
            prob = F.log_softmax(input_, dim=-1)
            loss = nn.NLLLoss(reduction=reduction)
            result = loss(prob, target.to(target_dtype))
            result.sum().backward()
            return result, input_.grad
        for reduction in ["none", "mean", "sum"]:
            result_long, grad_long = compute_result_and_gradient(reduction, torch.long)
            result_byte, grad_byte = compute_result_and_gradient(reduction, torch.uint8)
            self.assertEqual(result_long, result_byte)
            self.assertEqual(grad_long, grad_byte)
    def test_cross_entropy_loss_prob_target_all_reductions(self, device):
        """CrossEntropyLoss with probability targets must match the python
        reference implementation for every reduction, with/without weights."""
        # Test with k extra (spatial) dimensions, k = 0..4.
        for k in range(5):
            N, C = 5, 4
            other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
            input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            target = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            weight = torch.randn(C, device=device).abs()
            for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
                m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
                output = m(input, target)
                output_ref = loss_reference_fns['CrossEntropyLoss'](
                    input, target, reduction=reduction, weight=w)
                self.assertEqual(output, output_ref)
def test_cross_entropy_loss_prob_target_unit_weights(self, device):
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
for reduction in ['none', 'mean', 'sum']:
m = torch.nn.CrossEntropyLoss(reduction=reduction)
unit_weight = torch.ones(C, device=device, dtype=target.dtype)
m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)
output = m(input, target)
output_unit = m_unit(input, target)
self.assertEqual(output, output_unit)
    def test_cross_entropy_loss_index_target_unit_weights(self, device):
        """With class-index targets, an all-ones weight vector must be
        equivalent to passing no weight at all."""
        # Test with k extra (spatial) dimensions, k = 0..4.
        for k in range(5):
            N, C = 5, 4
            other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
            input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
            for reduction in ['none', 'mean', 'sum']:
                m = torch.nn.CrossEntropyLoss(reduction=reduction)
                unit_weight = torch.ones(C, device=device, dtype=input.dtype)
                m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)
                output = m(input, target)
                output_unit = m_unit(input, target)
                self.assertEqual(output, output_unit)
def test_cross_entropy_loss_one_hot_target(self, device):
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
weight = torch.randn(C, device=device).abs()
target_one_hot = F.one_hot(target, num_classes=C).to(input.dtype)
target_one_hot = target_one_hot.permute(0, -1, *range(1, target_one_hot.dim() - 1))
for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
if reduction == 'mean' and weight is not None:
continue
m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
output = m(input, target)
output_one_hot = m(input, target_one_hot)
self.assertEqual(output, output_one_hot)
def test_cross_entropy_label_smoothing_errors(self, device):
N, C = 3, 4
input_args = [
(torch.randn((N, C), device=device), torch.arange(0, C, device=device)),
(torch.randn((N, C), device=device), torch.randn(N, C, device=device))
]
for input_arg in input_args:
loss = nn.CrossEntropyLoss(label_smoothing=1.2)
with self.assertRaisesRegex(RuntimeError,
r"label_smoothing must be between 0\.0"):
loss(*input_arg)
    def test_cross_entropy_label_smoothing_consistent_index_target_and_probs(self, device):
        """label_smoothing with class-index targets must match an explicitly
        smoothed one-hot probability target."""
        N, C = 10, 4
        ks = range(5)
        reductions = ['none', 'mean', 'sum']
        label_smoothings = [0.05, 0.15]
        for k, reduction, label_smoothing in product(ks, reductions, label_smoothings):
            other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
            input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
            # Build the smoothed distribution by hand: 1 - eps + eps/C on the
            # true class, eps/C everywhere else.
            target_proba = F.one_hot(target, num_classes=C)
            # one_hot puts the class dim last; move it to dim 1 to match input.
            target_proba = target_proba.permute(0, -1, *range(1, target_proba.dim() - 1))
            target_mask = (target_proba == 1)
            target_proba = target_proba.to(dtype=input.dtype)
            target_proba.masked_fill_(target_mask, 1 - label_smoothing + label_smoothing / C)
            target_proba.masked_fill_(~target_mask, label_smoothing / C)
            loss = nn.CrossEntropyLoss(reduction=reduction)
            output_with_prob = loss(input, target_proba)
            loss = nn.CrossEntropyLoss(
                reduction=reduction, label_smoothing=label_smoothing)
            output_with_index = loss(input, target)
            self.assertEqual(output_with_prob, output_with_index,
                             rtol=1e-07, atol=1e-05)
    def test_cross_entropy_label_smoothing_with_probs(self, device):
        """Built-in label smoothing on probability-style targets must equal
        manually smoothing the target tensor.

        NOTE(review): `target` here is log_softmax output (log-probabilities,
        not probabilities); the comparison still holds because both sides use
        the same target — confirm this is intentional.
        """
        N, C = 10, 4
        ks = range(5)
        reductions = ['none', 'mean', 'sum']
        label_smoothings = [0.05, 0.15]
        for k, label_smoothing in product(ks, label_smoothings):
            other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
            input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
            target = F.log_softmax(torch.randn(N, C, *other_dims, device=device), dim=1)
            for reduction in reductions:
                # Built-in smoothing ...
                loss = nn.CrossEntropyLoss(reduction=reduction, label_smoothing=label_smoothing)
                output_with_smoothing = loss(input, target)
                # ... versus smoothing the target by hand.
                target_with_smoothing = target * (1 - label_smoothing) + label_smoothing / C
                loss = nn.CrossEntropyLoss(reduction=reduction)
                output_with_manual_smoothing = loss(input, target_with_smoothing)
                self.assertEqual(output_with_smoothing, output_with_manual_smoothing)
    def test_cross_entropy_label_smoothing_weight_ignore_indices(self, device):
        """Rows whose target equals ignore_index must contribute nothing
        under label smoothing and class weights, for the default, a negative
        and a positive ignore_index."""
        reductions = ['none', 'sum', 'mean']
        label_smoothings = [0.05, 0.15]
        weight = torch.tensor([0.3, 0.6], device=device)
        # inp1/inp2 differ only in the row whose target is ignored, so equal
        # losses prove the ignored row does not contribute.
        inp1 = torch.tensor([[0.3, 0.4], [1, 2]], device=device)
        inp2 = torch.tensor([[0.3, 0.6], [1, 2]], device=device)
        targ_default_ignore_index = torch.tensor([-100, 1], device=device)
        targ_negative_ignore_index = torch.tensor([-2, 1], device=device)
        targ_positive_ignore_index = torch.tensor([2, 1], device=device)
        for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, weight)):
            def check_equal(loss, inp_targ_1, inp_targ_2):
                inp1, targ1 = inp_targ_1
                inp2, targ2 = inp_targ_2
                l1 = loss(inp1, targ1)
                l2 = loss(inp2, targ2)
                self.assertEqual(l1, l2)
            # default ignore_index (-100)
            loss = nn.CrossEntropyLoss(reduction=reduction,
                                       label_smoothing=label_smoothing,
                                       weight=weight)
            check_equal(loss, (inp1, targ_default_ignore_index), (inp2, targ_default_ignore_index))
            if reduction != 'none':
                check_equal(loss, (inp1, targ_default_ignore_index), (inp2[1:], targ_default_ignore_index[1:]))
            # negative ignore_index
            loss = nn.CrossEntropyLoss(reduction=reduction,
                                       label_smoothing=label_smoothing,
                                       ignore_index=-2,
                                       weight=weight)
            check_equal(loss, (inp1, targ_negative_ignore_index), (inp2, targ_negative_ignore_index))
            if reduction != 'none':
                # Check that we correctly tally the denominator for `mean`
                # i.e. we don't count the ignored_idx at all.
                check_equal(loss, (inp1, targ_negative_ignore_index), (inp2[1:], targ_negative_ignore_index[1:]))
            # positive ignore_index
            loss = nn.CrossEntropyLoss(reduction=reduction,
                                       label_smoothing=label_smoothing,
                                       ignore_index=2,
                                       weight=weight)
            check_equal(loss, (inp1, targ_positive_ignore_index), (inp2, targ_positive_ignore_index))
            if reduction != 'none':
                check_equal(loss, (inp1, targ_positive_ignore_index), (inp2[1:], targ_positive_ignore_index[1:]))
def test_softshrink_negative(self, device):
input = torch.randn(5, device=device, requires_grad=True)
m = torch.nn.Softshrink(-1)
with self.assertRaisesRegex(RuntimeError,
r'lambda must be greater or equal to 0, but found to be -1\.'):
m(input)
    def test_fold(self, device):
        """Gradcheck / gradgradcheck F.fold (including forward-mode AD) over
        several seeds; on CPU also compare bfloat16 against float32."""
        def test_dtype(fn, input, dtype):
            # Run fn under `dtype` and under float32 and compare outputs/grads.
            input = input.detach().clone().to(dtype=dtype).requires_grad_(True)
            input2 = input.detach().clone().float().requires_grad_(True)
            out = fn(input)
            out.sum().backward()
            out2 = fn(input2)
            out2.sum().backward()
            self.assertEqual(out.dtype, dtype)
            self.assertEqual(input.grad.dtype, dtype)
            self.assertEqual(out, out2.to(dtype=dtype), atol=0.05, rtol=0)
            self.assertEqual(input.grad, input2.grad.to(dtype=dtype))
        def func(x):
            return F.fold(x, output_size=(4, 5), kernel_size=(2, 2))
        seeds = (44, 83, 71, 25, 999)
        for sd in seeds:
            torch.manual_seed(sd)
            x = torch.randn(1, 12, 12, device=device, requires_grad=True)
            gradcheck(func, [x], check_forward_ad=True)
            gradgradcheck(func, [x], check_fwd_over_rev=True)
            if device == 'cpu':
                test_dtype(func, x, torch.bfloat16)
    def test_logsigmoid_out(self, device):
        """logsigmoid with an out= tensor (empty or non-contiguous) must match
        the plain call on a non-contiguous input."""
        # this isn't actually documented, but was broken previously:
        x = torch.randn(2, 3, device=device).t()
        empty_out = torch.randn(0, device=device)
        self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=empty_out))
        noncontig_out = torch.randn(2, 3, device=device).t()
        self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=noncontig_out))
    def test_maxpool3d_non_square_backward(self, device):
        """max_pool3d backward with a 1x1x1 kernel on inputs elongated along
        one dimension at a time: every input element must get gradient 1."""
        # Elongate a different dimension on each iteration (256 vs 32).
        for dim in (2, 3, 4):
            shape = tuple(32 if i != dim else 256 for i in range(4))
            x = torch.randn(shape, device=device, requires_grad=True)
            F.max_pool3d(x, kernel_size=(1, 1, 1)).sum().backward()
            self.assertEqual(x.grad, torch.ones_like(x.grad))
    def test_clip_grad_norm_error_if_nonfinite(self, device):
        """clip_grad_norm_(..., error_if_nonfinite=True) must raise exactly
        when the total norm is nonfinite, must leave gradients untouched when
        it raises, and must never raise with error_if_nonfinite=False."""
        norms_pos = [0.1, 1, 2, 3.5, inf]
        norms_neg = [-0.1, -1, -2, -3.5]
        norms_except_0 = norms_pos + norms_neg
        norms_all = norms_except_0 + [0]
        # Each entry in test_cases has the following values, in this order:
        #
        # grad_only_one_elem        If True, only one element of the parameter's
        #                           gradient is scaled by the scalar
        #
        # prefix_finite_grad_param  If True, prepend an extra parameter whose
        #                           gradient is finite
        #
        # scalars                   Scalars to multiply the gradient by
        #
        # norms_nonfinite           Norm types that should produce nonfinite total norm
        #
        # norms_finite              Norm types that should produce finite total norm
        test_cases = [
            # Test errors from an infinite grad
            (False, False, [inf, -inf], norms_except_0, [0]),
            (False, True, [inf, -inf], norms_pos, norms_neg + [0]),
            (True, False, [inf, -inf], norms_pos, norms_neg + [0]),
            (True, True, [inf, -inf], norms_pos, norms_neg + [0]),
            # Test errors from a NaN grad
            (False, False, [nan], norms_except_0, [0]),
            (False, True, [nan], norms_except_0, [0]),
            (True, False, [nan], norms_except_0, [0]),
            (True, True, [nan], norms_except_0, [0]),
            # Test a grad that should never error
            (False, False, [2e22, -2e22], [], norms_all),
            (False, True, [2e22, -2e22], [], norms_all),
            (True, False, [2e22, -2e22], [], norms_all),
            (True, True, [2e22, -2e22], [], norms_all),
            # Test a grad that will overflow to inf for only some norm orders
            (False, False, [2e200, -2e200], [3.5, 2, -2, -3.5], [inf, 1, 0.1, 0, -1, -0.1]),
            (False, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
            (True, False, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
            (True, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
        ]
        def gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param):
            # Build the parameter list whose grads embed `scalar` as described
            # by the two flags above.
            param = torch.ones(10, dtype=torch.float64, device=device, requires_grad=True)
            if grad_only_one_elem:
                param[1].mul(scalar).sum().backward()
            else:
                param.mul(scalar).sum().backward()
            if prefix_finite_grad_param:
                prefix_param = torch.ones(1, dtype=torch.float64, device=device, requires_grad=True)
                prefix_param.mul(1).sum().backward()
                parameters = [prefix_param, param]
            else:
                parameters = [param]
            return parameters
        def run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, is_norm_nonfinite):
            # NOTE(review): the trailing comma after the first f-string makes
            # `msg` a 2-tuple rather than one concatenated string — probably
            # unintended but harmless as an assertion message.
            msg = (
                f'norm_type: {norm_type}, ',
                f'error_if_nonfinite: {error_if_nonfinite}, '
                f'scalar: {scalar}, '
                f'grad_only_one_elem: {grad_only_one_elem}, '
                f'prefix_finite_grad_param: {prefix_finite_grad_param}, '
                f'is_norm_nonfinite: {is_norm_nonfinite}')
            parameters = gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param)
            # Should only throw an error if the total norm is expected to be
            # nonfinite and `error_if_nonfinite=True`
            if is_norm_nonfinite and error_if_nonfinite:
                error_msg = f'The total norm of order {float(norm_type)} for gradients'
                grads_before = [p.grad.clone() for p in parameters]
                with self.assertRaisesRegex(RuntimeError, error_msg, msg=msg):
                    clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=True)
                # Grad should not change if error is thrown
                grads_after = [p.grad for p in parameters]
                self.assertEqual(grads_before, grads_after, msg=msg)
            else:
                clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=error_if_nonfinite)
        for grad_only_one_elem, prefix_finite_grad_param, scalars, norms_nonfinite, norms_finite in test_cases:
            for error_if_nonfinite in [False, True]:
                for norm_type, scalar in product(norms_nonfinite, scalars):
                    run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, True)
                for norm_type, scalar in product(norms_finite, scalars):
                    run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, False)
    @onlyCUDA
    @deviceCountAtLeast(2)
    def test_clip_grad_norm_multi_device(self, devices):
        """clip_grad_norm_ across parameters living on two devices must match
        a single-device reference model, for the 2-norm and the inf-norm."""
        class TestModel(nn.Module):
            def __init__(self):
                super(TestModel, self).__init__()
                self.layer1 = nn.Linear(10, 10)
                self.layer2 = nn.Linear(10, 10)
        test_model = TestModel()
        # Split the model's layers across the two devices.
        test_model.layer1.to(devices[0])
        test_model.layer2.to(devices[1])
        ref_model = TestModel().to(devices[0])
        for norm_type in [2., math.inf]:
            # Identical all-ones grads on both models.
            for p in test_model.parameters():
                p.grad = torch.ones_like(p)
            for p in ref_model.parameters():
                p.grad = torch.ones_like(p)
            norm = clip_grad_norm_(test_model.parameters(), 0.5, norm_type=norm_type)
            expected = clip_grad_norm_(ref_model.parameters(), 0.5, norm_type=norm_type)
            self.assertEqual(norm, expected)
            for p, pe in zip(test_model.parameters(), ref_model.parameters()):
                self.assertEqual(p.grad.to(devices[0]), pe.grad)
def test_elu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.elu(x, inplace=True)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.elu_(x)
# Merge into OpInfo?
    @onlyNativeDeviceTypes
    def test_elu_inplace_with_neg_alpha(self, device):
        """elu_/celu_ with a negative alpha cannot recompute gradients from
        their output, so backward must demand the out-of-place version."""
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.elu_(a.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.celu_(a.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
    @expectedFailureMeta  # https://github.com/pytorch/pytorch/issues/54897
    def test_hardswish_inplace_overlap(self, device):
        """In-place hardswish must reject self-overlapping (expanded) inputs."""
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.hardswish(x, inplace=True)
def test_silu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.silu(x, inplace=True)
    @onlyNativeDeviceTypes
    def test_mish_inplace_overlap(self, device):
        """In-place mish must reject self-overlapping (expanded) inputs."""
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.mish(x, inplace=True)
def test_softplus_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.softplus(x, out=x)
def test_softplus_low_threshold(self, device):
# Ensure gradients are computed correctly with a low threshold.
model = torch.nn.Softplus(threshold=1).double()
input = torch.tensor(0.9, device=device, dtype=torch.double,
requires_grad=True)
output = model(input)
torch.autograd.gradcheck(model, input)
def test_softshrink_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.softshrink(x, out=x)
def test_leaky_relu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.leaky_relu(x, inplace=True)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.leaky_relu_(x)
# Merge into OpInfo?
    def test_leaky_relu_inplace_with_neg_slope(self, device):
        """leaky_relu_/rrelu_ with a negative slope cannot recover the sign
        mask from their output, so backward through the in-place op must
        raise and demand the out-of-place version."""
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.leaky_relu_(a.clone(), -2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
# Merge into OpInfo?
    def test_leaky_relu_inplace_with_zero_slope(self, device):
        """leaky_relu_ with slope 0: gradient must be 0 for negative and zero
        inputs and 1 for positive inputs, in float and bfloat16."""
        a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
        b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
        b.backward(torch.ones(3, device=device))
        expected = torch.tensor([0., 0., 1.], device=device)
        self.assertEqual(a.grad, expected)
        # Same check in bfloat16.
        a_bf16 = torch.tensor([-2., 0., 2.], device=device, dtype=torch.bfloat16, requires_grad=True)
        b_bf16 = torch.nn.functional.leaky_relu_(a_bf16.clone(), 0.0)
        b_bf16.backward(torch.ones(3, device=device))
        expected_bf16 = torch.tensor([0., 0., 1.], device=device, dtype=torch.bfloat16)
        self.assertEqual(a_bf16.grad, expected_bf16)
def test_threshold_inplace_overlap(self, device):
# Inplace threshold is okay, because it is idempotent
x = torch.randn((1, 6), device=device).expand((6, 6))
F.threshold(x, 0.5, 0.5, inplace=True)
F.threshold_(x, 0.5, 0.5)
    @onlyNativeDeviceTypes
    def test_triplet_margin_with_distance_loss_default_parity(self, device):
        """With default distance, TripletMarginWithDistanceLoss (module and
        functional) must match the non-distance-agnostic TripletMarginLoss /
        F.triplet_margin_loss, in forward and under gradcheck."""
        for extra_args in \
                itertools.product((0.5, 1, 1.5), (True, False), ('none', 'mean', 'sum')):
            kwargs = {'margin': extra_args[0], 'swap': extra_args[1], 'reduction': extra_args[2]}
            anchor = torch.randn(5, 10, device=device, requires_grad=True)
            positive = torch.randn(5, 10, device=device, requires_grad=True)
            negative = torch.randn(5, 10, device=device, requires_grad=True)
            # Test forward, functional
            expected = F.triplet_margin_loss(anchor, positive, negative, **kwargs)
            actual = F.triplet_margin_with_distance_loss(anchor, positive, negative, **kwargs)
            self.assertEqual(actual, expected, rtol=1e-6, atol=1e-6)
            # Test forward, module
            loss_ref = nn.TripletMarginLoss(**kwargs)
            loss_op = nn.TripletMarginWithDistanceLoss(**kwargs)
            self.assertEqual(loss_op(anchor, positive, negative),
                             loss_ref(anchor, positive, negative),
                             rtol=1e-6, atol=1e-6)
            # Test backward
            self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
                a, p, n, **kwargs), (anchor, positive, negative)))
            self.assertTrue(gradcheck(lambda a, p, n: loss_op(a, p, n),
                                      (anchor, positive, negative)))
    @onlyNativeDeviceTypes
    def test_triplet_margin_with_distance_loss(self, device):
        """Parity between nn.TripletMarginWithDistanceLoss, its functional
        form and a jit-traced module, over several custom distance functions,
        reductions, margins and swap settings; all paths must pass gradcheck."""
        pairwise_distance = nn.PairwiseDistance()
        def cosine_distance(x, y):
            return 1.0 - F.cosine_similarity(x, y)
        # Named function, plain function and a lambda as distance callables.
        distance_functions = (pairwise_distance, cosine_distance,
                              lambda x, y: 1.0 - F.cosine_similarity(x, y))
        reductions = ('mean', 'none', 'sum')
        margins = (1.0, 1.5, 0.5)
        swaps = (True, False)
        for distance_fn, reduction, margin, swap \
                in itertools.product(distance_functions, reductions, margins, swaps):
            anchor = torch.randn(5, 10, device=device, requires_grad=True)
            positive = torch.randn(5, 10, device=device, requires_grad=True)
            negative = torch.randn(5, 10, device=device, requires_grad=True)
            # Test backward
            self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
                a, p, n, distance_function=distance_fn, reduction=reduction, margin=margin, swap=swap),
                (anchor, positive, negative)))
            loss_op = nn.TripletMarginWithDistanceLoss(distance_function=distance_fn,
                                                       reduction=reduction, margin=margin, swap=swap)
            self.assertTrue(gradcheck(lambda a, p, n: loss_op(
                a, p, n), (anchor, positive, negative)))
            traced_loss_op = torch.jit.trace(loss_op, (anchor, positive, negative))
            self.assertTrue(gradcheck(lambda a, p, n: traced_loss_op(
                a, p, n), (anchor, positive, negative)))
            # Test forward parity
            functional = F.triplet_margin_with_distance_loss(anchor, positive, negative,
                                                             distance_function=distance_fn,
                                                             reduction=reduction, margin=margin, swap=swap)
            modular = loss_op(anchor, positive, negative)
            traced = traced_loss_op(anchor, positive, negative)
            self.assertEqual(functional, modular, atol=1e-6, rtol=1e-6)
            self.assertEqual(traced, modular, atol=1e-6, rtol=1e-6)
    def test_to_complex(self, device):
        """module.to(complex dtype) converts parameters and warns (once) that
        complex modules are a new feature."""
        m = nn.Linear(3, 5).to(device)
        # .to(device) on an already-moved module returns the same object.
        self.assertIs(m, m.to(device))
        m.to(torch.cfloat)
        self.assertIs(m.weight.dtype, torch.cfloat)
        m.to(torch.cdouble)
        self.assertIs(m.weight.dtype, torch.cdouble)
        m.to(torch.float)
        self.assertIs(m.weight.dtype, torch.float)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            m.to(torch.cfloat)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("Complex modules are a new feature" in str(w[-1].message))
    @skipMeta
    @dtypes(torch.float32, torch.float64)
    def test_module_to_empty(self, device, dtype):
        """to_empty() must materialize a meta-device module on a real device
        (with uninitialized storage that can then be init'd) and convert a
        materialized module back to meta."""
        class MyModule(nn.Module):
            def __init__(self, in_features, out_features, device=None, dtype=None):
                super().__init__()
                factory_kwargs = {"device": device, "dtype": dtype}
                self.weight = nn.Parameter(torch.randn(in_features, out_features, **factory_kwargs))
            def forward(self, x):
                return x @ self.weight
        # Test meta module instantiation.
        input = torch.randn(5, 10, device=device, dtype=dtype)
        m = MyModule(10, 1, device='meta', dtype=dtype)
        m(input)
        # Test materializing meta module on a real device.
        m.to_empty(device=device)
        m(input)
        with torch.no_grad():
            torch.nn.init.kaiming_uniform_(m.weight)
        m(input)
        # Test creating meta module from materialized module.
        m.to_empty(device='meta')
        m(input)
    @skipMeta
    def test_skip_init(self, device):
        """skip_init must place parameters on the requested device while
        skipping weight initialization, so — with the same seed — values
        differ from a normally constructed module."""
        torch.manual_seed(1)
        m_initialized = torch.nn.Linear(5, 1)
        m_initialized.to(device)
        torch.manual_seed(1)
        m_uninitialized = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1, device=device)
        self.assertEqual(m_initialized.weight.device, m_uninitialized.weight.device)
        self.assertFalse(torch.allclose(m_initialized.weight, m_uninitialized.weight))
    def test_adaptive_pool_invalid(self, device):
        """AdaptiveAvgPool1d/2d/3d must reject negative output sizes."""
        inp_1d = (torch.randn(1, 1, 1, device=device), (-1,))
        inp_2d = (torch.randn(1, 1, 1, 1, device=device), (-1, 0))
        inp_3d = (torch.randn(1, 1, 1, 1, 1, device=device), (-1, 0, 2))
        module_input_dict = {torch.nn.AdaptiveAvgPool1d : inp_1d,
                             torch.nn.AdaptiveAvgPool2d : inp_2d,
                             torch.nn.AdaptiveAvgPool3d : inp_3d}
        for m, inp in module_input_dict.items():
            with self.assertRaisesRegex(RuntimeError,
                                        r"elements of output_size must be greater than or equal to 0"):
                t, output_size = inp
                m(output_size)(t)
class TestModuleGlobalHooks(TestCase):
def tearDown(self):
nn.modules.module._global_backward_hooks = OrderedDict()
nn.modules.module._global_forward_hooks = OrderedDict()
nn.modules.module._global_forward_pre_hooks = OrderedDict()
    def test_module_global_hooks(self):
        """Global forward/backward hooks must fire for every module instance,
        multiple registered hooks accumulate their effects, and a removed
        hook stops firing (tracked via the forwards/backwards counters)."""
        module = nn.Sigmoid
        module_1 = module()
        module_2 = module()
        module_3 = module()
        input = torch.ones(5, 5, requires_grad=True)
        counter = {
            'forwards': 0,
            'backwards': 0
        }
        def fw_hook(inc, h_module, input, output):
            self.assertIsInstance(input, tuple)
            self.assertTrue(isinstance(output, torch.Tensor))
            self.assertTrue(isinstance(h_module, module))
            self.assertEqual(input[0], torch.ones(5, 5))
            # sigmoid(1) == 1 / (1 + e^-1)
            self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
            counter['forwards'] += inc
        def bw_hook(inc, h_module, grad_input, grad_output):
            self.assertIsInstance(grad_input, tuple)
            self.assertIsInstance(grad_output, tuple)
            self.assertTrue(isinstance(h_module, module))
            self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
            counter['backwards'] += inc
        # One global forward hook (inc=1): 3 forwards -> counter 3.
        test_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(1, *args))
        module_1(input)
        module_2(input)
        module_3(input)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 0)
        test_bwd = nn.modules.module.register_module_backward_hook(
            lambda *args: bw_hook(1, *args))
        output_1 = module_1(input)
        output_2 = module_2(input)
        output_3 = module_3(input)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 0)
        output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
        output_2.backward(torch.ones(5, 5) * 2, retain_graph=False)
        output_3.backward(torch.ones(5, 5) * 2, retain_graph=False)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 3)
        # Backward again on the retained graph fires the backward hook again.
        output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 4)
        # Second forward hook with inc=2: each forward now adds 1 + 2 = 3.
        test2_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(2, *args))
        output = module_1(input)
        output = module_2(input)
        output = module_3(input)
        self.assertEqual(counter['forwards'], 15)
        self.assertEqual(counter['backwards'], 4)
        test2_bwd = nn.modules.module.register_module_backward_hook(lambda *args: bw_hook(2, *args))
        module_1(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 18)
        self.assertEqual(counter['backwards'], 7)
        test2_bwd.remove()
        module_2(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 21)
        self.assertEqual(counter['backwards'], 8)
        test2_fwd.remove()
        module_3(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 22)
        self.assertEqual(counter['backwards'], 9)
        test_fwd.remove()
        test_bwd.remove()
    def test_module_global_hook_invalid_outputs(self):
        """A global backward hook returning the wrong number of gradients
        must raise a RuntimeError naming the mismatch."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        def bw_fail1(self, grad_input, grad_output):
            # Drop one gradient -> too few outputs.
            return grad_input[:-1]
        def bw_fail2(self, grad_input, grad_output):
            # Append an extra gradient -> too many outputs.
            return grad_input + (torch.randn(2, 2),)
        with nn.modules.module.register_module_backward_hook(bw_fail1):
            with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
                module(input).sum().backward()
        with nn.modules.module.register_module_backward_hook(bw_fail2):
            with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
                module(input).sum().backward()
    def test_module_backward_global_hook_writeable(self):
        """A global backward hook may rewrite grad_input; the doubled
        sigmoid gradient must reach the leaf tensor."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.sigmoid(input)
        def bw_hook(module, grad_input, grad_output):
            for grad in grad_input:
                self.assertTrue(isinstance(grad, torch.Tensor))
            for grad in grad_output:
                self.assertTrue(isinstance(grad, torch.Tensor))
            # Double every incoming gradient.
            return tuple(gi * 2 for gi in grad_input)
        nn.modules.module.register_module_backward_hook(bw_hook)
        module(input).backward(torch.ones(5, 5))
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), then doubled.
        expected_grad = sig_x * (1 - sig_x) * 2
        self.assertEqual(input.grad, expected_grad)
    def test_module_global_forward_preforward_hook_writeable(self):
        """Global forward-pre and forward hooks may rewrite the input and
        output; both rewrites must show up in the result and its gradient."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.sigmoid(input)
        def forward_pre_hook(m, input):
            # Replace the module input with relu(input).
            return torch.nn.functional.relu(input[0])
        def forward_hook(m, input, output):
            # Negate the module output.
            return -output
        nn.modules.module.register_module_forward_pre_hook(forward_pre_hook)
        nn.modules.module.register_module_forward_hook(forward_hook)
        output = module(input)
        expected_res = -torch.sigmoid(torch.nn.functional.relu(input))
        self.assertEqual(output, expected_res)
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        # relu masks out non-positive inputs in the gradient.
        mask = (input > 0).double()
        expected_grad = -sig_x * (1 - sig_x) * 2 * mask
        self.assertEqual(input.grad, expected_grad)
def test_module_forward_preforward_hook_removable(self):
module = nn.Sigmoid()
def removable_hook(m, input):
nonlocal handle
handle.remove()
return input
def removable_hook_2(m, input):
nonlocal handle_2
handle_2.remove()
return input
handle = module.register_forward_pre_hook(removable_hook)
handle_2 = module.register_forward_pre_hook(removable_hook_2)
# make sure hook register is successful
self.assertEqual(len(handle.hooks_dict_ref()), 2)
self.assertEqual(len(handle_2.hooks_dict_ref()), 2)
input = torch.randn(2, 2)
output = module(input)
self.assertEqual(torch.sigmoid(input), output)
# make sure hook removal is successful
self.assertFalse(handle.id in handle.hooks_dict_ref())
self.assertFalse(handle_2.id in handle.hooks_dict_ref())
self.assertEqual(len(handle.hooks_dict_ref()), 0)
self.assertEqual(len(handle_2.hooks_dict_ref()), 0)
def test_module_forward_forward_hook_removable(self):
module = nn.Sigmoid()
def removable_hook(m, input, output):
nonlocal handle
handle.remove()
return output
def removable_hook_2(m, input, output):
nonlocal handle_2
handle_2.remove()
return output
handle = module.register_forward_hook(removable_hook)
handle_2 = module.register_forward_hook(removable_hook_2)
# make sure hook register is successful
self.assertEqual(len(handle.hooks_dict_ref()), 2)
self.assertEqual(len(handle_2.hooks_dict_ref()), 2)
input = torch.randn(2, 2)
output = module(input)
self.assertEqual(torch.sigmoid(input), output)
# make sure hook removal is successful
self.assertFalse(handle.id in handle.hooks_dict_ref())
self.assertFalse(handle_2.id in handle.hooks_dict_ref())
self.assertEqual(len(handle.hooks_dict_ref()), 0)
self.assertEqual(len(handle_2.hooks_dict_ref()), 0)
def test_global_and_local_hooks_order(self):
module = nn.Sigmoid()
global_forward_pre_called = False
local_forward_pre_called = False
global_forward_called = False
local_forward_called = False
global_backward_called = False
local_backward_called = False
def global_forward_pre_hook(m, input):
nonlocal global_forward_pre_called
self.assertTrue(not local_forward_pre_called)
global_forward_pre_called = True
return input
def local_forward_pre_hook(m, input):
nonlocal local_forward_pre_called
self.assertTrue(global_forward_pre_called)
local_forward_pre_called = True
return input
def global_forward_hook(m, input, output):
nonlocal global_forward_called
self.assertTrue(not local_forward_called)
global_forward_called = True
return output
def local_forward_hook(m, input, output):
nonlocal local_forward_called
self.assertTrue(global_forward_called)
local_forward_called = True
return output
def global_backward_hook(m, input, output):
nonlocal global_backward_called
self.assertTrue(not local_backward_called)
global_backward_called = True
return input
def local_backward_hook(m, input, output):
nonlocal local_backward_called
self.assertTrue(global_backward_called)
local_backward_called = True
return input
input = torch.randn(5, 5, requires_grad=True)
nn.modules.module.register_module_forward_pre_hook(global_forward_pre_hook)
module.register_forward_pre_hook(local_forward_pre_hook)
nn.modules.module.register_module_forward_hook(global_forward_hook)
module.register_forward_hook(local_forward_hook)
nn.modules.module.register_module_backward_hook(global_backward_hook)
module.register_backward_hook(local_backward_hook)
output = module(input)
self.assertTrue(local_forward_called and local_forward_pre_called and global_forward_called and global_forward_pre_called)
output.backward(torch.ones(5, 5), retain_graph=True)
self.assertTrue(local_backward_called and global_backward_called)
class LazyModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
    # Minimal lazy-module fixture: the mixin must come first in the MRO so the
    # lazy machinery wraps the plain Module behavior.
    pass
class TestLazyModules(TestCase):
    """Tests for lazy modules: uninitialized parameters/buffers, state_dict
    round-trips, pickling, materialization, and interaction with optimizers
    and parametrizations."""

    @suppress_warnings
    def test_lazy_module_parameter(self):
        # An uninitialized parameter survives a state_dict round-trip.
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        self.assertTrue(module.has_uninitialized_params())
        state_dict = module.state_dict()
        self.assertIsInstance(state_dict['test_param'], UninitializedParameter)
        new_module = LazyModule()
        # An error is raised when there is an attempt to replace an existing parameter
        # with an uninitialized one
        new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            new_module.load_state_dict(state_dict)
        # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
        new_module = LazyModule()
        new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
        module.load_state_dict(new_module.state_dict())
        self.assertEqual(module.test_param, torch.ones((5, 5)))
        # Uninitialized parameters are left unchanged
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        self.assertTrue(module.has_uninitialized_params())
        new_module = LazyModule()
        new_module.register_parameter('test_param', UninitializedParameter())
        module.load_state_dict(new_module.state_dict())
        self.assertTrue(module.has_uninitialized_params())

    @suppress_warnings
    def test_lazy_module_buffer(self):
        # Mirror of test_lazy_module_parameter for buffers.
        module = LazyModule()
        module.register_buffer('test_buffer', UninitializedBuffer())
        self.assertTrue(module.has_uninitialized_params())
        state_dict = module.state_dict()
        self.assertIsInstance(state_dict['test_buffer'], UninitializedBuffer)
        new_module = LazyModule()
        # An error is raised when there is an attempt to replace an existing parameter
        # with an uninitialized one
        new_module.register_buffer('test_buffer', torch.ones(5, 5))
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            new_module.load_state_dict(state_dict)
        # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
        new_module = LazyModule()
        new_module.register_buffer('test_buffer', torch.ones(5, 5))
        module.load_state_dict(new_module.state_dict())
        self.assertEqual(module.test_buffer, torch.ones((5, 5)))
        # Uninitialized parameters are left unchanged
        module = LazyModule()
        module.register_buffer('test_buffer', UninitializedBuffer())
        self.assertTrue(module.has_uninitialized_params())
        new_module = LazyModule()
        new_module.register_buffer('test_buffer', UninitializedBuffer())
        module.load_state_dict(new_module.state_dict())
        # NOTE(review): the call below duplicates the line above; loading the
        # same state dict twice is redundant — candidate for removal.
        module.load_state_dict(new_module.state_dict())
        self.assertTrue(module.has_uninitialized_params())

    @suppress_warnings
    def test_lazy_module_jit_param(self):
        # Scripting a module with uninitialized parameters must fail.
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        self.assertTrue(module.has_uninitialized_params())
        with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
            torch.jit.script(module)

    @suppress_warnings
    def test_lazy_module_jit_buffer(self):
        # Scripting a module with uninitialized buffers must fail.
        module = LazyModule()
        module.register_buffer('test_buffer', UninitializedBuffer())
        self.assertTrue(module.has_uninitialized_params())
        with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
            torch.jit.script(module)

    @suppress_warnings
    def test_lazy_share_memory_param(self):
        # share_memory() on uninitialized parameters must fail.
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        self.assertTrue(module.has_uninitialized_params())
        with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
            module.share_memory()

    @suppress_warnings
    def test_lazy_share_memory_buffer(self):
        # share_memory() on uninitialized buffers must fail.
        module = LazyModule()
        module.register_buffer('test_buffer', UninitializedBuffer())
        self.assertTrue(module.has_uninitialized_params())
        with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
            module.share_memory()

    @suppress_warnings
    def test_linear(self):
        # The first forward pass materializes LazyLinear into a plain Linear.
        module = nn.LazyLinear(10)
        self.assertIsInstance(module.weight, UninitializedParameter)
        self.assertIsInstance(module.bias, UninitializedParameter)
        input = torch.ones(5, 5)
        module(input)
        self.assertIsInstance(module, nn.Linear)
        self.assertNotIsInstance(module, nn.LazyLinear)
        self.assertTrue(module.weight.shape == (10, 5))
        self.assertTrue(module.bias.shape == (10,))
        y = module(input)
        self.assertTrue(torch.equal(torch.nn.functional.linear(input, module.weight, module.bias), y))

    @suppress_warnings
    def test_lazy_linear_pickle(self):
        # Pickling must preserve laziness before materialization and the
        # concrete class/shapes after.
        module = nn.LazyLinear(10)
        self.assertIsInstance(module.weight, UninitializedParameter)
        self.assertIsInstance(module.bias, UninitializedParameter)
        module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(module, nn.LazyLinear)
        self.assertIsInstance(module.weight, UninitializedParameter)
        self.assertIsInstance(module.bias, UninitializedParameter)
        input = torch.ones(5, 5)
        module(input)  # fully materialized
        new_module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(new_module, nn.Linear)
        self.assertNotIsInstance(new_module, nn.LazyLinear)
        self.assertTrue(new_module.weight.shape == (10, 5))
        self.assertNotIsInstance(new_module.weight, UninitializedParameter)
        self.assertTrue(new_module.bias.shape == (10,))
        self.assertNotIsInstance(new_module.bias, UninitializedParameter)

    @suppress_warnings
    def test_linear_state(self):
        module = nn.Linear(5, 10)
        lazy_module = nn.LazyLinear(10)
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a fully
        # functional one until the first forward pass.
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertTrue(lazy_module.weight.shape == (10, 5))
        self.assertTrue(lazy_module.bias.shape == (10,))

        # Loading an uninitialized state dict into a regular module must fail.
        module = nn.Linear(5, 10)
        lazy_module = nn.LazyLinear(10)
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())

    def _check_lazy_conv(self, cls, lazy_cls, func, init_args, input_shape,
                         expected_weight_shape, expected_bias_shape):
        # Generic check: a lazy conv materializes into `cls` on first forward,
        # with the expected weight/bias shapes and functional-equivalent output.
        module = lazy_cls(*init_args)
        self.assertIsInstance(module.weight, UninitializedParameter)
        if module.bias is not None:
            self.assertIsInstance(module.bias, UninitializedParameter)
        input = torch.ones(*input_shape)
        module(input)
        self.assertIsInstance(module, cls)
        self.assertNotIsInstance(module, lazy_cls)
        self.assertEqual(module.weight.shape, expected_weight_shape)
        if module.bias is not None:
            self.assertEqual(module.bias.shape, expected_bias_shape)
        y = module(input)
        self.assertTrue(torch.equal(func(input, module.weight, module.bias), y))

    def _check_lazy_conv_pickle(self, cls, lazy_cls, init_args, input_shape,
                                expected_weight_shape, expected_bias_shape):
        # Generic check: pickling preserves laziness before materialization and
        # the concrete class/shapes after.
        module = lazy_cls(*init_args)
        self.assertIsInstance(module.weight, UninitializedParameter)
        if module.bias is not None:
            self.assertIsInstance(module.bias, UninitializedParameter)
        module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(module, lazy_cls)
        self.assertIsInstance(module.weight, UninitializedParameter)
        if module.bias is not None:
            self.assertIsInstance(module.bias, UninitializedParameter)
        input = torch.ones(*input_shape)
        module(input)
        new_module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(new_module, cls)
        self.assertNotIsInstance(new_module, lazy_cls)
        self.assertEqual(new_module.weight.shape, expected_weight_shape)
        self.assertNotIsInstance(new_module.weight, UninitializedParameter)
        if new_module.bias is not None:
            self.assertEqual(new_module.bias.shape, expected_bias_shape)
            self.assertNotIsInstance(new_module.bias, UninitializedParameter)

    def _check_lazy_conv_state(self, gen_module, gen_lazy_module,
                               expected_weight_shape, expected_bias_shape):
        # Generic check: loading an initialized state dict fills the lazy
        # module's parameters.
        module = gen_module()
        lazy_module = gen_lazy_module()
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a fully
        # functional Conv one until the first iteration. This is due to
        # limitations on the state_dict loading logic
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertEqual(lazy_module.weight.shape, expected_weight_shape)
        if lazy_module.bias is not None:
            self.assertEqual(lazy_module.bias.shape, expected_bias_shape)

        # Loading an uninitialized state dict into a regular module must fail.
        module = gen_module()
        lazy_module = gen_lazy_module()
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())

    def test_lazy_pre_forward_hook(self):
        # Forward-pre hooks registered on a lazy module must still fire.
        class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
            def __init__(self):
                super().__init__()

            def initialize_parameters(self, input):
                return None

            def forward(self, input):
                return input

        def hook_function(module, input):
            return input[0] + 1

        module = TestModule()
        module.register_forward_pre_hook(hook_function)
        output = module(torch.zeros(2, 2))
        self.assertEqual(output, torch.ones(2, 2))

    def test_lazy_forward_hook(self):
        # Forward hooks registered on a lazy module must still fire.
        class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
            def __init__(self):
                super().__init__()

            def initialize_parameters(self, input):
                return None

            def forward(self, input):
                return input

        def hook_function(module, input, output):
            return input[0] + 1

        module = TestModule()
        module.register_forward_hook(hook_function)
        output = module(torch.zeros(2, 2))
        self.assertEqual(output, torch.ones(2, 2))

    # --- Lazy convolution variants: each triple of tests exercises forward
    # --- materialization, pickling and state-dict loading via the _check_*
    # --- helpers above.

    @suppress_warnings
    def test_lazy_conv1d(self):
        self._check_lazy_conv(nn.Conv1d, nn.LazyConv1d, torch.nn.functional.conv1d,
                              (32, 2), (192, 16, 50), (32, 16, 2), (32,))

    @suppress_warnings
    def test_lazy_conv1d_pickle(self):
        self._check_lazy_conv_pickle(nn.Conv1d, nn.LazyConv1d, (32, 2), (192, 16, 50),
                                     (32, 16, 2), (32,))

    @suppress_warnings
    def test_lazy_conv1d_state(self):
        self._check_lazy_conv_state(lambda: nn.Conv1d(16, 32, 2),
                                    lambda: nn.LazyConv1d(32, 2),
                                    (32, 16, 2), (32,))

    @suppress_warnings
    def test_lazy_conv2d(self):
        self._check_lazy_conv(nn.Conv2d, nn.LazyConv2d, torch.nn.functional.conv2d,
                              (32, 2), (192, 16, 8, 6), (32, 16, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv2d_pickle(self):
        self._check_lazy_conv_pickle(nn.Conv2d, nn.LazyConv2d, (32, 2), (192, 16, 8, 6),
                                     (32, 16, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv2d_state(self):
        self._check_lazy_conv_state(lambda: nn.Conv2d(16, 32, 2),
                                    lambda: nn.LazyConv2d(32, 2),
                                    (32, 16, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv3d(self):
        self._check_lazy_conv(nn.Conv3d, nn.LazyConv3d, torch.nn.functional.conv3d,
                              (32, 2), (192, 16, 8, 7, 6), (32, 16, 2, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv3d_pickle(self):
        self._check_lazy_conv_pickle(nn.Conv3d, nn.LazyConv3d, (32, 2), (192, 16, 8, 7, 6),
                                     (32, 16, 2, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv3d_state(self):
        self._check_lazy_conv_state(lambda: nn.Conv3d(16, 32, 2),
                                    lambda: nn.LazyConv3d(32, 2),
                                    (32, 16, 2, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transposed1d(self):
        self._check_lazy_conv(nn.ConvTranspose1d, nn.LazyConvTranspose1d, torch.nn.functional.conv_transpose1d,
                              (32, 2), (192, 16, 50), (16, 32, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose1d_pickle(self):
        self._check_lazy_conv_pickle(nn.ConvTranspose1d, nn.LazyConvTranspose1d, (32, 2),
                                     (192, 16, 50), (16, 32, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose1d_state(self):
        self._check_lazy_conv_state(lambda: nn.ConvTranspose1d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose1d(32, 2),
                                    (16, 32, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose2d(self):
        self._check_lazy_conv(nn.ConvTranspose2d, nn.LazyConvTranspose2d, torch.nn.functional.conv_transpose2d,
                              (32, 2), (192, 16, 8, 6), (16, 32, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose2d_pickle(self):
        self._check_lazy_conv_pickle(nn.ConvTranspose2d, nn.LazyConvTranspose2d, (32, 2),
                                     (192, 16, 8, 6), (16, 32, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose2d_state(self):
        self._check_lazy_conv_state(lambda: nn.ConvTranspose2d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose2d(32, 2),
                                    (16, 32, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose3d(self):
        self._check_lazy_conv(nn.ConvTranspose3d, nn.LazyConvTranspose3d, torch.nn.functional.conv_transpose3d,
                              (32, 2), (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose3d_pickle(self):
        self._check_lazy_conv_pickle(nn.ConvTranspose3d, nn.LazyConvTranspose3d, (32, 2),
                                     (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))

    @suppress_warnings
    def test_lazy_conv_transpose3d_state(self):
        self._check_lazy_conv_state(lambda: nn.ConvTranspose3d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose3d(32, 2),
                                    (16, 32, 2, 2, 2), (32,))

    def _check_lazy_norm(self, cls, lazy_cls, input_shape):
        # Generic check for lazy norm layers across the affine /
        # track_running_stats combinations: materialized parameters must match
        # an eagerly constructed counterpart.
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)

                if affine:
                    self.assertIsInstance(lazy_module.weight, UninitializedParameter)
                    self.assertIsInstance(lazy_module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertIsInstance(lazy_module.running_mean, UninitializedBuffer)
                    self.assertIsInstance(lazy_module.running_var, UninitializedBuffer)

                input = torch.ones(*input_shape)
                lazy_output = lazy_module(input)
                self.assertIsInstance(lazy_module, cls)
                self.assertNotIsInstance(lazy_module, lazy_cls)

                # num_features is inferred from the channel dimension.
                num_features = input_shape[1]
                module = cls(num_features, affine=affine, track_running_stats=track_running_stats)
                expected_output = module(input)

                self.assertEqual(lazy_output, expected_output)
                if module.weight is not None:
                    self.assertEqual(lazy_module.weight.shape, module.weight.shape)
                    self.assertEqual(lazy_module.weight, module.weight)
                if module.bias is not None:
                    self.assertEqual(lazy_module.bias.shape, module.bias.shape)
                    self.assertEqual(lazy_module.bias, module.bias)
                if module.running_mean is not None:
                    self.assertEqual(lazy_module.running_mean.shape, module.running_mean.shape)
                    self.assertEqual(lazy_module.running_mean, module.running_mean)
                if module.running_var is not None:
                    self.assertEqual(lazy_module.running_var.shape, module.running_var.shape)
                    self.assertEqual(lazy_module.running_var, module.running_var)
                if module.num_batches_tracked is not None:
                    self.assertEqual(lazy_module.num_batches_tracked.shape, module.num_batches_tracked.shape)
                    self.assertEqual(lazy_module.num_batches_tracked, module.num_batches_tracked)

    def _check_lazy_norm_pickle(self, cls, lazy_cls, input_shape):
        # Generic check: pickling a lazy norm layer preserves laziness before
        # materialization and the concrete class after.
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                module = pickle.loads(pickle.dumps(module))

                self.assertIsInstance(module, lazy_cls)
                if affine:
                    self.assertIsInstance(module.weight, UninitializedParameter)
                    self.assertIsInstance(module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertIsInstance(module.running_mean, UninitializedBuffer)
                    self.assertIsInstance(module.running_var, UninitializedBuffer)

                input = torch.ones(*input_shape)
                module(input)  # fully materialized
                module = pickle.loads(pickle.dumps(module))

                self.assertNotIsInstance(module, lazy_cls)
                self.assertIsInstance(module, cls)
                if affine:
                    self.assertNotIsInstance(module.weight, UninitializedParameter)
                    self.assertNotIsInstance(module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertNotIsInstance(module.running_mean, UninitializedBuffer)
                    self.assertNotIsInstance(module.running_var, UninitializedBuffer)

    def _check_lazy_batchnorm_state(self, cls, lazy_cls):
        module = cls(10)
        lazy_module = lazy_cls(affine=True, track_running_stats=True)
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a fully
        # functional one until the first forward pass.
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertEqual(lazy_module.weight.shape, (10,))
        self.assertEqual(lazy_module.bias.shape, (10,))
        self.assertEqual(lazy_module.running_mean.shape, (10,))
        self.assertEqual(lazy_module.running_var.shape, (10,))

        # Loading an uninitialized state dict into a regular module must fail.
        module = cls(10)
        lazy_module = lazy_cls()
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())

    def _check_lazy_instancenorm_state(self, cls, lazy_cls):
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                module = cls(10, affine=affine, track_running_stats=track_running_stats)
                lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                lazy_module.load_state_dict(module.state_dict())
                # Parameters have been initialized but the module won't become
                # a fully functional InstanceNorm one until the first iteration.
                # This is due to limitations on the state_dict loading logic
                self.assertFalse(lazy_module.has_uninitialized_params())
                if affine:
                    self.assertEqual(lazy_module.weight.shape, (10,))
                    self.assertEqual(lazy_module.bias.shape, (10,))
                if track_running_stats:
                    self.assertEqual(lazy_module.running_mean.shape, (10,))
                    self.assertEqual(lazy_module.running_var.shape, (10,))

        # Loading an uninitialized state dict into a regular module must fail.
        module = cls(10, affine=True, track_running_stats=True)
        lazy_module = lazy_cls(affine=True, track_running_stats=True)
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())

    def test_lazy_batchnorm1d(self):
        self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
        self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))

    def test_lazy_batchnorm1d_pickle(self):
        self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
        self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))

    def test_lazy_batchnorm1d_state(self):
        self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)
        # NOTE(review): duplicate call — identical to the line above; candidate
        # for removal.
        self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)

    def test_lazy_batchnorm2d(self):
        self._check_lazy_norm(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))

    def test_lazy_batchnorm2d_pickle(self):
        self._check_lazy_norm_pickle(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))

    def test_lazy_batchnorm2d_state(self):
        self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)
        # NOTE(review): duplicate call — identical to the line above.
        self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)

    def test_lazy_batchnorm3d(self):
        self._check_lazy_norm(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))

    def test_lazy_batchnorm3d_pickle(self):
        self._check_lazy_norm_pickle(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))

    def test_lazy_batchnorm3d_state(self):
        self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)
        # NOTE(review): duplicate call — identical to the line above.
        self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)

    def test_lazy_instancenorm1d(self):
        self._check_lazy_norm(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))

    def test_lazy_instancenorm1d_pickle(self):
        self._check_lazy_norm_pickle(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))

    def test_lazy_instancenorm1d_state(self):
        self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)
        # NOTE(review): duplicate call — identical to the line above.
        self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)

    def test_lazy_instancenorm2d(self):
        self._check_lazy_norm(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))

    def test_lazy_instancenorm2d_pickle(self):
        self._check_lazy_norm_pickle(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))

    def test_lazy_instancenorm2d_state(self):
        self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)
        # NOTE(review): duplicate call — identical to the line above.
        self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)

    def test_lazy_instancenorm3d(self):
        self._check_lazy_norm(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))

    def test_lazy_instancenorm3d_pickle(self):
        self._check_lazy_norm_pickle(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))

    def test_lazy_instancenorm3d_state(self):
        self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)
        # NOTE(review): duplicate call — identical to the line above.
        self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)

    @suppress_warnings
    def test_materialize_dtype(self):
        # Materialization must honor the module's current dtype.
        # NOTE(review): the float64 expectation relies on this test file setting
        # the default dtype to double at import time — confirm against file top.
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.dtype == torch.float64)
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.half()
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.dtype == torch.float16)

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @suppress_warnings
    def test_materialize_device(self):
        # Materialization must honor the module's current device.
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.device.type == 'cpu')
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.cuda()
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.device.type == 'cuda')

    @suppress_warnings
    def test_chained_initialization(self):
        # A lazy layer fed by another lazy layer must infer its in_features
        # from the upstream layer's output during the same forward pass.
        class MyNetwork(torch.nn.Module):
            def __init__(self):
                super(MyNetwork, self).__init__()
                self.linear_1 = torch.nn.LazyLinear(15)
                self.linear_2 = torch.nn.LazyLinear(10)

            def forward(self, x):
                y = self.linear_1(x)
                return self.linear_2(y)

        net = MyNetwork()
        net(torch.ones(5, 10))
        self.assertTrue(net.linear_1.weight.shape == (15, 10))
        self.assertTrue(net.linear_1.bias.shape == (15,))
        self.assertTrue(net.linear_2.weight.shape == (10, 15))
        self.assertTrue(net.linear_2.bias.shape == (10,))

    @suppress_warnings
    def test_optimizer_pass(self):
        # Optimizers constructed over uninitialized parameters must see the
        # materialized Parameter objects once materialize() runs (the lazy
        # machinery swaps the objects in-place in param_groups).
        optimizers = [torch.optim.Adadelta, torch.optim.Adagrad, torch.optim.Adam,
                      torch.optim.AdamW, torch.optim.Adamax,
                      torch.optim.ASGD, torch.optim.SGD, torch.optim.Rprop,
                      torch.optim.RMSprop, torch.optim.LBFGS]

        def run_step(module, optim):
            self.assertIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
            module.test_param.materialize(10)
            self.assertIsInstance(optim.param_groups[0]['params'][0], Parameter)
            self.assertNotIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
            for p in module.parameters():
                p.grad = torch.rand_like(p)
            if isinstance(optim, torch.optim.LBFGS):
                # LBFGS requires a closure returning the loss.
                optim.step(lambda: 1.0)
            else:
                optim.step()

        for optim_cls in optimizers:
            module = LazyModule()
            module.register_parameter('test_param', UninitializedParameter())
            if optim_cls is torch.optim.SGD:
                # SGD requires an explicit learning rate.
                optim = optim_cls(module.parameters(), lr=0.0)
            elif optim_cls is torch.optim.Adagrad:
                # Adagrad eagerly allocates state and so rejects uninitialized
                # parameters at construction time.
                with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
                    optim = optim_cls(module.parameters())
                continue
            else:
                optim = optim_cls(module.parameters())
            run_step(module, optim)

    @suppress_warnings
    def test_weight_norm(self):
        # weight_norm cannot be applied before materialization.
        m = nn.LazyLinear(7)
        with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
            m = torch.nn.utils.weight_norm(m)

    @suppress_warnings
    def test_spectral_norm(self):
        # spectral_norm cannot be applied before materialization.
        m = nn.LazyLinear(7)
        with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
            m = torch.nn.utils.spectral_norm(m)

    @suppress_warnings
    def test_invalid_functions(self):
        # Tensor functions on an uninitialized parameter must raise.
        param = torch.nn.parameter.UninitializedParameter()
        with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
            torch.empty_like(param)

        with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
            torch.add(param, param)

        with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
            param + param
class TestFunctionalPickle(TestCase):
# issue gh-38137
def test_pickle_softsign(self):
# Make sure it does not throw an exception
s = pickle.dumps(F.softsign)
class TestStateDictHooks(TestCase):
    """Tests for ``_register_load_state_dict_pre_hook`` with and without the
    ``with_module=True`` flag."""

    def test_load_state_dict_pre_hook(self):
        m = nn.Linear(10, 10)
        m_state_dict = m.state_dict()

        m_load = nn.Linear(10, 10)

        hook_called = 0

        def hook_without_module(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
            self.assertEqual(m_state_dict, state_dict)
            nonlocal hook_called
            hook_called += 1

        def hook_with_module(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
            self.assertEqual(m_state_dict, state_dict)
            # with_module=True prepends the module itself as first argument.
            self.assertTrue(m_load is module)
            nonlocal hook_called
            hook_called += 1

        hook_called = 0
        m_load._register_load_state_dict_pre_hook(hook_without_module)
        m_load.load_state_dict(m_state_dict)
        self.assertEqual(1, hook_called)

        # The first hook stays registered, so after adding the second one both
        # fire on the next load (hence the expected count of 2).
        hook_called = 0
        m_load._register_load_state_dict_pre_hook(hook_with_module, True)
        m_load.load_state_dict(m_state_dict)
        self.assertEqual(2, hook_called)

    def test_load_state_dict_module_pre_hook(self):
        hook_called = 0

        # Test with module instance method as hook
        class MyModule(nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.foo = torch.nn.Parameter(torch.rand(10))

            def my_pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
                assert [] == error_msgs
                assert [] == unexpected_keys
                assert [] == missing_keys
                assert strict
                nonlocal hook_called
                hook_called += 1

            def my_pre_load_hook_with_module(
                self,
                module,
                state_dict,
                prefix,
                local_metadata,
                strict,
                missing_keys,
                unexpected_keys,
                error_msgs,
            ):
                assert [] == error_msgs
                assert [] == unexpected_keys
                assert [] == missing_keys
                assert strict
                # Bound-method hook: self and the passed module must coincide.
                assert self is module
                nonlocal hook_called
                hook_called += 1

        m = MyModule()
        state_dict = m.state_dict()

        hook_called = 0
        m._register_load_state_dict_pre_hook(m.my_pre_load_hook)
        m.load_state_dict(state_dict)
        self.assertEqual(1, hook_called)

        # Both the plain hook and the with-module hook are registered now.
        hook_called = 0
        m._register_load_state_dict_pre_hook(m.my_pre_load_hook_with_module, True)
        m.load_state_dict(state_dict)
        self.assertEqual(2, hook_called)
# Generate per-device variants of the device-generic tests and expand the
# parametrized tests at import time.
instantiate_device_type_tests(TestNNDeviceType, globals())
instantiate_parametrized_tests(TestNN)

if __name__ == '__main__':
    run_tests()
| true | true |
1c346385bc302ce04a1fb5d8618983f744e70098 | 23,858 | py | Python | huggingface-gpt2/ort_addon/ort_supplement/src/transformers/trainer_ort.py | kshama-msft/onnxruntime-training-examples | 0192a776e2fc62f1eeda3e3f1200cf40448302c1 | [
"MIT"
] | null | null | null | huggingface-gpt2/ort_addon/ort_supplement/src/transformers/trainer_ort.py | kshama-msft/onnxruntime-training-examples | 0192a776e2fc62f1eeda3e3f1200cf40448302c1 | [
"MIT"
] | null | null | null | huggingface-gpt2/ort_addon/ort_supplement/src/transformers/trainer_ort.py | kshama-msft/onnxruntime-training-examples | 0192a776e2fc62f1eeda3e3f1200cf40448302c1 | [
"MIT"
] | null | null | null | import json
import time
import logging
import os
import random
import re
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from tqdm import tqdm, trange
import onnxruntime
from onnxruntime.capi.ort_trainer import ORTTrainer, IODescription, ModelDescription
from onnxruntime.capi.ort_trainer import LossScaler
from .data.data_collator import DataCollator, DefaultDataCollator
from .modeling_utils import PreTrainedModel
from .training_args import TrainingArguments
from .trainer import PredictionOutput, TrainOutput, EvalPrediction, set_seed
from azureml.core.run import Run
# Get the Azure ML run context so metrics/artifacts can be logged to the
# workspace that launched this script.
run = Run.get_context()
# Prefer the native torch SummaryWriter; fall back to tensorboardX. Record
# whether either implementation could be imported.
try:
    from torch.utils.tensorboard import SummaryWriter

    _has_tensorboard = True
except ImportError:
    try:
        from tensorboardX import SummaryWriter

        _has_tensorboard = True
    except ImportError:
        _has_tensorboard = False


def is_tensorboard_available():
    # True when a SummaryWriter implementation was importable at module load.
    return _has_tensorboard


logger = logging.getLogger(__name__)

# Directory-name prefix used for checkpoints written during ORT training.
PREFIX_CHECKPOINT_DIR = "ort_checkpoint"
class linear_schedule_with_warmup():
    """Learning-rate multiplier that increases linearly over a warmup period
    and then decays linearly to zero at ``num_training_steps``."""

    def __init__(self, num_warmup_steps, num_training_steps, last_epoch=-1):
        self.num_warmup_steps = num_warmup_steps
        self.num_training_steps = num_training_steps
        self.last_epoch = last_epoch

    def lr_lambda(self, current_step):
        """Return the schedule multiplier in ``[0, 1]`` for ``current_step``."""
        if current_step < self.num_warmup_steps:
            # Ramp-up: 0 -> 1 across the warmup steps (guard against /0).
            return float(current_step) / float(max(1, self.num_warmup_steps))
        # Decay: 1 -> 0 across the remaining steps, clamped at 0 past the end.
        remaining = float(self.num_training_steps - current_step)
        decay_span = float(max(1, self.num_training_steps - self.num_warmup_steps))
        return max(0.0, remaining / decay_span)

    def get_lr_this_step(self, current_step, base_lr):
        """Scale ``base_lr`` by the schedule multiplier at ``current_step``."""
        return self.lr_lambda(current_step) * base_lr
class OrtTrainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for Transformers.
    """

    # Class-level attribute declarations describing the trainer's state; the
    # instances are populated in __init__.
    model: PreTrainedModel
    args: TrainingArguments
    data_collator: DataCollator
    train_dataset: Optional[Dataset]
    eval_dataset: Optional[Dataset]
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
    prediction_loss_only: bool
    tb_writer: Optional["SummaryWriter"] = None
def __init__(
self,
model: PreTrainedModel,
args: TrainingArguments,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch,
optimized for Transformers.
Args:
prediction_loss_only:
(Optional) in evaluation and prediction, only return the loss
"""
self.model = model
self.args = args
if data_collator is not None:
self.data_collator = data_collator
else:
self.data_collator = DefaultDataCollator()
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
if is_tensorboard_available() and self.is_world_master():
self.tb_writer = SummaryWriter(log_dir=self.args.logging_dir)
if not is_tensorboard_available():
logger.warning(
"You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
)
set_seed(self.args.seed)
onnxruntime.set_seed(self.args.seed)
# Create output directory if needed
if self.is_world_master():
os.makedirs(self.args.output_dir, exist_ok=True)
torch.cuda.set_device(self.args.local_rank)
self.ort_model = self.to_ort_model(model, model.config, args)
def update_torch_model(self,):
if self.ort_model:
logger.info(
"Updating weights of torch model from ORT model."
)
ort_state_dict = self.ort_model.state_dict()
self.model.load_state_dict(ort_state_dict, strict=False)
else:
logger.warning(
"No ORT model found to update weights from, assuming torch model is up to date."
)
def gpt2_model_description(self,n_head, vocab_size, n_hidden, n_layer, n_ctx, batch_size):
logger.info("****num of head is: {}".format(n_head))
logger.info("****vocab size is: {}".format(vocab_size))
logger.info("****num of hidden layer is: {}".format(n_hidden))
logger.info("****num of layer is: {}".format(n_layer))
logger.info("****seq length is: {}".format(n_ctx))
input_ids_desc = IODescription('input_ids', [batch_size, n_ctx], torch.int64, num_classes = vocab_size)
labels_desc = IODescription('labels', [batch_size, n_ctx], torch.int64, num_classes = vocab_size)
loss_desc = IODescription('loss', [], torch.float32)
return ModelDescription([input_ids_desc, labels_desc],
[loss_desc])
def ort_trainer_learning_rate_description(self):
return IODescription('Learning_Rate', [1,], torch.float32)
def to_ort_model(self,model, config, args):
model_desc = self.gpt2_model_description(config.n_head, config.vocab_size, config.n_embd, config.n_layer, config.n_ctx, args.per_gpu_train_batch_size)
learning_rate_description = self.ort_trainer_learning_rate_description()
def map_optimizer_attributes(name):
no_decay_keys = ["bias", "gamma", "beta", "LayerNorm"]
no_decay = False
for no_decay_key in no_decay_keys:
if no_decay_key in name:
no_decay = True
break
if no_decay:
return {"alpha": 0.9, "beta": 0.999, "lambda": 0.0, "epsilon": args.adam_epsilon}
else:
return {"alpha": 0.9, "beta": 0.999, "lambda": args.weight_decay, "epsilon": args.adam_epsilon}
from onnxruntime.capi._pybind_state import set_cuda_device_id, set_arena_extend_strategy, ArenaExtendStrategy
set_arena_extend_strategy(ArenaExtendStrategy.kSameAsRequested)
set_cuda_device_id(self.args.local_rank)
model = ORTTrainer(model, None, model_desc, "AdamOptimizer",
map_optimizer_attributes,
learning_rate_description,
args.device,
gradient_accumulation_steps=args.gradient_accumulation_steps,
world_rank = self.args.world_rank,
world_size = self.args.world_size,
use_mixed_precision = self.args.fp16,
allreduce_post_accumulation = True,
_opset_version=12
)
logger.info("****************************Model converted to ORT")
return model
def get_train_dataloader(self) -> DataLoader:
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = (
RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset)
)
return DataLoader(
self.train_dataset,
batch_size=self.args.per_gpu_train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
return DataLoader(
eval_dataset if eval_dataset is not None else self.eval_dataset,
batch_size=self.args.eval_batch_size,
shuffle=False,
collate_fn=self.data_collator.collate_batch,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
shuffle=False,
collate_fn=self.data_collator.collate_batch,
)
def train(self, model_path: Optional[str] = None):
"""
Main training entry point.
Args:
model_path:
(Optional) Local path to model if model to train has been instantiated from a local path
If present, we will try reloading the optimizer/scheduler states from there.
"""
train_dataloader = self.get_train_dataloader()
if self.args.max_steps > 0:
t_total = self.args.max_steps
num_train_epochs = (
self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
)
else:
t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)
num_train_epochs = self.args.num_train_epochs
scheduler = linear_schedule_with_warmup(num_warmup_steps=self.args.warmup_steps, num_training_steps=t_total)
loss_scaler = LossScaler(self.ort_model.loss_scale_input_name, True, up_scale_window=2000, loss_scale=float(1 << 20)) if self.args.fp16 else 1
model = self.ort_model
if self.tb_writer is not None:
self.tb_writer.add_text("args", self.args.to_json_string())
# Train!
if self.is_world_master():
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataloader.dataset))
logger.info(" Num Epochs = %d", num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", self.args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
self.args.train_batch_size
* self.args.gradient_accumulation_steps
* (self.args.world_size if self.args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if model_path is not None:
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(model_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // self.args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
global_step = 0
logger.info(" Starting fine-tuning.")
tr_loss = 0.0
logging_loss = 0.0
global_batch_train_start = time.time()
train_iterator = trange(
epochs_trained, int(num_train_epochs), desc="Epoch", disable=self.args.local_rank not in [-1, 0],
)
for epoch in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=self.args.local_rank not in [-1, 0])
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if len(inputs['input_ids']) < self.args.per_gpu_train_batch_size:
#skip incomplete batch
logger.info('Skipping incomplete batch...')
continue
learning_rate = torch.tensor([scheduler.get_lr_this_step(global_step, base_lr = self.args.learning_rate)])
loss, all_finite = self._training_step(model, inputs, learning_rate, loss_scaler)
tr_loss += loss
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
len(epoch_iterator) <= self.args.gradient_accumulation_steps
and (step + 1) == len(epoch_iterator)
):
if self.args.fp16:
loss_scaler.update_loss_scale(all_finite.item())
global_step += 1
global_batch_train_duration = time.time() - global_batch_train_start
global_batch_train_start = time.time()
if self.args.local_rank in [-1, 0]:
if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or (
global_step == 1 and self.args.logging_first_step
):
logs = {}
loss_avg = (tr_loss - logging_loss) / (self.args.logging_steps * self.args.gradient_accumulation_steps)
logs["learning_rate"] = learning_rate.item()
logs["loss"] = loss_avg
logs["global_step"] = global_step
logs["global_step_time"] = global_batch_train_duration
logging_loss = tr_loss
if self.tb_writer:
for k, v in logs.items():
self.tb_writer.add_scalar(k, v, global_step)
run.log(k,v)
epoch_iterator.write(json.dumps({**logs, **{"step": global_step}}))
if self.args.save_steps > 0 and global_step % self.args.save_steps == 0:
# In all cases (even distributed/parallel), self.model is always a reference
# to the model we want to save.
if hasattr(model, "module"):
assert model.module is self.ort_model
else:
assert model is self.ort_model
# Save model checkpoint
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{global_step}")
self.save_model(output_dir)
# self._rotate_checkpoints()
if self.args.max_steps > 0 and global_step > self.args.max_steps:
epoch_iterator.close()
break
if self.args.max_steps > 0 and global_step > self.args.max_steps:
train_iterator.close()
break
if self.tb_writer:
self.tb_writer.close()
self.update_torch_model()
del(self.ort_model)
self.ort_model = None
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
return TrainOutput(global_step, tr_loss / global_step)
def _training_step(
self, model: nn.Module, inputs: Dict[str, torch.Tensor], learning_rate, loss_scaler
) -> float:
model.train()
if self.args.fp16:
loss_scale = torch.tensor([loss_scaler.loss_scale_])
result = model(inputs['input_ids'],inputs['labels'], learning_rate, loss_scale)
else:
result = model(inputs['input_ids'],inputs['labels'], learning_rate)
all_finite = None
if isinstance(result, (list, tuple)):
loss = result[0]
all_finite = result[-1]
else:
loss = result
return loss.item(), all_finite
def is_world_master(self) -> bool:
"""
This will be True only in one process, even in distributed mode,
even when training on multiple machines.
"""
return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Saving best-practices: if you use default names for the model,
you can reload it using from_pretrained().
Will only save from the master process.
"""
if self.is_world_master():
self._save(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
self.update_torch_model()
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent.
Args:
eval_dataset: (Optional) Pass a dataset if you wish to override
the one on the instance.
Returns:
A dict containing:
- the eval loss
- the potential metrics computed from the predictions
"""
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self._prediction_loop(eval_dataloader, description="Evaluation")
return output.metrics
def predict(self, test_dataset: Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
"""
test_dataloader = self.get_test_dataloader(test_dataset)
return self._prediction_loop(test_dataloader, description="Prediction")
def _prediction_loop(
self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.
"""
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
self.update_torch_model()
# multi-gpu eval
if self.args.n_gpu > 1 and not isinstance(self.model, torch.nn.DataParallel):
model = torch.nn.DataParallel(self.model)
else:
model = self.model
model.to(self.args.device)
if self.is_world_master():
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", len(dataloader.dataset))
logger.info(" Batch size = %d", dataloader.batch_size)
eval_losses: List[float] = []
preds: np.ndarray = None
label_ids: np.ndarray = None
model.eval()
for inputs in tqdm(dataloader, desc=description):
has_labels = any(inputs.get(k) is not None for k in ["labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if not prediction_loss_only:
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
if inputs.get("labels") is not None:
if label_ids is None:
label_ids = inputs["labels"].detach().cpu().numpy()
else:
label_ids = np.append(label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if len(eval_losses) > 0:
metrics["loss"] = np.mean(eval_losses)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
| 42.679785 | 158 | 0.619583 | import json
import time
import logging
import os
import random
import re
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from tqdm import tqdm, trange
import onnxruntime
from onnxruntime.capi.ort_trainer import ORTTrainer, IODescription, ModelDescription
from onnxruntime.capi.ort_trainer import LossScaler
from .data.data_collator import DataCollator, DefaultDataCollator
from .modeling_utils import PreTrainedModel
from .training_args import TrainingArguments
from .trainer import PredictionOutput, TrainOutput, EvalPrediction, set_seed
from azureml.core.run import Run
run = Run.get_context()
try:
from torch.utils.tensorboard import SummaryWriter
_has_tensorboard = True
except ImportError:
try:
from tensorboardX import SummaryWriter
_has_tensorboard = True
except ImportError:
_has_tensorboard = False
def is_tensorboard_available():
return _has_tensorboard
logger = logging.getLogger(__name__)
PREFIX_CHECKPOINT_DIR = "ort_checkpoint"
class linear_schedule_with_warmup():
def __init__(self, num_warmup_steps, num_training_steps, last_epoch=-1):
self.num_warmup_steps = num_warmup_steps
self.num_training_steps = num_training_steps
self.last_epoch = last_epoch
def lr_lambda(self, current_step):
if current_step < self.num_warmup_steps:
return float(current_step) / float(max(1, self.num_warmup_steps))
return max(
0.0, float(self.num_training_steps - current_step) / float(max(1, self.num_training_steps - self.num_warmup_steps))
)
def get_lr_this_step(self, current_step, base_lr):
return self.lr_lambda(current_step) * base_lr
class OrtTrainer:
model: PreTrainedModel
args: TrainingArguments
data_collator: DataCollator
train_dataset: Optional[Dataset]
eval_dataset: Optional[Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
tb_writer: Optional["SummaryWriter"] = None
def __init__(
self,
model: PreTrainedModel,
args: TrainingArguments,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
self.model = model
self.args = args
if data_collator is not None:
self.data_collator = data_collator
else:
self.data_collator = DefaultDataCollator()
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
if is_tensorboard_available() and self.is_world_master():
self.tb_writer = SummaryWriter(log_dir=self.args.logging_dir)
if not is_tensorboard_available():
logger.warning(
"You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
)
set_seed(self.args.seed)
onnxruntime.set_seed(self.args.seed)
if self.is_world_master():
os.makedirs(self.args.output_dir, exist_ok=True)
torch.cuda.set_device(self.args.local_rank)
self.ort_model = self.to_ort_model(model, model.config, args)
def update_torch_model(self,):
if self.ort_model:
logger.info(
"Updating weights of torch model from ORT model."
)
ort_state_dict = self.ort_model.state_dict()
self.model.load_state_dict(ort_state_dict, strict=False)
else:
logger.warning(
"No ORT model found to update weights from, assuming torch model is up to date."
)
def gpt2_model_description(self,n_head, vocab_size, n_hidden, n_layer, n_ctx, batch_size):
logger.info("****num of head is: {}".format(n_head))
logger.info("****vocab size is: {}".format(vocab_size))
logger.info("****num of hidden layer is: {}".format(n_hidden))
logger.info("****num of layer is: {}".format(n_layer))
logger.info("****seq length is: {}".format(n_ctx))
input_ids_desc = IODescription('input_ids', [batch_size, n_ctx], torch.int64, num_classes = vocab_size)
labels_desc = IODescription('labels', [batch_size, n_ctx], torch.int64, num_classes = vocab_size)
loss_desc = IODescription('loss', [], torch.float32)
return ModelDescription([input_ids_desc, labels_desc],
[loss_desc])
def ort_trainer_learning_rate_description(self):
return IODescription('Learning_Rate', [1,], torch.float32)
def to_ort_model(self,model, config, args):
model_desc = self.gpt2_model_description(config.n_head, config.vocab_size, config.n_embd, config.n_layer, config.n_ctx, args.per_gpu_train_batch_size)
learning_rate_description = self.ort_trainer_learning_rate_description()
def map_optimizer_attributes(name):
no_decay_keys = ["bias", "gamma", "beta", "LayerNorm"]
no_decay = False
for no_decay_key in no_decay_keys:
if no_decay_key in name:
no_decay = True
break
if no_decay:
return {"alpha": 0.9, "beta": 0.999, "lambda": 0.0, "epsilon": args.adam_epsilon}
else:
return {"alpha": 0.9, "beta": 0.999, "lambda": args.weight_decay, "epsilon": args.adam_epsilon}
from onnxruntime.capi._pybind_state import set_cuda_device_id, set_arena_extend_strategy, ArenaExtendStrategy
set_arena_extend_strategy(ArenaExtendStrategy.kSameAsRequested)
set_cuda_device_id(self.args.local_rank)
model = ORTTrainer(model, None, model_desc, "AdamOptimizer",
map_optimizer_attributes,
learning_rate_description,
args.device,
gradient_accumulation_steps=args.gradient_accumulation_steps,
world_rank = self.args.world_rank,
world_size = self.args.world_size,
use_mixed_precision = self.args.fp16,
allreduce_post_accumulation = True,
_opset_version=12
)
logger.info("****************************Model converted to ORT")
return model
def get_train_dataloader(self) -> DataLoader:
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = (
RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset)
)
return DataLoader(
self.train_dataset,
batch_size=self.args.per_gpu_train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
return DataLoader(
eval_dataset if eval_dataset is not None else self.eval_dataset,
batch_size=self.args.eval_batch_size,
shuffle=False,
collate_fn=self.data_collator.collate_batch,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
shuffle=False,
collate_fn=self.data_collator.collate_batch,
)
def train(self, model_path: Optional[str] = None):
train_dataloader = self.get_train_dataloader()
if self.args.max_steps > 0:
t_total = self.args.max_steps
num_train_epochs = (
self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
)
else:
t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)
num_train_epochs = self.args.num_train_epochs
scheduler = linear_schedule_with_warmup(num_warmup_steps=self.args.warmup_steps, num_training_steps=t_total)
loss_scaler = LossScaler(self.ort_model.loss_scale_input_name, True, up_scale_window=2000, loss_scale=float(1 << 20)) if self.args.fp16 else 1
model = self.ort_model
if self.tb_writer is not None:
self.tb_writer.add_text("args", self.args.to_json_string())
if self.is_world_master():
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataloader.dataset))
logger.info(" Num Epochs = %d", num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", self.args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
self.args.train_batch_size
* self.args.gradient_accumulation_steps
* (self.args.world_size if self.args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
if model_path is not None:
try:
global_step = int(model_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // self.args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
global_step = 0
logger.info(" Starting fine-tuning.")
tr_loss = 0.0
logging_loss = 0.0
global_batch_train_start = time.time()
train_iterator = trange(
epochs_trained, int(num_train_epochs), desc="Epoch", disable=self.args.local_rank not in [-1, 0],
)
for epoch in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=self.args.local_rank not in [-1, 0])
for step, inputs in enumerate(epoch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if len(inputs['input_ids']) < self.args.per_gpu_train_batch_size:
logger.info('Skipping incomplete batch...')
continue
learning_rate = torch.tensor([scheduler.get_lr_this_step(global_step, base_lr = self.args.learning_rate)])
loss, all_finite = self._training_step(model, inputs, learning_rate, loss_scaler)
tr_loss += loss
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
len(epoch_iterator) <= self.args.gradient_accumulation_steps
and (step + 1) == len(epoch_iterator)
):
if self.args.fp16:
loss_scaler.update_loss_scale(all_finite.item())
global_step += 1
global_batch_train_duration = time.time() - global_batch_train_start
global_batch_train_start = time.time()
if self.args.local_rank in [-1, 0]:
if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or (
global_step == 1 and self.args.logging_first_step
):
logs = {}
loss_avg = (tr_loss - logging_loss) / (self.args.logging_steps * self.args.gradient_accumulation_steps)
logs["learning_rate"] = learning_rate.item()
logs["loss"] = loss_avg
logs["global_step"] = global_step
logs["global_step_time"] = global_batch_train_duration
logging_loss = tr_loss
if self.tb_writer:
for k, v in logs.items():
self.tb_writer.add_scalar(k, v, global_step)
run.log(k,v)
epoch_iterator.write(json.dumps({**logs, **{"step": global_step}}))
if self.args.save_steps > 0 and global_step % self.args.save_steps == 0:
if hasattr(model, "module"):
assert model.module is self.ort_model
else:
assert model is self.ort_model
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{global_step}")
self.save_model(output_dir)
if self.args.max_steps > 0 and global_step > self.args.max_steps:
epoch_iterator.close()
break
if self.args.max_steps > 0 and global_step > self.args.max_steps:
train_iterator.close()
break
if self.tb_writer:
self.tb_writer.close()
self.update_torch_model()
del(self.ort_model)
self.ort_model = None
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
return TrainOutput(global_step, tr_loss / global_step)
def _training_step(
self, model: nn.Module, inputs: Dict[str, torch.Tensor], learning_rate, loss_scaler
) -> float:
model.train()
if self.args.fp16:
loss_scale = torch.tensor([loss_scaler.loss_scale_])
result = model(inputs['input_ids'],inputs['labels'], learning_rate, loss_scale)
else:
result = model(inputs['input_ids'],inputs['labels'], learning_rate)
all_finite = None
if isinstance(result, (list, tuple)):
loss = result[0]
all_finite = result[-1]
else:
loss = result
return loss.item(), all_finite
def is_world_master(self) -> bool:
return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
if self.is_world_master():
self._save(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
self.update_torch_model()
if not isinstance(self.model, PreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self._prediction_loop(eval_dataloader, description="Evaluation")
return output.metrics
def predict(self, test_dataset: Dataset) -> PredictionOutput:
test_dataloader = self.get_test_dataloader(test_dataset)
return self._prediction_loop(test_dataloader, description="Prediction")
def _prediction_loop(
self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
self.update_torch_model()
if self.args.n_gpu > 1 and not isinstance(self.model, torch.nn.DataParallel):
model = torch.nn.DataParallel(self.model)
else:
model = self.model
model.to(self.args.device)
if self.is_world_master():
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", len(dataloader.dataset))
logger.info(" Batch size = %d", dataloader.batch_size)
eval_losses: List[float] = []
preds: np.ndarray = None
label_ids: np.ndarray = None
model.eval()
for inputs in tqdm(dataloader, desc=description):
has_labels = any(inputs.get(k) is not None for k in ["labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if not prediction_loss_only:
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
if inputs.get("labels") is not None:
if label_ids is None:
label_ids = inputs["labels"].detach().cpu().numpy()
else:
label_ids = np.append(label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if len(eval_losses) > 0:
metrics["loss"] = np.mean(eval_losses)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
| true | true |
1c346472665faf8d7a4d71f45fe1217ce194b535 | 3,029 | py | Python | pibooth/scripts/regenerate.py | IAmCorbin/pibooth | c73f379ca86ce1ce73a1e4816e41e74349ae5a30 | [
"MIT"
] | null | null | null | pibooth/scripts/regenerate.py | IAmCorbin/pibooth | c73f379ca86ce1ce73a1e4816e41e74349ae5a30 | [
"MIT"
] | 1 | 2019-12-13T18:29:47.000Z | 2019-12-13T18:29:47.000Z | pibooth/scripts/regenerate.py | IAmCorbin/pibooth | c73f379ca86ce1ce73a1e4816e41e74349ae5a30 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Pibooth picture regeneration module.
"""
import os
from os import path as osp
from PIL import Image
from pibooth.utils import LOGGER, configure_logging
from pibooth.config import PiConfigParser
from pibooth.pictures import get_picture_maker
def get_captures(images_folder):
    """Open every image file found in ``images_folder``.

    Entries are processed in sorted filename order so the capture sequence is
    deterministic — ``os.listdir`` returns entries in arbitrary order, which
    previously could shuffle the captures in the regenerated picture
    (raw capture filenames are assumed to sort in shot order — TODO confirm
    against the capture-naming scheme).

    Files that PIL cannot open as images (including sub-directories) raise
    ``OSError`` and are skipped with an info log.

    :param images_folder: path of the folder containing the raw captures
    :type images_folder: str
    :return: list of opened :class:`PIL.Image.Image` objects (left open on
        purpose: the picture maker reads their pixel data later)
    :rtype: list
    """
    captures = []
    for capture_path in sorted(os.listdir(images_folder)):
        try:
            image = Image.open(osp.join(images_folder, capture_path))
            captures.append(image)
        except OSError:
            # Not an image (or a directory): log and move on.
            LOGGER.info("File %s doesn't seem to be an image", capture_path)
    return captures
def regenerate_all_images(config):
    """Rebuild every final pibooth picture from the stored raw captures.

    Walks each sub-folder of ``<directory>/raw``, loads its captures, and
    composes a new ``<folder>_pibooth.jpg`` next to the ``raw`` directory
    using the current configuration (backgrounds, overlays, footer texts,
    cropping, orientation).

    Folders whose capture count matches neither configured choice are
    skipped with a warning.

    :param config: parsed pibooth configuration
    :type config: :class:`PiConfigParser`
    """
    pictures_dir = config.getpath('GENERAL', 'directory')
    capture_choices = config.gettuple('PICTURE', 'captures', int, 2)
    backgrounds = config.gettuple('PICTURE', 'backgrounds', ('color', 'path'), 2)
    overlays = config.gettuple('PICTURE', 'overlays', 'path', 2)

    texts = [config.get('PICTURE', 'footer_text1').strip('"'),
             config.get('PICTURE', 'footer_text2').strip('"')]
    colors = config.gettuple('PICTURE', 'text_colors', 'color', len(texts))
    text_fonts = config.gettuple('PICTURE', 'text_fonts', str, len(texts))
    alignments = config.gettuple('PICTURE', 'text_alignments', str, len(texts))

    raw_dir = osp.join(pictures_dir, 'raw')
    for folder_name in os.listdir(raw_dir):
        folder_path = osp.join(raw_dir, folder_name)
        if not osp.isdir(folder_path):
            continue

        captures = get_captures(folder_path)
        LOGGER.info("Generating image from raws in folder %s", folder_path)

        if len(captures) not in capture_choices:
            LOGGER.warning("Folder %s doesn't contain the correct number of pictures", folder_path)
            continue

        # Pick background/overlay matching the capture count (first match,
        # mirroring the original if/elif ordering).
        choice_index = capture_choices.index(len(captures))

        maker = get_picture_maker(captures, config.get('PICTURE', 'orientation'))
        maker.set_background(backgrounds[choice_index])
        if any(text != '' for text in texts):
            for params in zip(texts, text_fonts, colors, alignments):
                maker.add_text(*params)
        if config.getboolean('PICTURE', 'captures_cropping'):
            maker.set_cropping()
        if overlays[choice_index]:
            maker.set_overlay(overlays[choice_index])

        maker.save(osp.join(pictures_dir, folder_name + "_pibooth.jpg"))
def main():
    """Application entry point.

    Sets up logging, loads the default user configuration and regenerates
    every picture from the stored raw captures.
    """
    configure_logging()
    regenerate_all_images(PiConfigParser("~/.config/pibooth/pibooth.cfg"))


if __name__ == "__main__":
    main()
| 34.420455 | 108 | 0.658963 |
import os
from os import path as osp
from PIL import Image
from pibooth.utils import LOGGER, configure_logging
from pibooth.config import PiConfigParser
from pibooth.pictures import get_picture_maker
def get_captures(images_folder):
captures_paths = os.listdir(images_folder)
captures = []
for capture_path in captures_paths:
try:
image = Image.open(osp.join(images_folder, capture_path))
captures.append(image)
except OSError:
LOGGER.info("File %s doesn't seem to be an image", capture_path)
return captures
def regenerate_all_images(config):
captures_folders = config.getpath('GENERAL', 'directory')
capture_choices = config.gettuple('PICTURE', 'captures', int, 2)
backgrounds = config.gettuple('PICTURE', 'backgrounds', ('color', 'path'), 2)
overlays = config.gettuple('PICTURE', 'overlays', 'path', 2)
texts = [config.get('PICTURE', 'footer_text1').strip('"'),
config.get('PICTURE', 'footer_text2').strip('"')]
colors = config.gettuple('PICTURE', 'text_colors', 'color', len(texts))
text_fonts = config.gettuple('PICTURE', 'text_fonts', str, len(texts))
alignments = config.gettuple('PICTURE', 'text_alignments', str, len(texts))
# Part that fetch the captures
for captures_folder in os.listdir(osp.join(captures_folders, 'raw')):
captures_folder_path = osp.join(captures_folders, 'raw', captures_folder)
if not osp.isdir(captures_folder_path):
continue
captures = get_captures(captures_folder_path)
LOGGER.info("Generating image from raws in folder %s", captures_folder_path)
if len(captures) == capture_choices[0]:
overlay = overlays[0]
background = backgrounds[0]
elif len(captures) == capture_choices[1]:
overlay = overlays[1]
background = backgrounds[1]
else:
LOGGER.warning("Folder %s doesn't contain the correct number of pictures", captures_folder_path)
continue
maker = get_picture_maker(captures, config.get('PICTURE', 'orientation'))
maker.set_background(background)
if any(elem != '' for elem in texts):
for params in zip(texts, text_fonts, colors, alignments):
maker.add_text(*params)
if config.getboolean('PICTURE', 'captures_cropping'):
maker.set_cropping()
if overlay:
maker.set_overlay(overlay)
picture_file = osp.join(captures_folders, captures_folder + "_pibooth.jpg")
maker.save(picture_file)
def main():
configure_logging()
config = PiConfigParser("~/.config/pibooth/pibooth.cfg")
regenerate_all_images(config)
if __name__ == "__main__":
main()
| true | true |
1c3467031d6a858936ba06df5f76eb7581feb8c5 | 1,685 | py | Python | sdk/python/lib/pulumi/invoke.py | Dominik-K/pulumi | 3621c01f4becf75e24bf937bbda69ff8aaf6b5f7 | [
"Apache-2.0"
] | null | null | null | sdk/python/lib/pulumi/invoke.py | Dominik-K/pulumi | 3621c01f4becf75e24bf937bbda69ff8aaf6b5f7 | [
"Apache-2.0"
] | null | null | null | sdk/python/lib/pulumi/invoke.py | Dominik-K/pulumi | 3621c01f4becf75e24bf937bbda69ff8aaf6b5f7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
class InvokeOptions:
    """
    Options bag controlling the behavior of a call to ``runtime.invoke``.
    """

    parent: Optional['Resource']
    """
    Optional parent resource from which default options for this invoke are
    derived (e.g. the default provider to use).
    """

    provider: Optional['ProviderResource']
    """
    Optional provider for this invocation. When omitted, the default provider
    of the invoked function's package is used.
    """

    def __init__(self, parent: Optional['Resource'] = None, provider: Optional['ProviderResource'] = None) -> None:
        """
        :param Optional[Resource] parent: Optional parent resource supplying
            default options for this invoke (e.g. the default provider to use).
        :param Optional[ProviderResource] provider: Optional provider for this
            invocation; defaults to the invoked function's package provider.
        """
        self.parent, self.provider = parent, provider
| 43.205128 | 118 | 0.705638 |
from typing import Optional
class InvokeOptions:
parent: Optional['Resource']
provider: Optional['ProviderResource']
def __init__(self, parent: Optional['Resource'] = None, provider: Optional['ProviderResource'] = None) -> None:
self.parent = parent
self.provider = provider
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.