Dataset columns (name, dtype, observed range):

  id                int32    0 .. 252k
  repo              string   length 7 .. 55
  path              string   length 4 .. 127
  func_name         string   length 1 .. 88
  original_string   string   length 75 .. 19.8k
  language          string   1 class
  code              string   length 51 .. 19.8k
  code_tokens       list
  docstring         string   length 3 .. 17.3k
  docstring_tokens  list
  sha               string   length 40 .. 40
  url               string   length 87 .. 242
242,600
wal-e/wal-e
wal_e/worker/pg/psql_worker.py
PgBackupStatements.run_start_backup
def run_start_backup(cls): """ Connects to a server and attempts to start a hot backup Yields the WAL information in a dictionary for bookkeeping and recording. """ def handler(popen): assert popen.returncode != 0 raise UserException('Could not start hot backup') # The difficulty of getting a timezone-stamped, UTC, # ISO-formatted datetime is downright embarrassing. # # See http://bugs.python.org/issue5094 label = 'freeze_start_' + (datetime.datetime.utcnow() .replace(tzinfo=UTC()).isoformat()) return cls._dict_transform(psql_csv_run( "SELECT file_name, " " lpad(file_offset::text, 8, '0') AS file_offset " "FROM pg_{0}file_name_offset(" " pg_start_backup('{1}'))".format(cls._wal_name(), label), error_handler=handler))
python
def run_start_backup(cls):
    def handler(popen):
        assert popen.returncode != 0
        raise UserException('Could not start hot backup')

    # The difficulty of getting a timezone-stamped, UTC,
    # ISO-formatted datetime is downright embarrassing.
    #
    # See http://bugs.python.org/issue5094
    label = 'freeze_start_' + (datetime.datetime.utcnow()
                               .replace(tzinfo=UTC()).isoformat())

    return cls._dict_transform(psql_csv_run(
        "SELECT file_name, "
        " lpad(file_offset::text, 8, '0') AS file_offset "
        "FROM pg_{0}file_name_offset("
        " pg_start_backup('{1}'))".format(cls._wal_name(), label),
        error_handler=handler))
[ "def", "run_start_backup", "(", "cls", ")", ":", "def", "handler", "(", "popen", ")", ":", "assert", "popen", ".", "returncode", "!=", "0", "raise", "UserException", "(", "'Could not start hot backup'", ")", "# The difficulty of getting a timezone-stamped, UTC,", "# I...
Connects to a server and attempts to start a hot backup. Yields the WAL information in a dictionary for bookkeeping and recording.
[ "Connects", "to", "a", "server", "and", "attempts", "to", "start", "a", "hot", "backup" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/psql_worker.py#L109-L133
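The one fiddly step above, the backup label, can be reproduced standalone. A minimal sketch, using the standard library's datetime.timezone.utc as a stand-in for wal-e's UTC() helper:

import datetime

# A timezone-stamped, UTC, ISO-formatted label, as built in run_start_backup.
label = 'freeze_start_' + (datetime.datetime.utcnow()
                           .replace(tzinfo=datetime.timezone.utc).isoformat())
print(label)  # e.g. freeze_start_2017-06-05T12:34:56.789012+00:00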
242,601
wal-e/wal-e
wal_e/worker/pg/psql_worker.py
PgBackupStatements.run_stop_backup
def run_stop_backup(cls): """ Stop a hot backup, if it was running, or error Return the last WAL file name and position that is required to gain consistency on the captured heap. """ def handler(popen): assert popen.returncode != 0 raise UserException('Could not stop hot backup') return cls._dict_transform(psql_csv_run( "SELECT file_name, " " lpad(file_offset::text, 8, '0') AS file_offset " "FROM pg_{0}file_name_offset(" " pg_stop_backup())".format(cls._wal_name()), error_handler=handler))
python
def run_stop_backup(cls):
    def handler(popen):
        assert popen.returncode != 0
        raise UserException('Could not stop hot backup')

    return cls._dict_transform(psql_csv_run(
        "SELECT file_name, "
        " lpad(file_offset::text, 8, '0') AS file_offset "
        "FROM pg_{0}file_name_offset("
        " pg_stop_backup())".format(cls._wal_name()),
        error_handler=handler))
[ "def", "run_stop_backup", "(", "cls", ")", ":", "def", "handler", "(", "popen", ")", ":", "assert", "popen", ".", "returncode", "!=", "0", "raise", "UserException", "(", "'Could not stop hot backup'", ")", "return", "cls", ".", "_dict_transform", "(", "psql_cs...
Stop a hot backup, if it was running, or error. Return the last WAL file name and position that is required to gain consistency on the captured heap.
[ "Stop", "a", "hot", "backup", "if", "it", "was", "running", "or", "error" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/psql_worker.py#L136-L153
242,602
wal-e/wal-e
wal_e/blobstore/s3/calling_format.py
_is_ipv4_like
def _is_ipv4_like(s): """Find if a string superficially looks like an IPv4 address. AWS documentation plays it fast and loose with this; in other regions, it seems like even non-valid IPv4 addresses (in particular, ones that possess decimal numbers out of range for IPv4) are rejected. """ parts = s.split('.') if len(parts) != 4: return False for part in parts: try: int(part) except ValueError: return False return True
python
def _is_ipv4_like(s):
    parts = s.split('.')

    if len(parts) != 4:
        return False

    for part in parts:
        try:
            int(part)
        except ValueError:
            return False

    return True
[ "def", "_is_ipv4_like", "(", "s", ")", ":", "parts", "=", "s", ".", "split", "(", "'.'", ")", "if", "len", "(", "parts", ")", "!=", "4", ":", "return", "False", "for", "part", "in", "parts", ":", "try", ":", "int", "(", "part", ")", "except", "...
Find if a string superficially looks like an IPv4 address. AWS documentation plays it fast and loose with this; in other regions, it seems like even non-valid IPv4 addresses (in particular, ones that possess decimal numbers out of range for IPv4) are rejected.
[ "Find", "if", "a", "string", "superficially", "looks", "like", "an", "IPv4", "address", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/s3/calling_format.py#L43-L62
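A quick check of the heuristic, using a local copy of the function so the snippet runs without installing wal-e:

def _is_ipv4_like(s):
    parts = s.split('.')
    if len(parts) != 4:
        return False
    for part in parts:
        try:
            int(part)
        except ValueError:
            return False
    return True

# "Superficially" is the operative word: out-of-range octets still pass,
# which is exactly the looseness the docstring warns about.
assert _is_ipv4_like('192.168.0.1')
assert _is_ipv4_like('999.999.999.999')
assert not _is_ipv4_like('example.com')
assert not _is_ipv4_like('1.2.3')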
242,603
wal-e/wal-e
wal_e/blobstore/s3/calling_format.py
_is_mostly_subdomain_compatible
def _is_mostly_subdomain_compatible(bucket_name): """Returns True if SubdomainCallingFormat can be used...mostly This checks to make sure that putting aside certificate validation issues that a bucket_name is able to use the SubdomainCallingFormat. """ return (bucket_name.lower() == bucket_name and len(bucket_name) >= 3 and len(bucket_name) <= 63 and '_' not in bucket_name and '..' not in bucket_name and '-.' not in bucket_name and '.-' not in bucket_name and not bucket_name.startswith('-') and not bucket_name.endswith('-') and not bucket_name.startswith('.') and not bucket_name.endswith('.') and not _is_ipv4_like(bucket_name))
python
def _is_mostly_subdomain_compatible(bucket_name):
    return (bucket_name.lower() == bucket_name and
            len(bucket_name) >= 3 and
            len(bucket_name) <= 63 and
            '_' not in bucket_name and
            '..' not in bucket_name and
            '-.' not in bucket_name and
            '.-' not in bucket_name and
            not bucket_name.startswith('-') and
            not bucket_name.endswith('-') and
            not bucket_name.startswith('.') and
            not bucket_name.endswith('.') and
            not _is_ipv4_like(bucket_name))
[ "def", "_is_mostly_subdomain_compatible", "(", "bucket_name", ")", ":", "return", "(", "bucket_name", ".", "lower", "(", ")", "==", "bucket_name", "and", "len", "(", "bucket_name", ")", ">=", "3", "and", "len", "(", "bucket_name", ")", "<=", "63", "and", "...
Returns True if SubdomainCallingFormat can be used...mostly. This checks that, certificate validation issues aside, a bucket_name is able to use the SubdomainCallingFormat.
[ "Returns", "True", "if", "SubdomainCallingFormat", "can", "be", "used", "...", "mostly" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/s3/calling_format.py#L65-L83
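A condensed paraphrase of the two predicates (isdigit() is slightly stricter than int(), but close enough for illustration), exercised against a few bucket names:

def _is_ipv4_like(s):
    parts = s.split('.')
    return len(parts) == 4 and all(p.isdigit() for p in parts)

def _is_mostly_subdomain_compatible(name):
    return (name.lower() == name
            and 3 <= len(name) <= 63
            and '_' not in name and '..' not in name
            and '-.' not in name and '.-' not in name
            and not name.startswith(('-', '.'))
            and not name.endswith(('-', '.'))
            and not _is_ipv4_like(name))

assert _is_mostly_subdomain_compatible('my-wal-e-backups')
assert not _is_mostly_subdomain_compatible('My_Bucket')  # case and underscore
assert not _is_mostly_subdomain_compatible('10.0.0.1')   # IPv4-like
# Dotted names pass this check; the TLS trouble with dots is handled
# separately in from_store_name below.
assert _is_mostly_subdomain_compatible('bucket.with.dots')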
242,604
wal-e/wal-e
wal_e/blobstore/s3/calling_format.py
_connect_secureish
def _connect_secureish(*args, **kwargs): """Connect using the safest available options. This turns on encryption (works in all supported boto versions) and certificate validation (in the subset of supported boto versions that can handle certificate validation, namely, those after 2.6.0). Versions below 2.6 don't support the validate_certs option to S3Connection, and enable it via configuration option just seems to cause an error. """ if tuple(int(x) for x in boto.__version__.split('.')) >= (2, 6, 0): kwargs['validate_certs'] = True kwargs['is_secure'] = True auth_region_name = kwargs.pop('auth_region_name', None) conn = connection.S3Connection(*args, **kwargs) if auth_region_name: conn.auth_region_name = auth_region_name return conn
python
def _connect_secureish(*args, **kwargs):
    if tuple(int(x) for x in boto.__version__.split('.')) >= (2, 6, 0):
        kwargs['validate_certs'] = True

    kwargs['is_secure'] = True

    auth_region_name = kwargs.pop('auth_region_name', None)

    conn = connection.S3Connection(*args, **kwargs)

    if auth_region_name:
        conn.auth_region_name = auth_region_name

    return conn
[ "def", "_connect_secureish", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "tuple", "(", "int", "(", "x", ")", "for", "x", "in", "boto", ".", "__version__", ".", "split", "(", "'.'", ")", ")", ">=", "(", "2", ",", "6", ",", "0", ...
Connect using the safest available options. This turns on encryption (works in all supported boto versions) and certificate validation (in the subset of supported boto versions that can handle certificate validation, namely, those after 2.6.0). Versions below 2.6 don't support the validate_certs option to S3Connection, and enabling it via a configuration option just seems to cause an error.
[ "Connect", "using", "the", "safest", "available", "options", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/s3/calling_format.py#L86-L109
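The version gate is the subtle part: it compares tuples of ints, not version strings. A standalone sketch (note, as an aside not from the source, that a non-numeric component such as '2.6.0b1' would make int() raise):

def supports_validate_certs(version_string):
    # Mirrors the boto.__version__ check above.
    return tuple(int(x) for x in version_string.split('.')) >= (2, 6, 0)

assert supports_validate_certs('2.6.0')
assert not supports_validate_certs('2.5.9')
# Tuple comparison gets this right; string comparison would not,
# since '2.38.0' < '2.6.0' lexicographically.
assert supports_validate_certs('2.38.0')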
242,605
wal-e/wal-e
wal_e/blobstore/s3/calling_format.py
from_store_name
def from_store_name(bucket_name, region=None): """Construct a CallingInfo value from a bucket name. This is useful to encapsulate the ugliness of setting up S3 connections, especially with regions and TLS certificates are involved. """ # Late-bind `region` for the sake of tests that inject the # AWS_REGION environment variable. if region is None: region = os.getenv('AWS_REGION') mostly_ok = _is_mostly_subdomain_compatible(bucket_name) if not mostly_ok: return CallingInfo( bucket_name=bucket_name, region=region, calling_format=connection.OrdinaryCallingFormat, ordinary_endpoint=must_resolve(region)) else: if '.' in bucket_name: # The bucket_name might have been DNS compatible, but once # dots are involved TLS certificate validations will # certainly fail even if that's the case. return CallingInfo( bucket_name=bucket_name, calling_format=connection.OrdinaryCallingFormat, region=region, ordinary_endpoint=must_resolve(region)) else: # If the bucket follows naming rules and has no dots in # the name, SubdomainCallingFormat can be used, with TLS, # world-wide. return CallingInfo( bucket_name=bucket_name, calling_format=connection.SubdomainCallingFormat, region=region, ordinary_endpoint=None) assert False
python
def from_store_name(bucket_name, region=None):
    # Late-bind `region` for the sake of tests that inject the
    # AWS_REGION environment variable.
    if region is None:
        region = os.getenv('AWS_REGION')

    mostly_ok = _is_mostly_subdomain_compatible(bucket_name)

    if not mostly_ok:
        return CallingInfo(
            bucket_name=bucket_name,
            region=region,
            calling_format=connection.OrdinaryCallingFormat,
            ordinary_endpoint=must_resolve(region))
    else:
        if '.' in bucket_name:
            # The bucket_name might have been DNS compatible, but once
            # dots are involved TLS certificate validations will
            # certainly fail even if that's the case.
            return CallingInfo(
                bucket_name=bucket_name,
                calling_format=connection.OrdinaryCallingFormat,
                region=region,
                ordinary_endpoint=must_resolve(region))
        else:
            # If the bucket follows naming rules and has no dots in
            # the name, SubdomainCallingFormat can be used, with TLS,
            # world-wide.
            return CallingInfo(
                bucket_name=bucket_name,
                calling_format=connection.SubdomainCallingFormat,
                region=region,
                ordinary_endpoint=None)

    assert False
[ "def", "from_store_name", "(", "bucket_name", ",", "region", "=", "None", ")", ":", "# Late-bind `region` for the sake of tests that inject the", "# AWS_REGION environment variable.", "if", "region", "is", "None", ":", "region", "=", "os", ".", "getenv", "(", "'AWS_REGI...
Construct a CallingInfo value from a bucket name. This is useful to encapsulate the ugliness of setting up S3 connections, especially when regions and TLS certificates are involved.
[ "Construct", "a", "CallingInfo", "value", "from", "a", "bucket", "name", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/s3/calling_format.py#L242-L282
242,606
wal-e/wal-e
wal_e/blobstore/s3/calling_format.py
CallingInfo.connect
def connect(self, creds): """Return a boto S3Connection set up with great care. This includes TLS settings, calling format selection, and region detection. The credentials are applied by the caller because in many cases (instance-profile IAM) it is possible for those credentials to fluctuate rapidly. By comparison, region fluctuations of a bucket name are not nearly so likely versus the gains of not looking up a bucket's region over and over. """ def _conn_help(*args, **kwargs): return _connect_secureish( *args, provider=creds, calling_format=self.calling_format(), auth_region_name=self.region, **kwargs) # If WALE_S3_ENDPOINT is set, do not attempt to guess # the right calling conventions and instead honor the explicit # settings within WALE_S3_ENDPOINT. impl = os.getenv('WALE_S3_ENDPOINT') if impl: return connection.S3Connection(**_s3connection_opts_from_uri(impl)) # Check if subdomain format compatible: if so, use the # BUCKETNAME.s3.amazonaws.com hostname to communicate with the # bucket. if self.calling_format is connection.SubdomainCallingFormat: return _conn_help(host='s3.amazonaws.com') # Check if OrdinaryCallingFormat compatible, but also see if # the endpoint has already been set, in which case only # setting the host= flag is necessary. assert self.calling_format is connection.OrdinaryCallingFormat assert self.ordinary_endpoint is not None return _conn_help(host=self.ordinary_endpoint)
python
def connect(self, creds):
    def _conn_help(*args, **kwargs):
        return _connect_secureish(
            *args,
            provider=creds,
            calling_format=self.calling_format(),
            auth_region_name=self.region,
            **kwargs)

    # If WALE_S3_ENDPOINT is set, do not attempt to guess
    # the right calling conventions and instead honor the explicit
    # settings within WALE_S3_ENDPOINT.
    impl = os.getenv('WALE_S3_ENDPOINT')
    if impl:
        return connection.S3Connection(**_s3connection_opts_from_uri(impl))

    # Check if subdomain format compatible: if so, use the
    # BUCKETNAME.s3.amazonaws.com hostname to communicate with the
    # bucket.
    if self.calling_format is connection.SubdomainCallingFormat:
        return _conn_help(host='s3.amazonaws.com')

    # Check if OrdinaryCallingFormat compatible, but also see if
    # the endpoint has already been set, in which case only
    # setting the host= flag is necessary.
    assert self.calling_format is connection.OrdinaryCallingFormat
    assert self.ordinary_endpoint is not None
    return _conn_help(host=self.ordinary_endpoint)
[ "def", "connect", "(", "self", ",", "creds", ")", ":", "def", "_conn_help", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_connect_secureish", "(", "*", "args", ",", "provider", "=", "creds", ",", "calling_format", "=", "self", ".", ...
Return a boto S3Connection set up with great care. This includes TLS settings, calling format selection, and region detection. The credentials are applied by the caller because in many cases (instance-profile IAM) it is possible for those credentials to fluctuate rapidly. By comparison, region fluctuations of a bucket name are not nearly so likely versus the gains of not looking up a bucket's region over and over.
[ "Return", "a", "boto", "S3Connection", "set", "up", "with", "great", "care", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/s3/calling_format.py#L191-L229
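The order of precedence in connect() reduces to a small decision function. A hypothetical sketch (the endpoint value shown is illustrative, not prescribed by the source):

import os

def pick_strategy(calling_format):
    if os.getenv('WALE_S3_ENDPOINT'):
        return 'explicit endpoint'              # operator override wins
    if calling_format == 'Subdomain':
        return 'host=s3.amazonaws.com'          # BUCKET.s3.amazonaws.com
    return 'host=<resolved regional endpoint>'  # OrdinaryCallingFormat

os.environ['WALE_S3_ENDPOINT'] = 'https+path://s3.example.com:443'
assert pick_strategy('Subdomain') == 'explicit endpoint'
del os.environ['WALE_S3_ENDPOINT']
assert pick_strategy('Subdomain') == 'host=s3.amazonaws.com'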
242,607
wal-e/wal-e
wal_e/blobstore/file/calling_format.py
remove_empty_dirs
def remove_empty_dirs(path): """ removes empty dirs under a given path """ for root, dirs, files in os.walk(path): for d in dirs: dir_path = os.path.join(root, d) if not os.listdir(dir_path): os.rmdir(dir_path)
python
def remove_empty_dirs(path):
    for root, dirs, files in os.walk(path):
        for d in dirs:
            dir_path = os.path.join(root, d)
            if not os.listdir(dir_path):
                os.rmdir(dir_path)
[ "def", "remove_empty_dirs", "(", "path", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "d", "in", "dirs", ":", "dir_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "d", ...
removes empty dirs under a given path
[ "removes", "empty", "dirs", "under", "a", "given", "path" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/file/calling_format.py#L6-L12
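Worth noticing: a single top-down walk does not cascade, so a directory that becomes empty only because its children were removed survives the pass. A runnable demonstration with a local copy:

import os
import shutil
import tempfile

def remove_empty_dirs(path):
    for root, dirs, files in os.walk(path):
        for d in dirs:
            dir_path = os.path.join(root, d)
            if not os.listdir(dir_path):
                os.rmdir(dir_path)

base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, 'a', 'b'))           # 'b' is empty, 'a' is not
open(os.path.join(base, 'keep.txt'), 'w').close()

remove_empty_dirs(base)
assert not os.path.exists(os.path.join(base, 'a', 'b'))  # removed
assert os.path.exists(os.path.join(base, 'a'))  # now empty, but walk has passed
shutil.rmtree(base)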
242,608
wal-e/wal-e
wal_e/blobstore/file/calling_format.py
ensure_dir_exists
def ensure_dir_exists(path): """ create a directory if required """ dir_path = os.path.dirname(path) if not os.path.exists(dir_path): os.makedirs(dir_path)
python
def ensure_dir_exists(path):
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
[ "def", "ensure_dir_exists", "(", "path", ")", ":", "dir_path", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dir_path", ")", ":", "os", ".", "makedirs", "(", "dir_path", ")" ]
create a directory if required
[ "create", "a", "directory", "if", "required" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/file/calling_format.py#L15-L19
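Note the dirname() call: the function prepares the parent directory of a target path, not the path itself. A runnable illustration (the file name is hypothetical):

import os
import shutil
import tempfile

def ensure_dir_exists(path):
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

base = tempfile.mkdtemp()
target = os.path.join(base, 'a', 'b', 'segment.lzo')
ensure_dir_exists(target)
assert os.path.isdir(os.path.dirname(target))  # .../a/b now exists
assert not os.path.exists(target)              # the file itself is not created
shutil.rmtree(base)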
242,609
wal-e/wal-e
wal_e/cmd.py
external_program_check
def external_program_check( to_check=frozenset([PSQL_BIN, LZOP_BIN, PV_BIN])): """ Validates the existence and basic working-ness of other programs Implemented because it is easy to get confusing error output when one does not install a dependency because of the fork-worker model that is both necessary for throughput and makes more obscure the cause of failures. This is intended to be a time and frustration saving measure. This problem has confused The Author in practice when switching rapidly between machines. """ could_not_run = [] error_msgs = [] def psql_err_handler(popen): assert popen.returncode != 0 error_msgs.append(textwrap.fill( 'Could not get a connection to the database: ' 'note that superuser access is required')) # Bogus error message that is re-caught and re-raised raise EnvironmentError('INTERNAL: Had problems running psql ' 'from external_program_check') with open(os.devnull, 'wb') as nullf: for program in to_check: try: if program is PSQL_BIN: psql_csv_run('SELECT 1', error_handler=psql_err_handler) else: if program is PV_BIN: extra_args = ['--quiet'] else: extra_args = [] proc = popen_sp([program] + extra_args, stdout=nullf, stderr=nullf, stdin=subprocess.PIPE) # Close stdin for processes that default to # reading from the pipe; the programs WAL-E uses # of this kind will terminate in this case. proc.stdin.close() proc.wait() except EnvironmentError: could_not_run.append(program) if could_not_run: error_msgs.append( 'Could not run the following programs, are they installed? ' + ', '.join(could_not_run)) if error_msgs: raise UserException( 'could not run one or more external programs WAL-E depends upon', '\n'.join(error_msgs)) return None
python
def external_program_check(
        to_check=frozenset([PSQL_BIN, LZOP_BIN, PV_BIN])):
    could_not_run = []
    error_msgs = []

    def psql_err_handler(popen):
        assert popen.returncode != 0
        error_msgs.append(textwrap.fill(
            'Could not get a connection to the database: '
            'note that superuser access is required'))

        # Bogus error message that is re-caught and re-raised
        raise EnvironmentError('INTERNAL: Had problems running psql '
                               'from external_program_check')

    with open(os.devnull, 'wb') as nullf:
        for program in to_check:
            try:
                if program is PSQL_BIN:
                    psql_csv_run('SELECT 1', error_handler=psql_err_handler)
                else:
                    if program is PV_BIN:
                        extra_args = ['--quiet']
                    else:
                        extra_args = []

                    proc = popen_sp([program] + extra_args,
                                    stdout=nullf, stderr=nullf,
                                    stdin=subprocess.PIPE)

                    # Close stdin for processes that default to
                    # reading from the pipe; the programs WAL-E uses
                    # of this kind will terminate in this case.
                    proc.stdin.close()
                    proc.wait()
            except EnvironmentError:
                could_not_run.append(program)

    if could_not_run:
        error_msgs.append(
            'Could not run the following programs, are they installed? ' +
            ', '.join(could_not_run))

    if error_msgs:
        raise UserException(
            'could not run one or more external programs WAL-E depends upon',
            '\n'.join(error_msgs))

    return None
[ "def", "external_program_check", "(", "to_check", "=", "frozenset", "(", "[", "PSQL_BIN", ",", "LZOP_BIN", ",", "PV_BIN", "]", ")", ")", ":", "could_not_run", "=", "[", "]", "error_msgs", "=", "[", "]", "def", "psql_err_handler", "(", "popen", ")", ":", ...
Validates the existence and basic working-ness of other programs. Implemented because it is easy to get confusing error output when one does not install a dependency, because of the fork-worker model that is both necessary for throughput and makes the cause of failures more obscure. This is intended to be a time and frustration saving measure. This problem has confused The Author in practice when switching rapidly between machines.
[ "Validates", "the", "existence", "and", "basic", "working", "-", "ness", "of", "other", "programs" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/cmd.py#L86-L146
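The core probe, minus the psql special case and wal-e's UserException plumbing, fits in a few lines. A standalone sketch (the program names are examples; 'cat' assumes a POSIX system):

import os
import subprocess

def can_run(program):
    try:
        with open(os.devnull, 'wb') as nullf:
            proc = subprocess.Popen([program], stdout=nullf, stderr=nullf,
                                    stdin=subprocess.PIPE)
            # Closing stdin makes pipe-reading programs terminate.
            proc.stdin.close()
            proc.wait()
        return True
    except EnvironmentError:
        return False

missing = [p for p in ('cat', 'no-such-program') if not can_run(p)]
print('missing:', missing)  # expected: ['no-such-program']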
242,610
wal-e/wal-e
wal_e/cmd.py
parse_boolean_envvar
def parse_boolean_envvar(val): """Parse a boolean environment variable.""" if not val or val.lower() in {'false', '0'}: return False elif val.lower() in {'true', '1'}: return True else: raise ValueError('Invalid boolean environment variable: %s' % val)
python
def parse_boolean_envvar(val):
    if not val or val.lower() in {'false', '0'}:
        return False
    elif val.lower() in {'true', '1'}:
        return True
    else:
        raise ValueError('Invalid boolean environment variable: %s' % val)
[ "def", "parse_boolean_envvar", "(", "val", ")", ":", "if", "not", "val", "or", "val", ".", "lower", "(", ")", "in", "{", "'false'", ",", "'0'", "}", ":", "return", "False", "elif", "val", ".", "lower", "(", ")", "in", "{", "'true'", ",", "'1'", "...
Parse a boolean environment variable.
[ "Parse", "a", "boolean", "environment", "variable", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/cmd.py#L161-L168
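The accepted vocabulary is deliberately small. Exercising a local copy:

def parse_boolean_envvar(val):
    if not val or val.lower() in {'false', '0'}:
        return False
    elif val.lower() in {'true', '1'}:
        return True
    else:
        raise ValueError('Invalid boolean environment variable: %s' % val)

assert parse_boolean_envvar(None) is False    # unset variable
assert parse_boolean_envvar('') is False      # empty string
assert parse_boolean_envvar('TRUE') is True   # case-insensitive
assert parse_boolean_envvar('0') is False
try:
    parse_boolean_envvar('yes')               # outside the vocabulary
except ValueError:
    pass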
242,611
wal-e/wal-e
wal_e/cmd.py
_config_hint_generate
def _config_hint_generate(optname, both_env_and_param): """Generate HINT language for missing configuration""" env = optname.replace('-', '_').upper() if both_env_and_param: option = '--' + optname.lower() return ('Pass "{0}" or set the environment variable "{1}".' .format(option, env)) else: return 'Set the environment variable {0}.'.format(env)
python
def _config_hint_generate(optname, both_env_and_param):
    env = optname.replace('-', '_').upper()

    if both_env_and_param:
        option = '--' + optname.lower()
        return ('Pass "{0}" or set the environment variable "{1}".'
                .format(option, env))
    else:
        return 'Set the environment variable {0}.'.format(env)
[ "def", "_config_hint_generate", "(", "optname", ",", "both_env_and_param", ")", ":", "env", "=", "optname", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "upper", "(", ")", "if", "both_env_and_param", ":", "option", "=", "'--'", "+", "optname", ".", ...
Generate HINT language for missing configuration
[ "Generate", "HINT", "language", "for", "missing", "configuration" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/cmd.py#L386-L395
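Exercising a local copy with illustrative option names (the names are examples, not an exhaustive list of wal-e's options):

def _config_hint_generate(optname, both_env_and_param):
    env = optname.replace('-', '_').upper()
    if both_env_and_param:
        option = '--' + optname.lower()
        return ('Pass "{0}" or set the environment variable "{1}".'
                .format(option, env))
    else:
        return 'Set the environment variable {0}.'.format(env)

assert (_config_hint_generate('aws-access-key-id', True) ==
        'Pass "--aws-access-key-id" or set the environment variable '
        '"AWS_ACCESS_KEY_ID".')
assert (_config_hint_generate('wale-s3-prefix', False) ==
        'Set the environment variable WALE_S3_PREFIX.')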
242,612
wal-e/wal-e
wal_e/cmd.py
render_subcommand
def render_subcommand(args): """Render a subcommand for human-centric viewing""" if args.subcommand == 'delete': return 'delete ' + args.delete_subcommand if args.subcommand in ('wal-prefetch', 'wal-push', 'wal-fetch'): return None return args.subcommand
python
def render_subcommand(args):
    if args.subcommand == 'delete':
        return 'delete ' + args.delete_subcommand

    if args.subcommand in ('wal-prefetch', 'wal-push', 'wal-fetch'):
        return None

    return args.subcommand
[ "def", "render_subcommand", "(", "args", ")", ":", "if", "args", ".", "subcommand", "==", "'delete'", ":", "return", "'delete '", "+", "args", ".", "delete_subcommand", "if", "args", ".", "subcommand", "in", "(", "'wal-prefetch'", ",", "'wal-push'", ",", "'w...
Render a subcommand for human-centric viewing
[ "Render", "a", "subcommand", "for", "human", "-", "centric", "viewing" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/cmd.py#L570-L578
242,613
wal-e/wal-e
wal_e/worker/worker_util.py
do_lzop_put
def do_lzop_put(creds, url, local_path, gpg_key): """ Compress and upload a given local path. :type url: string :param url: A (s3|wabs)://bucket/key style URL that is the destination :type local_path: string :param local_path: a path to a file to be compressed """ assert url.endswith('.lzo') blobstore = get_blobstore(storage.StorageLayout(url)) with tempfile.NamedTemporaryFile( mode='r+b', buffering=pipebuf.PIPE_BUF_BYTES) as tf: with pipeline.get_upload_pipeline( open(local_path, 'rb'), tf, gpg_key=gpg_key): pass tf.flush() clock_start = time.time() tf.seek(0) k = blobstore.uri_put_file(creds, url, tf) clock_finish = time.time() kib_per_second = format_kib_per_second( clock_start, clock_finish, k.size) return kib_per_second
python
def do_lzop_put(creds, url, local_path, gpg_key):
    assert url.endswith('.lzo')
    blobstore = get_blobstore(storage.StorageLayout(url))

    with tempfile.NamedTemporaryFile(
            mode='r+b', buffering=pipebuf.PIPE_BUF_BYTES) as tf:
        with pipeline.get_upload_pipeline(
                open(local_path, 'rb'), tf, gpg_key=gpg_key):
            pass

        tf.flush()

        clock_start = time.time()
        tf.seek(0)
        k = blobstore.uri_put_file(creds, url, tf)
        clock_finish = time.time()

        kib_per_second = format_kib_per_second(
            clock_start, clock_finish, k.size)

        return kib_per_second
[ "def", "do_lzop_put", "(", "creds", ",", "url", ",", "local_path", ",", "gpg_key", ")", ":", "assert", "url", ".", "endswith", "(", "'.lzo'", ")", "blobstore", "=", "get_blobstore", "(", "storage", ".", "StorageLayout", "(", "url", ")", ")", "with", "tem...
Compress and upload a given local path.

:type url: string
:param url: A (s3|wabs)://bucket/key style URL that is the destination

:type local_path: string
:param local_path: a path to a file to be compressed
[ "Compress", "and", "upload", "a", "given", "local", "path", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/worker_util.py#L16-L46
242,614
wal-e/wal-e
wal_e/worker/worker_util.py
do_lzop_get
def do_lzop_get(creds, url, path, decrypt, do_retry=True): """ Get and decompress an S3 or WABS URL This streams the content directly to lzop; the compressed version is never stored on disk. """ blobstore = get_blobstore(storage.StorageLayout(url)) return blobstore.do_lzop_get(creds, url, path, decrypt, do_retry=do_retry)
python
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
    blobstore = get_blobstore(storage.StorageLayout(url))
    return blobstore.do_lzop_get(creds, url, path, decrypt,
                                 do_retry=do_retry)
[ "def", "do_lzop_get", "(", "creds", ",", "url", ",", "path", ",", "decrypt", ",", "do_retry", "=", "True", ")", ":", "blobstore", "=", "get_blobstore", "(", "storage", ".", "StorageLayout", "(", "url", ")", ")", "return", "blobstore", ".", "do_lzop_get", ...
Get and decompress an S3 or WABS URL. This streams the content directly to lzop; the compressed version is never stored on disk.
[ "Get", "and", "decompress", "an", "S3", "or", "WABS", "URL" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/worker_util.py#L49-L58
242,615
wal-e/wal-e
wal_e/worker/base.py
_BackupList.find_all
def find_all(self, query): """A procedure to assist in finding or detailing specific backups Currently supports: * a backup name (base_number_number) * the psuedo-name LATEST, which finds the backup with the most recent modification date """ match = re.match(storage.BASE_BACKUP_REGEXP, query) if match is not None: for backup in iter(self): if backup.name == query: yield backup elif query == 'LATEST': all_backups = list(iter(self)) if not all_backups: return assert len(all_backups) > 0 all_backups.sort(key=lambda bi: bi.last_modified) yield all_backups[-1] else: raise exception.UserException( msg='invalid backup query submitted', detail='The submitted query operator was "{0}."' .format(query))
python
def find_all(self, query):
    match = re.match(storage.BASE_BACKUP_REGEXP, query)

    if match is not None:
        for backup in iter(self):
            if backup.name == query:
                yield backup
    elif query == 'LATEST':
        all_backups = list(iter(self))

        if not all_backups:
            return

        assert len(all_backups) > 0

        all_backups.sort(key=lambda bi: bi.last_modified)
        yield all_backups[-1]
    else:
        raise exception.UserException(
            msg='invalid backup query submitted',
            detail='The submitted query operator was "{0}."'
            .format(query))
[ "def", "find_all", "(", "self", ",", "query", ")", ":", "match", "=", "re", ".", "match", "(", "storage", ".", "BASE_BACKUP_REGEXP", ",", "query", ")", "if", "match", "is", "not", "None", ":", "for", "backup", "in", "iter", "(", "self", ")", ":", "...
A procedure to assist in finding or detailing specific backups.

Currently supports:
* a backup name (base_number_number)
* the pseudo-name LATEST, which finds the backup with the most recent modification date
[ "A", "procedure", "to", "assist", "in", "finding", "or", "detailing", "specific", "backups" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/base.py#L106-L138
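The LATEST branch reduces to a sort on last_modified. A standalone model with made-up backup records (ISO dates sort correctly as strings):

backups = [
    dict(name='base_000000010000000000000002_00000040', last_modified='2017-01-02'),
    dict(name='base_000000010000000000000009_00000040', last_modified='2017-03-01'),
    dict(name='base_000000010000000000000005_00000040', last_modified='2017-02-01'),
]
backups.sort(key=lambda bi: bi['last_modified'])
latest = backups[-1]  # oldest-to-newest sort; take the newest
assert latest['name'] == 'base_000000010000000000000009_00000040'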
242,616
wal-e/wal-e
wal_e/worker/base.py
_DeleteFromContext._delete_wals_before
def _delete_wals_before(self, segment_info): """ Delete all WAL files before segment_info. Doesn't delete any base-backup data. """ wal_key_depth = self.layout.wal_directory().count('/') + 1 for key in self._backup_list(prefix=self.layout.wal_directory()): key_name = self.layout.key_name(key) bucket = self._container_name(key) url = '{scm}://{bucket}/{name}'.format(scm=self.layout.scheme, bucket=bucket, name=key_name) key_parts = key_name.split('/') key_depth = len(key_parts) if key_depth != wal_key_depth: logger.warning( msg="skipping non-qualifying key in 'delete before'", detail=( 'The unexpected key is "{0}", and it appears to be ' 'at an unexpected depth.'.format(url)), hint=generic_weird_key_hint_message) elif key_depth == wal_key_depth: segment_match = (re.match(storage.SEGMENT_REGEXP + r'\.lzo', key_parts[-1])) label_match = (re.match(storage.SEGMENT_REGEXP + r'\.[A-F0-9]{8,8}.backup.lzo', key_parts[-1])) history_match = re.match(r'[A-F0-9]{8,8}\.history', key_parts[-1]) all_matches = [segment_match, label_match, history_match] non_matches = len(list(m for m in all_matches if m is None)) # These patterns are intended to be mutually # exclusive, so either one should match or none should # match. assert non_matches in (len(all_matches) - 1, len(all_matches)) if non_matches == len(all_matches): logger.warning( msg="skipping non-qualifying key in 'delete before'", detail=('The unexpected key is "{0}", and it appears ' 'not to match the WAL file naming pattern.' .format(url)), hint=generic_weird_key_hint_message) elif segment_match is not None: scanned_sn = self._groupdict_to_segment_number( segment_match.groupdict()) self._delete_if_before(segment_info, scanned_sn, key, 'a wal file') elif label_match is not None: scanned_sn = self._groupdict_to_segment_number( label_match.groupdict()) self._delete_if_before(segment_info, scanned_sn, key, 'a backup history file') elif history_match is not None: # History (timeline) files do not have any actual # WAL position information, so they are never # deleted. pass else: assert False else: assert False
python
def _delete_wals_before(self, segment_info):
    wal_key_depth = self.layout.wal_directory().count('/') + 1
    for key in self._backup_list(prefix=self.layout.wal_directory()):
        key_name = self.layout.key_name(key)
        bucket = self._container_name(key)
        url = '{scm}://{bucket}/{name}'.format(scm=self.layout.scheme,
                                               bucket=bucket,
                                               name=key_name)
        key_parts = key_name.split('/')
        key_depth = len(key_parts)

        if key_depth != wal_key_depth:
            logger.warning(
                msg="skipping non-qualifying key in 'delete before'",
                detail=(
                    'The unexpected key is "{0}", and it appears to be '
                    'at an unexpected depth.'.format(url)),
                hint=generic_weird_key_hint_message)
        elif key_depth == wal_key_depth:
            segment_match = (re.match(storage.SEGMENT_REGEXP + r'\.lzo',
                                      key_parts[-1]))
            label_match = (re.match(storage.SEGMENT_REGEXP +
                                    r'\.[A-F0-9]{8,8}.backup.lzo',
                                    key_parts[-1]))
            history_match = re.match(r'[A-F0-9]{8,8}\.history',
                                     key_parts[-1])

            all_matches = [segment_match, label_match, history_match]

            non_matches = len(list(m for m in all_matches if m is None))

            # These patterns are intended to be mutually
            # exclusive, so either one should match or none should
            # match.
            assert non_matches in (len(all_matches) - 1,
                                   len(all_matches))

            if non_matches == len(all_matches):
                logger.warning(
                    msg="skipping non-qualifying key in 'delete before'",
                    detail=('The unexpected key is "{0}", and it appears '
                            'not to match the WAL file naming pattern.'
                            .format(url)),
                    hint=generic_weird_key_hint_message)
            elif segment_match is not None:
                scanned_sn = self._groupdict_to_segment_number(
                    segment_match.groupdict())
                self._delete_if_before(segment_info, scanned_sn, key,
                                       'a wal file')
            elif label_match is not None:
                scanned_sn = self._groupdict_to_segment_number(
                    label_match.groupdict())
                self._delete_if_before(segment_info, scanned_sn, key,
                                       'a backup history file')
            elif history_match is not None:
                # History (timeline) files do not have any actual
                # WAL position information, so they are never
                # deleted.
                pass
            else:
                assert False
        else:
            assert False
[ "def", "_delete_wals_before", "(", "self", ",", "segment_info", ")", ":", "wal_key_depth", "=", "self", ".", "layout", ".", "wal_directory", "(", ")", ".", "count", "(", "'/'", ")", "+", "1", "for", "key", "in", "self", ".", "_backup_list", "(", "prefix"...
Delete all WAL files before segment_info. Doesn't delete any base-backup data.
[ "Delete", "all", "WAL", "files", "before", "segment_info", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/base.py#L329-L393
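The key classification can be modeled standalone. This sketch assumes a pattern equivalent to wal-e's SEGMENT_REGEXP (a 24-hex-digit WAL name; the real regexp also captures the groups used to build segment numbers) and adds $ anchors as a simplification:

import re

SEGMENT = r'[0-9A-F]{24}'  # assumed stand-in for storage.SEGMENT_REGEXP

def classify(basename):
    if re.match(SEGMENT + r'\.lzo$', basename):
        return 'wal segment'
    if re.match(SEGMENT + r'\.[A-F0-9]{8}\.backup\.lzo$', basename):
        return 'backup label'
    if re.match(r'[A-F0-9]{8}\.history$', basename):
        return 'timeline history (never deleted)'
    return 'unexpected key'

assert classify('000000010000000000000002.lzo') == 'wal segment'
assert classify('000000010000000000000002.00000028.backup.lzo') == 'backup label'
assert classify('00000002.history') == 'timeline history (never deleted)'
assert classify('README') == 'unexpected key'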
242,617
wal-e/wal-e
wal_e/worker/base.py
_DeleteFromContext.delete_everything
def delete_everything(self): """Delete everything in a storage layout Named provocatively for a reason: can (and in fact intended to) cause irrecoverable loss of data. This can be used to: * Completely obliterate data from old WAL-E versions (i.e. layout.VERSION is an obsolete version) * Completely obliterate all backups (from a decommissioned database, for example) """ for k in self._backup_list(prefix=self.layout.basebackups()): self._maybe_delete_key(k, 'part of a base backup') for k in self._backup_list(prefix=self.layout.wal_directory()): self._maybe_delete_key(k, 'part of wal logs') if self.deleter: self.deleter.close()
python
def delete_everything(self):
    for k in self._backup_list(prefix=self.layout.basebackups()):
        self._maybe_delete_key(k, 'part of a base backup')

    for k in self._backup_list(prefix=self.layout.wal_directory()):
        self._maybe_delete_key(k, 'part of wal logs')

    if self.deleter:
        self.deleter.close()
[ "def", "delete_everything", "(", "self", ")", ":", "for", "k", "in", "self", ".", "_backup_list", "(", "prefix", "=", "self", ".", "layout", ".", "basebackups", "(", ")", ")", ":", "self", ".", "_maybe_delete_key", "(", "k", ",", "'part of a base backup'",...
Delete everything in a storage layout. Named provocatively for a reason: can (and in fact is intended to) cause irrecoverable loss of data.

This can be used to:
* Completely obliterate data from old WAL-E versions (i.e. layout.VERSION is an obsolete version)
* Completely obliterate all backups (from a decommissioned database, for example)
[ "Delete", "everything", "in", "a", "storage", "layout" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/base.py#L395-L415
242,618
wal-e/wal-e
wal_e/worker/base.py
_DeleteFromContext.delete_before
def delete_before(self, segment_info): """ Delete all base backups and WAL before a given segment This is the most commonly-used deletion operator; to delete old backups and WAL. """ # This will delete all base backup data before segment_info. self._delete_base_backups_before(segment_info) # This will delete all WAL segments before segment_info. self._delete_wals_before(segment_info) if self.deleter: self.deleter.close()
python
def delete_before(self, segment_info):
    # This will delete all base backup data before segment_info.
    self._delete_base_backups_before(segment_info)

    # This will delete all WAL segments before segment_info.
    self._delete_wals_before(segment_info)

    if self.deleter:
        self.deleter.close()
[ "def", "delete_before", "(", "self", ",", "segment_info", ")", ":", "# This will delete all base backup data before segment_info.", "self", ".", "_delete_base_backups_before", "(", "segment_info", ")", "# This will delete all WAL segments before segment_info.", "self", ".", "_del...
Delete all base backups and WAL before a given segment. This is the most commonly used deletion operator, meant to delete old backups and WAL.
[ "Delete", "all", "base", "backups", "and", "WAL", "before", "a", "given", "segment" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/base.py#L417-L433
242,619
wal-e/wal-e
wal_e/worker/base.py
_DeleteFromContext.delete_with_retention
def delete_with_retention(self, num_to_retain): """ Retain the num_to_retain most recent backups and delete all data before them. """ base_backup_sentinel_depth = self.layout.basebackups().count('/') + 1 # Sweep over base backup files, collecting sentinel files from # completed backups. completed_basebackups = [] for key in self._backup_list(prefix=self.layout.basebackups()): key_name = self.layout.key_name(key) key_parts = key_name.split('/') key_depth = len(key_parts) url = '{scheme}://{bucket}/{name}'.format( scheme=self.layout.scheme, bucket=self._container_name(key), name=key_name) if key_depth == base_backup_sentinel_depth: # This is a key at the depth of a base-backup-sentinel file. # Check to see if it matches the known form. match = re.match(storage.COMPLETE_BASE_BACKUP_REGEXP, key_parts[-1]) # If this isn't a base-backup-sentinel file, just ignore it. if match is None: continue # This key corresponds to a base-backup-sentinel file and # represents a completed backup. Grab its segment number. scanned_sn = \ self._groupdict_to_segment_number(match.groupdict()) completed_basebackups.append(dict( scanned_sn=scanned_sn, url=url)) # Sort the base backups from newest to oldest. basebackups = sorted( completed_basebackups, key=lambda backup: backup['scanned_sn'].as_an_integer, reverse=True) last_retained = None if len(basebackups) <= num_to_retain: detail = None if len(basebackups) == 0: msg = 'Not deleting any data.' detail = 'No existing base backups.' elif len(basebackups) == 1: last_retained = basebackups[-1] msg = 'Retaining existing base backup.' else: last_retained = basebackups[-1] msg = "Retaining all %d base backups." % len(basebackups) else: last_retained = basebackups[num_to_retain - 1] num_deleting = len(basebackups) - num_to_retain msg = "Deleting %d oldest base backups." % num_deleting detail = "Found %d total base backups." % len(basebackups) log_message = dict(msg=msg) if detail is not None: log_message['detail'] = detail if last_retained is not None: log_message['hint'] = \ "Deleting keys older than %s." % last_retained['url'] logger.info(**log_message) # This will delete all base backup and WAL data before # last_retained['scanned_sn']. if last_retained is not None: self._delete_base_backups_before(last_retained['scanned_sn']) self._delete_wals_before(last_retained['scanned_sn']) if self.deleter: self.deleter.close()
python
def delete_with_retention(self, num_to_retain):
    base_backup_sentinel_depth = self.layout.basebackups().count('/') + 1

    # Sweep over base backup files, collecting sentinel files from
    # completed backups.
    completed_basebackups = []
    for key in self._backup_list(prefix=self.layout.basebackups()):
        key_name = self.layout.key_name(key)
        key_parts = key_name.split('/')
        key_depth = len(key_parts)
        url = '{scheme}://{bucket}/{name}'.format(
            scheme=self.layout.scheme,
            bucket=self._container_name(key),
            name=key_name)

        if key_depth == base_backup_sentinel_depth:
            # This is a key at the depth of a base-backup-sentinel file.
            # Check to see if it matches the known form.
            match = re.match(storage.COMPLETE_BASE_BACKUP_REGEXP,
                             key_parts[-1])

            # If this isn't a base-backup-sentinel file, just ignore it.
            if match is None:
                continue

            # This key corresponds to a base-backup-sentinel file and
            # represents a completed backup. Grab its segment number.
            scanned_sn = \
                self._groupdict_to_segment_number(match.groupdict())
            completed_basebackups.append(dict(
                scanned_sn=scanned_sn,
                url=url))

    # Sort the base backups from newest to oldest.
    basebackups = sorted(
        completed_basebackups,
        key=lambda backup: backup['scanned_sn'].as_an_integer,
        reverse=True)
    last_retained = None
    if len(basebackups) <= num_to_retain:
        detail = None
        if len(basebackups) == 0:
            msg = 'Not deleting any data.'
            detail = 'No existing base backups.'
        elif len(basebackups) == 1:
            last_retained = basebackups[-1]
            msg = 'Retaining existing base backup.'
        else:
            last_retained = basebackups[-1]
            msg = "Retaining all %d base backups." % len(basebackups)
    else:
        last_retained = basebackups[num_to_retain - 1]
        num_deleting = len(basebackups) - num_to_retain
        msg = "Deleting %d oldest base backups." % num_deleting
        detail = "Found %d total base backups." % len(basebackups)

    log_message = dict(msg=msg)
    if detail is not None:
        log_message['detail'] = detail
    if last_retained is not None:
        log_message['hint'] = \
            "Deleting keys older than %s." % last_retained['url']
    logger.info(**log_message)

    # This will delete all base backup and WAL data before
    # last_retained['scanned_sn'].
    if last_retained is not None:
        self._delete_base_backups_before(last_retained['scanned_sn'])
        self._delete_wals_before(last_retained['scanned_sn'])

    if self.deleter:
        self.deleter.close()
[ "def", "delete_with_retention", "(", "self", ",", "num_to_retain", ")", ":", "base_backup_sentinel_depth", "=", "self", ".", "layout", ".", "basebackups", "(", ")", ".", "count", "(", "'/'", ")", "+", "1", "# Sweep over base backup files, collecting sentinel files fro...
Retain the num_to_retain most recent backups and delete all data before them.
[ "Retain", "the", "num_to_retain", "most", "recent", "backups", "and", "delete", "all", "data", "before", "them", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/base.py#L435-L511
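The retention arithmetic, isolated: with backups sorted newest-first, index num_to_retain - 1 is the oldest survivor, and everything strictly older is deleted. A list-level sketch with hypothetical names:

backups_newest_first = ['base_09', 'base_07', 'base_05', 'base_03']
num_to_retain = 2

if len(backups_newest_first) <= num_to_retain:
    # Nothing to delete; the oldest backup (if any) is the boundary.
    last_retained = backups_newest_first[-1] if backups_newest_first else None
else:
    last_retained = backups_newest_first[num_to_retain - 1]

assert last_retained == 'base_07'  # base_05 and base_03 fall before the boundary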
242,620
wal-e/wal-e
wal_e/blobstore/swift/calling_format.py
connect
def connect(creds): """ Construct a connection value from a container """ return swiftclient.Connection( authurl=creds.authurl, user=creds.user, key=creds.password, auth_version=creds.auth_version, tenant_name=creds.tenant_name, os_options={ "region_name": creds.region, "endpoint_type": creds.endpoint_type, "domain_id": creds.domain_id, "domain_name": creds.domain_name, "tenant_id": creds.tenant_id, "user_id": creds.user_id, "user_domain_id": creds.user_domain_id, "user_domain_name": creds.user_domain_name, "project_id": creds.project_id, "project_name": creds.project_name, "project_domain_id": creds.project_domain_id, "project_domain_name": creds.project_domain_name, } )
python
def connect(creds):
    return swiftclient.Connection(
        authurl=creds.authurl,
        user=creds.user,
        key=creds.password,
        auth_version=creds.auth_version,
        tenant_name=creds.tenant_name,
        os_options={
            "region_name": creds.region,
            "endpoint_type": creds.endpoint_type,
            "domain_id": creds.domain_id,
            "domain_name": creds.domain_name,
            "tenant_id": creds.tenant_id,
            "user_id": creds.user_id,
            "user_domain_id": creds.user_domain_id,
            "user_domain_name": creds.user_domain_name,
            "project_id": creds.project_id,
            "project_name": creds.project_name,
            "project_domain_id": creds.project_domain_id,
            "project_domain_name": creds.project_domain_name,
        }
    )
[ "def", "connect", "(", "creds", ")", ":", "return", "swiftclient", ".", "Connection", "(", "authurl", "=", "creds", ".", "authurl", ",", "user", "=", "creds", ".", "user", ",", "key", "=", "creds", ".", "password", ",", "auth_version", "=", "creds", "....
Construct a connection value from a container
[ "Construct", "a", "connection", "value", "from", "a", "container" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/swift/calling_format.py#L4-L28
242,621
wal-e/wal-e
wal_e/blobstore/gs/calling_format.py
connect
def connect(creds, max_retries=100): """Construct a connection value to Google Storage API The credentials are retrieved using get_credentials that checks the environment for the correct values. """ credentials, project = google.auth.default() return RetryClient(max_retries=max_retries, project=project, credentials=credentials)
python
def connect(creds, max_retries=100):
    credentials, project = google.auth.default()
    return RetryClient(max_retries=max_retries,
                       project=project,
                       credentials=credentials)
[ "def", "connect", "(", "creds", ",", "max_retries", "=", "100", ")", ":", "credentials", ",", "project", "=", "google", ".", "auth", ".", "default", "(", ")", "return", "RetryClient", "(", "max_retries", "=", "max_retries", ",", "project", "=", "project", ...
Construct a connection value to the Google Storage API. The credentials are retrieved using get_credentials, which checks the environment for the correct values.
[ "Construct", "a", "connection", "value", "to", "Google", "Storage", "API" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/gs/calling_format.py#L6-L15
242,622
wal-e/wal-e
wal_e/retries.py
retry
def retry(exception_processor=generic_exception_processor, max_retries=100): """ Generic retry decorator Tries to call the decorated function. Should no exception be raised, the value is simply returned, otherwise, call an exception_processor function with the exception (type, value, traceback) tuple (with the intention that it could raise the exception without losing the traceback) and the exception processor's optionally usable context value (exc_processor_cxt). It's recommended to delete all references to the traceback passed to the exception_processor to speed up garbage collector via the 'del' operator. This context value is passed to and returned from every invocation of the exception processor. This can be used to more conveniently (vs. an object with __call__ defined) implement exception processors that have some state, such as the 'number of attempts'. The first invocation will pass None. :param f: A function to be retried. :type f: function :param exception_processor: A function to process raised exceptions. :type exception_processor: function :param max_retries: An integer representing the maximum number of retry attempts. :type max_retries: integer """ max_retries = int(os.getenv('WALE_RETRIES', max_retries)) def yield_new_function_from(f): def shim(*args, **kwargs): exc_processor_cxt = None retries = 0 while True: # Avoid livelocks while spinning on retry by yielding. gevent.sleep(0.1) try: return f(*args, **kwargs) except KeyboardInterrupt: raise except Exception: exception_info_tuple = None retries += 1 if max_retries >= 1 and retries >= max_retries: raise try: exception_info_tuple = sys.exc_info() exc_processor_cxt = exception_processor( exception_info_tuple, exc_processor_cxt=exc_processor_cxt) finally: # Although cycles are harmless long-term, help the # garbage collector. del exception_info_tuple # Exponential backoff with jitter capped at 2 minutes. duration = min(120, (2 ** retries)) / 2 gevent.sleep(duration + random.randint(0, duration)) return functools.wraps(f)(shim) return yield_new_function_from
python
def retry(exception_processor=generic_exception_processor, max_retries=100):
    max_retries = int(os.getenv('WALE_RETRIES', max_retries))

    def yield_new_function_from(f):
        def shim(*args, **kwargs):
            exc_processor_cxt = None
            retries = 0

            while True:
                # Avoid livelocks while spinning on retry by yielding.
                gevent.sleep(0.1)

                try:
                    return f(*args, **kwargs)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    exception_info_tuple = None
                    retries += 1
                    if max_retries >= 1 and retries >= max_retries:
                        raise

                    try:
                        exception_info_tuple = sys.exc_info()
                        exc_processor_cxt = exception_processor(
                            exception_info_tuple,
                            exc_processor_cxt=exc_processor_cxt)
                    finally:
                        # Although cycles are harmless long-term, help the
                        # garbage collector.
                        del exception_info_tuple

                    # Exponential backoff with jitter capped at 2 minutes.
                    duration = min(120, (2 ** retries)) / 2
                    gevent.sleep(duration + random.randint(0, duration))

        return functools.wraps(f)(shim)
    return yield_new_function_from
[ "def", "retry", "(", "exception_processor", "=", "generic_exception_processor", ",", "max_retries", "=", "100", ")", ":", "max_retries", "=", "int", "(", "os", ".", "getenv", "(", "'WALE_RETRIES'", ",", "max_retries", ")", ")", "def", "yield_new_function_from", ...
Generic retry decorator.

Tries to call the decorated function. Should no exception be raised, the value is simply returned; otherwise, an exception_processor function is called with the exception (type, value, traceback) tuple (with the intention that it could re-raise the exception without losing the traceback) and the exception processor's optionally usable context value (exc_processor_cxt). It's recommended to delete all references to the traceback passed to the exception_processor, via the 'del' operator, to speed up the garbage collector.

This context value is passed to and returned from every invocation of the exception processor. This can be used to more conveniently (vs. an object with __call__ defined) implement exception processors that have some state, such as the number of attempts. The first invocation will pass None.

:param f: A function to be retried.
:type f: function
:param exception_processor: A function to process raised exceptions.
:type exception_processor: function
:param max_retries: An integer representing the maximum number of retry attempts.
:type max_retries: integer
[ "Generic", "retry", "decorator" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/retries.py#L42-L112
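A condensed, standalone sketch of the same retry shape, with time.sleep in place of gevent.sleep and the exception-processor machinery omitted (the names here are illustrative, not wal-e's API):

import functools
import time

def retry_sketch(max_retries=5):
    def wrap(f):
        @functools.wraps(f)
        def shim(*args, **kwargs):
            retries = 0
            while True:
                try:
                    return f(*args, **kwargs)
                except KeyboardInterrupt:
                    raise      # never swallow operator interrupts
                except Exception:
                    retries += 1
                    if retries >= max_retries:
                        raise  # out of attempts: surface the error
                    # Capped exponential backoff, as in the original.
                    time.sleep(min(120, 2 ** retries) / 2)
        return shim
    return wrap

calls = {'n': 0}

@retry_sketch(max_retries=5)
def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise IOError('transient failure')
    return 'ok'

assert flaky() == 'ok' and calls['n'] == 3  # two retries, then success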
242,623
wal-e/wal-e
wal_e/worker/upload_pool.py
TarUploadPool._start
def _start(self, tpart): """Start upload and accout for resource consumption.""" g = gevent.Greenlet(self.uploader, tpart) g.link(self._finish) # Account for concurrency_burden before starting the greenlet # to avoid racing against .join. self.concurrency_burden += 1 self.member_burden += len(tpart) g.start()
python
def _start(self, tpart):
    g = gevent.Greenlet(self.uploader, tpart)
    g.link(self._finish)

    # Account for concurrency_burden before starting the greenlet
    # to avoid racing against .join.
    self.concurrency_burden += 1

    self.member_burden += len(tpart)

    g.start()
[ "def", "_start", "(", "self", ",", "tpart", ")", ":", "g", "=", "gevent", ".", "Greenlet", "(", "self", ".", "uploader", ",", "tpart", ")", "g", ".", "link", "(", "self", ".", "_finish", ")", "# Account for concurrency_burden before starting the greenlet", "...
Start upload and account for resource consumption.
[ "Start", "upload", "and", "accout", "for", "resource", "consumption", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/upload_pool.py#L29-L40
242,624
wal-e/wal-e
wal_e/worker/upload_pool.py
TarUploadPool._finish
def _finish(self, g): """Called on completion of an upload greenlet. Takes care to forward Exceptions or, if there is no error, the finished TarPartition value across a channel. """ assert g.ready() if g.successful(): finished_tpart = g.get() self.wait_change.put(finished_tpart) else: self.wait_change.put(g.exception)
python
def _finish(self, g):
    assert g.ready()

    if g.successful():
        finished_tpart = g.get()
        self.wait_change.put(finished_tpart)
    else:
        self.wait_change.put(g.exception)
[ "def", "_finish", "(", "self", ",", "g", ")", ":", "assert", "g", ".", "ready", "(", ")", "if", "g", ".", "successful", "(", ")", ":", "finished_tpart", "=", "g", ".", "get", "(", ")", "self", ".", "wait_change", ".", "put", "(", "finished_tpart", ...
Called on completion of an upload greenlet. Takes care to forward Exceptions or, if there is no error, the finished TarPartition value across a channel.
[ "Called", "on", "completion", "of", "an", "upload", "greenlet", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/upload_pool.py#L42-L54
242,625
wal-e/wal-e
wal_e/worker/upload_pool.py
TarUploadPool._wait
def _wait(self): """Block until an upload finishes Raise an exception if that tar volume failed with an error. """ val = self.wait_change.get() if isinstance(val, Exception): # Don't other uncharging, because execution is going to stop raise val else: # Uncharge for resources. self.member_burden -= len(val) self.concurrency_burden -= 1
python
def _wait(self):
    val = self.wait_change.get()

    if isinstance(val, Exception):
        # Don't bother uncharging, because execution is going to stop
        raise val
    else:
        # Uncharge for resources.
        self.member_burden -= len(val)
        self.concurrency_burden -= 1
[ "def", "_wait", "(", "self", ")", ":", "val", "=", "self", ".", "wait_change", ".", "get", "(", ")", "if", "isinstance", "(", "val", ",", "Exception", ")", ":", "# Don't other uncharging, because execution is going to stop", "raise", "val", "else", ":", "# Unc...
Block until an upload finishes. Raise an exception if that tar volume failed with an error.
[ "Block", "until", "an", "upload", "finishes" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/upload_pool.py#L56-L69
242,626
wal-e/wal-e
wal_e/worker/upload_pool.py
TarUploadPool.put
def put(self, tpart): """Upload a tar volume Blocks if there is too much work outstanding already, and raise errors of previously submitted greenlets that die unexpectedly. """ if self.closed: raise UserCritical(msg='attempt to upload tar after closing', hint='report a bug') while True: too_many = ( self.concurrency_burden + 1 > self.max_concurrency or self.member_burden + len(tpart) > self.max_members ) if too_many: # If there are not enough resources to start an upload # even with zero uploads in progress, then something # has gone wrong: the user should not be given enough # rope to hang themselves in this way. if self.concurrency_burden == 0: raise UserCritical( msg=('not enough resources in pool to ' 'support an upload'), hint='report a bug') # _wait blocks until an upload finishes and clears its # used resources, after which another attempt to # evaluate scheduling resources for another upload # might be worth evaluating. # # Alternatively, an error was encountered in a # previous upload in which case it'll be raised here # and cause the process to regard the upload as a # failure. self._wait() gc.collect() else: # Enough resources available: commence upload self._start(tpart) return
python
def put(self, tpart):
    if self.closed:
        raise UserCritical(msg='attempt to upload tar after closing',
                           hint='report a bug')

    while True:
        too_many = (
            self.concurrency_burden + 1 > self.max_concurrency or
            self.member_burden + len(tpart) > self.max_members
        )

        if too_many:
            # If there are not enough resources to start an upload
            # even with zero uploads in progress, then something
            # has gone wrong: the user should not be given enough
            # rope to hang themselves in this way.
            if self.concurrency_burden == 0:
                raise UserCritical(
                    msg=('not enough resources in pool to '
                         'support an upload'),
                    hint='report a bug')

            # _wait blocks until an upload finishes and clears its
            # used resources, after which another attempt to
            # evaluate scheduling resources for another upload
            # might be worth evaluating.
            #
            # Alternatively, an error was encountered in a
            # previous upload in which case it'll be raised here
            # and cause the process to regard the upload as a
            # failure.
            self._wait()
            gc.collect()
        else:
            # Enough resources available: commence upload
            self._start(tpart)
            return
[ "def", "put", "(", "self", ",", "tpart", ")", ":", "if", "self", ".", "closed", ":", "raise", "UserCritical", "(", "msg", "=", "'attempt to upload tar after closing'", ",", "hint", "=", "'report a bug'", ")", "while", "True", ":", "too_many", "=", "(", "se...
Upload a tar volume Blocks if there is too much work outstanding already, and raises errors from previously submitted greenlets that die unexpectedly.
[ "Upload", "a", "tar", "volume" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/upload_pool.py#L71-L113
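A minimal usage sketch for the pool above, hedged: the constructor arguments, the uploader object, and the tar_partitions iterable are assumptions for illustration; only put() and its blocking/error behavior come from the record itself.

# Hypothetical setup; TarUploadPool's constructor signature is assumed.
pool = TarUploadPool(uploader, max_concurrency=4, max_members=1024)
for tpart in tar_partitions:  # tar volumes produced by partitioning a backup
    pool.put(tpart)           # blocks while concurrency/member budgets are full
# Draining and closing the pool is handled elsewhere in wal-e and not shown.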
242,627
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
close_filenos
def close_filenos(preserve): """ Close unprotected file descriptors Close all open file descriptors that are not in preserve. If the RLIMIT_NOFILE hard limit is unlimited, all file descriptors below 4096 are closed; otherwise all descriptors below the limit reported by resource.getrlimit() are closed. :param preserve: set with protected files :type preserve: set :return: None """ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = 4096 for fileno in range(maxfd): if fileno not in preserve: try: os.close(fileno) except OSError as err: if not err.errno == errno.EBADF: raise DaemonError( 'Failed to close file descriptor {0}: {1}' .format(fileno, err))
python
def close_filenos(preserve): maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = 4096 for fileno in range(maxfd): if fileno not in preserve: try: os.close(fileno) except OSError as err: if not err.errno == errno.EBADF: raise DaemonError( 'Failed to close file descriptor {0}: {1}' .format(fileno, err))
[ "def", "close_filenos", "(", "preserve", ")", ":", "maxfd", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NOFILE", ")", "[", "1", "]", "if", "maxfd", "==", "resource", ".", "RLIM_INFINITY", ":", "maxfd", "=", "4096", "for", "fileno", "...
Close unprotected file descriptors Close all open file descriptors that are not in preserve. If the RLIMIT_NOFILE hard limit is unlimited, all file descriptors below 4096 are closed; otherwise all descriptors below the limit reported by resource.getrlimit() are closed. :param preserve: set with protected files :type preserve: set :return: None
[ "Close", "unprotected", "file", "descriptors" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L309-L333
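A short sketch of calling close_filenos while keeping the standard streams open; the preserve set simply needs to contain the descriptors to spare.

import sys

# Keep stdin/stdout/stderr open; every other descriptor up to the
# RLIMIT_NOFILE cap (or 4096 if the limit is unlimited) is closed.
preserve = {sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()}
close_filenos(preserve)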
242,628
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
default_signal_map
def default_signal_map(): """ Create the default signal map for this system. :return: dict """ name_map = { 'SIGTSTP': None, 'SIGTTIN': None, 'SIGTTOU': None, 'SIGTERM': 'terminate'} signal_map = {} for name, target in list(name_map.items()): if hasattr(signal, name): signal_map[getattr(signal, name)] = target return signal_map
python
def default_signal_map(): name_map = { 'SIGTSTP': None, 'SIGTTIN': None, 'SIGTTOU': None, 'SIGTERM': 'terminate'} signal_map = {} for name, target in list(name_map.items()): if hasattr(signal, name): signal_map[getattr(signal, name)] = target return signal_map
[ "def", "default_signal_map", "(", ")", ":", "name_map", "=", "{", "'SIGTSTP'", ":", "None", ",", "'SIGTTIN'", ":", "None", ",", "'SIGTTOU'", ":", "None", ",", "'SIGTERM'", ":", "'terminate'", "}", "signal_map", "=", "{", "}", "for", "name", ",", "target"...
Create the default signal map for this system. :return: dict
[ "Create", "the", "default", "signal", "map", "for", "this", "system", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L336-L350
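For illustration, on a typical Unix the returned mapping looks like the sketch below; exactly which keys are present depends on the signals the platform defines.

import signal

sigmap = default_signal_map()
# e.g. {signal.SIGTSTP: None, signal.SIGTTIN: None,
#       signal.SIGTTOU: None, signal.SIGTERM: 'terminate'}
assert sigmap.get(signal.SIGTERM) == 'terminate'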
242,629
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
parent_is_inet
def parent_is_inet(): """ Check if parent is inet Check if our parent seems to be a superserver, aka inetd/xinetd. This is done by checking if sys.__stdin__ is a network socket. :return: bool """ result = False sock = socket.fromfd( sys.__stdin__.fileno(), socket.AF_INET, socket.SOCK_RAW) try: sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) result = True except (OSError, socket.error) as err: if not err.args[0] == errno.ENOTSOCK: result = True return result
python
def parent_is_inet(): result = False sock = socket.fromfd( sys.__stdin__.fileno(), socket.AF_INET, socket.SOCK_RAW) try: sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) result = True except (OSError, socket.error) as err: if not err.args[0] == errno.ENOTSOCK: result = True return result
[ "def", "parent_is_inet", "(", ")", ":", "result", "=", "False", "sock", "=", "socket", ".", "fromfd", "(", "sys", ".", "__stdin__", ".", "fileno", "(", ")", ",", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_RAW", ")", "try", ":", "sock", ".", ...
Check if parent is inet Check if our parent seems to be a superserver, aka inetd/xinetd. This is done by checking if sys.__stdin__ is a network socket. :return: bool
[ "Check", "if", "parent", "is", "inet" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L366-L386
242,630
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
redirect_stream
def redirect_stream(system, target): """ Redirect Unix streams If target is None, redirect the stream to /dev/null, else redirect to target. :param system: either sys.stdin, sys.stdout, or sys.stderr :type system: file object :param target: file-like object, or None :type target: None, File Object :return: None :raise: DaemonError """ if target is None: target_fd = os.open(os.devnull, os.O_RDWR) else: target_fd = target.fileno() try: os.dup2(target_fd, system.fileno()) except OSError as err: raise DaemonError('Could not redirect {0} to {1}: {2}' .format(system, target, err))
python
def redirect_stream(system, target): if target is None: target_fd = os.open(os.devnull, os.O_RDWR) else: target_fd = target.fileno() try: os.dup2(target_fd, system.fileno()) except OSError as err: raise DaemonError('Could not redirect {0} to {1}: {2}' .format(system, target, err))
[ "def", "redirect_stream", "(", "system", ",", "target", ")", ":", "if", "target", "is", "None", ":", "target_fd", "=", "os", ".", "open", "(", "os", ".", "devnull", ",", "os", ".", "O_RDWR", ")", "else", ":", "target_fd", "=", "target", ".", "fileno"...
Redirect Unix streams If target is None, redirect the stream to /dev/null, else redirect to target. :param system: either sys.stdin, sys.stdout, or sys.stderr :type system: file object :param target: file-like object, or None :type target: None, File Object :return: None :raise: DaemonError
[ "Redirect", "Unix", "streams" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L403-L425
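A hedged example of redirecting the standard streams during daemonization; the log path is illustrative.

import sys

log = open('/var/tmp/daemon.log', 'a')  # illustrative target file
redirect_stream(sys.stderr, log)        # stderr now appends to the log
redirect_stream(sys.stdin, None)        # stdin now reads from /dev/null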
242,631
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
DaemonContext._get_signal_handler
def _get_signal_handler(self, handler): """ get the callback function for handler If the handler is None, returns signal.SIG_IGN. If the handler is a string, return the matching attribute of this instance if possible. Else return the handler itself. :param handler: :type handler: str, None, function :return: function """ if not handler: result = signal.SIG_IGN elif isinstance(handler, string_types): result = getattr(self, handler) else: result = handler return result
python
def _get_signal_handler(self, handler): if not handler: result = signal.SIG_IGN elif isinstance(handler, string_types): result = getattr(self, handler) else: result = handler return result
[ "def", "_get_signal_handler", "(", "self", ",", "handler", ")", ":", "if", "not", "handler", ":", "result", "=", "signal", ".", "SIG_IGN", "elif", "isinstance", "(", "handler", ",", "string_types", ")", ":", "result", "=", "getattr", "(", "self", ",", "h...
get the callback function for handler If the handler is None, returns signal.SIG_IGN. If the handler is a string, return the matching attribute of this instance if possible. Else return the handler itself. :param handler: :type handler: str, None, function :return: function
[ "get", "the", "callback", "function", "for", "handler" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L141-L159
242,632
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
DaemonContext._files_preserve
def _files_preserve(self): """ create a set of protected files create a set of files, based on self.files_preserve and self.stdin, self.stdout and self.stderr, that should not get closed while daemonizing. :return: set """ result = set() files = [] if not self.files_preserve else self.files_preserve files.extend([self.stdin, self.stdout, self.stderr]) for item in files: if hasattr(item, 'fileno'): result.add(item.fileno()) if isinstance(item, int): result.add(item) return result
python
def _files_preserve(self): result = set() files = [] if not self.files_preserve else self.files_preserve files.extend([self.stdin, self.stdout, self.stderr]) for item in files: if hasattr(item, 'fileno'): result.add(item.fileno()) if isinstance(item, int): result.add(item) return result
[ "def", "_files_preserve", "(", "self", ")", ":", "result", "=", "set", "(", ")", "files", "=", "[", "]", "if", "not", "self", ".", "files_preserve", "else", "self", ".", "files_preserve", "files", ".", "extend", "(", "[", "self", ".", "stdin", ",", "...
create a set of protected files create a set of files, based on self.files_preserve and self.stdin, self.stdout and self.stderr, that should not get closed while daemonizing. :return: set
[ "create", "a", "set", "of", "protected", "files" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L162-L179
242,633
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
DaemonContext.working_directory
def working_directory(self): """ The working_directory property :return: str """ if self.chroot_directory and not \ self._working_directory.startswith(self.chroot_directory): return self.chroot_directory + self._working_directory else: return self._working_directory
python
def working_directory(self): if self.chroot_directory and not \ self._working_directory.startswith(self.chroot_directory): return self.chroot_directory + self._working_directory else: return self._working_directory
[ "def", "working_directory", "(", "self", ")", ":", "if", "self", ".", "chroot_directory", "and", "not", "self", ".", "_working_directory", ".", "startswith", "(", "self", ".", "chroot_directory", ")", ":", "return", "self", ".", "chroot_directory", "+", "self"...
The working_directory property :return: str
[ "The", "working_directory", "property" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L196-L205
242,634
treyhunner/django-simple-history
simple_history/__init__.py
register
def register( model, app=None, manager_name="history", records_class=None, table_name=None, **records_config ): """ Create historical model for `model` and attach history manager to `model`. Keyword arguments: app -- App to install historical model into (defaults to model.__module__) manager_name -- class attribute name to use for historical manager records_class -- class to use for history relation (defaults to HistoricalRecords) table_name -- Custom name for history table (defaults to 'APPNAME_historicalMODELNAME') This method should be used as an alternative to attaching an `HistoricalManager` instance directly to `model`. """ from . import models if records_class is None: records_class = models.HistoricalRecords records = records_class(**records_config) records.manager_name = manager_name records.table_name = table_name records.module = app and ("%s.models" % app) or model.__module__ records.cls = model records.add_extra_methods(model) records.finalize(model)
python
def register( model, app=None, manager_name="history", records_class=None, table_name=None, **records_config ): from . import models if records_class is None: records_class = models.HistoricalRecords records = records_class(**records_config) records.manager_name = manager_name records.table_name = table_name records.module = app and ("%s.models" % app) or model.__module__ records.cls = model records.add_extra_methods(model) records.finalize(model)
[ "def", "register", "(", "model", ",", "app", "=", "None", ",", "manager_name", "=", "\"history\"", ",", "records_class", "=", "None", ",", "table_name", "=", "None", ",", "*", "*", "records_config", ")", ":", "from", ".", "import", "models", "if", "recor...
Create historical model for `model` and attach history manager to `model`. Keyword arguments: app -- App to install historical model into (defaults to model.__module__) manager_name -- class attribute name to use for historical manager records_class -- class to use for history relation (defaults to HistoricalRecords) table_name -- Custom name for history table (defaults to 'APPNAME_historicalMODELNAME') This method should be used as an alternative to attaching an `HistoricalManager` instance directly to `model`.
[ "Create", "historical", "model", "for", "model", "and", "attach", "history", "manager", "to", "model", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/__init__.py#L6-L39
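A usage sketch: per the docstring, register() is the way to track a model you cannot edit directly, such as a third-party model. The Poll import below is hypothetical.

from simple_history import register
from polls.models import Poll  # hypothetical third-party model

register(Poll)  # history is now available as Poll.history
# Alternatives, using the keyword arguments documented above:
# register(Poll, manager_name='audit', table_name='polls_poll_audit')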
242,635
treyhunner/django-simple-history
simple_history/admin.py
SimpleHistoryAdmin.get_urls
def get_urls(self): """Returns the additional urls used by the history admin.""" urls = super(SimpleHistoryAdmin, self).get_urls() admin_site = self.admin_site opts = self.model._meta info = opts.app_label, opts.model_name history_urls = [ url( "^([^/]+)/history/([^/]+)/$", admin_site.admin_view(self.history_form_view), name="%s_%s_simple_history" % info, ) ] return history_urls + urls
python
def get_urls(self): urls = super(SimpleHistoryAdmin, self).get_urls() admin_site = self.admin_site opts = self.model._meta info = opts.app_label, opts.model_name history_urls = [ url( "^([^/]+)/history/([^/]+)/$", admin_site.admin_view(self.history_form_view), name="%s_%s_simple_history" % info, ) ] return history_urls + urls
[ "def", "get_urls", "(", "self", ")", ":", "urls", "=", "super", "(", "SimpleHistoryAdmin", ",", "self", ")", ".", "get_urls", "(", ")", "admin_site", "=", "self", ".", "admin_site", "opts", "=", "self", ".", "model", ".", "_meta", "info", "=", "opts", ...
Returns the additional urls used by the history admin.
[ "Returns", "the", "additional", "urls", "used", "by", "the", "history", "admin", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/admin.py#L29-L42
242,636
treyhunner/django-simple-history
simple_history/admin.py
SimpleHistoryAdmin.save_model
def save_model(self, request, obj, form, change): """Set special model attribute to user for reference after save""" obj._history_user = request.user super(SimpleHistoryAdmin, self).save_model(request, obj, form, change)
python
def save_model(self, request, obj, form, change): obj._history_user = request.user super(SimpleHistoryAdmin, self).save_model(request, obj, form, change)
[ "def", "save_model", "(", "self", ",", "request", ",", "obj", ",", "form", ",", "change", ")", ":", "obj", ".", "_history_user", "=", "request", ".", "user", "super", "(", "SimpleHistoryAdmin", ",", "self", ")", ".", "save_model", "(", "request", ",", ...
Set special model attribute to user for reference after save
[ "Set", "special", "model", "attribute", "to", "user", "for", "reference", "after", "save" ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/admin.py#L203-L206
242,637
treyhunner/django-simple-history
simple_history/management/commands/populate_history.py
Command._bulk_history_create
def _bulk_history_create(self, model, batch_size): """Save a copy of all instances to the historical model. :param model: Model you want to bulk create :param batch_size: number of models to create at once. :return: """ instances = [] history = utils.get_history_manager_for_model(model) if self.verbosity >= 1: self.stdout.write( "Starting bulk creating history models for {} instances {}-{}".format( model, 0, batch_size ) ) iterator_kwargs = ( {"chunk_size": batch_size} if django.VERSION >= (2, 0, 0) else {} ) for index, instance in enumerate( model._default_manager.iterator(**iterator_kwargs) ): # Can't just pass batch_size to bulk_create, as this can lead to # out-of-memory errors: we would load too many models into memory after # creating them. So we only keep batch_size worth of models in # instances and clear the list each time we hit batch_size if index % batch_size == 0: history.bulk_history_create(instances, batch_size=batch_size) instances = [] if self.verbosity >= 1: self.stdout.write( "Finished bulk creating history models for {} " "instances {}-{}, starting next {}".format( model, index - batch_size, index, batch_size ) ) instances.append(instance) # create any we didn't get in the last loop if instances: history.bulk_history_create(instances, batch_size=batch_size)
python
def _bulk_history_create(self, model, batch_size): instances = [] history = utils.get_history_manager_for_model(model) if self.verbosity >= 1: self.stdout.write( "Starting bulk creating history models for {} instances {}-{}".format( model, 0, batch_size ) ) iterator_kwargs = ( {"chunk_size": batch_size} if django.VERSION >= (2, 0, 0) else {} ) for index, instance in enumerate( model._default_manager.iterator(**iterator_kwargs) ): # Can't just pass batch_size to bulk_create, as this can lead to # out-of-memory errors: we would load too many models into memory after # creating them. So we only keep batch_size worth of models in # instances and clear the list each time we hit batch_size if index % batch_size == 0: history.bulk_history_create(instances, batch_size=batch_size) instances = [] if self.verbosity >= 1: self.stdout.write( "Finished bulk creating history models for {} " "instances {}-{}, starting next {}".format( model, index - batch_size, index, batch_size ) ) instances.append(instance) # create any we didn't get in the last loop if instances: history.bulk_history_create(instances, batch_size=batch_size)
[ "def", "_bulk_history_create", "(", "self", ",", "model", ",", "batch_size", ")", ":", "instances", "=", "[", "]", "history", "=", "utils", ".", "get_history_manager_for_model", "(", "model", ")", "if", "self", ".", "verbosity", ">=", "1", ":", "self", "."...
Save a copy of all instances to the historical model. :param model: Model you want to bulk create :param batch_size: number of models to create at once. :return:
[ "Save", "a", "copy", "of", "all", "instances", "to", "the", "historical", "model", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/management/commands/populate_history.py#L113-L158
242,638
treyhunner/django-simple-history
simple_history/models.py
transform_field
def transform_field(field): """Customize field appropriately for use in historical model""" field.name = field.attname if isinstance(field, models.AutoField): field.__class__ = models.IntegerField elif isinstance(field, models.FileField): # Don't copy file, just path. field.__class__ = models.TextField # Historical instance shouldn't change create/update timestamps field.auto_now = False field.auto_now_add = False if field.primary_key or field.unique: # Unique fields can no longer be guaranteed unique, # but they should still be indexed for faster lookups. field.primary_key = False field._unique = False field.db_index = True field.serialize = True
python
def transform_field(field): field.name = field.attname if isinstance(field, models.AutoField): field.__class__ = models.IntegerField elif isinstance(field, models.FileField): # Don't copy file, just path. field.__class__ = models.TextField # Historical instance shouldn't change create/update timestamps field.auto_now = False field.auto_now_add = False if field.primary_key or field.unique: # Unique fields can no longer be guaranteed unique, # but they should still be indexed for faster lookups. field.primary_key = False field._unique = False field.db_index = True field.serialize = True
[ "def", "transform_field", "(", "field", ")", ":", "field", ".", "name", "=", "field", ".", "attname", "if", "isinstance", "(", "field", ",", "models", ".", "AutoField", ")", ":", "field", ".", "__class__", "=", "models", ".", "IntegerField", "elif", "isi...
Customize field appropriately for use in historical model
[ "Customize", "field", "appropriately", "for", "use", "in", "historical", "model" ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/models.py#L528-L548
242,639
treyhunner/django-simple-history
simple_history/models.py
HistoricalRecords.create_history_model
def create_history_model(self, model, inherited): """ Creates a historical model to associate with the model provided. """ attrs = { "__module__": self.module, "_history_excluded_fields": self.excluded_fields, } app_module = "%s.models" % model._meta.app_label if inherited: # inherited use models module attrs["__module__"] = model.__module__ elif model.__module__ != self.module: # registered under different app attrs["__module__"] = self.module elif app_module != self.module: # Abuse an internal API because the app registry is loading. app = apps.app_configs[model._meta.app_label] models_module = app.name attrs["__module__"] = models_module fields = self.copy_fields(model) attrs.update(fields) attrs.update(self.get_extra_fields(model, fields)) # type in python2 wants str as a first argument attrs.update(Meta=type(str("Meta"), (), self.get_meta_options(model))) if self.table_name is not None: attrs["Meta"].db_table = self.table_name # Set as the default then check for overrides name = self.get_history_model_name(model) registered_models[model._meta.db_table] = model return python_2_unicode_compatible(type(str(name), self.bases, attrs))
python
def create_history_model(self, model, inherited): attrs = { "__module__": self.module, "_history_excluded_fields": self.excluded_fields, } app_module = "%s.models" % model._meta.app_label if inherited: # inherited use models module attrs["__module__"] = model.__module__ elif model.__module__ != self.module: # registered under different app attrs["__module__"] = self.module elif app_module != self.module: # Abuse an internal API because the app registry is loading. app = apps.app_configs[model._meta.app_label] models_module = app.name attrs["__module__"] = models_module fields = self.copy_fields(model) attrs.update(fields) attrs.update(self.get_extra_fields(model, fields)) # type in python2 wants str as a first argument attrs.update(Meta=type(str("Meta"), (), self.get_meta_options(model))) if self.table_name is not None: attrs["Meta"].db_table = self.table_name # Set as the default then check for overrides name = self.get_history_model_name(model) registered_models[model._meta.db_table] = model return python_2_unicode_compatible(type(str(name), self.bases, attrs))
[ "def", "create_history_model", "(", "self", ",", "model", ",", "inherited", ")", ":", "attrs", "=", "{", "\"__module__\"", ":", "self", ".", "module", ",", "\"_history_excluded_fields\"", ":", "self", ".", "excluded_fields", ",", "}", "app_module", "=", "\"%s....
Creates a historical model to associate with the model provided.
[ "Creates", "a", "historical", "model", "to", "associate", "with", "the", "model", "provided", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/models.py#L193-L228
242,640
treyhunner/django-simple-history
simple_history/models.py
HistoricalRecords.copy_fields
def copy_fields(self, model): """ Creates copies of the model's original fields, returning a dictionary mapping field name to copied field object. """ fields = {} for field in self.fields_included(model): field = copy.copy(field) field.remote_field = copy.copy(field.remote_field) if isinstance(field, OrderWrt): # OrderWrt is a proxy field, switch to a plain IntegerField field.__class__ = models.IntegerField if isinstance(field, models.ForeignKey): old_field = field old_swappable = old_field.swappable old_field.swappable = False try: _name, _path, args, field_args = old_field.deconstruct() finally: old_field.swappable = old_swappable if getattr(old_field, "one_to_one", False) or isinstance( old_field, models.OneToOneField ): FieldType = models.ForeignKey else: FieldType = type(old_field) # If field_args['to'] is 'self' then we have a case where the object # has a foreign key to itself. If we pass the historical record's # field to = 'self', the foreign key will point to an historical # record rather than the base record. We can use old_field.model here. if field_args.get("to", None) == "self": field_args["to"] = old_field.model # Override certain arguments passed when creating the field # so that they work for the historical field. field_args.update( db_constraint=False, related_name="+", null=True, blank=True, primary_key=False, db_index=True, serialize=True, unique=False, on_delete=models.DO_NOTHING, ) field = FieldType(*args, **field_args) field.name = old_field.name else: transform_field(field) fields[field.name] = field return fields
python
def copy_fields(self, model): fields = {} for field in self.fields_included(model): field = copy.copy(field) field.remote_field = copy.copy(field.remote_field) if isinstance(field, OrderWrt): # OrderWrt is a proxy field, switch to a plain IntegerField field.__class__ = models.IntegerField if isinstance(field, models.ForeignKey): old_field = field old_swappable = old_field.swappable old_field.swappable = False try: _name, _path, args, field_args = old_field.deconstruct() finally: old_field.swappable = old_swappable if getattr(old_field, "one_to_one", False) or isinstance( old_field, models.OneToOneField ): FieldType = models.ForeignKey else: FieldType = type(old_field) # If field_args['to'] is 'self' then we have a case where the object # has a foreign key to itself. If we pass the historical record's # field to = 'self', the foreign key will point to an historical # record rather than the base record. We can use old_field.model here. if field_args.get("to", None) == "self": field_args["to"] = old_field.model # Override certain arguments passed when creating the field # so that they work for the historical field. field_args.update( db_constraint=False, related_name="+", null=True, blank=True, primary_key=False, db_index=True, serialize=True, unique=False, on_delete=models.DO_NOTHING, ) field = FieldType(*args, **field_args) field.name = old_field.name else: transform_field(field) fields[field.name] = field return fields
[ "def", "copy_fields", "(", "self", ",", "model", ")", ":", "fields", "=", "{", "}", "for", "field", "in", "self", ".", "fields_included", "(", "model", ")", ":", "field", "=", "copy", ".", "copy", "(", "field", ")", "field", ".", "remote_field", "=",...
Creates copies of the model's original fields, returning a dictionary mapping field name to copied field object.
[ "Creates", "copies", "of", "the", "model", "s", "original", "fields", "returning", "a", "dictionary", "mapping", "field", "name", "to", "copied", "field", "object", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/models.py#L237-L289
242,641
treyhunner/django-simple-history
simple_history/models.py
HistoricalRecords.get_extra_fields
def get_extra_fields(self, model, fields): """Return dict of extra fields added to the historical record model""" def revert_url(self): """URL for this change in the default admin site.""" opts = model._meta app_label, model_name = opts.app_label, opts.model_name return reverse( "%s:%s_%s_simple_history" % (admin.site.name, app_label, model_name), args=[getattr(self, opts.pk.attname), self.history_id], ) def get_instance(self): attrs = { field.attname: getattr(self, field.attname) for field in fields.values() } if self._history_excluded_fields: excluded_attnames = [ model._meta.get_field(field).attname for field in self._history_excluded_fields ] values = ( model.objects.filter(pk=getattr(self, model._meta.pk.attname)) .values(*excluded_attnames) .get() ) attrs.update(values) return model(**attrs) def get_next_record(self): """ Get the next history record for the instance. `None` if last. """ history = utils.get_history_manager_for_model(self.instance) return ( history.filter(Q(history_date__gt=self.history_date)) .order_by("history_date") .first() ) def get_prev_record(self): """ Get the previous history record for the instance. `None` if first. """ history = utils.get_history_manager_for_model(self.instance) return ( history.filter(Q(history_date__lt=self.history_date)) .order_by("history_date") .last() ) extra_fields = { "history_id": self._get_history_id_field(), "history_date": models.DateTimeField(), "history_change_reason": self._get_history_change_reason_field(), "history_type": models.CharField( max_length=1, choices=(("+", _("Created")), ("~", _("Changed")), ("-", _("Deleted"))), ), "history_object": HistoricalObjectDescriptor( model, self.fields_included(model) ), "instance": property(get_instance), "instance_type": model, "next_record": property(get_next_record), "prev_record": property(get_prev_record), "revert_url": revert_url, "__str__": lambda self: "{} as of {}".format( self.history_object, self.history_date ), } extra_fields.update(self._get_history_related_field(model)) extra_fields.update(self._get_history_user_fields()) return extra_fields
python
def get_extra_fields(self, model, fields): def revert_url(self): """URL for this change in the default admin site.""" opts = model._meta app_label, model_name = opts.app_label, opts.model_name return reverse( "%s:%s_%s_simple_history" % (admin.site.name, app_label, model_name), args=[getattr(self, opts.pk.attname), self.history_id], ) def get_instance(self): attrs = { field.attname: getattr(self, field.attname) for field in fields.values() } if self._history_excluded_fields: excluded_attnames = [ model._meta.get_field(field).attname for field in self._history_excluded_fields ] values = ( model.objects.filter(pk=getattr(self, model._meta.pk.attname)) .values(*excluded_attnames) .get() ) attrs.update(values) return model(**attrs) def get_next_record(self): """ Get the next history record for the instance. `None` if last. """ history = utils.get_history_manager_for_model(self.instance) return ( history.filter(Q(history_date__gt=self.history_date)) .order_by("history_date") .first() ) def get_prev_record(self): """ Get the previous history record for the instance. `None` if first. """ history = utils.get_history_manager_for_model(self.instance) return ( history.filter(Q(history_date__lt=self.history_date)) .order_by("history_date") .last() ) extra_fields = { "history_id": self._get_history_id_field(), "history_date": models.DateTimeField(), "history_change_reason": self._get_history_change_reason_field(), "history_type": models.CharField( max_length=1, choices=(("+", _("Created")), ("~", _("Changed")), ("-", _("Deleted"))), ), "history_object": HistoricalObjectDescriptor( model, self.fields_included(model) ), "instance": property(get_instance), "instance_type": model, "next_record": property(get_next_record), "prev_record": property(get_prev_record), "revert_url": revert_url, "__str__": lambda self: "{} as of {}".format( self.history_object, self.history_date ), } extra_fields.update(self._get_history_related_field(model)) extra_fields.update(self._get_history_user_fields()) return extra_fields
[ "def", "get_extra_fields", "(", "self", ",", "model", ",", "fields", ")", ":", "def", "revert_url", "(", "self", ")", ":", "\"\"\"URL for this change in the default admin site.\"\"\"", "opts", "=", "model", ".", "_meta", "app_label", ",", "model_name", "=", "opts"...
Return dict of extra fields added to the historical record model
[ "Return", "dict", "of", "extra", "fields", "added", "to", "the", "historical", "record", "model" ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/models.py#L360-L435
242,642
treyhunner/django-simple-history
simple_history/models.py
HistoricalRecords.get_meta_options
def get_meta_options(self, model): """ Returns a dictionary of fields that will be added to the Meta inner class of the historical record model. """ meta_fields = { "ordering": ("-history_date", "-history_id"), "get_latest_by": "history_date", } if self.user_set_verbose_name: name = self.user_set_verbose_name else: name = format_lazy("historical {}", smart_text(model._meta.verbose_name)) meta_fields["verbose_name"] = name if self.app: meta_fields["app_label"] = self.app return meta_fields
python
def get_meta_options(self, model): meta_fields = { "ordering": ("-history_date", "-history_id"), "get_latest_by": "history_date", } if self.user_set_verbose_name: name = self.user_set_verbose_name else: name = format_lazy("historical {}", smart_text(model._meta.verbose_name)) meta_fields["verbose_name"] = name if self.app: meta_fields["app_label"] = self.app return meta_fields
[ "def", "get_meta_options", "(", "self", ",", "model", ")", ":", "meta_fields", "=", "{", "\"ordering\"", ":", "(", "\"-history_date\"", ",", "\"-history_id\"", ")", ",", "\"get_latest_by\"", ":", "\"history_date\"", ",", "}", "if", "self", ".", "user_set_verbose...
Returns a dictionary of fields that will be added to the Meta inner class of the historical record model.
[ "Returns", "a", "dictionary", "of", "fields", "that", "will", "be", "added", "to", "the", "Meta", "inner", "class", "of", "the", "historical", "record", "model", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/models.py#L437-L453
242,643
treyhunner/django-simple-history
simple_history/models.py
HistoricalRecords.get_history_user
def get_history_user(self, instance): """Get the modifying user from instance or middleware.""" try: return instance._history_user except AttributeError: request = None try: if self.thread.request.user.is_authenticated: request = self.thread.request except AttributeError: pass return self.get_user(instance=instance, request=request)
python
def get_history_user(self, instance): try: return instance._history_user except AttributeError: request = None try: if self.thread.request.user.is_authenticated: request = self.thread.request except AttributeError: pass return self.get_user(instance=instance, request=request)
[ "def", "get_history_user", "(", "self", ",", "instance", ")", ":", "try", ":", "return", "instance", ".", "_history_user", "except", "AttributeError", ":", "request", "=", "None", "try", ":", "if", "self", ".", "thread", ".", "request", ".", "user", ".", ...
Get the modifying user from instance or middleware.
[ "Get", "the", "modifying", "user", "from", "instance", "or", "middleware", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/models.py#L513-L525
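Outside a request/response cycle, the _history_user attribute that this method checks first can be set by hand. Poll and some_user below are hypothetical.

poll = Poll.objects.get(pk=1)   # hypothetical tracked model
poll.question = 'Updated question'
poll._history_user = some_user  # recorded instead of the middleware's user
poll.save()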
242,644
treyhunner/django-simple-history
simple_history/manager.py
HistoryManager.most_recent
def most_recent(self): """ Returns the most recent copy of the instance available in the history. """ if not self.instance: raise TypeError( "Can't use most_recent() without a {} instance.".format( self.model._meta.object_name ) ) tmp = [] excluded_fields = getattr(self.model, "_history_excluded_fields", []) for field in self.instance._meta.fields: if field.name in excluded_fields: continue if isinstance(field, models.ForeignKey): tmp.append(field.name + "_id") else: tmp.append(field.name) fields = tuple(tmp) try: values = self.get_queryset().values_list(*fields)[0] except IndexError: raise self.instance.DoesNotExist( "%s has no historical record." % self.instance._meta.object_name ) return self.instance.__class__(*values)
python
def most_recent(self): if not self.instance: raise TypeError( "Can't use most_recent() without a {} instance.".format( self.model._meta.object_name ) ) tmp = [] excluded_fields = getattr(self.model, "_history_excluded_fields", []) for field in self.instance._meta.fields: if field.name in excluded_fields: continue if isinstance(field, models.ForeignKey): tmp.append(field.name + "_id") else: tmp.append(field.name) fields = tuple(tmp) try: values = self.get_queryset().values_list(*fields)[0] except IndexError: raise self.instance.DoesNotExist( "%s has no historical record." % self.instance._meta.object_name ) return self.instance.__class__(*values)
[ "def", "most_recent", "(", "self", ")", ":", "if", "not", "self", ".", "instance", ":", "raise", "TypeError", "(", "\"Can't use most_recent() without a {} instance.\"", ".", "format", "(", "self", ".", "model", ".", "_meta", ".", "object_name", ")", ")", "tmp"...
Returns the most recent copy of the instance available in the history.
[ "Returns", "the", "most", "recent", "copy", "of", "the", "instance", "available", "in", "the", "history", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/manager.py#L37-L64
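A short usage sketch (Poll is hypothetical), including the DoesNotExist behavior described above.

poll = Poll.objects.get(pk=1)
try:
    latest = poll.history.most_recent()  # a Poll built from the newest record
except Poll.DoesNotExist:
    latest = None  # no historical record exists for this instance yet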
242,645
treyhunner/django-simple-history
simple_history/manager.py
HistoryManager.as_of
def as_of(self, date): """Get a snapshot as of a specific date. Returns an instance, or an iterable of the instances, of the original model with all the attributes set according to what was present on the object on the date provided. """ if not self.instance: return self._as_of_set(date) queryset = self.get_queryset().filter(history_date__lte=date) try: history_obj = queryset[0] except IndexError: raise self.instance.DoesNotExist( "%s had not yet been created." % self.instance._meta.object_name ) if history_obj.history_type == "-": raise self.instance.DoesNotExist( "%s had already been deleted." % self.instance._meta.object_name ) return history_obj.instance
python
def as_of(self, date): if not self.instance: return self._as_of_set(date) queryset = self.get_queryset().filter(history_date__lte=date) try: history_obj = queryset[0] except IndexError: raise self.instance.DoesNotExist( "%s had not yet been created." % self.instance._meta.object_name ) if history_obj.history_type == "-": raise self.instance.DoesNotExist( "%s had already been deleted." % self.instance._meta.object_name ) return history_obj.instance
[ "def", "as_of", "(", "self", ",", "date", ")", ":", "if", "not", "self", ".", "instance", ":", "return", "self", ".", "_as_of_set", "(", "date", ")", "queryset", "=", "self", ".", "get_queryset", "(", ")", ".", "filter", "(", "history_date__lte", "=", ...
Get a snapshot as of a specific date. Returns an instance, or an iterable of the instances, of the original model with all the attributes set according to what was present on the object on the date provided.
[ "Get", "a", "snapshot", "as", "of", "a", "specific", "date", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/manager.py#L66-L86
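A sketch of both call forms the docstring describes, instance-level and model-level (Poll is hypothetical):

from datetime import datetime

when = datetime(2019, 1, 1)
poll = Poll.objects.get(pk=1)
snapshot = poll.history.as_of(when)  # single Poll as it looked on that date
queryset = Poll.history.as_of(when)  # model-level: iterable of instances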
242,646
treyhunner/django-simple-history
simple_history/manager.py
HistoryManager.bulk_history_create
def bulk_history_create(self, objs, batch_size=None): """Bulk create the history for the objects specified by objs""" historical_instances = [ self.model( history_date=getattr(instance, "_history_date", now()), history_user=getattr(instance, "_history_user", None), history_change_reason=getattr(instance, "changeReason", ""), history_type="+", **{ field.attname: getattr(instance, field.attname) for field in instance._meta.fields if field.name not in self.model._history_excluded_fields } ) for instance in objs ] return self.model.objects.bulk_create( historical_instances, batch_size=batch_size )
python
def bulk_history_create(self, objs, batch_size=None): historical_instances = [ self.model( history_date=getattr(instance, "_history_date", now()), history_user=getattr(instance, "_history_user", None), history_change_reason=getattr(instance, "changeReason", ""), history_type="+", **{ field.attname: getattr(instance, field.attname) for field in instance._meta.fields if field.name not in self.model._history_excluded_fields } ) for instance in objs ] return self.model.objects.bulk_create( historical_instances, batch_size=batch_size )
[ "def", "bulk_history_create", "(", "self", ",", "objs", ",", "batch_size", "=", "None", ")", ":", "historical_instances", "=", "[", "self", ".", "model", "(", "history_date", "=", "getattr", "(", "instance", ",", "\"_history_date\"", ",", "now", "(", ")", ...
Bulk create the history for the objects specified by objs
[ "Bulk", "create", "the", "history", "for", "the", "objects", "specified", "by", "objs" ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/manager.py#L101-L121
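A hedged sketch (Poll is hypothetical, and it assumes the database backend returns primary keys from bulk_create): Django's bulk_create bypasses the save signal that normally writes history, so this method backfills '+' records afterwards.

objs = Poll.objects.bulk_create(
    [Poll(question='q{}'.format(i)) for i in range(100)]
)
# No history was written by bulk_create; create the '+' records explicitly:
Poll.history.bulk_history_create(objs, batch_size=50)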
242,647
treyhunner/django-simple-history
simple_history/utils.py
get_history_manager_for_model
def get_history_manager_for_model(model): """Return the history manager for a given app model.""" try: manager_name = model._meta.simple_history_manager_attribute except AttributeError: raise NotHistoricalModelError( "Cannot find a historical model for {model}.".format(model=model) ) return getattr(model, manager_name)
python
def get_history_manager_for_model(model): try: manager_name = model._meta.simple_history_manager_attribute except AttributeError: raise NotHistoricalModelError( "Cannot find a historical model for {model}.".format(model=model) ) return getattr(model, manager_name)
[ "def", "get_history_manager_for_model", "(", "model", ")", ":", "try", ":", "manager_name", "=", "model", ".", "_meta", ".", "simple_history_manager_attribute", "except", "AttributeError", ":", "raise", "NotHistoricalModelError", "(", "\"Cannot find a historical model for {...
Return the history manager for a given app model.
[ "Return", "the", "history", "manager", "for", "a", "given", "app", "model", "." ]
85758ecfe608279508a3fb5b71654d3e202eb63d
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/utils.py#L23-L31
242,648
sony/nnabla
python/src/nnabla/parameter.py
pop_parameter
def pop_parameter(key): '''Remove and get parameter by key. Args: key(str): Key of parameter. Returns: ~nnabla.Variable Parameter if key found, otherwise None. ''' names = key.split('/') if len(names) > 1: with parameter_scope(names[0]): return pop_parameter('/'.join(names[1:])) global current_scope param = current_scope.get(key, None) if param is not None: del current_scope[key] return param
python
def pop_parameter(key): '''Remove and get parameter by key. Args: key(str): Key of parameter. Returns: ~nnabla.Variable Parameter if key found, otherwise None. ''' names = key.split('/') if len(names) > 1: with parameter_scope(names[0]): return pop_parameter('/'.join(names[1:])) global current_scope param = current_scope.get(key, None) if param is not None: del current_scope[key] return param
[ "def", "pop_parameter", "(", "key", ")", ":", "names", "=", "key", ".", "split", "(", "'/'", ")", "if", "len", "(", "names", ")", ">", "1", ":", "with", "parameter_scope", "(", "names", "[", "0", "]", ")", ":", "return", "pop_parameter", "(", "'/'"...
Remove and get parameter by key. Args: key(str): Key of parameter. Returns: ~nnabla.Variable Parameter if key found, otherwise None.
[ "Remove", "and", "get", "parameter", "by", "key", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parameter.py#L149-L167
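A usage sketch showing the scoped-key behavior of pop_parameter; it relies on get_parameter_or_create, which is documented further below.

import nnabla as nn
from nnabla.parameter import get_parameter_or_create, pop_parameter

with nn.parameter_scope('conv1'):
    get_parameter_or_create('W', (16, 3, 3, 3))

w = pop_parameter('conv1/W')             # returns the Variable and removes it
assert pop_parameter('conv1/W') is None  # already removed, so None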
242,649
sony/nnabla
python/src/nnabla/parameter.py
get_parameter_or_create
def get_parameter_or_create(name, shape=None, initializer=None, need_grad=True, as_need_grad=None): """ Returns an existing parameter variable with the provided name. If a variable with the provided name does not exist, a new variable with the provided name is returned. Args: name(str): The name under the current scope. If it already exists, the name is queried from the parameter manager. shape (:obj:`tuple` of :obj:`int`): Shape of created parameter. The shape of the specified parameter must match with this shape. The default is None which is only valid if initializer is given as an :obj:`numpy.ndarray`. initializer (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): An initialization function to be applied to the parameter. :obj:`numpy.ndarray` can also be given to initialize parameters from numpy array data. need_grad (bool): Register the parameter with the specified ``need_grad`` flag. The default is True. If the flag is different from the previously specified one, the flag will be overwritten, but the values will be kept. as_need_grad (bool): Get a parameter variable with the specified ``need_grad`` flag. Note that this doesn't overwrite the flag of the registered parameter variable with the provided name. Instead, if the given flag mismatches with the previously registered ``need_grad`` flag, it returns a new variable referring to the same array contents but with ``need_grad=as_need_grad``. """ names = name.split('/') if len(names) > 1: with parameter_scope(names[0]): return get_parameter_or_create('/'.join(names[1:]), shape, initializer, need_grad, as_need_grad) param = get_parameter(names[0]) if param is None: class VariableInfo: pass info = VariableInfo() info.initializer = initializer if initializer is not None: if isinstance(initializer, numpy.ndarray): # numpy init param = nn.Variable(initializer.shape, need_grad=need_grad) param.d = initializer # initializer init elif isinstance(initializer, nn.initializer.BaseInitializer) or initializer.__name__ == "<lambda>": assert shape is not None param = nn.Variable(shape, need_grad=need_grad) param.d = initializer(shape=param.shape) else: raise ValueError( "`initializer` must be either the :obj:`numpy.ndarray` or an instance inherited from `nnabla.initializer.BaseInitializer`.") else: # default init assert shape is not None param = nn.Variable(shape, need_grad=need_grad) set_parameter(name, param) else: if param.shape != tuple(shape): raise ValueError( 'The size of existing parameter "{}" {} is different from the size of new parameter {}.\n' 'To clear all parameters, call nn.clear_parameters().'.format(name, param.shape, tuple(shape))) if need_grad != param.need_grad: param.need_grad = need_grad if as_need_grad is None: return param if param.need_grad != as_need_grad: param = param.get_unlinked_variable(need_grad=as_need_grad) return param
python
def get_parameter_or_create(name, shape=None, initializer=None, need_grad=True, as_need_grad=None): names = name.split('/') if len(names) > 1: with parameter_scope(names[0]): return get_parameter_or_create('/'.join(names[1:]), shape, initializer, need_grad, as_need_grad) param = get_parameter(names[0]) if param is None: class VariableInfo: pass info = VariableInfo() info.initializer = initializer if initializer is not None: if isinstance(initializer, numpy.ndarray): # numpy init param = nn.Variable(initializer.shape, need_grad=need_grad) param.d = initializer # initializer init elif isinstance(initializer, nn.initializer.BaseInitializer) or initializer.__name__ == "<lambda>": assert shape is not None param = nn.Variable(shape, need_grad=need_grad) param.d = initializer(shape=param.shape) else: raise ValueError( "`initializer` must be either the :obj:`numpy.ndarray` or an instance inherited from `nnabla.initializer.BaseInitializer`.") else: # default init assert shape is not None param = nn.Variable(shape, need_grad=need_grad) set_parameter(name, param) else: if param.shape != tuple(shape): raise ValueError( 'The size of existing parameter "{}" {} is different from the size of new parameter {}.\n' 'To clear all parameters, call nn.clear_parameters().'.format(name, param.shape, tuple(shape))) if need_grad != param.need_grad: param.need_grad = need_grad if as_need_grad is None: return param if param.need_grad != as_need_grad: param = param.get_unlinked_variable(need_grad=as_need_grad) return param
[ "def", "get_parameter_or_create", "(", "name", ",", "shape", "=", "None", ",", "initializer", "=", "None", ",", "need_grad", "=", "True", ",", "as_need_grad", "=", "None", ")", ":", "names", "=", "name", ".", "split", "(", "'/'", ")", "if", "len", "(",...
Returns an existing parameter variable with the provided name. If a variable with the provided name does not exist, a new variable with the provided name is returned. Args: name(str): The name under the current scope. If it already exists, the name is queried from the parameter manager. shape (:obj:`tuple` of :obj:`int`): Shape of created parameter. The shape of the specified parameter must match with this shape. The default is None which is only valid if initializer is given as an :obj:`numpy.ndarray`. initializer (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): An initialization function to be applied to the parameter. :obj:`numpy.ndarray` can also be given to initialize parameters from numpy array data. need_grad (bool): Register the parameter with the specified ``need_grad`` flag. The default is True. If the flag is different from the previously specified one, the flag will be overwritten, but the values will be kept. as_need_grad (bool): Get a parameter variable with the specified ``need_grad`` flag. Note that this doesn't overwrite the flag of the registered parameter variable with the provided name. Instead, if the given flag mismatches with the previously registered ``need_grad`` flag, it returns a new variable referring to the same array contents but with ``need_grad=as_need_grad``.
[ "Returns", "an", "existing", "parameter", "variable", "with", "the", "provided", "name", ".", "If", "a", "variable", "with", "the", "provided", "name", "does", "not", "exist", "a", "new", "variable", "with", "the", "provided", "name", "is", "returned", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parameter.py#L179-L245
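A short sketch of the main call patterns documented above; names and shapes are illustrative.

import numpy as np
from nnabla.initializer import NormalInitializer
from nnabla.parameter import get_parameter_or_create

w = get_parameter_or_create('w', (3, 3), NormalInitializer(0.01))
assert w is get_parameter_or_create('w', (3, 3))  # same Variable on re-query

b = get_parameter_or_create('b', initializer=np.zeros((3,), np.float32))

# as_need_grad returns a view with the requested flag, leaving 'w' untouched.
w_fixed = get_parameter_or_create('w', (3, 3), as_need_grad=False)
assert w_fixed.need_grad is False and w.need_grad is True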
242,650
sony/nnabla
python/src/nnabla/parameter.py
get_parameters
def get_parameters(params=None, path='', grad_only=True): """Get parameter Variables under the current parameter scope. Args: params (dict): Internal use. User doesn't set it manually. path (str): Internal use. User doesn't set it manually. grad_only (bool): If True, retrieve only parameters with need_grad=True; if False, retrieve all parameters under the current scope. Returns: dict: {:obj:`str` : :obj:`~nnabla.Variable`} """ global current_scope if params is None: params = OrderedDict() for k, v in iteritems(current_scope): if isinstance(v, dict): with parameter_scope(k): params = get_parameters( params, '/'.join([path, k]) if path else k, grad_only=grad_only) else: assert isinstance(v, nn.Variable) if not grad_only or v.need_grad: params['/'.join([path, k]) if path else k] = v return params
python
def get_parameters(params=None, path='', grad_only=True): global current_scope if params is None: params = OrderedDict() for k, v in iteritems(current_scope): if isinstance(v, dict): with parameter_scope(k): params = get_parameters( params, '/'.join([path, k]) if path else k, grad_only=grad_only) else: assert isinstance(v, nn.Variable) if not grad_only or v.need_grad: params['/'.join([path, k]) if path else k] = v return params
[ "def", "get_parameters", "(", "params", "=", "None", ",", "path", "=", "''", ",", "grad_only", "=", "True", ")", ":", "global", "current_scope", "if", "params", "is", "None", ":", "params", "=", "OrderedDict", "(", ")", "for", "k", ",", "v", "in", "i...
Get parameter Variables under the current parameter scope. Args: params (dict): Internal use. User doesn't set it manually. path (str): Internal use. User doesn't set it manually. grad_only (bool): If True, retrieve only parameters with need_grad=True; if False, retrieve all parameters under the current scope. Returns: dict: {:obj:`str` : :obj:`~nnabla.Variable`}
[ "Get", "parameter", "Variables", "under", "the", "current", "parameter", "scope", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parameter.py#L248-L275
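Typical use is through the top-level alias, as sketched below.

import nnabla as nn

params = nn.get_parameters()               # e.g. {'conv1/W': <Variable>, ...}
everything = nn.get_parameters(grad_only=False)  # includes need_grad=False
for name, v in params.items():
    print(name, v.shape, v.need_grad)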
242,651
sony/nnabla
python/src/nnabla/ext_utils.py
import_extension_module
def import_extension_module(ext_name): ''' Import an extension module by name. The extension modules are installed under the `nnabla_ext` package as namespace packages. All extension modules provide a unified set of APIs. Args: ext_name(str): Extension name. e.g. 'cpu', 'cuda', 'cudnn' etc. Returns: module A Python module of a particular NNabla extension. Example: .. code-block:: python ext = import_extension_module('cudnn') available_devices = ext.get_devices() print(available_devices) ext.device_synchronize(available_devices[0]) ext.clear_memory_cache() ''' import importlib try: return importlib.import_module('.' + ext_name, 'nnabla_ext') except ImportError as e: from nnabla import logger logger.error('Extension `{}` does not exist.'.format(ext_name)) raise e
python
def import_extension_module(ext_name): ''' Import an extension module by name. The extension modules are installed under the `nnabla_ext` package as namespace packages. All extension modules provide a unified set of APIs. Args: ext_name(str): Extension name. e.g. 'cpu', 'cuda', 'cudnn' etc. Returns: module A Python module of a particular NNabla extension. Example: .. code-block:: python ext = import_extension_module('cudnn') available_devices = ext.get_devices() print(available_devices) ext.device_synchronize(available_devices[0]) ext.clear_memory_cache() ''' import importlib try: return importlib.import_module('.' + ext_name, 'nnabla_ext') except ImportError as e: from nnabla import logger logger.error('Extension `{}` does not exist.'.format(ext_name)) raise e
[ "def", "import_extension_module", "(", "ext_name", ")", ":", "import", "importlib", "try", ":", "return", "importlib", ".", "import_module", "(", "'.'", "+", "ext_name", ",", "'nnabla_ext'", ")", "except", "ImportError", "as", "e", ":", "from", "nnabla", "impo...
Import an extension module by name. The extension modules are installed under the `nnabla_ext` package as namespace packages. All extension modules provide a unified set of APIs. Args: ext_name(str): Extension name. e.g. 'cpu', 'cuda', 'cudnn' etc. Returns: module A Python module of a particular NNabla extension. Example: .. code-block:: python ext = import_extension_module('cudnn') available_devices = ext.get_devices() print(available_devices) ext.device_synchronize(available_devices[0]) ext.clear_memory_cache()
[ "Import", "an", "extension", "module", "by", "name", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/ext_utils.py#L20-L50
242,652
sony/nnabla
python/src/nnabla/ext_utils.py
list_extensions
def list_extensions(): ''' List available extensions. Note: It may not work on some platforms/environments since it depends on the directory structure of the namespace packages. Returns: list of str Names of available extensions. ''' import nnabla_ext.cpu from os.path import dirname, join, realpath from os import listdir ext_dir = realpath((join(dirname(nnabla_ext.cpu.__file__), '..'))) return listdir(ext_dir)
python
def list_extensions(): ''' List available extensions. Note: It may not work on some platforms/environments since it depends on the directory structure of the namespace packages. Returns: list of str Names of available extensions. ''' import nnabla_ext.cpu from os.path import dirname, join, realpath from os import listdir ext_dir = realpath((join(dirname(nnabla_ext.cpu.__file__), '..'))) return listdir(ext_dir)
[ "def", "list_extensions", "(", ")", ":", "import", "nnabla_ext", ".", "cpu", "from", "os", ".", "path", "import", "dirname", ",", "join", ",", "realpath", "from", "os", "import", "listdir", "ext_dir", "=", "realpath", "(", "(", "join", "(", "dirname", "(...
List available extensions. Note: It may not work on some platforms/environments since it depends on the directory structure of the namespace packages. Returns: list of str Names of available extensions.
[ "List", "up", "available", "extensions", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/ext_utils.py#L53-L69
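A minimal sketch combining the two helpers above; it assumes at least the `nnabla_ext.cpu` namespace package is installed so the directory lookup succeeds.

.. code-block:: python

    from nnabla.ext_utils import list_extensions, import_extension_module

    # Enumerate installed extension packages and import each one.
    for name in list_extensions():
        ext = import_extension_module(name)
        print(name, ext)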
242,653
sony/nnabla
python/src/nnabla/utils/image_utils/pil_utils.py
imsave
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True): """ Save image by pillow module. Currently, pillow supports only uint8 to save. Args: path (str): output filename img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default. channel_first (bool): This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is considered as (height, width, channel) as_uint16 (bool): In this backend, this argument must be False because pillow does not support uint16. If True, an exception will be raised. auto_scale (bool) : Whether to upscale pixel values or not. If you want to save a float image, this argument must be True. In pillow backend, only float ([0, 1]) to uint8 ([0, 255]) is supported. """ img = _imsave_before(img, channel_first, auto_scale) if img.dtype == np.uint16 or as_uint16: raise ValueError("Pillow only supports saving uint8 images. Cast img to uint8. " "If you want to save the image as uint16, install pypng or cv2, " "and nnabla.utils.image_utils will automatically change the backend to use these modules.") if auto_scale and img.dtype != np.uint8: img = (img * 255).astype(np.uint8) Image.fromarray(img).save(path)
python
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True): img = _imsave_before(img, channel_first, auto_scale) if img.dtype == np.uint16 or as_uint16: raise ValueError("Pillow only supports saving uint8 images. Cast img to uint8. " "If you want to save the image as uint16, install pypng or cv2, " "and nnabla.utils.image_utils will automatically change the backend to use these modules.") if auto_scale and img.dtype != np.uint8: img = (img * 255).astype(np.uint8) Image.fromarray(img).save(path)
[ "def", "imsave", "(", "path", ",", "img", ",", "channel_first", "=", "False", ",", "as_uint16", "=", "False", ",", "auto_scale", "=", "True", ")", ":", "img", "=", "_imsave_before", "(", "img", ",", "channel_first", ",", "auto_scale", ")", "if", "img", ...
Save image by pillow module. Currently, pillow supports only uint8 to save. Args: path (str): output filename img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default. channel_first (bool): This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is considered as (height, width, channel) as_uint16 (bool): In this backend, this argument must be False because pillow does not support uint16. If True, an exception will be raised. auto_scale (bool) : Whether to upscale pixel values or not. If you want to save a float image, this argument must be True. In pillow backend, only float ([0, 1]) to uint8 ([0, 255]) is supported.
[ "Save", "image", "by", "pillow", "module", ".", "Currently", "pillow", "supports", "only", "uint8", "to", "save", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/pil_utils.py#L118-L147
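A short usage sketch via the backend-dispatching wrapper `nnabla.utils.image_utils.imsave`; with only Pillow installed this routes to the pil backend above, so float input must stay in [0, 1] and is scaled to uint8. The output filename is a placeholder.

.. code-block:: python

    import numpy as np
    from nnabla.utils.image_utils import imsave

    img = np.random.rand(64, 64, 3)          # float image in [0, 1]
    imsave("out.png", img, auto_scale=True)  # written as uint8 in [0, 255]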
242,654
sony/nnabla
python/src/nnabla/utils/nnp_graph.py
NnpLoader.get_network
def get_network(self, name, batch_size=None, callback=None): '''Create a variable graph of a given network by name Returns: NnpNetwork ''' network_proto = nnabla_pb2.Network() network_proto.CopyFrom(self.network_dict[name]) return NnpNetwork(network_proto, self._params, batch_size, callback=callback)
python
def get_network(self, name, batch_size=None, callback=None): '''Create a variable graph of a given network by name Returns: NnpNetwork ''' network_proto = nnabla_pb2.Network() network_proto.CopyFrom(self.network_dict[name]) return NnpNetwork(network_proto, self._params, batch_size, callback=callback)
[ "def", "get_network", "(", "self", ",", "name", ",", "batch_size", "=", "None", ",", "callback", "=", "None", ")", ":", "network_proto", "=", "nnabla_pb2", ".", "Network", "(", ")", "network_proto", ".", "CopyFrom", "(", "self", ".", "network_dict", "[", ...
Create a variable graph of a given network by name Returns: NnpNetwork
[ "Create", "a", "variable", "graph", "given", "network", "by", "name" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/nnp_graph.py#L463-L471
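A hedged loading sketch: the file name is a placeholder, and `get_network_names` is assumed here to list the networks stored in the NNP archive, with `inputs`/`outputs` dicts exposed on the returned NnpNetwork.

.. code-block:: python

    from nnabla.utils.nnp_graph import NnpLoader

    nnp = NnpLoader("model.nnp")               # hypothetical file
    name = nnp.get_network_names()[0]
    net = nnp.get_network(name, batch_size=1)
    x = list(net.inputs.values())[0]           # input Variable
    y = list(net.outputs.values())[0]          # output Variable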
242,655
sony/nnabla
python/src/nnabla/utils/converter/onnx/importer.py
set_function_name
def set_function_name(func, node_name, base_name, func_counter): """Set a unique name for the function""" # NNabla requires each function to have a unique name # so we generate one here. func.name, count = generate_function_name(func.type, base_name, node_name, func_counter) update_function_counter(func.type, func_counter, count)
python
def set_function_name(func, node_name, base_name, func_counter): # NNabla requires each function to have a unique name # so we generate one here. func.name, count = generate_function_name(func.type, base_name, node_name, func_counter) update_function_counter(func.type, func_counter, count)
[ "def", "set_function_name", "(", "func", ",", "node_name", ",", "base_name", ",", "func_counter", ")", ":", "# NNabla requires each function to have a unique name", "# so we generate one here.", "func", ".", "name", ",", "count", "=", "generate_function_name", "(", "func"...
Set a unique name for the function
[ "Set", "a", "sufficient", "name", "for", "the", "function" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/importer.py#L153-L159
242,656
sony/nnabla
python/src/nnabla/utils/converter/onnx/importer.py
generate_transpose
def generate_transpose(node_name, in_name, out_name, axes, base_name, func_counter): """Generate a Transpose operator to transpose the specified buffer. """ trans = nnabla_pb2.Function() trans.type = "Transpose" set_function_name(trans, node_name, base_name, func_counter) trans.input.extend([in_name]) trans.output.extend([out_name]) tp = trans.transpose_param tp.axes.extend(axes) return trans
python
def generate_transpose(node_name, in_name, out_name, axes, base_name, func_counter): trans = nnabla_pb2.Function() trans.type = "Transpose" set_function_name(trans, node_name, base_name, func_counter) trans.input.extend([in_name]) trans.output.extend([out_name]) tp = trans.transpose_param tp.axes.extend(axes) return trans
[ "def", "generate_transpose", "(", "node_name", ",", "in_name", ",", "out_name", ",", "axes", ",", "base_name", ",", "func_counter", ")", ":", "trans", "=", "nnabla_pb2", ".", "Function", "(", ")", "trans", ".", "type", "=", "\"Transpose\"", "set_function_name"...
Generate a Transpose operator to transpose the specified buffer.
[ "Generate", "a", "Transpose", "operator", "to", "transpose", "the", "specified", "buffer", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/importer.py#L162-L172
242,657
sony/nnabla
python/src/nnabla/utils/converter/onnx/importer.py
generate_broadcast_to
def generate_broadcast_to(node_name, x, y, out_name, axis, base_name, func_counter): """Generate a BroadcastTo operator to broadcast the specified buffer""" bt = nnabla_pb2.Function() bt.type = "BroadcastTo" set_function_name(bt, node_name, base_name, func_counter) bt.input.extend([x, y]) bt.output.extend([out_name]) btp = bt.broadcast_to_param btp.axis = axis return bt
python
def generate_broadcast_to(node_name, x, y, out_name, axis, base_name, func_counter): bt = nnabla_pb2.Function() bt.type = "BroadcastTo" set_function_name(bt, node_name, base_name, func_counter) bt.input.extend([x, y]) bt.output.extend([out_name]) btp = bt.broadcast_to_param btp.axis = axis return bt
[ "def", "generate_broadcast_to", "(", "node_name", ",", "x", ",", "y", ",", "out_name", ",", "axis", ",", "base_name", ",", "func_counter", ")", ":", "bt", "=", "nnabla_pb2", ".", "Function", "(", ")", "bt", ".", "type", "=", "\"BroadcastTo\"", "set_functio...
Generate a BroadcastTo operator to broadcast the specified buffer
[ "Generate", "a", "BroadcastTo", "operator", "to", "brodcast", "specified", "buffer" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/importer.py#L175-L184
242,658
sony/nnabla
python/src/nnabla/utils/converter/onnx/importer.py
convert_parameter_shape
def convert_parameter_shape(pb): """Convert the shape of some parameters so they fit NNabla's requirements. We do this as a post-conversion step because in the future we may be able to delete the whole conversion if NNabla's code gets changed.""" if len(pb.network) != 1: raise ValueError( "NNP with more than a single network is currently not supported") net = pb.network[0] batch_norm_constants = [] for f in net.function: if f.type == "BatchNormalization": # BatchNormalization in ONNX requires the scale, bias, mean, and variance inputs to be # one dimensional (https://github.com/onnx/onnx/blob/master/docs/Operators.md#batchnormalization). # However in NNabla these inputs must have a specific shape that matches the input shape. # For example if the input shape is (1,3,3,3), the above variables must have the shape (1,3,1,1) and not (3). # (1,3,1,1) is actually the same as a one-dimensional tensor of size 3, # but NNabla's check currently does not allow this. # Thus, we convert the shape of the above inputs so we can pass NNabla's check. # If NNabla relaxes the shape check, we should be able to remove this conversion. # copy all input names for scale, bias, mean, variance batch_norm_constants.extend(f.input[1:5]) # This loop should be fairly slow since we loop through all variables and parameters per constant for c in batch_norm_constants: # Reshape all BatchNormalization constant inputs assuming the size is (1,size,1,1) for v in net.variable: if v.name == c: size = v.shape.dim[0] del v.shape.dim[:] v.shape.dim.extend([1, size, 1, 1]) break for p in pb.parameter: if p.variable_name == c: size = p.shape.dim[0] del p.shape.dim[:] p.shape.dim.extend([1, size, 1, 1]) break
python
def convert_parameter_shape(pb): if len(pb.network) != 1: raise ValueError( "NNP with more than a single network is currently not supported") net = pb.network[0] batch_norm_constants = [] for f in net.function: if f.type == "BatchNormalization": # BatchNormalization in ONNX requires the scale, bias, mean, and variance inputs to be # one dimensional (https://github.com/onnx/onnx/blob/master/docs/Operators.md#batchnormalization). # However in NNabla these inputs must have a specific shape that matches the input shape. # For example if the input shape is (1,3,3,3), the above variables must have the shape (1,3,1,1) and not (3). # (1,3,1,1) is actually the same as a one-dimensional tensor of size 3, # but NNabla's check currently does not allow this. # Thus, we convert the shape of the above inputs so we can pass NNabla's check. # If NNabla relaxes the shape check, we should be able to remove this conversion. # copy all input names for scale, bias, mean, variance batch_norm_constants.extend(f.input[1:5]) # This loop should be fairly slow since we loop through all variables and parameters per constant for c in batch_norm_constants: # Reshape all BatchNormalization constant inputs assuming the size is (1,size,1,1) for v in net.variable: if v.name == c: size = v.shape.dim[0] del v.shape.dim[:] v.shape.dim.extend([1, size, 1, 1]) break for p in pb.parameter: if p.variable_name == c: size = p.shape.dim[0] del p.shape.dim[:] p.shape.dim.extend([1, size, 1, 1]) break
[ "def", "convert_parameter_shape", "(", "pb", ")", ":", "if", "len", "(", "pb", ".", "network", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"NNP with more then a single network is currently not supported\"", ")", "net", "=", "pb", ".", "network", "[", "0",...
Convert the shape of some parameters so they fit NNabla's requirements. We do this as a post-conversion step because in the future we may be able to delete the whole conversion if NNabla's code gets changed.
[ "Convert", "the", "shape", "of", "some", "parameters", "so", "they", "fit", "NNabla", "s", "requirements", ".", "We", "do", "this", "as", "a", "post", "conversion", "because", "in", "the", "future", "we", "may", "be", "able", "to", "delete", "the", "whol...
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/importer.py#L362-L398
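A plain NumPy sketch of the reshape performed above: a one-dimensional BatchNormalization parameter of size C becomes shape (1, C, 1, 1), which carries the same data but passes NNabla's shape check.

.. code-block:: python

    import numpy as np

    scale = np.ones(3)                       # ONNX-style one-dimensional input
    scale_nnabla = scale.reshape(1, 3, 1, 1)
    assert scale_nnabla.size == scale.size   # same elements, new layout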
242,659
sony/nnabla
python/src/nnabla/utils/converter/onnx/importer.py
add_tensor_as_parameter
def add_tensor_as_parameter(pb, tensor): """Add given tensor as a parameter""" p = pb.parameter.add() p.variable_name = tensor.name p.shape.dim.extend(tensor.dims) if tensor.data_type == TensorProto.FLOAT: # convert raw bytestream to floating points if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.float32)) elif len(tensor.float_data) > 0: p.data.extend(tensor.float_data) else: raise ValueError("float data not found for {}".format(tensor.name)) elif tensor.data_type == TensorProto.INT32: # convert raw bytestream to integer if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.int32)) elif len(tensor.int32_data) > 0: p.data.extend(tensor.int32_data) else: raise ValueError("int32 data not found for {}".format(tensor.name)) elif tensor.data_type == TensorProto.INT64: # convert raw bytestream to integer if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.int64)) elif len(tensor.int64_data) > 0: p.data.extend(tensor.int64_data) else: raise ValueError("int64 data not found for {}".format(tensor.name)) elif tensor.data_type == TensorProto.BOOL: if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.bool)) else: raise ValueError("bool data not found for {}".format(tensor.name)) else: raise ValueError("Unsupported tensor data type for {}: {}" .format(tensor.name, tensor.data_type)) p.need_grad = False
python
def add_tensor_as_parameter(pb, tensor): p = pb.parameter.add() p.variable_name = tensor.name p.shape.dim.extend(tensor.dims) if tensor.data_type == TensorProto.FLOAT: # convert raw bytestream to floating points if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.float32)) elif len(tensor.float_data) > 0: p.data.extend(tensor.float_data) else: raise ValueError("float data not found for {}".format(tensor.name)) elif tensor.data_type == TensorProto.INT32: # convert raw bytestream to integer if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.int32)) elif len(tensor.int32_data) > 0: p.data.extend(tensor.int32_data) else: raise ValueError("int32 data not found for {}".format(tensor.name)) elif tensor.data_type == TensorProto.INT64: # convert raw bytestream to integer if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.int64)) elif len(tensor.int64_data) > 0: p.data.extend(tensor.int64_data) else: raise ValueError("int64 data not found for {}".format(tensor.name)) elif tensor.data_type == TensorProto.BOOL: if tensor.raw_data: p.data.extend(np.fromstring(tensor.raw_data, dtype=np.bool)) else: raise ValueError("bool data not found for {}".format(tensor.name)) else: raise ValueError("Unsupported tensor data type for {}: {}" .format(tensor.name, tensor.data_type)) p.need_grad = False
[ "def", "add_tensor_as_parameter", "(", "pb", ",", "tensor", ")", ":", "p", "=", "pb", ".", "parameter", ".", "add", "(", ")", "p", ".", "variable_name", "=", "tensor", ".", "name", "p", ".", "shape", ".", "dim", ".", "extend", "(", "tensor", ".", "...
Add given tensor as a parameter
[ "Add", "given", "tensor", "as", "a", "parameter" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/importer.py#L401-L439
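A sketch of the two float encodings the branches above distinguish, built with `onnx.helper.make_tensor` (requires the `onnx` package); the tensor names are placeholders.

.. code-block:: python

    import numpy as np
    from onnx import helper, TensorProto

    # Values stored in the typed field (float_data).
    dense = helper.make_tensor("w", TensorProto.FLOAT, dims=[2], vals=[1.0, 2.0])
    # Values stored as a raw byte string (raw_data).
    raw = helper.make_tensor("b", TensorProto.FLOAT, dims=[2],
                             vals=np.array([1.0, 2.0], np.float32).tobytes(),
                             raw=True)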
242,660
sony/nnabla
python/src/nnabla/utils/converter/onnx/importer.py
OnnxImporter.BroadcastOperator
def BroadcastOperator(self, func_name, func_list, n): """Converts a broadcasting operator to a composite with BroadcastTo""" broadcasting = False broadcast_axis = -1 func = self.generate_default_function(func_name, n) for attr in n.attribute: if attr.name == "axis": if attr.type != AttributeProto.INT: raise ValueError( "Only INT is supported for axis in {} op_type".format(n.op_type)) broadcast_axis = attr.i elif attr.name == "broadcast": if attr.type != AttributeProto.INT: raise ValueError( "Only INT is supported for broadcast in {} op_type".format(n.op_type)) if attr.i == 1: broadcasting = True else: raise ValueError("Unsupported attribute {} was specified at {}" .format(attr.name, n.op_type)) if not broadcasting: input0_shape = self.get_func_input_shape(func.input[0]) input1_shape = self.get_func_input_shape(func.input[1]) output_shape = [] for i in range(len(input0_shape)): output_shape.append(max(input0_shape[i], input1_shape[i])) self._shape_output[func.output[0]] = output_shape func_list.append(func) return # Create a BroadcastTo operator to broadcast input B b_idx = 1 # B is the second input broadcasted_postfix = "_broadcasted" input = n.input[:] bin = n.input[b_idx] bout = bin+broadcasted_postfix bt = generate_broadcast_to(n.name, n.input[0], bin, bout, broadcast_axis, self._graph.name, self._func_counter) self._shape_output[bout] = self.get_func_input_shape(n.input[0]) func_list.append(bt) input[b_idx] = bout # rewire input to broadcasted input # update input with the converted inputs del func.input[:] func.input.extend(input) self._shape_output[func.output[0]] = self._shape_output[bout] func_list.append(func)
python
def BroadcastOperator(self, func_name, func_list, n): broadcasting = False broadcast_axis = -1 func = self.generate_default_function(func_name, n) for attr in n.attribute: if attr.name == "axis": if attr.type != AttributeProto.INT: raise ValueError( "Only INT is supported for axis in {} op_type".format(n.op_type)) broadcast_axis = attr.i elif attr.name == "broadcast": if attr.type != AttributeProto.INT: raise ValueError( "Only INT is supported for broadcast in {} op_type".format(n.op_type)) if attr.i == 1: broadcasting = True else: raise ValueError("Unsupported attribute {} was specified at {}" .format(attr.name, n.op_type)) if not broadcasting: input0_shape = self.get_func_input_shape(func.input[0]) input1_shape = self.get_func_input_shape(func.input[1]) output_shape = [] for i in range(len(input0_shape)): output_shape.append(max(input0_shape[i], input1_shape[i])) self._shape_output[func.output[0]] = output_shape func_list.append(func) return # Create a BroadcastTo operator to broadcast input B b_idx = 1 # B is the second input broadcasted_postfix = "_broadcasted" input = n.input[:] bin = n.input[b_idx] bout = bin+broadcasted_postfix bt = generate_broadcast_to(n.name, n.input[0], bin, bout, broadcast_axis, self._graph.name, self._func_counter) self._shape_output[bout] = self.get_func_input_shape(n.input[0]) func_list.append(bt) input[b_idx] = bout # rewire input to broadcasted input # update input with the converted inputs del func.input[:] func.input.extend(input) self._shape_output[func.output[0]] = self._shape_output[bout] func_list.append(func)
[ "def", "BroadcastOperator", "(", "self", ",", "func_name", ",", "func_list", ",", "n", ")", ":", "broadcasting", "=", "False", "broadcast_axis", "=", "-", "1", "func", "=", "self", ".", "generate_default_function", "(", "func_name", ",", "n", ")", "for", "...
Converts a broadcasting operator to a composite with BroadcastTo
[ "Converts", "a", "broadcasting", "operator", "to", "a", "composite", "with", "BroadcastTo" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/importer.py#L889-L933
242,661
sony/nnabla
python/src/nnabla/utils/image_utils/pypng_utils.py
imread
def imread(path, grayscale=False, size=None, interpolate="bilinear", channel_first=False, as_uint16=False, num_channels=-1): """ Read image by pypng module. Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tuple of int): (width, height). If None, output img shape depends on the file to read. channel_first (bool): This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If True, this function reads the image as uint16. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray """ _imread_before(grayscale, num_channels) f = path if hasattr(path, "read") else open(path, "rb") r = png.Reader(file=f) width, height, pixels, metadata = r.asDirect() bit_depth = metadata.get("bitdepth") if bit_depth not in [8, 16]: raise ValueError("The bit-depth of the image you want to read is unsupported ({} bit). " "Currently, pypng backend's imread supports only [8, 16] bit-depth. " "The path for this image is {}".format(bit_depth, path)) img = read_result_to_ndarray( pixels, width, height, metadata, grayscale, as_uint16, num_channels) return _imread_after(img, size, interpolate, channel_first, imresize)
python
def imread(path, grayscale=False, size=None, interpolate="bilinear", channel_first=False, as_uint16=False, num_channels=-1): _imread_before(grayscale, num_channels) f = path if hasattr(path, "read") else open(path, "rb") r = png.Reader(file=f) width, height, pixels, metadata = r.asDirect() bit_depth = metadata.get("bitdepth") if bit_depth not in [8, 16]: raise ValueError("The bit-depth of the image you want to read is unsupported ({} bit). " "Currently, pypng backend's imread supports only [8, 16] bit-depth. " "The path for this image is {}".format(bit_depth, path)) img = read_result_to_ndarray( pixels, width, height, metadata, grayscale, as_uint16, num_channels) return _imread_after(img, size, interpolate, channel_first, imresize)
[ "def", "imread", "(", "path", ",", "grayscale", "=", "False", ",", "size", "=", "None", ",", "interpolate", "=", "\"bilinear\"", ",", "channel_first", "=", "False", ",", "as_uint16", "=", "False", ",", "num_channels", "=", "-", "1", ")", ":", "_imread_be...
Read image by pypng module. Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tuple of int): (width, height). If None, output img shape depends on the file to read. channel_first (bool): This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If True, this function reads the image as uint16. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray
[ "Read", "image", "by", "pypng", "module", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/pypng_utils.py#L79-L122
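A usage sketch through the dispatching wrapper; with pypng installed this takes the code path above, and `as_uint16=True` preserves 16-bit pixel data. The filename is a placeholder.

.. code-block:: python

    from nnabla.utils.image_utils import imread

    img = imread("sample16.png", as_uint16=True, channel_first=True)
    print(img.dtype, img.shape)  # e.g. uint16, (3, H, W) for an RGB input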
242,662
sony/nnabla
python/src/nnabla/utils/image_utils/pypng_utils.py
imsave
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True): """ Save image by pypng module. Args: path (str): output filename img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default. channel_first: This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel) as_uint16 (bool): If True, save image as uint16. auto_scale (bool) : Whether to upscale pixel values or not. If you want to save a float image, this argument must be True. In the pypng backend, all of the below are supported. - float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False) - float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True) - uint8 to uint16 (if img.dtype==np.uint8 and upscale==True and as_uint16==True) """ img = _imsave_before(img, channel_first, auto_scale) if auto_scale: img = upscale_pixel_intensity(img, as_uint16) img = check_type_and_cast_if_necessary(img, as_uint16) bitdepth = 8 if img.dtype == np.uint8 else 16 grayscale = True if len(img.shape) == 2 or ( len(img.shape) == 3 and img.shape[-1] == 1) else False writer = png.Writer(img.shape[1], img.shape[0], greyscale=grayscale, bitdepth=bitdepth) writer.write(open(path, "wb"), img.reshape(img.shape[0], -1))
python
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True): img = _imsave_before(img, channel_first, auto_scale) if auto_scale: img = upscale_pixel_intensity(img, as_uint16) img = check_type_and_cast_if_necessary(img, as_uint16) bitdepth = 8 if img.dtype == np.uint8 else 16 grayscale = True if len(img.shape) == 2 or ( len(img.shape) == 3 and img.shape[-1] == 1) else False writer = png.Writer(img.shape[1], img.shape[0], greyscale=grayscale, bitdepth=bitdepth) writer.write(open(path, "wb"), img.reshape(img.shape[0], -1))
[ "def", "imsave", "(", "path", ",", "img", ",", "channel_first", "=", "False", ",", "as_uint16", "=", "False", ",", "auto_scale", "=", "True", ")", ":", "img", "=", "_imsave_before", "(", "img", ",", "channel_first", ",", "auto_scale", ")", "if", "auto_sc...
Save image by pypng module. Args: path (str): output filename img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default. channel_first: This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel) as_uint16 (bool): If True, save image as uint16. auto_scale (bool) : Whether to upscale pixel values or not. If you want to save a float image, this argument must be True. In the pypng backend, all of the below are supported. - float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False) - float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True) - uint8 to uint16 (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
[ "Save", "image", "by", "pypng", "module", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/pypng_utils.py#L125-L160
242,663
sony/nnabla
python/src/nnabla/context.py
context_scope
def context_scope(ctx): """ Context as Python context. .. code-block:: python import nnabla as nn import nnabla.functions as F x = nn.Variable([2, 3 ,4]) ctx = nnabla_ext.cuda.context('0') with context_scope(ctx): # Inside with scope, the specified context is used. with parameter_scope('w1'): l1 = F.relu(F.affine(x, 64)) with parameter_scope('w2'): l2 = F.relu(F.affine(x, 64)) """ global current_ctx global context_level context_level += 1 prev_context = current_ctx current_ctx = ctx try: yield finally: context_level -= 1 current_ctx = prev_context
python
def context_scope(ctx): global current_ctx global context_level context_level += 1 prev_context = current_ctx current_ctx = ctx try: yield finally: context_level -= 1 current_ctx = prev_context
[ "def", "context_scope", "(", "ctx", ")", ":", "global", "current_ctx", "global", "context_level", "context_level", "+=", "1", "prev_context", "=", "current_ctx", "current_ctx", "=", "ctx", "try", ":", "yield", "finally", ":", "context_level", "-=", "1", "current...
Context as Python context. .. code-block:: python import nnabla as nn import nnabla.functions as F x = nn.Variable([2, 3 ,4]) ctx = nnabla_ext.cuda.context('0') with context_scope(ctx): # Inside with scope, the specified context is used. with parameter_scope('w1'): l1 = F.relu(F.affine(x, 64)) with parameter_scope('w2'): l2 = F.relu(F.affine(x, 64))
[ "Context", "as", "Python", "context", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/context.py#L29-L56
242,664
sony/nnabla
python/src/nnabla/utils/converter/onnx/exporter.py
generate_scalar_constant
def generate_scalar_constant(output_name, tensor_name, scalar): """Convert a scalar value to a Constant buffer. This is mainly used for xxScalar operators.""" t = onnx.helper.make_tensor(tensor_name, data_type=TensorProto.FLOAT, dims=[1], vals=[scalar]) c = onnx.helper.make_node("Constant", [], [output_name], value=t) return c
python
def generate_scalar_constant(output_name, tensor_name, scalar): t = onnx.helper.make_tensor(tensor_name, data_type=TensorProto.FLOAT, dims=[1], vals=[scalar]) c = onnx.helper.make_node("Constant", [], [output_name], value=t) return c
[ "def", "generate_scalar_constant", "(", "output_name", ",", "tensor_name", ",", "scalar", ")", ":", "t", "=", "onnx", ".", "helper", ".", "make_tensor", "(", "tensor_name", ",", "data_type", "=", "TensorProto", ".", "FLOAT", ",", "dims", "=", "[", "1", "]"...
Convert a scalar value to a Constant buffer. This is mainly used for xxScalar operators.
[ "Convert", "a", "scalar", "value", "to", "a", "Constant", "buffer", ".", "This", "is", "mainly", "used", "for", "xxScalar", "operators", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/exporter.py#L42-L52
242,665
sony/nnabla
python/src/nnabla/utils/converter/onnx/exporter.py
replace_negative_size_with_batch_size
def replace_negative_size_with_batch_size(shape, batch_size): """Replace all dimensions that have negative values with the batch size""" sl = [] for d in shape.dim: if d < 0: # Negative size means batch size sl.append(batch_size) else: sl.append(d) out_shape = nnabla_pb2.Shape() out_shape.dim.extend(sl) return out_shape
python
def replace_negative_size_with_batch_size(shape, batch_size): sl = [] for d in shape.dim: if d < 0: # Negative size means batch size sl.append(batch_size) else: sl.append(d) out_shape = nnabla_pb2.Shape() out_shape.dim.extend(sl) return out_shape
[ "def", "replace_negative_size_with_batch_size", "(", "shape", ",", "batch_size", ")", ":", "sl", "=", "[", "]", "for", "d", "in", "shape", ".", "dim", ":", "if", "d", "<", "0", ":", "# Negative size means batch size", "sl", ".", "append", "(", "batch_size", ...
Replace all dimensions that have negative values with the batch size
[ "Replace", "all", "dimensions", "with", "negative", "values", "to", "batch", "size" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/exporter.py#L121-L132
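The same substitution in plain Python, assuming the dims are available as a list; -1 is the placeholder NNP uses for the batch dimension.

.. code-block:: python

    def substitute_batch(dims, batch_size):
        # Negative entries stand for the batch size.
        return [batch_size if d < 0 else d for d in dims]

    print(substitute_batch([-1, 3, 224, 224], 16))  # [16, 3, 224, 224]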
242,666
sony/nnabla
python/src/nnabla/utils/converter/onnx/exporter.py
OnnxExporter.BinarySigmoid
def BinarySigmoid(self, func): ''' Currently, caffe2 does not support this function. ''' n = onnx.helper.make_node( 'HardSigmoid', func.input, func.output, alpha=1.0, beta=0.0 ) return [n]
python
def BinarySigmoid(self, func): ''' Currently, caffe2 does not support this function. ''' n = onnx.helper.make_node( 'HardSigmoid', func.input, func.output, alpha=1.0, beta=0.0 ) return [n]
[ "def", "BinarySigmoid", "(", "self", ",", "func", ")", ":", "n", "=", "onnx", ".", "helper", ".", "make_node", "(", "'HardSigmoid'", ",", "func", ".", "input", ",", "func", ".", "output", ",", "alpha", "=", "1.0", ",", "beta", "=", "0.0", ")", "ret...
Currently, caffe2 does not support this function.
[ "Currently", "caffe2", "does", "not", "support", "this", "function", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/exporter.py#L392-L403
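A numeric sketch of the substitution above: NNabla's BinarySigmoid outputs a hard step at zero, while ONNX HardSigmoid with alpha=1, beta=0 computes clip(x, 0, 1), so the two only differ on the (0, 1) ramp.

.. code-block:: python

    import numpy as np

    x = np.array([-2.0, -0.5, 0.5, 2.0])
    binary_sigmoid = (x > 0).astype(x.dtype)         # [0., 0., 1., 1.]
    hard_sigmoid = np.clip(1.0 * x + 0.0, 0.0, 1.0)  # [0., 0., 0.5, 1.]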
242,667
sony/nnabla
python/src/nnabla/experimental/graph_converters/sequential.py
SequentialConverter.convert
def convert(self, vroot, entry_variables): """Convert a given graph. Convert a given graph using the `converters` in the order of the registration, i.e., sequentially. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. """ for converter in self.converters: vroot = converter.convert(vroot, entry_variables) return vroot
python
def convert(self, vroot, entry_variables): for converter in self.converters: vroot = converter.convert(vroot, entry_variables) return vroot
[ "def", "convert", "(", "self", ",", "vroot", ",", "entry_variables", ")", ":", "for", "converter", "in", "self", ".", "converters", ":", "vroot", "=", "converter", ".", "convert", "(", "vroot", ",", "entry_variables", ")", "return", "vroot" ]
Convert a given graph. Convert a given graph using the `converters` in the order of the registration, i.e., sequentially. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
[ "Convert", "a", "given", "graph", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/graph_converters/sequential.py#L17-L29
242,668
sony/nnabla
python/src/nnabla/initializer.py
calc_normal_std_he_forward
def calc_normal_std_he_forward(inmaps, outmaps, kernel=(1, 1)): r"""Calculates the standard deviation proposed by He et al. .. math:: \sigma = \sqrt{\frac{2}{NK}} Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) s = I.calc_normal_std_he_forward(x.shape[1],64) w = I.NormalInitializer(s) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification. <https://arxiv.org/abs/1502.01852>`_ """ return np.sqrt(2. / (np.prod(kernel) * inmaps))
python
def calc_normal_std_he_forward(inmaps, outmaps, kernel=(1, 1)): r"""Calculates the standard deviation proposed by He et al. .. math:: \sigma = \sqrt{\frac{2}{NK}} Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) s = I.calc_normal_std_he_forward(x.shape[1],64) w = I.NormalInitializer(s) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification. <https://arxiv.org/abs/1502.01852>`_ """ return np.sqrt(2. / (np.prod(kernel) * inmaps))
[ "def", "calc_normal_std_he_forward", "(", "inmaps", ",", "outmaps", ",", "kernel", "=", "(", "1", ",", "1", ")", ")", ":", "return", "np", ".", "sqrt", "(", "2.", "/", "(", "np", ".", "prod", "(", "kernel", ")", "*", "inmaps", ")", ")" ]
r"""Calculates the standard deviation proposed by He et al. .. math:: \sigma = \sqrt{\frac{2}{NK}} Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) s = I.calc_normal_std_he_forward(x.shape[1],64) w = I.NormalInitializer(s) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification. <https://arxiv.org/abs/1502.01852>`_
[ "r", "Calculates", "the", "standard", "deviation", "proposed", "by", "He", "et", "al", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/initializer.py#L216-L249
242,669
sony/nnabla
python/src/nnabla/initializer.py
calc_normal_std_glorot
def calc_normal_std_glorot(inmaps, outmaps, kernel=(1, 1)): r"""Calculates the standard deviation proposed by Glorot et al. .. math:: \sigma = \sqrt{\frac{2}{NK + M}} Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) s = I.calc_normal_std_glorot(x.shape[1],64) w = I.NormalInitializer(s) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `Glorot and Bengio. Understanding the difficulty of training deep feedforward neural networks <http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_ """ return np.sqrt(2. / (np.prod(kernel) * inmaps + outmaps))
python
def calc_normal_std_glorot(inmaps, outmaps, kernel=(1, 1)): r"""Calculates the standard deviation proposed by Glorot et al. .. math:: \sigma = \sqrt{\frac{2}{NK + M}} Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) s = I.calc_normal_std_glorot(x.shape[1],64) w = I.NormalInitializer(s) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `Glorot and Bengio. Understanding the difficulty of training deep feedforward neural networks <http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_ """ return np.sqrt(2. / (np.prod(kernel) * inmaps + outmaps))
[ "def", "calc_normal_std_glorot", "(", "inmaps", ",", "outmaps", ",", "kernel", "=", "(", "1", ",", "1", ")", ")", ":", "return", "np", ".", "sqrt", "(", "2.", "/", "(", "np", ".", "prod", "(", "kernel", ")", "*", "inmaps", "+", "outmaps", ")", ")...
r"""Calculates the standard deviation proposed by Glorot et al. .. math:: \sigma = \sqrt{\frac{2}{NK + M}} Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) s = I.calc_normal_std_glorot(x.shape[1],64) w = I.NormalInitializer(s) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `Glorot and Bengio. Understanding the difficulty of training deep feedforward neural networks <http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
[ "r", "Calculates", "the", "standard", "deviation", "proposed", "by", "Glorot", "et", "al", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/initializer.py#L288-L321
242,670
sony/nnabla
python/src/nnabla/initializer.py
calc_uniform_lim_glorot
def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)): r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al. .. math:: b &= \sqrt{\frac{6}{NK + M}}\\ a &= -b Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64) w = I.UniformInitializer((lb,ub)) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `Glorot and Bengio. Understanding the difficulty of training deep feedforward neural networks <http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_ """ d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps)) return -d, d
python
def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)): r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al. .. math:: b &= \sqrt{\frac{6}{NK + M}}\\ a &= -b Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64) w = I.UniformInitializer((lb,ub)) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `Glorot and Bengio. Understanding the difficulty of training deep feedforward neural networks <http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_ """ d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps)) return -d, d
[ "def", "calc_uniform_lim_glorot", "(", "inmaps", ",", "outmaps", ",", "kernel", "=", "(", "1", ",", "1", ")", ")", ":", "d", "=", "np", ".", "sqrt", "(", "6.", "/", "(", "np", ".", "prod", "(", "kernel", ")", "*", "inmaps", "+", "outmaps", ")", ...
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al. .. math:: b &= \sqrt{\frac{6}{NK + M}}\\ a &= -b Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64) w = I.UniformInitializer((lb,ub)) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `Glorot and Bengio. Understanding the difficulty of training deep feedforward neural networks <http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
[ "r", "Calculates", "the", "lower", "bound", "and", "the", "upper", "bound", "of", "the", "uniform", "distribution", "proposed", "by", "Glorot", "et", "al", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/initializer.py#L324-L360
242,671
sony/nnabla
python/src/nnabla/utils/save.py
_get_unique_function_name
def _get_unique_function_name(function_type, functions): '''Get a unique function name. Args: function_type(str): Name of Function. Ex) Convolution, Affine functions(OrderedDict of (str, Function)) Returns: str A unique function name ''' function_name = function_name_base = function_type count = 2 while function_name in functions: function_name = '{}_{}'.format(function_name_base, count) count += 1 return function_name
python
def _get_unique_function_name(function_type, functions): '''Get a unique function name. Args: function_type(str): Name of Function. Ex) Convolution, Affine functions(OrderedDict of (str, Function)) Returns: str A unique function name ''' function_name = function_name_base = function_type count = 2 while function_name in functions: function_name = '{}_{}'.format(function_name_base, count) count += 1 return function_name
[ "def", "_get_unique_function_name", "(", "function_type", ",", "functions", ")", ":", "function_name", "=", "function_name_base", "=", "function_type", "count", "=", "2", "while", "function_name", "in", "functions", ":", "function_name", "=", "'{}_{}'", ".", "format...
Get a unique function name. Args: function_type(str): Name of Function. Ex) Convolution, Affine functions(OrderedDict of (str, Function)) Returns: str A unique function name
[ "Get", "a", "unique", "function", "name", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/save.py#L41-L56
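A small demonstration of the suffixing rule above, with a plain dict standing in for the functions OrderedDict: the first free suffix after a clash is taken.

.. code-block:: python

    existing = {"Affine": None, "Affine_2": None}
    name, count = "Affine", 2
    while name in existing:
        name = "Affine_{}".format(count)
        count += 1
    print(name)  # Affine_3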
242,672
sony/nnabla
python/src/nnabla/utils/save.py
_get_unique_variable_name
def _get_unique_variable_name(vname, variables): '''Get a unique variable name. Args: vname(str): A candidate name. variables(OrderedDict of str and Variable) Returns: str A unique variable name ''' count = 2 vname_base = vname while vname in variables: vname = '{}_{}'.format(vname_base, count) count += 1 return vname
python
def _get_unique_variable_name(vname, variables): '''Get a unique variable name. Args: vname(str): A candidate name. variables(OrderedDict of str and Variable) Returns: str A unique variable name ''' count = 2 vname_base = vname while vname in variables: vname = '{}_{}'.format(vname_base, count) count += 1 return vname
[ "def", "_get_unique_variable_name", "(", "vname", ",", "variables", ")", ":", "count", "=", "2", "vname_base", "=", "vname", "while", "vname", "in", "variables", ":", "vname", "=", "'{}_{}'", ".", "format", "(", "vname_base", ",", "count", ")", "count", "+...
Get a unique variable name. Args: vname(str): A candidate name. variables(OrderedDict of str and Variable) Returns: str A unique variable name
[ "Get", "a", "unique", "variable", "name", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/save.py#L59-L74
242,673
sony/nnabla
python/src/nnabla/functions.py
sum
def sum(x, axis=None, keepdims=False): """Reduction along axes with sum operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which the sum is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array. """ from .function_bases import sum as sum_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] return sum_base(x, axis, keepdims)
python
def sum(x, axis=None, keepdims=False): from .function_bases import sum as sum_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] return sum_base(x, axis, keepdims)
[ "def", "sum", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "from", ".", "function_bases", "import", "sum", "as", "sum_base", "if", "axis", "is", "None", ":", "axis", "=", "range", "(", "x", ".", "ndim", ")", "elif", ...
Reduction along axes with sum operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which the sum is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array.
[ "Reduction", "along", "axes", "with", "sum", "operation", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L21-L38
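A minimal sketch of the axis handling above; a tuple reduces several axes at once and `keepdims=True` keeps them as size-1 dimensions.

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    x = nn.Variable.from_numpy_array(np.ones((2, 3, 4)))
    y = F.sum(x, axis=(1, 2), keepdims=True)  # shape (2, 1, 1)
    y.forward()
    print(y.d.squeeze())  # [12. 12.]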
242,674
sony/nnabla
python/src/nnabla/functions.py
mean
def mean(x, axis=None, keepdims=False): """Reduction along axes with mean operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which mean is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array. """ from .function_bases import mean as mean_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] return mean_base(x, axis, keepdims)
python
def mean(x, axis=None, keepdims=False): from .function_bases import mean as mean_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] return mean_base(x, axis, keepdims)
[ "def", "mean", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "from", ".", "function_bases", "import", "mean", "as", "mean_base", "if", "axis", "is", "None", ":", "axis", "=", "range", "(", "x", ".", "ndim", ")", "elif...
Reduction along axes with mean operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which mean is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array.
[ "Reduction", "along", "axes", "with", "mean", "operation", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L41-L59
242,675
sony/nnabla
python/src/nnabla/functions.py
prod
def prod(x, axis=None, keepdims=False): """Reduction along axes with product operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which product is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array. Note: Backward computation is not accurate for an input containing zero values. """ from .function_bases import prod as prod_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] return prod_base(x, axis, keepdims)
python
def prod(x, axis=None, keepdims=False): from .function_bases import prod as prod_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] return prod_base(x, axis, keepdims)
[ "def", "prod", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "from", ".", "function_bases", "import", "prod", "as", "prod_base", "if", "axis", "is", "None", ":", "axis", "=", "range", "(", "x", ".", "ndim", ")", "elif...
Reduction along axes with product operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which product is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array. Note: Backward computation is not accurate for an input containing zero values.
[ "Reduction", "along", "axes", "with", "product", "operation", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L162-L183
242,676
sony/nnabla
python/src/nnabla/functions.py
reduce
def reduce(x, op='sum'): """Reduction function with given operation. Args: x (Variable): An input. op (str): 'sum' or 'mean'. Note: This is deprecated. Use ``mean`` or ``sum`` instead. """ import warnings warnings.warn( "Deprecated API. Use ``sum`` or ``mean`` instead.", DeprecationWarning) from .function_bases import reduce_sum, reduce_mean if op == 'sum': return reduce_sum(x) elif op == 'mean': return reduce_mean(x) raise ValueError()
python
def reduce(x, op='sum'): import warnings warnings.warn( "Deprecated API. Use ``sum`` or ``mean`` instead.", DeprecationWarning) from .function_bases import reduce_sum, reduce_mean if op == 'sum': return reduce_sum(x) elif op == 'mean': return reduce_mean(x) raise ValueError()
[ "def", "reduce", "(", "x", ",", "op", "=", "'sum'", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"Deprecated API. Use ``sum`` or ``mean`` instead.\"", ",", "DeprecationWarning", ")", "from", ".", "function_bases", "import", "reduce_sum", ",", "r...
Reduction function with given operation. Args: x (Variable): An input. op (str): 'sum' or 'mean'. Note: This is deprecated. Use ``mean`` or ``sum`` instead.
[ "Reduction", "function", "with", "given", "operation", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L186-L205
242,677
sony/nnabla
python/src/nnabla/functions.py
split
def split(x, axis=0): """ Split arrays at the specified axis. It returns a number of :obj:`~nnabla.Variable` s corresponding to the size of the given axis (i.e. ``x.shape[axis]``). Args: x(~nnabla.Variable): N-D array axis(int): Axis Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s See Also: :func:`nnabla.function_bases.split`. """ from .function_bases import split as split_base return split_base(x, axis, x.shape[axis])
python
def split(x, axis=0): from .function_bases import split as split_base return split_base(x, axis, x.shape[axis])
[ "def", "split", "(", "x", ",", "axis", "=", "0", ")", ":", "from", ".", "function_bases", "import", "split", "as", "split_base", "return", "split_base", "(", "x", ",", "axis", ",", "x", ".", "shape", "[", "axis", "]", ")" ]
Split arrays at the specified axis. It returns a number of :obj:`~nnabla.Variable` s corresponding to the size of the given axis (i.e. ``x.shape[axis]``). Args: x(~nnabla.Variable): N-D array axis(int): Axis Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s See Also: :func:`nnabla.function_bases.split`.
[ "Split", "arrays", "at", "the", "specified", "axis", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L208-L226
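A sketch of the output arity: splitting a (2, 3) array along axis 1 yields three Variables, each with the split axis removed.

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    x = nn.Variable.from_numpy_array(np.arange(6.0).reshape(2, 3))
    parts = F.split(x, axis=1)         # tuple of x.shape[1] == 3 Variables
    print(len(parts), parts[0].shape)  # 3 (2,)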
242,678
sony/nnabla
python/src/nnabla/functions.py
batch_normalization
def batch_normalization(x, beta, gamma, mean, variance, axes=[1], decay_rate=0.9, eps=1e-05, batch_stat=True, output_stat=False, n_outputs=None): r""" Batch normalization. .. math:: \begin{eqnarray} \mu &=& \frac{1}{M} \sum x_i \\ \sigma^2 &=& \frac{1}{M} \sum \left(x_i - \mu\right)^2 \\ \hat{x}_i &=& \frac{x_i - \mu}{\sqrt{\sigma^2 + \epsilon}} \\ y_i &=& \hat{x}_i \gamma + \beta. \end{eqnarray} At testing time, the mean and variance values used are those that were computed during training by moving average. References: * `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. <https://arxiv.org/abs/1502.03167>`_ Args: x(~nnabla.Variable): N-D array of input. beta(~nnabla.Variable): N-D array of beta which is learned. gamma(~nnabla.Variable): N-D array of gamma which is learned. mean(~nnabla.Variable): N-D array of running mean (modified during forward execution). variance(~nnabla.Variable): N-D array of running variance (modified during forward execution). axes(repeated int64): Axes along which mean and variance are taken. decay_rate(float): Decay rate of running mean and variance. eps(float): Tiny value to avoid zero division by std. batch_stat(bool): Use mini-batch statistics rather than running ones. output_stat(bool): If true, the batch statistics of mean and variance will be returned as Variables. They are also differentiable. Returns: Returns batch normalization output as :obj:`~nnabla.Variable`. If ``output_stat=True``, it also returns the mean and variance of the mini-batch * :obj:`~nnabla.Variable`: Output of the batch normalization * :obj:`~nnabla.Variable`: Mean (if ``output_stat=True``) * :obj:`~nnabla.Variable`: Variance (if ``output_stat=True``) See Also: ``nnabla.function_bases.batch_normalization``. """ from .function_bases import batch_normalization as batch_normalization_base n_outputs = 3 if output_stat else 1 assert batch_stat or (not output_stat) if batch_stat and (mean.parent or variance.parent) is not None: raise ValueError( "if batch_stat is True, mean and variance must not have a parent function") if len(axes) == 1: return batch_normalization_base(x, beta, gamma, mean, variance, axes=axes, decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, n_outputs=n_outputs) def transpose_and_reshape(x, axes): transposed = transpose(x, transpose_axes) return reshape(transposed, [rd(lambda x, y: x * y, transposed.shape[:len(axes)])] + list( transposed.shape[len(axes):])), transposed.shape def inverse_transpose_and_reshape(x, axes, variable_shape): un_reshaped = reshape( x, list(variable_shape[:len(axes)] + variable_shape[len(axes):])) return transpose(un_reshaped, inv_transpose_axes) def get_transpose_args(ndim, axes): transpose_axes = [i for i in list( axes)] + [i for i in range(ndim) if i not in list(axes)] inv_transpose_axes = np.argsort(transpose_axes).tolist() return transpose_axes, inv_transpose_axes transpose_axes, inv_transpose_axes = get_transpose_args(len(x.shape), axes) inp, transposed_inp_shape = transpose_and_reshape(x, axes) beta, transposed_beta_shape = transpose_and_reshape(beta, axes) gamma, transposed_gamma_shape = transpose_and_reshape(gamma, axes) mean, transposed_mean_shape = transpose_and_reshape(mean, axes) variance, transposed_variance_shape = transpose_and_reshape(variance, axes) if n_outputs == 1: out = batch_normalization_base(inp, beta, gamma, mean, variance, axes=[0], decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, n_outputs=n_outputs) return inverse_transpose_and_reshape(out, axes, transposed_inp_shape) out, mean, variance = batch_normalization_base(inp, beta, gamma, mean, variance, axes=[0], decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, n_outputs=n_outputs) out = inverse_transpose_and_reshape(out, axes, transposed_inp_shape) mean = inverse_transpose_and_reshape(mean, axes, transposed_mean_shape) variance = inverse_transpose_and_reshape( variance, axes, transposed_variance_shape) return out, mean, variance
python
def batch_normalization(x, beta, gamma, mean, variance, axes=[1], decay_rate=0.9, eps=1e-05, batch_stat=True, output_stat=False, n_outputs=None): r""" Batch normalization. .. math:: \begin{eqnarray} \mu &=& \frac{1}{M} \sum x_i \\ \sigma^2 &=& \frac{1}{M} \sum \left(x_i - \mu\right)^2 \\ \hat{x}_i &=& \frac{x_i - \mu}{\sqrt{\sigma^2 + \epsilon}} \\ y_i &=& \hat{x}_i \gamma + \beta. \end{eqnarray} At testing time, the mean and variance values used are those that were computed during training by moving average. References: * `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. <https://arxiv.org/abs/1502.03167>`_ Args: x(~nnabla.Variable): N-D array of input. beta(~nnabla.Variable): N-D array of beta which is learned. gamma(~nnabla.Variable): N-D array of gamma which is learned. mean(~nnabla.Variable): N-D array of running mean (modified during forward execution). variance(~nnabla.Variable): N-D array of running variance (modified during forward execution). axes(repeated int64): Axes along which the mean and variance are taken. decay_rate(float): Decay rate of running mean and variance. eps(float): Tiny value to avoid zero division by std. batch_stat(bool): Use mini-batch statistics rather than running ones. output_stat(bool): If true, the batch statistics of mean and variance will be returned as Variables. They are also differentiable. Returns: Returns batch normalization output as :obj:`~nnabla.Variable`. If ``output_stat=True``, it also returns the mean and variance of the mini-batch. * :obj:`~nnabla.Variable`: Output of the batch normalization * :obj:`~nnabla.Variable`: Mean (if ``output_stat=True``) * :obj:`~nnabla.Variable`: Variance (if ``output_stat=True``) See Also: ``nnabla.function_bases.batch_normalization``. 
""" from .function_bases import batch_normalization as batch_normalization_base n_outputs = 3 if output_stat else 1 assert batch_stat or (not output_stat) if batch_stat and (mean.parent or variance.parent) is not None: raise ValueError( "if batch_stat is True, mean and variable must not have a parent function") if len(axes) == 1: return batch_normalization_base(x, beta, gamma, mean, variance, axes=axes, decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, n_outputs=n_outputs) def transpose_and_reshape(x, axes): transposed = transpose(x, transpose_axes) return reshape(transposed, [rd(lambda x, y: x * y, transposed.shape[:len(axes)])] + list( transposed.shape[len(axes):])), transposed.shape def inverse_transpose_and_reshape(x, axes, variable_shape): un_reshaped = reshape( x, list(variable_shape[:len(axes)] + variable_shape[len(axes):])) return transpose(un_reshaped, inv_transpose_axes) def get_tranpose_args(ndim, axes): transpose_axes = [i for i in list( axes)] + [i for i in range(ndim) if i not in list(axes)] inv_transpose_axes = np.argsort(transpose_axes).tolist() return transpose_axes, inv_transpose_axes transpose_axes, inv_transpose_axes = get_tranpose_args(len(x.shape), axes) inp, transposed_inp_shape = transpose_and_reshape(x, axes) beta, transposed_beta_shape = transpose_and_reshape(beta, axes) gamma, transposed_gamma_shape = transpose_and_reshape(gamma, axes) mean, transposed_mean_shape = transpose_and_reshape(mean, axes) variance, transposed_variance_shape = transpose_and_reshape(variance, axes) if n_outputs == 1: out = batch_normalization_base(inp, beta, gamma, mean, variance, axes=[0], decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, n_outputs=n_outputs) return inverse_transpose_and_reshape(out, axes, transposed_inp_shape) out, mean, variance = batch_normalization_base(inp, beta, gamma, mean, variance, axes=[0], decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, n_outputs=n_outputs) out = inverse_transpose_and_reshape(out, axes, transposed_inp_shape) mean = inverse_transpose_and_reshape(mean, axes, transposed_mean_shape) variance = inverse_transpose_and_reshape( variance, axes, transposed_variance_shape) return out, mean, variance
[ "def", "batch_normalization", "(", "x", ",", "beta", ",", "gamma", ",", "mean", ",", "variance", ",", "axes", "=", "[", "1", "]", ",", "decay_rate", "=", "0.9", ",", "eps", "=", "1e-05", ",", "batch_stat", "=", "True", ",", "output_stat", "=", "False...
r""" Batch normalization. .. math:: \begin{eqnarray} \mu &=& \frac{1}{M} \sum x_i \\ \sigma^2 &=& \frac{1}{M} \sum \left(x_i - \mu\right)^2 \\ \hat{x}_i &=& \frac{x_i - \mu}{\sqrt{\sigma^2 + \epsilon}} \\ y_i &=& \hat{x}_i \gamma + \beta. \end{eqnarray} At testing time, the mean and variance values used are those that were computed during training by moving average. References: * `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. <https://arxiv.org/abs/1502.03167>`_ Args: x(~nnabla.Variable): N-D array of input. beta(~nnabla.Variable): N-D array of beta which is learned. gamma(~nnabla.Variable): N-D array of gamma which is learned. mean(~nnabla.Variable): N-D array of running mean (modified during forward execution). variance(~nnabla.Variable): N-D array of running variance (modified during forward execution). axes(repeated int64): Axes mean and variance are taken. decay_rate(float): Decay rate of running mean and variance. eps(float): Tiny value to avoid zero division by std. batch_stat(bool): Use mini-batch statistics rather than running ones. output_stat(bool): It true, the batch statistics of mean and variance, will be returned as Variables. They are also differentiable. Returns: Returns batch normalization output as :obj:`~nnabla.Variable`. If ``output_stat=True``, it also returns the mean and variance of the mini-batch * :obj:`~nnabla.Variable`: Output of the batch normalization * :obj:`~nnabla.Variable`: Mean (if ``output_stat=True`) * :obj:`~nnabla.Variable`: Variance (if ``output_stat=True`) See Also: ``nnabla.function_bases.batch_normalization``.
[ "r", "Batch", "normalization", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L278-L380
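A hedged sketch of the single-axis path (``axes=[1]``) of ``batch_normalization``; the per-channel parameter shape ``(1, 3, 1, 1)`` is an assumption consistent with the per-axis statistics described above, and ``mean``/``variance`` are created without parent functions, as the ``ValueError`` branch requires.

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    nn.set_auto_forward(True)
    x = nn.Variable.from_numpy_array(
        np.random.randn(8, 3, 16, 16).astype(np.float32))
    stat_shape = (1, 3, 1, 1)  # one statistic per channel for axes=[1]
    beta = nn.Variable.from_numpy_array(np.zeros(stat_shape, dtype=np.float32))
    gamma = nn.Variable.from_numpy_array(np.ones(stat_shape, dtype=np.float32))
    mean = nn.Variable.from_numpy_array(np.zeros(stat_shape, dtype=np.float32))
    variance = nn.Variable.from_numpy_array(np.ones(stat_shape, dtype=np.float32))

    y = F.batch_normalization(x, beta, gamma, mean, variance,
                              axes=[1], batch_stat=True)
    print(y.shape)  # (8, 3, 16, 16); running mean/variance are updated in place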
242,679
sony/nnabla
python/src/nnabla/functions.py
fixed_point_quantize
def fixed_point_quantize(x, sign=True, n=8, delta=2**-4, quantize=True, ste_fine_grained=True, outputs=None): r"""Fixed Point Quantize Args: x (Variable): An input variable. sign (bool): Indicate the signed number or the unsigned number. Default is true. n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. delta (float): Step size. quantize (bool): If true, quantize input, otherwise not. ste_fine_grained (bool): If true, STE is not 1. Returns: ~nnabla.Variable: N-D array. See Also: ``nnabla.function_bases.fixed_point_quantize``. In the forward pass, .. math:: \begin{equation} q_i= \left\{ \begin{array}{ll} max & if \ \ \ x_i > max \\ sign(x_i) \times floor(|x_i| \delta^{-1} + 2^{-1}) \times \delta & if \ \ min \le x_i \le max \\ min & if \ \ x_i < min \\ \end{array} \right., \end{equation} where :math:`\delta` is the step size, :math:`(min, max) :=(- (2^{n-1} - 1)\delta, (2^{n-1} - 1)\delta)` if :math:`sign` is true, :math:`(min, max) := (0, (2^n - 1) \delta)` otherwise, and :math:`n` is the total bit-width used. In the backward pass when `ste_fine_grained` is false, .. math:: \begin{equation} \frac{\partial q_i}{\partial x_i} = 1. \end{equation} In the backward pass when `ste_fine_grained` is true, .. math:: \begin{equation} \frac{\partial q_i}{\partial x_i}= \left\{ \begin{array}{ll} 0 & if \ \ \ x_i > max \\ 1 & if \ \ min \le x_i \le max \\ 0 & if \ \ x_i < min \\ \end{array} \right.. \end{equation} .. note:: Quantized values are stored as floating-point numbers, since this function is for simulation purposes. """ from .function_bases import fixed_point_quantize as fixed_point_quantize_base if not quantize: return x return fixed_point_quantize_base(x, sign, n, delta, ste_fine_grained, outputs=outputs)
python
def fixed_point_quantize(x, sign=True, n=8, delta=2**-4, quantize=True, ste_fine_grained=True, outputs=None): r"""Fixed Point Quantize Args: x (Variable): An input variable. sign (bool): Indicate the signed number or the unsigned number. Default is true. n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. delta (float): Step size. quantize (bool): If true, quantize input, otherwise not. ste_fine_grained (bool): If true, STE is not 1. Returns: ~nnabla.Variable: N-D array. See Also: ``nnabla.function_bases.fixed_point_quantize``. In the forward pass, .. math:: \begin{equation} q_i= \left\{ \begin{array}{ll} max & if \ \ \ x_i > max \\ sign(x_i) \times floor(|x_i| \delta^{-1} + 2^{-1}) \times \delta & if \ \ min \le x_i \le max \\ min & if \ \ x_i < min \\ \end{array} \right., \end{equation} where :math:`\delta` is the step size, :math:`(min, max) :=(- (2^{n-1} - 1)\delta, (2^{n-1} - 1)\delta)` if :math:`sign` is true, :math:`(min, max) := (0, (2^n - 1) \delta)` otherwise, and :math:`n` is the total bit-width used. In the backward pass when `ste_fine_grained` is false, .. math:: \begin{equation} \frac{\partial q_i}{\partial x_i} = 1. \end{equation} In the backward pass when `ste_fine_grained` is true, .. math:: \begin{equation} \frac{\partial q_i}{\partial x_i}= \left\{ \begin{array}{ll} 0 & if \ \ \ x_i > max \\ 1 & if \ \ min \le x_i \le max \\ 0 & if \ \ x_i < min \\ \end{array} \right.. \end{equation} .. note:: Quantized values are stored as floating-point numbers, since this function is for simulation purposes. """ from .function_bases import fixed_point_quantize as fixed_point_quantize_base if not quantize: return x return fixed_point_quantize_base(x, sign, n, delta, ste_fine_grained, outputs=outputs)
[ "def", "fixed_point_quantize", "(", "x", ",", "sign", "=", "True", ",", "n", "=", "8", ",", "delta", "=", "2", "**", "-", "4", ",", "quantize", "=", "True", ",", "ste_fine_grained", "=", "True", ",", "outputs", "=", "None", ")", ":", "from", ".", ...
r"""Fixed Point Quantize Args: x (Variable): An input variable. sign (bool): Indicate the signed number or the unsigned number. Default is true. n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. delta (float): Step size. quantize (bool): If true, quantize input, otherwise not. ste_fine_grained (bool): If true, STE is not 1. Returns: ~nnabla.Variable: N-D array. See Also: ``nnabla.function_bases.fixed_point_quantize``. In the forward pass, .. math:: \begin{equation} q_i= \left\{ \begin{array}{ll} max & if \ \ \ x_i > max \\ sign(x_i) \times floor(|x_i| \delta^{-1} + 2^{-1}) \times \delta & if \ \ min \le x_i \le max \\ min & if \ \ x_i < min \\ \end{array} \right., \end{equation} where :math:`\delta` is the step size, :math:`(min, max) :=(- (2^{n-1} - 1)\delta, (2^{n-1} - 1)\delta)` if :math:`sign` is true, :math:`(min, max) := (0, (2^n - 1) \delta)` otherwise, and :math:`n` is the total bit-width used. In the backward pass when using `ste_fine_grained` as false, .. math:: \begin{equation} \frac{\partial q_i}{\partial x_i} = 1. \end{equation} In the backward pass when using `ste_fine_grained` as true, .. math:: \begin{equation} \frac{\partial q_i}{\partial x_i}= \left\{ \begin{array}{ll} 0 & if \ \ \ x_i > max \\ 1 & if \ \ min \le x_i \le max \\ 0 & if \ \ x_i < min \\ \end{array} \right.. \end{equation} .. note:: Quantized values are stored as floating point number, since this function is for simulation purposes.
[ "r", "Fixed", "Point", "Quantize" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L424-L488
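A small numeric sketch of the forward rule above; the input values are arbitrary. With ``sign=True``, ``n=8`` and ``delta=2**-4``, the representable range is :math:`[-127/16, 127/16]` and in-range inputs snap to multiples of :math:`1/16`.

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    nn.set_auto_forward(True)
    x = nn.Variable.from_numpy_array(
        np.array([-20.0, -0.07, 0.03, 1.3], dtype=np.float32))
    q = F.fixed_point_quantize(x, sign=True, n=8, delta=2**-4)
    # -20.0 clips to min = -127/16 = -7.9375; the rest round to k/16.
    print(q.d)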
242,680
sony/nnabla
python/src/nnabla/functions.py
pow2_quantize
def pow2_quantize(x, sign=True, with_zero=True, n=8, m=1, quantize=True, ste_fine_grained=True, outputs=None): r"""Pow2 Quantize Args: x (Variable): An input variable. sign (bool): Indicate the signed number or the unsigned number. Default is true. with_zero (bool): Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit. n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8. m (int): :math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \in \mathcal{Z}`. Default is 1. quantize (bool): If true, quantize input, otherwise not. ste_fine_grained (bool): If true, STE is not 1. Returns: ~nnabla.Variable: N-D array. See Also: ``nnabla.function_bases.pow2_quantize``. In the forward pass of `signed` case, .. math:: q_i= \left\{ \begin{array}{ll} max_{+} & if \ \ \overline{q_i} > max_{+} \\ \overline{q_i} & if \ \ min_{+} \le \overline{q_i} \le max_{+} \\ min_{+} & if \ \ 0 \le \overline{q_i} < min_{+} \\ min_{-} & if \ \ min_{-} < \overline{q_i} < 0 \\ \overline{q_i} & if \ \ max_{-} \le \overline{q_i} \le min_{-}\\ max_{-} & if \ \ \overline{q_i} < max_{-} \\ \end{array} \right., where .. math:: && max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\ && max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\ && \overline{q_i} = sign(x_i) \times 2^{round(\log_2 |x_i|)}. This quantization uses the geometric mean between two power-of-two numbers as the quantization threshold. In the forward pass of `unsigned` case, .. math:: q_i= \left\{ \begin{array}{ll} max & if \ \ \overline{q_i} > max \\ \overline{q_i} & if \ \ min \le \overline{q_i} \le max \\ min & if \ \ 0 < \overline{q_i} < min \\ \end{array} \right., where .. math:: && max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\ && \overline{q_i} = 2^{int(\log_2 |x_i|)}. When `with_zero` is true, a pruning threshold is used to round an input to 0 or :math:`min`. The pruning threshold is defined in this function as follows, .. math:: pruning\ threshold = min \times 2^{-\frac{1}{2}}. If the absolute value of the input is less than this value, the input is rounded to 0, otherwise to :math:`min`. In the backward pass when ste_fine_grained is false, .. math:: \frac{\partial q_i}{\partial x_i} = 1. In the backward pass when ste_fine_grained is true, .. math:: \frac{\partial q_i}{\partial x_i}= \left\{ \begin{array}{ll} 0 & if \ \ \overline{q_i} > max_{+} \\ 1 & if \ \ otherwise \\ 0 & if \ \ \overline{q_i} < max_{-} \\ \end{array} \right.. """ from .function_bases import pow2_quantize as pow2_quantize_base if not quantize: return x return pow2_quantize_base(x, sign, with_zero, n, m, ste_fine_grained, outputs=outputs)
python
def pow2_quantize(x, sign=True, with_zero=True, n=8, m=1, quantize=True, ste_fine_grained=True, outputs=None): r"""Pow2 Quantize Args: x (Variable): An input variable. sign (bool): Indicate the signed number or the unsigned number. Default is true. with_zero (bool): Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit. n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8. m (int): :math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \in \mathcal{Z}`. Default is 1. quantize (bool): If true, quantize input, otherwise not. ste_fine_grained (bool): If true, STE is not 1. Returns: ~nnabla.Variable: N-D array. See Also: ``nnabla.function_bases.pow2_quantize``. In the forward pass of `signed` case, .. math:: q_i= \left\{ \begin{array}{ll} max_{+} & if \ \ \overline{q_i} > max_{+} \\ \overline{q_i} & if \ \ min_{+} \le \overline{q_i} \le max_{+} \\ min_{+} & if \ \ 0 \le \overline{q_i} < min_{+} \\ min_{-} & if \ \ min_{-} < \overline{q_i} < 0 \\ \overline{q_i} & if \ \ max_{-} \le \overline{q_i} \le min_{-}\\ max_{-} & if \ \ \overline{q_i} < max_{-} \\ \end{array} \right., where .. math:: && max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\ && max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\ && \overline{q_i} = sign(x_i) \times 2^{round(\log_2 |x_i|)}. This quantization uses the geometric mean between two power-of-two numbers as the quantization threshold. In the forward pass of `unsigned` case, .. math:: q_i= \left\{ \begin{array}{ll} max & if \ \ \overline{q_i} > max \\ \overline{q_i} & if \ \ min \le \overline{q_i} \le max \\ min & if \ \ 0 < \overline{q_i} < min \\ \end{array} \right., where .. math:: && max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\ && \overline{q_i} = 2^{int(\log_2 |x_i|)}. When `with_zero` is true, a pruning threshold is used to round an input to 0 or :math:`min`. The pruning threshold is defined in this function as follows, .. math:: pruning\ threshold = min \times 2^{-\frac{1}{2}}. If the absolute value of the input is less than this value, the input is rounded to 0, otherwise to :math:`min`. In the backward pass when ste_fine_grained is false, .. math:: \frac{\partial q_i}{\partial x_i} = 1. In the backward pass when ste_fine_grained is true, .. math:: \frac{\partial q_i}{\partial x_i}= \left\{ \begin{array}{ll} 0 & if \ \ \overline{q_i} > max_{+} \\ 1 & if \ \ otherwise \\ 0 & if \ \ \overline{q_i} < max_{-} \\ \end{array} \right.. """ from .function_bases import pow2_quantize as pow2_quantize_base if not quantize: return x return pow2_quantize_base(x, sign, with_zero, n, m, ste_fine_grained, outputs=outputs)
[ "def", "pow2_quantize", "(", "x", ",", "sign", "=", "True", ",", "with_zero", "=", "True", ",", "n", "=", "8", ",", "m", "=", "1", ",", "quantize", "=", "True", ",", "ste_fine_grained", "=", "True", ",", "outputs", "=", "None", ")", ":", "from", ...
r"""Pow2 Quantize Args: x (Variable): An input variable. sign (bool): Indicate the signed number or the unsigned number. Default is true. with_zero (bool): Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit. n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8. m (int): :math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \in \mathcal{Z}`. Default is 1. quantize (bool): If true, quantize input, otherwise not. ste_fine_grained (bool): If true, STE is not 1. Returns: ~nnabla.Variable: N-D array. See Also: ``nnabla.function_bases.pow2_quantize``. In the forward pass of `signed` case, .. math:: q_i= \left\{ \begin{array}{ll} max_{+} & if \ \ \overline{q_i} > max_{+} \\ \overline{q_i} & if \ \ min_{+} \le \overline{q_i} \le max_{+} \\ min_{+} & if \ \ 0 \le \overline{q_i} < min_{+} \\ min_{-} & if \ \ min_{-} < \overline{q_i} < 0 \\ \overline{q_i} & if \ \ max_{-} \le \overline{q_i} \le min_{-}\\ max_{-} & if \ \ \overline{q_i} < max_{-} \\ \end{array} \right., where .. math:: && max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\ && max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\ && \overline{q_i} = sign(x_i) \times 2^{round(\log_2 |x_i|)}. This quantization uses the geometric mean between two power-of-two numbers as quantization threshold. In the forward pass of `unsigned` case, .. math:: q_i= \left\{ \begin{array}{ll} max & if \ \ \overline{q_i} > max \\ \overline{q_i} & if \ \ min \le \overline{q_i} \le max \\ min & if \ \ 0 < \overline{q_i} < min \\ \end{array} \right., where .. math:: && max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\ && \overline{q_i} = 2^{int(\log_2 |x_i|)}. When using `with_zero` as true, a pruning threshold is used to round an input to 0 or :math:`min`. The pruning threshold is defined in this function as the following, .. math:: pruning\ threshold = min \times 2^{-\frac{1}{2}}. If an absolute value of the input is lesser than this value, the input is rounded to 0, otherwise :math:`min`. In the backward pass when using ste_fine_grained as false, .. math:: \frac{\partial q_i}{\partial x_i} = 1. In the backward pass when using ste_fine_grained as true, .. math:: \frac{\partial q_i}{\partial x_i}= \left\{ \begin{array}{ll} 0 & if \ \ \overline{q_i} > max_{+} \\ 1 & if \ \ otherwise \\ 0 & if \ \ \overline{q_i} < max_{-} \\ \end{array} \right..
[ "r", "Pow2", "Quantize" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L491-L584
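A matching sketch for the power-of-two case, again with arbitrary inputs; the printed values should be 0 or signed powers of two clipped to the :math:`[min, max]` bounds defined above.

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    nn.set_auto_forward(True)
    x = nn.Variable.from_numpy_array(
        np.array([-3.0, -0.3, 0.0, 0.75], dtype=np.float32))
    q = F.pow2_quantize(x, sign=True, with_zero=True, n=8, m=1)
    # e.g. -3.0 rounds toward a power of two and is clipped at max_{-} = -2^1.
    print(q.d)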
242,681
sony/nnabla
python/src/nnabla/functions.py
clip_by_value
def clip_by_value(x, min, max): r"""Clip inputs by values. .. math:: y = \begin{cases} max & (x > max) \\ x & (otherwise) \\ min & (x < min) \end{cases}. Args: x (Variable): An input variable. min (Variable): A min variable by which `x` is clipped. Note that the shape of `min` must be the same as `x`'s. max (Variable): A max variable by which `x` is clipped. Note that the shape of `max` must be the same as `x`'s. Returns: ~nnabla.Variable: N-D array. """ from .function_bases import maximum2 as maximum2_base from .function_bases import minimum2 as minimum2_base return minimum2_base(maximum2_base(x, min), max)
python
def clip_by_value(x, min, max): r"""Clip inputs by values. .. math:: y = \begin{cases} max & (x > max) \\ x & (otherwise) \\ min & (x < min) \end{cases}. Args: x (Variable): An input variable. min (Variable): A min variable by which `x` is clipped. Note that the shape of `min` must be the same as `x`'s. max (Variable): A max variable by which `x` is clipped. Note that the shape of `max` must be the same as `x`'s. Returns: ~nnabla.Variable: N-D array. """ from .function_bases import maximum2 as maximum2_base from .function_bases import minimum2 as minimum2_base return minimum2_base(maximum2_base(x, min), max)
[ "def", "clip_by_value", "(", "x", ",", "min", ",", "max", ")", ":", "from", ".", "function_bases", "import", "maximum2", "as", "maximum2_base", "from", ".", "function_bases", "import", "minimum2", "as", "minimum2_base", "return", "minimum2_base", "(", "maximum2_...
r"""Clip inputs by values. .. math:: y = \begin{cases} max & (x > max) \\ x & (otherwise) \\ min & (x < min) \end{cases}. Args: x (Variable): An input variable. min (Variable): A min variable by which `x` is clipped. Note that the shape of `min` must be the same as `x`'s. max (Variable): A max variable by which `x` is clipped. Note that the shape of `max` must be the same as `x`'s Returns: ~nnabla.Variable: N-D array.
[ "r", "Clip", "inputs", "by", "values", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L587-L609
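Because ``clip_by_value`` is just ``minimum2(maximum2(x, min), max)``, the bounds must be Variables of the same shape as ``x``; the constant bounds below are an illustrative assumption.

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    nn.set_auto_forward(True)
    x = nn.Variable.from_numpy_array(np.array([-1.5, 0.2, 3.0], dtype=np.float32))
    lo = nn.Variable.from_numpy_array(np.full((3,), -1.0, dtype=np.float32))
    hi = nn.Variable.from_numpy_array(np.full((3,), 1.0, dtype=np.float32))
    y = F.clip_by_value(x, lo, hi)
    print(y.d)  # [-1.0, 0.2, 1.0]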
242,682
sony/nnabla
python/src/nnabla/functions.py
interpolate
def interpolate(x, scale=None, output_size=None, mode='linear', align_corners=None): ''' Resize an ND array with interpolation. Scaling factors for spatial dimensions are determined by either ``scale`` or ``output_size``. ``nd = len(scale)`` or ``nd = len(output_size)`` determines the number of spatial dimensions, and the last ``nd`` dimensions of the input ``x`` are considered as the spatial dimensions to be resized. If ``scale`` is given, the ``output_size`` is calculated by ``output_size[i] = floor(scale[i] * x.shape[i - len(scale)])``. Example: .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F x_data = np.random.rand(64, 3, 224, 224) x = nn.Variable.from_numpy_array(x_data) # Resize by scales y = F.interpolate(x, scale=(2, 2), mode='linear') print(y.shape) # (64, 3, 448, 448) y.forward() print(y.d) # Print output # Resize to a size y2 = F.interpolate(x, output_size=(320, 257), mode='linear') print(y2.shape) # (64, 3, 320, 257) y2.forward() print(y2.d) # Print output Args: x(~nnabla.Variable): N-D array with an arbitrary number of dimensions. scale(tuple of ints): Scale factors along axes. The default is ``None``, and if this is omitted, ``output_size`` must be specified. output_size(tuple of ints): The output sizes for axes. If this is given, the scale factors are determined by the output sizes and the input sizes. The default is ``None``, and if this is omitted, ``scale`` must be specified. mode(str): Interpolation mode chosen from ('linear'|'nearest'). The default is 'linear'. align_corners(bool): If true, the corner pixels of input and output arrays are aligned, such that the output corner pixels have the same values as the input corner pixels. The default is ``None``, and it becomes ``True`` if mode is 'linear', otherwise ``False``. Returns: ~nnabla.Variable: N-D array. ''' from .function_bases import interpolate as interpolate_base import math if scale is None and output_size is None: raise ValueError('Either scale or output_size must be given') elif output_size is None: output_size = [int(math.floor(s * d)) for d, s in zip(x.shape[-len(scale):], scale)] if align_corners is None: if mode == 'linear': align_corners = True else: align_corners = False return interpolate_base(x, output_size, mode, align_corners)
python
def interpolate(x, scale=None, output_size=None, mode='linear', align_corners=None): ''' Resize an ND array with interpolation. Scaling factors for spatial dimensions are determined by either ``scale`` or ``output_size``. ``nd = len(scale)`` or ``nd = len(output_size)`` determines the number of spatial dimensions, and the last ``nd`` dimensions of the input ``x`` are considered as the spatial dimensions to be resized. If ``scale`` is given, the ``output_size`` is calculated by ``output_size[i] = floor(scale[i] * x.shape[i - len(scale)])``. Example: .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F x_data = np.random.rand(64, 3, 224, 224) x = nn.Variable.from_numpy_array(x_data) # Resize by scales y = F.interpolate(x, scale=(2, 2), mode='linear') print(y.shape) # (64, 3, 448, 448) y.forward() print(y.d) # Print output # Resize to a size y2 = F.interpolate(x, output_size=(320, 257), mode='linear') print(y2.shape) # (64, 3, 320, 257) y2.forward() print(y2.d) # Print output Args: x(~nnabla.Variable): N-D array with an arbitrary number of dimensions. scale(tuple of ints): Scale factors along axes. The default is ``None``, and if this is omitted, ``output_size`` must be specified. output_size(tuple of ints): The output sizes for axes. If this is given, the scale factors are determined by the output sizes and the input sizes. The default is ``None``, and if this is omitted, ``scale`` must be specified. mode(str): Interpolation mode chosen from ('linear'|'nearest'). The default is 'linear'. align_corners(bool): If true, the corner pixels of input and output arrays are aligned, such that the output corner pixels have the same values as the input corner pixels. The default is ``None``, and it becomes ``True`` if mode is 'linear', otherwise ``False``. Returns: ~nnabla.Variable: N-D array. ''' from .function_bases import interpolate as interpolate_base import math if scale is None and output_size is None: raise ValueError('Either scale or output_size must be given') elif output_size is None: output_size = [int(math.floor(s * d)) for d, s in zip(x.shape[-len(scale):], scale)] if align_corners is None: if mode == 'linear': align_corners = True else: align_corners = False return interpolate_base(x, output_size, mode, align_corners)
[ "def", "interpolate", "(", "x", ",", "scale", "=", "None", ",", "output_size", "=", "None", ",", "mode", "=", "'linear'", ",", "align_corners", "=", "None", ")", ":", "from", ".", "function_bases", "import", "interpolate", "as", "interpolate_base", "import",...
Resize an ND array with interpolation. Scaling factors for spatial dimensions are determined by either ``scale`` or ``output_size``. ``nd = len(scale)`` or ``nd = len(output_size)`` determines the number of spatial dimensions, and the last ``nd`` dimensions of the input ``x`` are considered as the spatial dimensions to be resized. If ``scale`` is given, the ``output_size`` is calculated by ``output_size[i] = floor(scale[i] * x.shape[i - len(scale)])``. Example: .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F x_data = np.random.rand(64, 3, 224, 224) x = nn.Variable.from_numpy_array(x_data) # Resize by scales y = F.interpolate(x, scale=(2, 2), mode='linear') print(y.shape) # (64, 3, 448, 448) y.forward() print(y.d) # Print output # Resize to a size y2 = F.interpolate(x, output_size=(320, 257), mode='linear') print(y2.shape) # (64, 3, 320, 257) y2.forward() print(y2.d) # Print output Args: x(~nnabla.Variable): N-D array with an arbitrary number of dimensions. scale(tuple of ints): Scale factors along axes. The default is ``None``, and if this is omitted, ``output_size`` must be specified. output_size(tuple of ints): The output sizes for axes. If this is given, the scale factors are determined by the output sizes and the input sizes. The default is ``None``, and if this is omitted, ``scale`` must be specified. mode(str): Interpolation mode chosen from ('linear'|'nearest'). The default is 'linear'. align_corners(bool): If true, the corner pixels of input and output arrays are aligned, such that the output corner pixels have the same values as the input corner pixels. The default is ``None``, and it becomes ``True`` if mode is 'linear', otherwise ``False``. Returns: ~nnabla.Variable: N-D array.
[ "Resize", "an", "ND", "array", "with", "interpolation", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L654-L724
242,683
sony/nnabla
python/src/nnabla/functions.py
sort
def sort(x, axis=-1, reverse=False, with_index=False, only_index=False): """Sorts the elements of `x` along a given `axis` in ascending order by value. A negative `axis` counts from the last dimension of `x`, so the default of -1 sorts along the last dimension. If `reverse` is True, then the elements are sorted in descending order. If `with_index` is True, the result is a tuple ``(sorted, indices)`` or only ``indices`` if `only_index` is True. Setting `only_index` to True implies that `with_index` is also True. .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F nn.set_auto_forward(True) x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4)) sorted = F.sort(x) assert np.allclose(sorted.d, np.sort(x.d)) sorted, indices = F.sort(x, with_index=True) assert np.allclose(sorted.d, np.sort(x.d)) assert np.all(indices.d == np.argsort(x.d)) indices = F.sort(x, only_index=True) assert np.all(indices.d == np.argsort(x.d)) Args: x(~nnabla.Variable): N-D array axis(int): Axis along which to sort. reverse(bool): Sort in descending order. with_index(bool): Return sorted values and index. only_index(bool): Return only the sort index. Returns: :obj:`~nnabla.Variable` `sorted` or :obj:`~nnabla.Variable` `indices` or (:obj:`~nnabla.Variable` `sorted`, :obj:`~nnabla.Variable` `indices`) """ from .function_bases import sort as sort_base n_outputs = 2 if with_index and not only_index else 1 return sort_base(x, axis, reverse, with_index, only_index, n_outputs)
python
def sort(x, axis=-1, reverse=False, with_index=False, only_index=False): from .function_bases import sort as sort_base n_outputs = 2 if with_index and not only_index else 1 return sort_base(x, axis, reverse, with_index, only_index, n_outputs)
[ "def", "sort", "(", "x", ",", "axis", "=", "-", "1", ",", "reverse", "=", "False", ",", "with_index", "=", "False", ",", "only_index", "=", "False", ")", ":", "from", ".", "function_bases", "import", "sort", "as", "sort_base", "n_outputs", "=", "2", ...
Sorts the elements of `x` along a given `axis` in ascending order by value. A negative `axis` counts from the last dimension of `x`, so the default of -1 sorts along the last dimension. If `reverse` is True, then the elements are sorted in descending order. If `with_index` is True, the result is a tuple ``(sorted, indices)`` or only ``indices`` if `only_index` is True. Setting `only_index` to True implies that `with_index` is also True. .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F nn.set_auto_forward(True) x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4)) sorted = F.sort(x) assert np.allclose(sorted.d, np.sort(x.d)) sorted, indices = F.sort(x, with_index=True) assert np.allclose(sorted.d, np.sort(x.d)) assert np.all(indices.d == np.argsort(x.d)) indices = F.sort(x, only_index=True) assert np.all(indices.d == np.argsort(x.d)) Args: x(~nnabla.Variable): N-D array axis(int): Axis along which to sort. reverse(bool): Sort in descending order. with_index(bool): Return sorted values and index. only_index(bool): Return only the sort index. Returns: :obj:`~nnabla.Variable` `sorted` or :obj:`~nnabla.Variable` `indices` or (:obj:`~nnabla.Variable` `sorted`, :obj:`~nnabla.Variable` `indices`)
[ "Sorts", "the", "elements", "of", "x", "along", "a", "given", "axis", "in", "ascending", "order", "by", "value", ".", "A", "negative", "axis", "counts", "from", "the", "last", "dimension", "of", "x", "so", "the", "default", "of", "-", "1", "sorts", "al...
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L727-L768
242,684
sony/nnabla
python/src/nnabla/utils/download.py
download
def download(url, output_file=None, open_file=True, allow_overwrite=False): '''Download a file from a URL. Args: url (str): URL. output_file (str, optional): If given, the downloaded file is written to the given path. open_file (bool): If True, it returns an opened file stream of the downloaded file. allow_overwrite (bool): If True, it overwrites an existing file. Returns: Returns file object if open_file is True, otherwise None. ''' filename = url.split('/')[-1] if output_file is None: cache = os.path.join(get_data_home(), filename) else: cache = output_file if os.path.exists(cache) and not allow_overwrite: logger.info("> {} already exists.".format(cache)) logger.info("> If you have any issue when using this file, ") logger.info("> manually remove the file and try downloading again.") else: r = request.urlopen(url) try: if six.PY2: content_length = int(r.info().dict['content-length']) elif six.PY3: content_length = int(r.info()['Content-Length']) except Exception: content_length = 0 unit = 1000000 content = b'' with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t: while True: data = r.read(unit) l = len(data) t.update(l) if l == 0: break content += data with open(cache, 'wb') as f: f.write(content) if not open_file: return return open(cache, 'rb')
python
def download(url, output_file=None, open_file=True, allow_overwrite=False): '''Download a file from a URL. Args: url (str): URL. output_file (str, optional): If given, the downloaded file is written to the given path. open_file (bool): If True, it returns an opened file stream of the downloaded file. allow_overwrite (bool): If True, it overwrites an existing file. Returns: Returns file object if open_file is True, otherwise None. ''' filename = url.split('/')[-1] if output_file is None: cache = os.path.join(get_data_home(), filename) else: cache = output_file if os.path.exists(cache) and not allow_overwrite: logger.info("> {} already exists.".format(cache)) logger.info("> If you have any issue when using this file, ") logger.info("> manually remove the file and try downloading again.") else: r = request.urlopen(url) try: if six.PY2: content_length = int(r.info().dict['content-length']) elif six.PY3: content_length = int(r.info()['Content-Length']) except Exception: content_length = 0 unit = 1000000 content = b'' with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t: while True: data = r.read(unit) l = len(data) t.update(l) if l == 0: break content += data with open(cache, 'wb') as f: f.write(content) if not open_file: return return open(cache, 'rb')
[ "def", "download", "(", "url", ",", "output_file", "=", "None", ",", "open_file", "=", "True", ",", "allow_overwrite", "=", "False", ")", ":", "filename", "=", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "if", "output_file", "is", "None...
Download a file from a URL. Args: url (str): URL. output_file (str, optional): If given, the downloaded file is written to the given path. open_file (bool): If True, it returns an opened file stream of the downloaded file. allow_overwrite (bool): If True, it overwrites an existing file. Returns: Returns file object if open_file is True, otherwise None.
[ "Download", "a", "file", "from", "URL", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/download.py#L35-L80
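A hedged usage sketch of ``download``; the URL and destination below are placeholders, not real endpoints. With ``open_file=True`` (the default) the function returns an opened binary stream, so the caller should close it.

.. code-block:: python

    from nnabla.utils.download import download

    # Hypothetical URL and destination path, for illustration only.
    f = download("https://example.com/assets/sample.bin",
                 output_file="/tmp/sample.bin",
                 allow_overwrite=False)
    if f is not None:  # None is returned when open_file=False
        payload = f.read()
        f.close()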
242,685
sony/nnabla
python/src/nnabla/utils/image_utils/cv2_utils.py
imread
def imread(path, grayscale=False, size=None, interpolate="bilinear", channel_first=False, as_uint16=False, num_channels=-1): """ Read image by cv2 module. Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tuple of int): (width, height). If None, output img shape depends on the files to read. channel_first (bool): This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If True, this function reads image as uint16. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray """ _imread_before(grayscale, num_channels) r_mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED img = _imread_helper(path, r_mode) if as_uint16 and img.dtype != np.uint16: if img.dtype == np.uint8: logger.warning("You want to read image as uint16, but the original bit-depth is 8 bit. " "All pixel values are simply multiplied by 256.") img = img.astype(np.uint16) * 256 else: raise ValueError( "casting {} to uint16 is not safe.".format(img.dtype)) img = _cvtColor_helper(img, num_channels) img = _imread_after(img, size, interpolate, channel_first, imresize) return img
python
def imread(path, grayscale=False, size=None, interpolate="bilinear", channel_first=False, as_uint16=False, num_channels=-1): _imread_before(grayscale, num_channels) r_mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED img = _imread_helper(path, r_mode) if as_uint16 and img.dtype != np.uint16: if img.dtype == np.uint8: logger.warning("You want to read image as uint16, but the original bit-depth is 8 bit. " "All pixel values are simply multiplied by 256.") img = img.astype(np.uint16) * 256 else: raise ValueError( "casting {} to uint16 is not safe.".format(img.dtype)) img = _cvtColor_helper(img, num_channels) img = _imread_after(img, size, interpolate, channel_first, imresize) return img
[ "def", "imread", "(", "path", ",", "grayscale", "=", "False", ",", "size", "=", "None", ",", "interpolate", "=", "\"bilinear\"", ",", "channel_first", "=", "False", ",", "as_uint16", "=", "False", ",", "num_channels", "=", "-", "1", ")", ":", "_imread_be...
Read image by cv2 module. Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tuple of int): (width, height). If None, output img shape depends on the files to read. channel_first (bool): This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If True, this function reads image as uint16. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray
[ "Read", "image", "by", "cv2", "module", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/cv2_utils.py#L105-L149
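A usage sketch assuming this cv2 implementation is the backend dispatched to by ``nnabla.utils.image_utils``; the file name is a placeholder.

.. code-block:: python

    from nnabla.utils.image_utils import imread

    # "photo.png" is a placeholder path.
    img = imread("photo.png", size=(224, 224), channel_first=True, num_channels=3)
    print(img.shape)  # (3, 224, 224) with these options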
242,686
sony/nnabla
python/src/nnabla/utils/learning_rate_scheduler.py
PolynomialScheduler.get_learning_rate
def get_learning_rate(self, iter): ''' Get learning rate with polynomial decay based on current iteration. Args: iter (int): current iteration (starting with 0). Returns: float: Learning rate ''' return self.init_lr * ((1.0 - iter * 1.0 / self.max_iter) ** self.power)
python
def get_learning_rate(self, iter): ''' Get learning rate with polynomial decay based on current iteration. Args: iter (int): current iteration (starting with 0). Returns: float: Learning rate ''' return self.init_lr * ((1.0 - iter * 1.0 / self.max_iter) ** self.power)
[ "def", "get_learning_rate", "(", "self", ",", "iter", ")", ":", "return", "self", ".", "init_lr", "*", "(", "(", "1.0", "-", "iter", "*", "1.0", "/", "self", ".", "max_iter", ")", "**", "self", ".", "power", ")" ]
Get learning rate with polynomial decay based on current iteration. Args: iter (int): current iteration (starting with 0). Returns: float: Learning rate
[ "Get", "learning", "rate", "with", "polymomial", "decay", "based", "on", "current", "iteration", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/learning_rate_scheduler.py#L59-L69
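A worked example of the decay formula ``init_lr * (1 - iter / max_iter) ** power``; the constructor signature is inferred from the attributes used above and should be treated as an assumption.

.. code-block:: python

    from nnabla.utils.learning_rate_scheduler import PolynomialScheduler

    # Assumed constructor arguments: (init_lr, max_iter, power).
    sched = PolynomialScheduler(0.1, 100, 2)
    print(sched.get_learning_rate(0))   # 0.1 * (1 - 0/100)**2  = 0.1
    print(sched.get_learning_rate(50))  # 0.1 * (1 - 50/100)**2 = 0.025
    print(sched.get_learning_rate(90))  # 0.1 * (1 - 90/100)**2 = 0.001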
242,687
sony/nnabla
python/src/nnabla/utils/learning_rate_scheduler.py
CosineScheduler.get_learning_rate
def get_learning_rate(self, iter): ''' Get learning rate with cosine decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate ''' return self.init_lr * ((math.cos(iter * 1.0 / (self.max_iter) * math.pi) + 1.0) * 0.5)
python
def get_learning_rate(self, iter): ''' Get learning rate with cosine decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate ''' return self.init_lr * ((math.cos(iter * 1.0 / (self.max_iter) * math.pi) + 1.0) * 0.5)
[ "def", "get_learning_rate", "(", "self", ",", "iter", ")", ":", "return", "self", ".", "init_lr", "*", "(", "(", "math", ".", "cos", "(", "iter", "*", "1.0", "/", "(", "self", ".", "max_iter", ")", "*", "math", ".", "pi", ")", "+", "1.0", ")", ...
Get learning rate with cosine decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate
[ "Get", "learning", "rate", "with", "cosine", "decay", "based", "on", "current", "iteration", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/learning_rate_scheduler.py#L87-L97
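The cosine variant anneals from ``init_lr`` down to 0 over ``max_iter`` iterations; again the constructor signature is an assumption based on the attributes used above.

.. code-block:: python

    from nnabla.utils.learning_rate_scheduler import CosineScheduler

    # Assumed constructor arguments: (init_lr, max_iter).
    sched = CosineScheduler(0.1, 100)
    print(sched.get_learning_rate(0))    # 0.1 * (cos(0) + 1) / 2    = 0.1
    print(sched.get_learning_rate(50))   # 0.1 * (cos(pi/2) + 1) / 2 = 0.05
    print(sched.get_learning_rate(100))  # 0.1 * (cos(pi) + 1) / 2   = 0.0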
242,688
sony/nnabla
python/src/nnabla/parametric_functions.py
affine
def affine(inp, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True, apply_w=None, apply_b=None): """ The affine layer, also known as the fully connected layer. Computes .. math:: {\\mathbf y} = {\\mathbf A} {\\mathbf x} + {\\mathbf b}. where :math:`{\\mathbf x}, {\\mathbf y}` are the inputs and outputs respectively, and :math:`{\\mathbf A}, {\\mathbf b}` are constants. Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it were a matrix. n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. apply_w (function): Lambda, function, or callable object applied to the weights. apply_b (function): Lambda, function, or callable object applied to the bias. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) """ if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: inmaps = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) if apply_w is not None: w = apply_w(w) b = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) if apply_b is not None: b = apply_b(b) return F.affine(inp, w, b, base_axis)
python
def affine(inp, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True, apply_w=None, apply_b=None): if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: inmaps = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) if apply_w is not None: w = apply_w(w) b = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) if apply_b is not None: b = apply_b(b) return F.affine(inp, w, b, base_axis)
[ "def", "affine", "(", "inp", ",", "n_outmaps", ",", "base_axis", "=", "1", ",", "w_init", "=", "None", ",", "b_init", "=", "None", ",", "fix_parameters", "=", "False", ",", "rng", "=", "None", ",", "with_bias", "=", "True", ",", "apply_w", "=", "None...
The affine layer, also known as the fully connected layer. Computes .. math:: {\\mathbf y} = {\\mathbf A} {\\mathbf x} + {\\mathbf b}. where :math:`{\\mathbf x}, {\\mathbf y}` are the inputs and outputs respectively, and :math:`{\\mathbf A}, {\\mathbf b}` are constants. Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it were a matrix. n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. apply_w (function): Lambda, function, or callable object applied to the weights. apply_b (function): Lambda, function, or callable object applied to the bias. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
[ "The", "affine", "layer", "also", "known", "as", "the", "fully", "connected", "layer", ".", "Computes" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L132-L183
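A minimal sketch of the parametric call; the scope name is arbitrary, and the ``affine/W``/``affine/b`` parameter names assume the usual sub-scope convention of nnabla's parametric functions.

.. code-block:: python

    import nnabla as nn
    import nnabla.parametric_functions as PF

    x = nn.Variable((8, 128))
    with nn.parameter_scope("fc1"):
        y = PF.affine(x, 10)  # W: (128, 10), b: (10,)
    print(y.shape)            # (8, 10)
    print(sorted(nn.get_parameters().keys()))  # e.g. ['fc1/affine/W', 'fc1/affine/b']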
242,689
sony/nnabla
python/src/nnabla/parametric_functions.py
binary_weight_affine
def binary_weight_affine(inp, n_outmaps, base_axis=1, quantize_zero_to=1.0, w_init=None, wb_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True): """Binary Weight Affine, multiplier-less inner-product with a scale factor. Binary Weight Affine is the affine function, but the inner product in this function is the following, .. math:: y_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}} \sum_{i} sign(w_{ji}) x_i Therefore :math:`sign(w_{ji})` is either :math:`1` or :math:`-1` and the inner product simplifies to addition followed by a scaling factor :math:`\\alpha = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}}`. The number of :math:`\\alpha` values is the outmaps of the affine function. References: Rastegari, Mohammad, et al. "XNOR-Net: ImageNet Classification Using Binary Convolutional Neural Networks." arXiv preprint arXiv:1603.05279 (2016). .. note:: 1) If you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the binarized weights (`binary_weight`). 2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the binary weights will not be in sync. 3) Quantized values are stored as floating-point numbers for `binary_weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it were a matrix. n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. quantize_zero_to (float): Input value at zero is quantized to this value. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weight and bias will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. 
Returns: :class:`~nnabla.Variable` """ if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: fan_in = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng) if wb_init is None: fan_in = np.prod(inp.shape[base_axis:]) wb_init = UniformInitializer( calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng) if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) wb = get_parameter_or_create( "Wb", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, wb_init, False) alpha = get_parameter_or_create( "alpha", n_outmaps, ConstantInitializer(0), False) b = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) return F.binary_weight_affine(inp, w, wb, alpha, b, base_axis, quantize_zero_to)
python
def binary_weight_affine(inp, n_outmaps, base_axis=1, quantize_zero_to=1.0, w_init=None, wb_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True): if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: fan_in = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng) if wb_init is None: fan_in = np.prod(inp.shape[base_axis:]) wb_init = UniformInitializer( calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng) if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) wb = get_parameter_or_create( "Wb", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, wb_init, False) alpha = get_parameter_or_create( "alpha", n_outmaps, ConstantInitializer(0), False) b = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) return F.binary_weight_affine(inp, w, wb, alpha, b, base_axis, quantize_zero_to)
[ "def", "binary_weight_affine", "(", "inp", ",", "n_outmaps", ",", "base_axis", "=", "1", ",", "quantize_zero_to", "=", "1.0", ",", "w_init", "=", "None", ",", "wb_init", "=", "None", ",", "b_init", "=", "None", ",", "fix_parameters", "=", "False", ",", "...
Binary Weight Affine, multiplier-less inner-product with a scale factor. Binary Weight Affine is the affine function, but the inner product in this function is the following, .. math:: y_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}} \sum_{i} sign(w_{ji}) x_i Therefore :math:`sign(w_{ji})` is either :math:`1` or :math:`-1` and the inner product simplifies to addition followed by a scaling factor :math:`\\alpha = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}}`. The number of :math:`\\alpha` values is the outmaps of the affine function. References: Rastegari, Mohammad, et al. "XNOR-Net: ImageNet Classification Using Binary Convolutional Neural Networks." arXiv preprint arXiv:1603.05279 (2016). .. note:: 1) If you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the binarized weights (`binary_weight`). 2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the binary weights will not be in sync. 3) Quantized values are stored as floating-point numbers for `binary_weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it were a matrix. n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. quantize_zero_to (float): Input value at zero is quantized to this value. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weight and bias will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`
[ "Binary", "Weight", "Affine", "multiplier", "-", "less", "inner", "-", "product", "with", "a", "scale", "factor", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L409-L488
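A minimal usage sketch for the entry above, assuming nnabla is installed; the shapes, scope name, and random input are illustrative, not from the source.

import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((4, 32))                    # batch of 4 samples, 32 features
with nn.parameter_scope("bwn_fc"):          # creates "W", "Wb", "alpha", "b" here
    y = PF.binary_weight_affine(x, 10)      # 10 output neurons
x.d = np.random.randn(*x.shape)
y.forward()                                 # syncs binarized "Wb" and "alpha" with float "W"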
242,690
sony/nnabla
python/src/nnabla/parametric_functions.py
inq_affine
def inq_affine(inp, n_outmaps, base_axis=1, num_bits=4, inq_iterations=(), selection_algorithm='random', seed=-1, w_init=None, i_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True): """Incremental Network Quantization Affine Layer During training, the weights are sequentially quantized to power-of-two values, which allows the training of a multiplierless network. Using `inq_iterations`, one can specify after how many forward passes half of the learnable weights are fixed and quantized to powers-of-two. After reaching the last value in `inq_iterations`, all weights are fixed. For more details, please refer to the reference. Reference: Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization: Towards lossless CNNs with low-precision weights. <https://arxiv.org/abs/1702.03044> Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix. n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0". inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights. selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly) seed (int): Random seed for INQ algorithm w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weight and bias will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable` """ if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: fan_in = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng) if i_init is None: fan_in = np.prod(inp.shape[base_axis:]) i_init = ConstantInitializer() if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) i = get_parameter_or_create( "I", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, i_init, False) b = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) return F.inq_affine(inp, w, i, b, base_axis, num_bits, inq_iterations, selection_algorithm, seed)
python
def inq_affine(inp, n_outmaps, base_axis=1, num_bits=4, inq_iterations=(), selection_algorithm='random', seed=-1, w_init=None, i_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True): if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: fan_in = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng) if i_init is None: fan_in = np.prod(inp.shape[base_axis:]) i_init = ConstantInitializer() if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) i = get_parameter_or_create( "I", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, i_init, False) b = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) return F.inq_affine(inp, w, i, b, base_axis, num_bits, inq_iterations, selection_algorithm, seed)
[ "def", "inq_affine", "(", "inp", ",", "n_outmaps", ",", "base_axis", "=", "1", ",", "num_bits", "=", "4", ",", "inq_iterations", "=", "(", ")", ",", "selection_algorithm", "=", "'random'", ",", "seed", "=", "-", "1", ",", "w_init", "=", "None", ",", ...
Incremental Network Quantization Affine Layer During training, the weights are sequentially quantized to power-of-two values, which allows the training of a multiplierless network. Using `inq_iterations`, one can specify after how many forward passes half of the learnable weights are fixed and quantized to powers-of-two. After reaching the last value in `inq_iterations`, all weights are fixed. For more details, please refer to the reference. Reference: Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization: Towards lossless CNNs with low-precision weights. <https://arxiv.org/abs/1702.03044> Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix. n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0". inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights. selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly) seed (int): Random seed for INQ algorithm w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weight and bias will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`
[ "Incremental", "Network", "Quantization", "Affine", "Layer" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L496-L559
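A usage sketch for `inq_affine` with an illustrative freezing schedule: after 1000 and then 2000 forward passes, half of the still-learnable weights are fixed to powers of two (the iteration counts and sizes are assumptions, not from the source).

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((8, 64))
with nn.parameter_scope("inq_fc"):
    # Indicators "I" (0 = learnable, 1 = fixed) are created alongside "W" and "b".
    y = PF.inq_affine(x, 10, num_bits=4, inq_iterations=(1000, 2000),
                      selection_algorithm='largest_abs')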
242,691
sony/nnabla
python/src/nnabla/parametric_functions.py
binary_connect_convolution
def binary_connect_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, quantize_zero_to=1.0, w_init=None, wb_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): """Binary Connect Convolution, multiplier-less inner-product. Binary Connect Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}. Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product simplifies to addition. This function should be used together with BatchNormalization. References: M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect: Training Deep Neural Networks with binary weights during propagations." Advances in Neural Information Processing Systems. 2015. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the binarized weights (`binary_weight`) 2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the binary weights will not be in sync. 3) Quantized values are stored as floating point number for `binary_weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction. quantize_zero_to (float): Input value at zero is quantized to this value. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. 
Returns: :class:`~nnabla.Variable` """ if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if wb_init is None: wb_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis]) + tuple(kernel), w_init, True, not fix_parameters) wb = get_parameter_or_create( "Wb", (outmaps, inp.shape[base_axis]) + tuple(kernel), wb_init, False) b = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) return F.binary_connect_convolution(inp, w, wb, b, base_axis, pad, stride, dilation, group, quantize_zero_to)
python
def binary_connect_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, quantize_zero_to=1.0, w_init=None, wb_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if wb_init is None: wb_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis]) + tuple(kernel), w_init, True, not fix_parameters) wb = get_parameter_or_create( "Wb", (outmaps, inp.shape[base_axis]) + tuple(kernel), wb_init, False) b = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) return F.binary_connect_convolution(inp, w, wb, b, base_axis, pad, stride, dilation, group, quantize_zero_to)
[ "def", "binary_connect_convolution", "(", "inp", ",", "outmaps", ",", "kernel", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "group", "=", "1", ",", "quantize_zero_to", "=", "1.0", ",", "w_init", "=", "None", ...
Binary Connect Convolution, multiplier-less inner-product. Binary Connect Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}. Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product simplifies to addition. This function should be used together with BatchNormalization. References: M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect: Training Deep Neural Networks with binary weights during propagations." Advances in Neural Information Processing Systems. 2015. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the binarized weights (`binary_weight`) 2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the binary weights will not be in sync. 3) Quantized values are stored as floating point number for `binary_weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction. quantize_zero_to (float): Input value at zero is quantized to this value. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`
[ "Binary", "Connect", "Convolution", "multiplier", "-", "less", "inner", "-", "product", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L942-L1022
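A sketch pairing `binary_connect_convolution` with batch normalization, as the docstring recommends; network sizes and scope names are illustrative.

import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

x = nn.Variable((4, 3, 32, 32))             # NCHW image batch
with nn.parameter_scope("bc_conv1"):
    h = PF.binary_connect_convolution(x, 16, (3, 3), pad=(1, 1))
with nn.parameter_scope("bc_bn1"):
    h = PF.batch_normalization(h)           # recommended companion layer
h = F.relu(h)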
242,692
sony/nnabla
python/src/nnabla/parametric_functions.py
inq_convolution
def inq_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, num_bits=4, inq_iterations=(), selection_algorithm='random', seed=-1, w_init=None, i_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): """Incremental Network Quantization Convolution Layer During training, the weights are sequentially quantized to power-of-two values, which allows the training of a multiplierless network. Using `inq_iterations`, one can specify after how many forward passes half of the learnable weights are fixed and quantized to powers-of-two. After reaching the last value in `inq_iterations`, all weights are fixed. For more details, please refer to the reference. Reference: Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization: Towards lossless CNNs with low-precision weights. <https://arxiv.org/abs/1702.03044> Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0". inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights. selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly) seed (int): Random seed for INQ algorithm w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weight and bias will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable` """ if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if i_init is None: i_init = ConstantInitializer() if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis]) + tuple(kernel), w_init, True, not fix_parameters) i = get_parameter_or_create( "I", (outmaps, inp.shape[base_axis]) + tuple(kernel), i_init, False) b = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation, group, num_bits, inq_iterations, selection_algorithm, seed)
python
def inq_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, num_bits=4, inq_iterations=(), selection_algorithm='random', seed=-1, w_init=None, i_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if i_init is None: i_init = ConstantInitializer() if b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis]) + tuple(kernel), w_init, True, not fix_parameters) i = get_parameter_or_create( "I", (outmaps, inp.shape[base_axis]) + tuple(kernel), i_init, False) b = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation, group, num_bits, inq_iterations, selection_algorithm, seed)
[ "def", "inq_convolution", "(", "inp", ",", "outmaps", ",", "kernel", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "group", "=", "1", ",", "num_bits", "=", "4", ",", "inq_iterations", "=", "(", ")", ",", "s...
Incremental Network Quantization Convolution Layer During training, the weights are sequentially quantized to power-of-two values, which allows the training of a multiplierless network. Using `inq_iterations`, one can specify after how many forward passes half of the learnable weights are fixed and quantized to powers-of-two. After reaching the last value in `inq_iterations`, all weights are fixed. For more details, please refer to the reference. Reference: Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization: Towards lossless CNNs with low-precision weights. <https://arxiv.org/abs/1702.03044> Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0". inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights. selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly) seed (int): Random seed for INQ algorithm w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weight and bias will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`
[ "Incremental", "Network", "Quantization", "Convolution", "Layer" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1122-L1180
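The convolution variant follows the same pattern; the kernel size, schedule, and seed below are illustrative.

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((4, 3, 32, 32))
with nn.parameter_scope("inq_conv1"):
    h = PF.inq_convolution(x, 16, (3, 3), pad=(1, 1), num_bits=4,
                           inq_iterations=(500, 1000),
                           selection_algorithm='random', seed=313)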
242,693
sony/nnabla
python/src/nnabla/parametric_functions.py
depthwise_convolution
def depthwise_convolution(inp, kernel, pad=None, stride=None, dilation=None, multiplier=1, w_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): """ N-D Depthwise Convolution with a bias term. Reference: - Chollet, Francois. "Xception: Deep Learning with Depthwise Separable Convolutions." https://arxiv.org/abs/1610.02357 Args: inp (~nnabla.Variable): N-D array. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. multiplier (:obj:`int`): Number of output feature maps per input feature map. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.depthwise_convolution` for the output shape. """ if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot( inp.shape[base_axis] * multiplier, inp.shape[base_axis], tuple(kernel)), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", (inp.shape[base_axis] * multiplier,) + tuple(kernel), w_init, True, not fix_parameters) b = None if with_bias: b = get_parameter_or_create( "b", (inp.shape[base_axis] * multiplier,), b_init, True, not fix_parameters) return F.depthwise_convolution(inp, w, b, base_axis, pad, stride, dilation, multiplier)
python
def depthwise_convolution(inp, kernel, pad=None, stride=None, dilation=None, multiplier=1, w_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot( inp.shape[base_axis] * multiplier, inp.shape[base_axis], tuple(kernel)), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", (inp.shape[base_axis] * multiplier,) + tuple(kernel), w_init, True, not fix_parameters) b = None if with_bias: b = get_parameter_or_create( "b", (inp.shape[base_axis] * multiplier,), b_init, True, not fix_parameters) return F.depthwise_convolution(inp, w, b, base_axis, pad, stride, dilation, multiplier)
[ "def", "depthwise_convolution", "(", "inp", ",", "kernel", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "multiplier", "=", "1", ",", "w_init", "=", "None", ",", "b_init", "=", "None", ",", "base_axis", "=", ...
N-D Depthwise Convolution with a bias term. Reference: - Chollet, Francois. "Xception: Deep Learning with Depthwise Separable Convolutions." https://arxiv.org/abs/1610.02357 Args: inp (~nnabla.Variable): N-D array. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. multiplier (:obj:`int`): Number of output feature maps per input feature map. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.depthwise_convolution` for the output shape.
[ "N", "-", "D", "Depthwise", "Convolution", "with", "a", "bias", "term", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1187-L1233
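A sketch of `depthwise_convolution`; note there is no `outmaps` argument, since the output channel count is input channels times `multiplier` (sizes illustrative).

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((4, 8, 32, 32))
with nn.parameter_scope("dw_conv1"):
    # One 3x3 filter per input channel; multiplier=2 yields 16 output channels.
    h = PF.depthwise_convolution(x, (3, 3), pad=(1, 1), multiplier=2)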
242,694
sony/nnabla
python/src/nnabla/parametric_functions.py
batch_normalization
def batch_normalization(inp, axes=[1], decay_rate=0.9, eps=1e-5, batch_stat=True, output_stat=False, fix_parameters=False, param_init=None): """ Batch normalization layer. .. math:: \\begin{array}{lcl} \\mu &=& \\frac{1}{M} \\sum x_i\\\\ \\sigma^2 &=& \\frac{1}{M} \\sum \\left(x_i - \\mu\\right)^2\\\\ \\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon }}\\\\ y_i &= & \\hat{x}_i \\gamma + \\beta. \\end{array} where :math:`x_i` is the input and :math:`y_i` is the output. In testing, the mean and variance computed by moving average calculated during training are used. Args: inp (~nnabla.Variable): N-D array of input. axes (:obj:`tuple` of :obj:`int`): Mean and variance for each element in ``axes`` are calculated using elements on the rest axes. For example, if an input is 4 dimensions, and ``axes`` is ``[1]``, batch mean is calculated as ``np.mean(inp.d, axis=(0, 2, 3), keepdims=True)`` (using numpy expression as an example). decay_rate (float): Decay rate of running mean and variance. eps (float): Tiny value to avoid zero division by std. batch_stat (bool): Use mini-batch statistics rather than running ones. output_stat (bool): Output batch mean and variance. fix_parameters (bool): When set to `True`, the beta and gamma will not be updated. param_init (dict): Parameter initializers can be set with a dict. A key of the dict must be ``'beta'``, ``'gamma'``, ``'mean'`` or ``'var'``. A value of the dict must be an :obj:`~nnabla.initializer.Initializer` or a :obj:`numpy.ndarray`. E.g. ``{'beta': ConstantInitializer(0), 'gamma': np.ones(gamma_shape) * 2}``. Returns: :class:`~nnabla.Variable`: N-D array. References: - Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. https://arxiv.org/abs/1502.03167 The parameter shape has the same number of dimensions as the input data; the dimensions listed in ``axes`` have the same size as the input, while the rest are ``1``. If an input is 4-dim and ``axes=[1]``, the parameter shape will be ``param_shape = np.mean(inp.d, axis=(0, 2, 3), keepdims=True).shape`` (using numpy expression as an example). """ shape_stat = [1 for _ in inp.shape] for i in range(len(axes)): shape_stat[axes[i]] = inp.shape[axes[i]] if param_init is None: param_init = {} beta_init = param_init.get('beta', ConstantInitializer(0)) gamma_init = param_init.get('gamma', ConstantInitializer(1)) mean_init = param_init.get('mean', ConstantInitializer(0)) var_init = param_init.get('var', ConstantInitializer(1)) beta = get_parameter_or_create( "beta", shape_stat, beta_init, True, not fix_parameters) gamma = get_parameter_or_create( "gamma", shape_stat, gamma_init, True, not fix_parameters) mean = get_parameter_or_create( "mean", shape_stat, mean_init, False) var = get_parameter_or_create( "var", shape_stat, var_init, False) return F.batch_normalization(inp, beta, gamma, mean, var, axes, decay_rate, eps, batch_stat, output_stat)
python
def batch_normalization(inp, axes=[1], decay_rate=0.9, eps=1e-5, batch_stat=True, output_stat=False, fix_parameters=False, param_init=None): shape_stat = [1 for _ in inp.shape] for i in range(len(axes)): shape_stat[axes[i]] = inp.shape[axes[i]] if param_init is None: param_init = {} beta_init = param_init.get('beta', ConstantInitializer(0)) gamma_init = param_init.get('gamma', ConstantInitializer(1)) mean_init = param_init.get('mean', ConstantInitializer(0)) var_init = param_init.get('var', ConstantInitializer(1)) beta = get_parameter_or_create( "beta", shape_stat, beta_init, True, not fix_parameters) gamma = get_parameter_or_create( "gamma", shape_stat, gamma_init, True, not fix_parameters) mean = get_parameter_or_create( "mean", shape_stat, mean_init, False) var = get_parameter_or_create( "var", shape_stat, var_init, False) return F.batch_normalization(inp, beta, gamma, mean, var, axes, decay_rate, eps, batch_stat, output_stat)
[ "def", "batch_normalization", "(", "inp", ",", "axes", "=", "[", "1", "]", ",", "decay_rate", "=", "0.9", ",", "eps", "=", "1e-5", ",", "batch_stat", "=", "True", ",", "output_stat", "=", "False", ",", "fix_parameters", "=", "False", ",", "param_init", ...
Batch normalization layer. .. math:: \\begin{array}{lcl} \\mu &=& \\frac{1}{M} \\sum x_i\\\\ \\sigma^2 &=& \\frac{1}{M} \\sum \\left(x_i - \\mu\\right)^2\\\\ \\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon }}\\\\ y_i &= & \\hat{x}_i \\gamma + \\beta. \\end{array} where :math:`x_i` is the input and :math:`y_i` is the output. In testing, the mean and variance computed by moving average calculated during training are used. Args: inp (~nnabla.Variable): N-D array of input. axes (:obj:`tuple` of :obj:`int`): Mean and variance for each element in ``axes`` are calculated using elements on the rest axes. For example, if an input is 4 dimensions, and ``axes`` is ``[1]``, batch mean is calculated as ``np.mean(inp.d, axis=(0, 2, 3), keepdims=True)`` (using numpy expression as an example). decay_rate (float): Decay rate of running mean and variance. eps (float): Tiny value to avoid zero division by std. batch_stat (bool): Use mini-batch statistics rather than running ones. output_stat (bool): Output batch mean and variance. fix_parameters (bool): When set to `True`, the beta and gamma will not be updated. param_init (dict): Parameter initializers can be set with a dict. A key of the dict must be ``'beta'``, ``'gamma'``, ``'mean'`` or ``'var'``. A value of the dict must be an :obj:`~nnabla.initializer.Initializer` or a :obj:`numpy.ndarray`. E.g. ``{'beta': ConstantInitializer(0), 'gamma': np.ones(gamma_shape) * 2}``. Returns: :class:`~nnabla.Variable`: N-D array. References: - Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. https://arxiv.org/abs/1502.03167 The parameter shape has the same number of dimensions as the input data; the dimensions listed in ``axes`` have the same size as the input, while the rest are ``1``. If an input is 4-dim and ``axes=[1]``, the parameter shape will be ``param_shape = np.mean(inp.d, axis=(0, 2, 3), keepdims=True).shape`` (using numpy expression as an example).
[ "Batch", "normalization", "layer", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1611-L1682
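A sketch of the train/test split controlled by `batch_stat`; reusing the same parameter scope shares beta, gamma, and the running statistics (scope name and shapes illustrative).

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((16, 32, 8, 8))
with nn.parameter_scope("bn1"):
    h_train = PF.batch_normalization(x, batch_stat=True)   # mini-batch statistics
with nn.parameter_scope("bn1"):                            # same scope, same parameters
    h_test = PF.batch_normalization(x, batch_stat=False)   # running mean/var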
242,695
sony/nnabla
python/src/nnabla/parametric_functions.py
mean_subtraction
def mean_subtraction(inp, base_axis=1, update_running_mean=True, fix_parameters=False): """ Mean subtraction layer. It subtracts the mean of the elements of the input array so that the mean of the result becomes :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy in various tasks such as image classification. At training time, this function is defined as .. math:: \\begin{array}{lcl} \\mu &=& \\frac{1}{M} \\sum x_i \\\\ y_i &=& x_i - \\mu \\end{array} At testing time, the mean values used are those that were computed during training by moving average. Note: The backward performs an approximated differentiation that takes into account only the latest mini-batch. Args: inp (~nnabla.Variable): N-D array of input. base_axis (int): Base axis of Mean Subtraction operation. Dimensions up to base_axis are treated as sample dimensions. update_running_mean (bool): When set to `True`, the running mean is updated during forward execution. fix_parameters (bool): Dummy parameter. This argument does not affect anything. Returns: ~nnabla.Variable: N-D array. """ assert len(inp.shape) >= base_axis shape = inp.shape[base_axis:] mean = get_parameter_or_create( "mean", shape, ConstantInitializer(0), False) t = get_parameter_or_create( "t", (1, ), ConstantInitializer(0), False) return F.mean_subtraction(inp, mean, t, base_axis=base_axis, update_running_mean=update_running_mean)
python
def mean_subtraction(inp, base_axis=1, update_running_mean=True, fix_parameters=False): assert len(inp.shape) >= base_axis shape = inp.shape[base_axis:] mean = get_parameter_or_create( "mean", shape, ConstantInitializer(0), False) t = get_parameter_or_create( "t", (1, ), ConstantInitializer(0), False) return F.mean_subtraction(inp, mean, t, base_axis=base_axis, update_running_mean=update_running_mean)
[ "def", "mean_subtraction", "(", "inp", ",", "base_axis", "=", "1", ",", "update_running_mean", "=", "True", ",", "fix_parameters", "=", "False", ")", ":", "assert", "len", "(", "inp", ".", "shape", ")", ">=", "base_axis", "shape", "=", "inp", ".", "shape...
Mean subtraction layer. It subtracts the mean of the elements of the input array so that the mean of the result becomes :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy in various tasks such as image classification. At training time, this function is defined as .. math:: \\begin{array}{lcl} \\mu &=& \\frac{1}{M} \\sum x_i \\\\ y_i &=& x_i - \\mu \\end{array} At testing time, the mean values used are those that were computed during training by moving average. Note: The backward performs an approximated differentiation that takes into account only the latest mini-batch. Args: inp (~nnabla.Variable): N-D array of input. base_axis (int): Base axis of Mean Subtraction operation. Dimensions up to base_axis are treated as sample dimensions. update_running_mean (bool): When set to `True`, the running mean is updated during forward execution. fix_parameters (bool): Dummy parameter. This argument does not affect anything. Returns: ~nnabla.Variable: N-D array.
[ "Mean", "subtraction", "layer", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1689-L1726
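A sketch of `mean_subtraction` at training versus inference time; `update_running_mean=False` freezes the stored "mean" parameter (scope name and shapes illustrative).

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((16, 3, 28, 28))
with nn.parameter_scope("mean_sub"):
    h_train = PF.mean_subtraction(x)                            # updates running mean
with nn.parameter_scope("mean_sub"):
    h_test = PF.mean_subtraction(x, update_running_mean=False)  # uses stored mean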
242,696
sony/nnabla
python/src/nnabla/parametric_functions.py
prelu
def prelu(inp, base_axis=1, shared=True, fix_parameters=False): """ Parametrized Rectified Linear Unit function defined as .. math:: y_i = \max(0, x_i) + w_i \min(0, x_i) where the negative slope :math:`w` is learned and can vary across channels (an axis specified with base_axis). Weights are initialized with :math:`-1`. Args: inp(~nnabla.Variable): N-D array as input base_axis(int): Dimensions up to base_axis are treated as sample dimensions. shared(bool): Use shared weight value or not fix_parameters (bool): When set to `True`, the negative slope values will not be updated. Returns: ~nnabla.Variable: N-D array. """ shape = tuple() if shared else (inp.shape[base_axis],) w = get_parameter_or_create("slope", shape, ConstantInitializer(-1), True, not fix_parameters) return F.prelu(inp, w, base_axis)
python
def prelu(inp, base_axis=1, shared=True, fix_parameters=False): shape = tuple() if shared else (inp.shape[base_axis],) w = get_parameter_or_create("slope", shape, ConstantInitializer(-1), True, not fix_parameters) return F.prelu(inp, w, base_axis)
[ "def", "prelu", "(", "inp", ",", "base_axis", "=", "1", ",", "shared", "=", "True", ",", "fix_parameters", "=", "False", ")", ":", "shape", "=", "tuple", "(", ")", "if", "shared", "else", "(", "inp", ".", "shape", "[", "base_axis", "]", ",", ")", ...
Parametrized Rectified Linear Unit function defined as .. math:: y_i = \max(0, x_i) + w_i \min(0, x_i) where the negative slope :math:`w` is learned and can vary across channels (an axis specified with base_axis). Weights are initialized with :math:`-1`. Args: inp(~nnabla.Variable): N-D array as input base_axis(int): Dimensions up to base_axis are treated as sample dimensions. shared(bool): Use shared weight value or not fix_parameters (bool): When set to `True`, the negative slope values will not be updated. Returns: ~nnabla.Variable: N-D array.
[ "Parametrized", "Rectified", "Linear", "Unit", "function", "defined", "as" ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1762-L1786
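A sketch contrasting the shared and per-channel slope modes of `prelu` (shapes and scope names illustrative).

import nnabla as nn
import nnabla.parametric_functions as PF

h = nn.Variable((8, 16, 8, 8))
with nn.parameter_scope("act1"):
    y_shared = PF.prelu(h)               # one shared slope, parameter shape ()
with nn.parameter_scope("act2"):
    y_chan = PF.prelu(h, shared=False)   # one slope per channel, shape (16,)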
242,697
sony/nnabla
python/src/nnabla/parametric_functions.py
fixed_point_quantized_affine
def fixed_point_quantized_affine(inp, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True, quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True): """Fixed-Point Quantized Affine. Fixed-Point Quantized Affine is the affine function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_j = \sum_{i} Q(w_{ji}) x_i, where :math:`Q(w_{ji})` is the fixed-point quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix. n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. sign_b (bool): Use signed quantization if `True`. n_b (int): Bit width used for bias. delta_b (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. 
(:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) """ if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: inmaps = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() # Floating Weight w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) # Quantized Weight if quantize_w: w_q = get_parameter_or_create( "W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, False) # Link computation graph real_w_q = F.fixed_point_quantize(w, quantize=quantize_w, sign=sign_w, n=n_w, delta=delta_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w # Bias # Floating b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", n_outmaps, b_init, False) # Link computation graph real_b_q = F.fixed_point_quantize(b, quantize=quantize_b, sign=sign_b, n=n_b, delta=delta_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.affine(inp, real_w_q, real_b_q, base_axis)
python
def fixed_point_quantized_affine(inp, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True, quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True): if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: inmaps = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() # Floating Weight w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) # Quantized Weight if quantize_w: w_q = get_parameter_or_create( "W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, False) # Link computation graph real_w_q = F.fixed_point_quantize(w, quantize=quantize_w, sign=sign_w, n=n_w, delta=delta_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w # Bias # Floating b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", n_outmaps, b_init, False) # Link computation graph real_b_q = F.fixed_point_quantize(b, quantize=quantize_b, sign=sign_b, n=n_b, delta=delta_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.affine(inp, real_w_q, real_b_q, base_axis)
[ "def", "fixed_point_quantized_affine", "(", "inp", ",", "n_outmaps", ",", "base_axis", "=", "1", ",", "w_init", "=", "None", ",", "b_init", "=", "None", ",", "fix_parameters", "=", "False", ",", "rng", "=", "None", ",", "with_bias", "=", "True", ",", "qu...
Fixed-Point Quantized Affine. Fixed-Point Quantized Affine is the affine function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_j = \sum_{i} Q(w_{ji}) x_i, where :math:`Q(w_{ji})` is the fixed-point quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix. n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. sign_b (bool): Use signed quantization if `True`. n_b (int): Bit width used for bias. delta_b (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
[ "Fixed", "-", "Point", "Quantized", "Affine", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1795-L1901
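A sketch of `fixed_point_quantized_affine` with 8-bit signed weights and step size 2**-4, matching the defaults documented above; per the note, "W_q" is only synced with "W" on forward (sizes and scope name illustrative).

import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((4, 32))
with nn.parameter_scope("fpq_fc"):
    y = PF.fixed_point_quantized_affine(x, 10, n_w=8, delta_w=2**-4,
                                        n_b=8, delta_b=2**-4)
x.d = np.random.randn(*x.shape)
y.forward()        # syncs quantized "W_q"/"b_q" with float "W"/"b"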
242,698
sony/nnabla
python/src/nnabla/parametric_functions.py
fixed_point_quantized_convolution
def fixed_point_quantized_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, w_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True, quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True,): """Fixed-Point Quantized Convolution. Fixed-Point Quantized Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j}, where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. sign_b (bool): Use signed quantization if `True`. n_b (int): Bit width used for bias. delta_b (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: N-D array. 
""" if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() # Floating Weight w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, True, not fix_parameters) # Quantized Weight if quantize_w: w_q = get_parameter_or_create( "W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, False) # Link computation graph real_w_q = F.fixed_point_quantize(w, quantize=quantize_w, sign=sign_w, n=n_w, delta=delta_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w # Bias # Floating b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", (outmaps,), b_init, False) # Link computation graph real_b_q = F.fixed_point_quantize(b, quantize=quantize_b, sign=sign_b, n=n_b, delta=delta_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group)
python
def fixed_point_quantized_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, w_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True, quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True,): if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() # Floating Weight w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, True, not fix_parameters) # Quantized Weight if quantize_w: w_q = get_parameter_or_create( "W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, False) # Link computation graph real_w_q = F.fixed_point_quantize(w, quantize=quantize_w, sign=sign_w, n=n_w, delta=delta_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w # Bias # Floating b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", (outmaps,), b_init, False) # Link computation graph real_b_q = F.fixed_point_quantize(b, quantize=quantize_b, sign=sign_b, n=n_b, delta=delta_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group)
[ "def", "fixed_point_quantized_convolution", "(", "inp", ",", "outmaps", ",", "kernel", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "group", "=", "1", ",", "w_init", "=", "None", ",", "b_init", "=", "None", ",...
Fixed-Point Quantized Convolution. Fixed-Point Quantized Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j}, where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. sign_b (bool): Use signed quantization if `True`. n_b (int): Bit width used for bias. delta_b (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: N-D array.
[ "Fixed", "-", "Point", "Quantized", "Convolution", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1910-L2017
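A minimal usage sketch for the record above (not part of the dataset entry): it assumes nnabla and numpy are installed, and the batch size, input shape, scope name "conv1", and quantization hyperparameters are illustrative choices rather than values taken from the source.

import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF

# NCHW input: a batch of 4 RGB 32x32 images (shapes are assumptions).
x = nn.Variable((4, 3, 32, 32))
with nn.parameter_scope("conv1"):
    # 8-bit signed fixed-point weights and bias with step size 2**-4,
    # i.e. Q(.) rounds each value to the nearest multiple of 1/16
    # and clips it to the representable range.
    y = PF.fixed_point_quantized_convolution(
        x, outmaps=16, kernel=(3, 3), pad=(1, 1),
        n_w=8, delta_w=2**-4, n_b=8, delta_b=2**-4)

x.d = np.random.randn(*x.shape).astype(np.float32)
y.forward()  # syncs the float "W"/"b" with the quantized "W_q"/"b_q"
print(y.shape)  # (4, 16, 32, 32): padding (1, 1) preserves the spatial size

Per note 2 in the docstring, inspect "W_q"/"b_q" (e.g. via nn.get_parameters(grad_only=False)) only after a forward pass; before it, the quantized buffers are not yet in sync with the float weights.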
242,699
sony/nnabla
python/src/nnabla/parametric_functions.py
pow2_quantized_affine
def pow2_quantized_affine(inp, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, with_zero_w=False, n_w=8, m_w=2, ste_fine_grained_w=True, quantize_b=True, sign_b=True, with_zero_b=False, n_b=8, m_b=2, ste_fine_grained_b=True):
    """Pow2 Quantized Affine.

    Pow2 Quantized Affine is the affine function,
    except the definition of the inner product is modified.
    The input-output relation of this function is as follows:

    .. math::

        y_j = \sum_{i} Q(w_{ji}) x_i,

    where :math:`Q(w_{ji})` is the power-of-2 quantization function.

    .. note::

        1) if you would like to share weights between some layers, please
        make sure to share the standard, floating value weights (`weight`)
        and not the quantized weights (`quantized weight`)

        2) The weights and the quantized weights become synced only after
        :func:`~nnabla._variable.Variable.forward` is called, and not after
        a call to :func:`~nnabla._variable.Variable.backward`.
        To access the parameters of the network, remember to call
        :func:`~nnabla._variable.Variable.forward` once before doing so,
        otherwise the float weights and the quantized weights will not be in sync.

        3) Quantized values are stored as floating point number for `quantized weight`,
        since this function is only for simulation purposes.

    Args:
        inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
        n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
        fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.
        quantize_w (bool): Quantize weights if `True`.
        sign_w (bool): Use signed quantization if `True`.
        with_zero_w (bool): Indicate using zero as a quantized value. Default is false.
        n_w (int): Bit width used for weight.
        m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
        ste_fine_grained_w (bool): STE is fine-grained if `True`.
        quantize_b (bool): Quantize bias if `True`.
        sign_b (bool): Use signed quantization if `True`.
        with_zero_b (bool): Indicate using zero as a quantized value. Default is false.
        n_b (int): Bit width used for bias.
        m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
        ste_fine_grained_b (bool): STE is fine-grained if `True`.

    Returns:
        :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
    """
    if not hasattr(n_outmaps, '__iter__'):
        n_outmaps = [n_outmaps]
    n_outmaps = list(n_outmaps)
    n_outmap = int(np.prod(n_outmaps))
    if w_init is None:
        inmaps = np.prod(inp.shape[base_axis:])
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
    if with_bias and b_init is None:
        b_init = ConstantInitializer()
    # Floating Weight
    w = get_parameter_or_create(
        "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
        w_init, True, not fix_parameters)
    # Quantized Weight
    if quantize_w:
        w_q = get_parameter_or_create(
            "W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
            w_init, False)
        # Link computation graph
        real_w_q = F.pow2_quantize(w, quantize=quantize_w,
                                   sign=sign_w, with_zero=with_zero_w,
                                   n=n_w, m=m_w,
                                   ste_fine_grained=ste_fine_grained_w,
                                   outputs=[w_q.data])
        real_w_q.persistent = True
    else:
        real_w_q = w

    # Bias
    # Floating
    b = None
    b_q = None
    real_b_q = None
    if with_bias:
        b = get_parameter_or_create(
            "b", n_outmaps, b_init, True, not fix_parameters)
        if quantize_b:
            b_q = get_parameter_or_create(
                "b_q", n_outmaps, b_init, False)
            real_b_q = F.pow2_quantize(b, quantize=quantize_b,
                                       sign=sign_b, with_zero=with_zero_b,
                                       n=n_b, m=m_b,
                                       ste_fine_grained=ste_fine_grained_b,
                                       outputs=[b_q.data])
            real_b_q.persistent = True
        else:
            real_b_q = b

    return F.affine(inp, real_w_q, real_b_q, base_axis)
python
def pow2_quantized_affine(inp, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, with_zero_w=False, n_w=8, m_w=2, ste_fine_grained_w=True, quantize_b=True, sign_b=True, with_zero_b=False, n_b=8, m_b=2, ste_fine_grained_b=True): if not hasattr(n_outmaps, '__iter__'): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: inmaps = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() # Floating Weight w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) # Quantized Weight if quantize_w: w_q = get_parameter_or_create( "W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, False) # Link computation graph real_w_q = F.pow2_quantize(w, quantize=quantize_w, sign=sign_w, with_zero=with_zero_w, n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w # Bias # Floating b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", n_outmaps, b_init, False) real_b_q = F.pow2_quantize(b, quantize=quantize_b, sign=sign_b, with_zero=with_zero_b, n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.affine(inp, real_w_q, real_b_q, base_axis)
[ "def", "pow2_quantized_affine", "(", "inp", ",", "n_outmaps", ",", "base_axis", "=", "1", ",", "w_init", "=", "None", ",", "b_init", "=", "None", ",", "fix_parameters", "=", "False", ",", "rng", "=", "None", ",", "with_bias", "=", "True", ",", "quantize_...
Pow2 Quantized Affine.

    Pow2 Quantized Affine is the affine function,
    except the definition of the inner product is modified.
    The input-output relation of this function is as follows:

    .. math::

        y_j = \sum_{i} Q(w_{ji}) x_i,

    where :math:`Q(w_{ji})` is the power-of-2 quantization function.

    .. note::

        1) if you would like to share weights between some layers, please
        make sure to share the standard, floating value weights (`weight`)
        and not the quantized weights (`quantized weight`)

        2) The weights and the quantized weights become synced only after
        :func:`~nnabla._variable.Variable.forward` is called, and not after
        a call to :func:`~nnabla._variable.Variable.backward`.
        To access the parameters of the network, remember to call
        :func:`~nnabla._variable.Variable.forward` once before doing so,
        otherwise the float weights and the quantized weights will not be in sync.

        3) Quantized values are stored as floating point number for `quantized weight`,
        since this function is only for simulation purposes.

    Args:
        inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
        n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
        fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.
        quantize_w (bool): Quantize weights if `True`.
        sign_w (bool): Use signed quantization if `True`.
        with_zero_w (bool): Indicate using zero as a quantized value. Default is false.
        n_w (int): Bit width used for weight.
        m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
        ste_fine_grained_w (bool): STE is fine-grained if `True`.
        quantize_b (bool): Quantize bias if `True`.
        sign_b (bool): Use signed quantization if `True`.
        with_zero_b (bool): Indicate using zero as a quantized value. Default is false.
        n_b (int): Bit width used for bias.
        m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
        ste_fine_grained_b (bool): STE is fine-grained if `True`.

    Returns:
        :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
[ "Pow2", "Quantized", "Affine", "." ]
aaf3d33b7cbb38f2a03aa754178ba8f7c8481320
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2026-L2132
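Again a hedged sketch rather than anything from the record above: a power-of-2 quantized fully connected layer followed by one forward pass. The sizes and the scope name "fc1" are assumptions for illustration only.

import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((8, 128))  # (batch, features); sizes are assumptions
with nn.parameter_scope("fc1"):
    # Weights and bias snap to signed powers of two bounded by
    # +/- 2**m_w = +/- 4, encoded with 8 bits (n_w=8, m_w=2).
    y = PF.pow2_quantized_affine(x, 10, n_w=8, m_w=2, n_b=8, m_b=2)

x.d = np.random.randn(*x.shape).astype(np.float32)
y.forward()  # after this, "W_q"/"b_q" hold the power-of-2 values
print(y.shape)  # (8, 10)

Passing a tuple such as n_outmaps=(5, 2) instead of 10 would produce an output of shape (8, 5, 2), since dimensions after base_axis are reshaped to n_outmaps.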