code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def run(self, instance):
    """Replay the recorded chain of lookups and calls on `instance`.

    Args:
        instance: the object the recorded chain is applied to.

    Returns:
        The result of the last attribute lookup or call in the chain.
    """
    current = instance
    for step in self.stack:
        if isinstance(step, str):
            # A string step records an attribute lookup.
            current = getattr(current, step)
        else:
            # Any other step is an (args, kwargs) pair recording a call.
            current = current(*step[0], **step[1])
    # Clear the recording so the instance can be reused for a new chain.
    self.stack = []
    return current
|
Run the recorded chain of methods on `instance`.
Args:
instance: an object.
|
run
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/utils.py
|
Apache-2.0
|
def tendermint_version_is_compatible(running_tm_ver):
    """
    Check Tendermint compatibility with BigchainDB server

    :param running_tm_ver: Version number of the connected Tendermint instance
    :type running_tm_ver: str
    :return: True/False depending on the compatibility with BigchainDB server
    :rtype: bool
    """
    # Splitting because version can look like this e.g. 0.22.8-40d6dc2e
    tm_ver = running_tm_ver.split('-')
    # BUGFIX: `str.split` never returns an empty list, so the old
    # `if not tm_ver` check was dead code. Guard the version component
    # itself so an empty string (e.g. '' or '-abc') is rejected instead
    # of crashing inside `version.parse`.
    if not tm_ver or not tm_ver[0]:
        return False
    return any(version.parse(ver) == version.parse(tm_ver[0])
               for ver in __tm_supported_versions__)
|
Check Tendermint compatibility with BigchainDB server
:param running_tm_ver: Version number of the connected Tendermint instance
:type running_tm_ver: str
:return: True/False depending on the compatibility with BigchainDB server
:rtype: bool
|
tendermint_version_is_compatible
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/utils.py
|
Apache-2.0
|
def connect(backend=None, host=None, port=None, name=None, max_tries=None,
            connection_timeout=None, replicaset=None, ssl=None, login=None, password=None,
            ca_cert=None, certfile=None, keyfile=None, keyfile_passphrase=None,
            crlfile=None):
    """Create a new connection to the database backend.

    All arguments default to the current configuration's values if not
    given.

    Args:
        backend (str): the name of the backend to use.
        host (str): the host to connect to.
        port (int): the port to connect to.
        name (str): the name of the database to use.
        replicaset (str): the name of the replica set (only relevant for
            MongoDB connections).

    Returns:
        An instance of :class:`~bigchaindb.backend.connection.Connection`
        based on the given (or defaulted) :attr:`backend`.

    Raises:
        :exc:`~ConnectionError`: If the connection to the database fails.
        :exc:`~ConfigurationError`: If the given (or defaulted) :attr:`backend`
            is not supported or could not be loaded.
        :exc:`~AuthenticationError`: If there is a OperationFailure due to
            Authentication failure after connecting to the database.
    """
    # Mandatory settings: a missing configuration value raises a KeyError.
    backend = backend or get_bigchaindb_config_value_or_key_error('backend')
    host = host or get_bigchaindb_config_value_or_key_error('host')
    port = port or get_bigchaindb_config_value_or_key_error('port')
    db_name = name or get_bigchaindb_config_value_or_key_error('name')

    # Optional settings. The replica-set / TLS options are only meaningful
    # for MongoDB; the connection class receives them as keyword arguments
    # and a backend that does not use them can simply ignore them.
    # (RethinkDB support was removed, so revisit this if a new backend is
    # ever introduced.)
    replicaset = replicaset or get_bigchaindb_config_value('replicaset')
    # `ssl` is a boolean, so `False` must not fall through to the default.
    ssl = ssl if ssl is not None else get_bigchaindb_config_value('ssl', False)
    login = login or get_bigchaindb_config_value('login')
    password = password or get_bigchaindb_config_value('password')
    ca_cert = ca_cert or get_bigchaindb_config_value('ca_cert')
    certfile = certfile or get_bigchaindb_config_value('certfile')
    keyfile = keyfile or get_bigchaindb_config_value('keyfile')
    keyfile_passphrase = keyfile_passphrase or get_bigchaindb_config_value('keyfile_passphrase', None)
    crlfile = crlfile or get_bigchaindb_config_value('crlfile')

    # Resolve the backend name to its connection class, e.g.
    # 'localmongodb' -> bigchaindb.backend.localmongodb.connection.<Class>.
    try:
        module_name, _, class_name = BACKENDS[backend].rpartition('.')
        backend_class = getattr(import_module(module_name), class_name)
    except KeyError:
        raise ConfigurationError('Backend `{}` is not supported. '
                                 'BigchainDB currently supports {}'.format(backend, BACKENDS.keys()))
    except (ImportError, AttributeError) as exc:
        raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc

    logger.debug('Connection: {}'.format(backend_class))
    return backend_class(host=host, port=port, dbname=db_name,
                         max_tries=max_tries, connection_timeout=connection_timeout,
                         replicaset=replicaset, ssl=ssl, login=login, password=password,
                         ca_cert=ca_cert, certfile=certfile, keyfile=keyfile,
                         keyfile_passphrase=keyfile_passphrase, crlfile=crlfile)
|
Create a new connection to the database backend.
All arguments default to the current configuration's values if not
given.
Args:
backend (str): the name of the backend to use.
host (str): the host to connect to.
port (int): the port to connect to.
name (str): the name of the database to use.
replicaset (str): the name of the replica set (only relevant for
MongoDB connections).
Returns:
An instance of :class:`~bigchaindb.backend.connection.Connection`
based on the given (or defaulted) :attr:`backend`.
Raises:
:exc:`~ConnectionError`: If the connection to the database fails.
:exc:`~ConfigurationError`: If the given (or defaulted) :attr:`backend`
is not supported or could not be loaded.
:exc:`~AuthenticationError`: If there is a OperationFailure due to
Authentication failure after connecting to the database.
|
connect
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/backend/connection.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/backend/connection.py
|
Apache-2.0
|
def __init__(self, host=None, port=None, dbname=None,
             connection_timeout=None, max_tries=None,
             **kwargs):
    """Create a new :class:`~.Connection` instance.

    Args:
        host (str): the host to connect to.
        port (int): the port to connect to.
        dbname (str): the name of the database to use.
        connection_timeout (int, optional): the milliseconds to wait
            until timing out the database connection attempt.
            Defaults to 5000ms.
        max_tries (int, optional): how many tries before giving up,
            if 0 then try forever. Defaults to 3.
        **kwargs: arbitrary keyword arguments provided by the
            configuration's ``database`` settings
    """
    defaults = bigchaindb.config['database']
    self.host = host or defaults['host']
    self.port = port or defaults['port']
    self.dbname = dbname or defaults['name']
    # Use an explicit `is None` test so 0 is an acceptable timeout value.
    if connection_timeout is None:
        connection_timeout = defaults['connection_timeout']
    self.connection_timeout = connection_timeout
    # Likewise, max_tries == 0 is meaningful (retry forever), so only
    # fall back to the configured value when the argument is absent.
    if max_tries is None:
        max_tries = defaults['max_tries']
    self.max_tries = max_tries
    self.max_tries_counter = repeat(0) if self.max_tries == 0 else range(self.max_tries)
    # Lazily-created underlying connection object.
    self._conn = None
|
Create a new :class:`~.Connection` instance.
Args:
host (str): the host to connect to.
port (int): the port to connect to.
dbname (str): the name of the database to use.
connection_timeout (int, optional): the milliseconds to wait
until timing out the database connection attempt.
Defaults to 5000ms.
max_tries (int, optional): how many tries before giving up,
if 0 then try forever. Defaults to 3.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/backend/connection.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/backend/connection.py
|
Apache-2.0
|
def init_database(connection=None, dbname=None):
    """Initialize the configured backend for use with BigchainDB.

    Creates a database with :attr:`dbname` with any required tables
    and supporting indexes.

    Args:
        connection (:class:`~bigchaindb.backend.connection.Connection`): an
            existing connection to use to initialize the database.
            Creates one if not given.
        dbname (str): the name of the database to create.
            Defaults to the database name given in the BigchainDB
            configuration.
    """
    dbname = dbname or bigchaindb.config['database']['name']
    connection = connection or connect()
    # The database must exist before its tables/indexes can be created.
    create_database(connection, dbname)
    create_tables(connection, dbname)
|
Initialize the configured backend for use with BigchainDB.
Creates a database with :attr:`dbname` with any required tables
and supporting indexes.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`): an
existing connection to use to initialize the database.
Creates one if not given.
dbname (str): the name of the database to create.
Defaults to the database name given in the BigchainDB
configuration.
|
init_database
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/backend/schema.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/backend/schema.py
|
Apache-2.0
|
def validate_language_key(obj, key):
    """Validate all nested "language" keys under ``obj[key]``.

    Args:
        obj (dict): dictionary whose "language" key is to be validated.
        key (str): the key of ``obj`` whose value is inspected.

    Returns:
        None: validation successful

    Raises:
        ValidationError: will raise exception in case language is not valid.
    """
    # Only the MongoDB backend attaches meaning to "language" fields
    # (text-search indexes), so other backends skip validation entirely.
    if bigchaindb.config['database']['backend'] != 'localmongodb':
        return
    data = obj.get(key, {})
    if isinstance(data, dict):
        validate_all_values_for_key_in_obj(data, 'language', validate_language)
    elif isinstance(data, list):
        validate_all_values_for_key_in_list(data, 'language', validate_language)
|
Validate all nested "language" key in `obj`.
Args:
obj (dict): dictionary whose "language" key is to be validated.
Returns:
None: validation successful
Raises:
ValidationError: will raise exception in case language is not valid.
|
validate_language_key
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/backend/schema.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/backend/schema.py
|
Apache-2.0
|
def validate_language(value):
    """Check if `value` is a valid language.
    https://docs.mongodb.com/manual/reference/text-search-languages/

    Args:
        value (str): the language to be validated.

    Returns:
        None: validation successful

    Raises:
        ValidationError: if ``value`` is not a supported text-search
            language.
    """
    if value in VALID_LANGUAGES:
        return
    raise ValidationError(('MongoDB does not support text search for the '
                           'language "{}". If you do not understand this error '
                           'message then please rename key/field "language" to '
                           'something else like "lang".').format(value))
|
Check if `value` is a valid language.
https://docs.mongodb.com/manual/reference/text-search-languages/
Args:
value (str): language to be validated
Returns:
None: validation successful
Raises:
ValidationError: will raise exception in case language is not valid.
|
validate_language
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/backend/schema.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/backend/schema.py
|
Apache-2.0
|
def __init__(self, replicaset=None, ssl=None, login=None, password=None,
             ca_cert=None, certfile=None, keyfile=None,
             keyfile_passphrase=None, crlfile=None, **kwargs):
    """Create a new Connection instance.

    Args:
        replicaset (str, optional): the name of the replica set to
            connect to.
        **kwargs: arbitrary keyword arguments provided by the
            configuration's ``database`` settings
    """
    super().__init__(**kwargs)
    self.replicaset = replicaset or get_bigchaindb_config_value('replicaset')
    # `ssl` is boolean, so an explicit False must not fall back to config.
    self.ssl = ssl if ssl is not None else get_bigchaindb_config_value('ssl', False)
    # The remaining credentials/TLS options all share the same pattern:
    # use the argument when given, otherwise the configured value.
    for attr, value in (('login', login),
                        ('password', password),
                        ('ca_cert', ca_cert),
                        ('certfile', certfile),
                        ('keyfile', keyfile),
                        ('keyfile_passphrase', keyfile_passphrase),
                        ('crlfile', crlfile)):
        setattr(self, attr, value or get_bigchaindb_config_value(attr))
|
Create a new Connection instance.
Args:
replicaset (str, optional): the name of the replica set to
connect to.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/backend/localmongodb/connection.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/backend/localmongodb/connection.py
|
Apache-2.0
|
def _connect(self):
    """Try to connect to the database.

    Builds a :class:`pymongo.MongoClient` either with or without TLS,
    depending on whether all certificate-related settings are present,
    and authenticates when credentials are configured.

    Raises:
        :exc:`~ConnectionError`: If the connection to the database
            fails.
        :exc:`~AuthenticationError`: If there is a OperationFailure due to
            Authentication failure after connecting to the database.
        :exc:`~ConfigurationError`: If there is a ConfigurationError while
            connecting to the database.
    """
    try:
        # FYI: the connection process might raise a
        # `ServerSelectionTimeoutError`, that is a subclass of
        # `ConnectionFailure`.
        # The presence of ca_cert, certfile, keyfile, crlfile implies the
        # use of certificates for TLS connectivity.
        if self.ca_cert is None or self.certfile is None or \
                self.keyfile is None or self.crlfile is None:
            # Plain (non-TLS) connection: any missing certificate setting
            # disables certificate-based connectivity.
            client = pymongo.MongoClient(self.host,
                                         self.port,
                                         replicaset=self.replicaset,
                                         serverselectiontimeoutms=self.connection_timeout,
                                         ssl=self.ssl,
                                         **MONGO_OPTS)
            # Username/password authentication requires both values.
            if self.login is not None and self.password is not None:
                client[self.dbname].authenticate(self.login, self.password)
        else:
            logger.info('Connecting to MongoDB over TLS/SSL...')
            client = pymongo.MongoClient(self.host,
                                         self.port,
                                         replicaset=self.replicaset,
                                         serverselectiontimeoutms=self.connection_timeout,
                                         ssl=self.ssl,
                                         ssl_ca_certs=self.ca_cert,
                                         ssl_certfile=self.certfile,
                                         ssl_keyfile=self.keyfile,
                                         ssl_pem_passphrase=self.keyfile_passphrase,
                                         ssl_crlfile=self.crlfile,
                                         ssl_cert_reqs=CERT_REQUIRED,
                                         **MONGO_OPTS)
            # With client certificates, authentication is X.509-based and
            # needs only the login name (no password).
            if self.login is not None:
                client[self.dbname].authenticate(self.login,
                                                 mechanism='MONGODB-X509')
        return client
    except (pymongo.errors.ConnectionFailure,
            pymongo.errors.OperationFailure) as exc:
        logger.info('Exception in _connect(): {}'.format(exc))
        raise ConnectionError(str(exc)) from exc
    except pymongo.errors.ConfigurationError as exc:
        # NOTE: raises the exception class (Python instantiates it with
        # no message); the pymongo error is preserved as the cause.
        raise ConfigurationError from exc
|
Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is a OperationFailure due to
Authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database.
|
_connect
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/backend/localmongodb/connection.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/backend/localmongodb/connection.py
|
Apache-2.0
|
def run_configure(args):
    """Run a script to configure the current node.

    Interactively (unless ``--yes``) collects server, websocket, database
    and tendermint settings, then writes the resulting configuration to
    ``args.config`` (or stdout when the path is ``-``).
    """
    config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH

    config_file_exists = False
    # if the config path is `-` then it's stdout
    if config_path != '-':
        config_file_exists = os.path.exists(config_path)

    # Ask before clobbering an existing config file, unless --yes was given.
    if config_file_exists and not args.yes:
        want = input_on_stderr('Config file `{}` exists, do you want to '
                               'override it? (cannot be undone) [y/N]: '.format(config_path))
        if want != 'y':
            return

    conf = copy.deepcopy(bigchaindb.config)

    # select the correct config defaults based on the backend
    print('Generating default configuration for backend {}'
          .format(args.backend), file=sys.stderr)
    database_keys = bigchaindb._database_keys_map[args.backend]
    conf['database'] = bigchaindb._database_map[args.backend]

    if not args.yes:
        for key in ('bind', ):
            val = conf['server'][key]
            conf['server'][key] = input_on_stderr('API Server {}? (default `{}`): '.format(key, val), val)

        for key in ('scheme', 'host', 'port'):
            val = conf['wsserver'][key]
            conf['wsserver'][key] = input_on_stderr('WebSocket Server {}? (default `{}`): '.format(key, val), val)

        for key in database_keys:
            val = conf['database'][key]
            conf['database'][key] = input_on_stderr('Database {}? (default `{}`): '.format(key, val), val)

        for key in ('host', 'port'):
            val = conf['tendermint'][key]
            # BUGFIX: this prompt was missing the trailing ': ' separator
            # that every other prompt above uses.
            conf['tendermint'][key] = input_on_stderr('Tendermint {}? (default `{}`): '.format(key, val), val)

    if config_path != '-':
        bigchaindb.config_utils.write_config(conf, config_path)
    else:
        print(json.dumps(conf, indent=4, sort_keys=True))
    print('Configuration written to {}'.format(config_path), file=sys.stderr)
    print('Ready to go!', file=sys.stderr)
|
Run a script to configure the current node.
|
run_configure
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/bigchaindb.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/bigchaindb.py
|
Apache-2.0
|
def run_election_new_upsert_validator(args, bigchain):
    """Initiate an election to add/update/remove a validator in an
    existing BigchainDB network.

    :param args: dict
        args = {
        'public_key': the public key of the proposed peer, (str)
        'power': the proposed validator power for the new peer, (str)
        'node_id': the node_id of the new peer (str)
        'sk': the path to the private key of the node calling the election (str)
        }
    :param bigchain: an instance of BigchainDB
    :return: election_id or `False` in case of failure
    """
    # Tendermint expects the key in base16; convert from the base64 input.
    validator_spec = {
        'public_key': {
            'value': public_key_from_base64(args.public_key),
            'type': 'ed25519-base16',
        },
        'power': args.power,
        'node_id': args.node_id,
    }
    return create_new_election(args.sk, bigchain, ValidatorElection, validator_spec)
|
Initiates an election to add/update/remove a validator to an existing BigchainDB network
:param args: dict
args = {
'public_key': the public key of the proposed peer, (str)
'power': the proposed validator power for the new peer, (str)
'node_id': the node_id of the new peer (str)
'sk': the path to the private key of the node calling the election (str)
}
:param bigchain: an instance of BigchainDB
:return: election_id or `False` in case of failure
|
run_election_new_upsert_validator
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/bigchaindb.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/bigchaindb.py
|
Apache-2.0
|
def run_election_approve(args, bigchain):
    """Approve an election by casting this node's vote.

    :param args: dict
        args = {
        'election_id': the election_id of the election (str)
        'sk': the path to the private key of the signer (str)
        }
    :param bigchain: an instance of BigchainDB
    :return: the vote's transaction id, or `False` in case of error
    """
    key = load_node_key(args.sk)
    tx = bigchain.get_transaction(args.election_id)

    # This node's voting power is the amount of the first election output
    # addressed to its public key; no such output means it cannot vote.
    _missing = object()
    voting_power = next(
        (v.amount for v in tx.outputs if key.public_key in v.public_keys),
        _missing)
    if voting_power is _missing:
        logger.error('The key you provided does not match any of the eligible voters in this election.')
        return False

    inputs = [inp for inp in tx.to_inputs() if key.public_key in inp.owners_before]
    election_pub_key = ValidatorElection.to_public_key(tx.id)
    approval = Vote.generate(inputs,
                             [([election_pub_key], voting_power)],
                             tx.id).sign([key.private_key])
    approval.validate(bigchain)

    status = bigchain.write_transaction(approval, BROADCAST_TX_COMMIT)
    if status != (202, ''):
        logger.error('Failed to commit vote')
        return False
    logger.info('[SUCCESS] Your vote has been submitted')
    return approval.id
|
Approve an election
:param args: dict
args = {
'election_id': the election_id of the election (str)
'sk': the path to the private key of the signer (str)
}
:param bigchain: an instance of BigchainDB
:return: success log message or `False` in case of error
|
run_election_approve
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/bigchaindb.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/bigchaindb.py
|
Apache-2.0
|
def run_election_show(args, bigchain):
    """Retrieve and log information about an election.

    :param args: dict
        args = {
        'election_id': the transaction_id for an election (str)
        }
    :param bigchain: an instance of BigchainDB
    :return: the election summary, or ``None`` when no election is found
    """
    election = bigchain.get_transaction(args.election_id)
    if election:
        response = election.show_election(bigchain)
        logger.info(response)
        return response
    logger.error(f'No election found with election_id {args.election_id}')
|
Retrieves information about an election
:param args: dict
args = {
'election_id': the transaction_id for an election (str)
}
:param bigchain: an instance of BigchainDB
|
run_election_show
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/bigchaindb.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/bigchaindb.py
|
Apache-2.0
|
def run_start(args):
    """Start the processes to run the node"""
    # Logging must be configured before anything below emits log records.
    setup_logging()
    logger.info('BigchainDB Version %s', bigchaindb.__version__)

    # Recover any state left inconsistent by a previous shutdown.
    run_recover(bigchaindb.lib.BigchainDB())

    if not args.skip_initialize_database:
        logger.info('Initializing database')
        _run_init()

    logger.info('Starting BigchainDB main process.')
    # Imported here, not at module level, to keep startup side effects
    # out of the import path of this command module.
    from bigchaindb.start import start
    start(args)
|
Start the processes to run the node
|
run_start
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/bigchaindb.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/bigchaindb.py
|
Apache-2.0
|
def configure_bigchaindb(command):
    """Decorator to be used by command line functions, such that the
    configuration of bigchaindb is performed before the execution of
    the command.

    Args:
        command: The command to decorate.

    Returns:
        The command wrapper function.
    """
    @functools.wraps(command)
    def configure(args):
        config_from_cmdline = None
        try:
            # Not every subcommand defines --log-level; tolerate its absence.
            if args.log_level is not None:
                config_from_cmdline = {
                    'log': {
                        'level_console': args.log_level,
                        'level_logfile': args.log_level,
                    },
                    'server': {'loglevel': args.log_level},
                }
        except AttributeError:
            pass
        bigchaindb.config_utils.autoconfigure(
            filename=args.config, config=config_from_cmdline, force=True)
        # BUGFIX: propagate the command's return value. Commands such as
        # run_election_approve return meaningful results, and
        # commands.utils.start returns func(args) to its caller — the old
        # wrapper silently swallowed that value.
        return command(args)
    return configure
|
Decorator to be used by command line functions, such that the
configuration of bigchaindb is performed before the execution of
the command.
Args:
command: The command to decorate.
Returns:
The command wrapper function.
|
configure_bigchaindb
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/utils.py
|
Apache-2.0
|
def input_on_stderr(prompt='', default=None, convert=None):
    """Print a prompt to stderr and read a line from stdin.

    The prompt goes to stderr so that stdout stays clean for actual
    program output.

    Args:
        prompt (str): the message to display.
        default: the default value to return if the user
            leaves the field empty
        convert (callable): a callable to be used to convert
            the value the user inserted. If None, the type of
            ``default`` will be used.
    """
    print(prompt, end='', file=sys.stderr)
    response = builtins.input()
    return _convert(response, default, convert)
|
Output a string to stderr and wait for input.
Args:
prompt (str): the message to display.
default: the default value to return if the user
leaves the field empty
convert (callable): a callable to be used to convert
the value the user inserted. If None, the type of
``default`` will be used.
|
input_on_stderr
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/utils.py
|
Apache-2.0
|
def start(parser, argv, scope):
    """Utility function to execute a subcommand.

    Looks up a function called ``run_<parser.args.command>`` in ``scope``
    and invokes it with the parsed arguments.

    Args:
        parser: an ArgumentParser instance.
        argv: the list of command line arguments without the script name.
        scope (dict): map containing (eventually) the functions to be called.

    Returns:
        Whatever the resolved ``run_<command>`` function returns.

    Raises:
        NotImplementedError: if ``scope`` doesn't contain a function called
            ``run_<parser.args.command>``.
    """
    args = parser.parse_args(argv)

    if not args.command:
        parser.print_help()
        raise SystemExit()

    # Map e.g. 'some-command' to the function `run_some_command` in scope.
    handler = scope.get('run_' + args.command.replace('-', '_'))
    if not handler:
        raise NotImplementedError('Command `{}` not yet implemented'.
                                  format(args.command))

    # Normalize the optional multiprocess flag:
    #   absent/False -> 1 process; None (flag given with no value) -> all CPUs.
    multiprocess = getattr(args, 'multiprocess', False)
    if multiprocess is False:
        multiprocess = 1
    elif multiprocess is None:
        multiprocess = mp.cpu_count()
    args.multiprocess = multiprocess

    return handler(args)
|
Utility function to execute a subcommand.
The function will look up in the ``scope``
if there is a function called ``run_<parser.args.command>``
and will run it using ``parser.args`` as first positional argument.
Args:
parser: an ArgumentParser instance.
argv: the list of command line arguments without the script name.
scope (dict): map containing (eventually) the functions to be called.
Raises:
NotImplementedError: if ``scope`` doesn't contain a function called
``run_<parser.args.command>``.
|
start
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/commands/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/commands/utils.py
|
Apache-2.0
|
def generate_key_pair():
    """Generates a cryptographic key pair.

    Returns:
        :class:`~bigchaindb.common.crypto.CryptoKeypair`: A
        :obj:`collections.namedtuple` with named fields
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.
    """
    # TODO FOR CC: Adjust interface so that this function becomes unnecessary
    private_key, public_key = crypto.ed25519_generate_key_pair()
    # The underlying library returns bytes; the keypair carries strings.
    return CryptoKeypair(private_key.decode(), public_key.decode())
|
Generates a cryptographic key pair.
Returns:
:class:`~bigchaindb.common.crypto.CryptoKeypair`: A
:obj:`collections.namedtuple` with named fields
:attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and
:attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.
|
generate_key_pair
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/crypto.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/crypto.py
|
Apache-2.0
|
def key_pair_from_ed25519_key(hex_private_key):
    """Generate base58 encode public-private key pair from a hex encoded private key"""
    # Only the first 32 bytes are used as the signing-key material; any
    # trailing bytes in the hex input are ignored.
    seed = bytes.fromhex(hex_private_key)[:32]
    signing_key = crypto.Ed25519SigningKey(seed, encoding='bytes')
    verifying_key = signing_key.get_verifying_key()
    return CryptoKeypair(
        private_key=signing_key.encode(encoding='base58').decode('utf-8'),
        public_key=verifying_key.encode(encoding='base58').decode('utf-8'))
|
Generate base58 encode public-private key pair from a hex encoded private key
|
key_pair_from_ed25519_key
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/crypto.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/crypto.py
|
Apache-2.0
|
def __init__(self, fulfillment, owners_before, fulfills=None):
    """Create an instance of an :class:`~.Input`.

    Args:
        fulfillment (:class:`cryptoconditions.Fulfillment`): A
            Fulfillment to be signed with a private key.
        owners_before (:obj:`list` of :obj:`str`): A list of owners
            after a Transaction was confirmed.
        fulfills (:class:`~bigchaindb.common.transaction.
            TransactionLink`, optional): A link representing the input
            of a `TRANSFER` Transaction.

    Raises:
        TypeError: if ``fulfills`` is not a TransactionLink or
            ``owners_before`` is not a list.
    """
    # Validate first, assign afterwards.
    if fulfills is not None:
        if not isinstance(fulfills, TransactionLink):
            raise TypeError('`fulfills` must be a TransactionLink instance')
    if not isinstance(owners_before, list):
        raise TypeError('`owners_before` must be a list instance')

    self.fulfillment = fulfillment
    self.fulfills = fulfills
    self.owners_before = owners_before
|
Create an instance of an :class:`~.Input`.
Args:
fulfillment (:class:`cryptoconditions.Fulfillment`): A
Fulfillment to be signed with a private key.
owners_before (:obj:`list` of :obj:`str`): A list of owners
after a Transaction was confirmed.
fulfills (:class:`~bigchaindb.common.transaction.
TransactionLink`, optional): A link representing the input
of a `TRANSFER` Transaction.
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def to_dict(self):
    """Transforms the object to a Python dictionary.

    Note:
        If an Input hasn't been signed yet, this method returns a
        dictionary representation.

    Returns:
        dict: The Input as an alternative serialization format.
    """
    try:
        fulfillment = self.fulfillment.serialize_uri()
    except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError):
        # An unsigned fulfillment cannot be serialized to a URI; fall
        # back to the details-dictionary representation.
        fulfillment = _fulfillment_to_details(self.fulfillment)

    try:
        fulfills = self.fulfills.to_dict()
    except AttributeError:
        # NOTE: `self.fulfills` can be `None` and that's fine
        fulfills = None

    return {
        'owners_before': self.owners_before,
        'fulfills': fulfills,
        'fulfillment': fulfillment,
    }
|
Transforms the object to a Python dictionary.
Note:
If an Input hasn't been signed yet, this method returns a
dictionary representation.
Returns:
dict: The Input as an alternative serialization format.
|
to_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def from_dict(cls, data):
    """Transforms a Python dictionary to an Input object.

    Note:
        Optionally, this method can also deserialize a Cryptoconditions-
        Fulfillment that is not yet signed.

    Args:
        data (dict): The Input to be transformed.

    Returns:
        :class:`~bigchaindb.common.transaction.Input`

    Raises:
        InvalidSignature: If an Input's URI couldn't be parsed.
    """
    raw_fulfillment = data['fulfillment']
    fulfillment = raw_fulfillment
    # Already a Fulfillment instance (or absent) -> nothing to parse.
    if not isinstance(raw_fulfillment, (Fulfillment, type(None))):
        try:
            fulfillment = Fulfillment.from_uri(data['fulfillment'])
        except ASN1DecodeError:
            # TODO Remove as it is legacy code, and simply fall back on
            # ASN1DecodeError
            raise InvalidSignature("Fulfillment URI couldn't been parsed")
        except TypeError:
            # NOTE: See comment about this special case in
            # `Input.to_dict`
            fulfillment = _fulfillment_from_details(data['fulfillment'])
    fulfills = TransactionLink.from_dict(data['fulfills'])
    return cls(fulfillment, data['owners_before'], fulfills)
|
Transforms a Python dictionary to an Input object.
Note:
Optionally, this method can also serialize a Cryptoconditions-
Fulfillment that is not yet signed.
Args:
data (dict): The Input to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Input`
Raises:
InvalidSignature: If an Input's URI couldn't be parsed.
|
from_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _fulfillment_to_details(fulfillment):
    """Encode a fulfillment as a details dictionary

    Args:
        fulfillment: Crypto-conditions Fulfillment object

    Raises:
        UnsupportedTypeError: for any condition type other than
            ed25519-sha-256 or threshold-sha-256.
    """
    type_name = fulfillment.type_name
    if type_name == 'ed25519-sha-256':
        return {
            'type': 'ed25519-sha-256',
            'public_key': base58.b58encode(fulfillment.public_key).decode(),
        }
    if type_name == 'threshold-sha-256':
        # Recursively encode each subcondition body.
        return {
            'type': 'threshold-sha-256',
            'threshold': fulfillment.threshold,
            'subconditions': [_fulfillment_to_details(sub['body'])
                              for sub in fulfillment.subconditions],
        }
    raise UnsupportedTypeError(type_name)
|
Encode a fulfillment as a details dictionary
Args:
fulfillment: Crypto-conditions Fulfillment object
|
_fulfillment_to_details
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _fulfillment_from_details(data, _depth=0):
    """Load a fulfillment for a signing spec dictionary

    Args:
        data: tx.output[].condition.details dictionary
        _depth: internal recursion counter; do not pass explicitly.

    Raises:
        ThresholdTooDeep: when subconditions nest 100 levels deep.
        UnsupportedTypeError: for unknown condition types.
    """
    # Hard cap on nesting to avoid unbounded recursion on crafted input.
    if _depth == 100:
        raise ThresholdTooDeep()

    cond_type = data['type']
    if cond_type == 'ed25519-sha-256':
        return Ed25519Sha256(public_key=base58.b58decode(data['public_key']))
    if cond_type == 'threshold-sha-256':
        threshold = ThresholdSha256(data['threshold'])
        for sub in data['subconditions']:
            threshold.add_subfulfillment(
                _fulfillment_from_details(sub, _depth + 1))
        return threshold
    raise UnsupportedTypeError(data.get('type'))
|
Load a fulfillment for a signing spec dictionary
Args:
data: tx.output[].condition.details dictionary
|
_fulfillment_from_details
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def __init__(self, txid=None, output=None):
    """Build a :class:`~.TransactionLink` pointing at one output.
    Note:
        In an IPLD implementation, this class is not necessary anymore,
        as an IPLD link can simply point to an object, as well as an
        objects properties. So instead of having a (de)serializable
        class, we can have a simple IPLD link of the form:
        `/<tx_id>/transaction/outputs/<output>/`.
    Args:
        txid (str, optional): A Transaction to link to.
        output (int, optional): An Outputs's index in a Transaction with
            id `txid`.
    """
    self.txid, self.output = txid, output
|
Create an instance of a :class:`~.TransactionLink`.
Note:
In an IPLD implementation, this class is not necessary anymore,
as an IPLD link can simply point to an object, as well as an
objects properties. So instead of having a (de)serializable
class, we can have a simple IPLD link of the form:
`/<tx_id>/transaction/outputs/<output>/`.
Args:
txid (str, optional): A Transaction to link to.
output (int, optional): An Outputs's index in a Transaction with
id `txid`.
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def from_dict(cls, link):
    """Build a TransactionLink from its dictionary representation.
    Args:
        link (dict): The link to be transformed.
    Returns:
        :class:`~bigchaindb.common.transaction.TransactionLink`
    """
    try:
        txid, output = link['transaction_id'], link['output_index']
    except TypeError:
        # `link` was None (or not subscriptable): produce an empty link.
        return cls()
    return cls(txid, output)
|
Transforms a Python dictionary to a TransactionLink object.
Args:
link (dict): The link to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.TransactionLink`
|
from_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def to_dict(self):
    """Serialize the link to a Python dictionary.
    Returns:
        (dict|None): The link as an alternative serialization format.
    """
    # An empty link (neither txid nor output set) serializes to None.
    if self.txid is None and self.output is None:
        return None
    return {
        'transaction_id': self.txid,
        'output_index': self.output,
    }
|
Transforms the object to a Python dictionary.
Returns:
(dict|None): The link as an alternative serialization format.
|
to_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def __init__(self, fulfillment, public_keys=None, amount=1):
    """Create an instance of a :class:`~.Output`.
    Args:
        fulfillment (:class:`cryptoconditions.Fulfillment`): A
            Fulfillment to extract a Condition from.
        public_keys (:obj:`list` of :obj:`str`, optional): A list of
            owners before a Transaction was confirmed.
        amount (int): The amount of Assets to be locked with this
            Output.
    Raises:
        TypeError: if `public_keys` is not instance of `list`.
    """
    # Validate all arguments before assigning anything.
    if public_keys is not None and not isinstance(public_keys, list):
        raise TypeError('`public_keys` must be a list instance or None')
    if not isinstance(amount, int):
        raise TypeError('`amount` must be an int')
    if amount < 1:
        raise AmountError('`amount` must be greater than 0')
    # MAX_AMOUNT is a class-level cap on the tokens a single output may hold.
    if amount > self.MAX_AMOUNT:
        raise AmountError('`amount` must be <= %s' % self.MAX_AMOUNT)
    self.fulfillment = fulfillment
    self.amount = amount
    self.public_keys = public_keys
|
Create an instance of a :class:`~.Output`.
Args:
fulfillment (:class:`cryptoconditions.Fulfillment`): A
Fulfillment to extract a Condition from.
public_keys (:obj:`list` of :obj:`str`, optional): A list of
owners before a Transaction was confirmed.
amount (int): The amount of Assets to be locked with this
Output.
Raises:
TypeError: if `public_keys` is not instance of `list`.
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def to_dict(self):
    """Serialize this Output to a Python dictionary.
    Note:
        A dictionary serialization of the Input the Output was
        derived from is always provided.
    Returns:
        dict: The Output as an alternative serialization format.
    """
    # TODO FOR CC: It must be able to recognize a hashlock condition
    #              and fulfillment!
    condition = {}
    try:
        condition['details'] = _fulfillment_to_details(self.fulfillment)
    except AttributeError:
        # Hashlock case: the fulfillment carries no detail structure.
        pass
    try:
        condition['uri'] = self.fulfillment.condition_uri
    except AttributeError:
        # The fulfillment is already a bare condition URI string.
        condition['uri'] = self.fulfillment
    return {
        'public_keys': self.public_keys,
        'condition': condition,
        'amount': str(self.amount),
    }
|
Transforms the object to a Python dictionary.
Note:
A dictionary serialization of the Input the Output was
derived from is always provided.
Returns:
dict: The Output as an alternative serialization format.
|
to_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def generate(cls, public_keys, amount):
    """Generates a Output from a specifically formed tuple or list.
    Note:
        If a ThresholdCondition has to be generated where the threshold
        is always the number of subconditions it is split between, a
        list of the following structure is sufficient:
        [(address|condition)*, [(address|condition)*, ...], ...]
    Args:
        public_keys (:obj:`list` of :obj:`str`): The public key of
            the users that should be able to fulfill the Condition
            that is being created.
        amount (:obj:`int`): The amount locked by the Output.
    Returns:
        An Output that can be used in a Transaction.
    Raises:
        TypeError: If `public_keys` is not an instance of `list`.
        ValueError: If `public_keys` is an empty list.
    """
    if not isinstance(amount, int):
        raise TypeError('`amount` must be a int')
    if amount < 1:
        raise AmountError('`amount` needs to be greater than zero')
    if not isinstance(public_keys, list):
        raise TypeError('`public_keys` must be an instance of list')
    if len(public_keys) == 0:
        # NOTE: fixed a missing space between the two joined string
        #       literals (previously read "at least oneowner").
        raise ValueError('`public_keys` needs to contain at least one '
                         'owner')
    elif len(public_keys) == 1 and not isinstance(public_keys[0], list):
        # Single owner: a plain Ed25519 condition (or a user-supplied
        # Fulfillment) suffices, no threshold wrapper needed.
        if isinstance(public_keys[0], Fulfillment):
            ffill = public_keys[0]
        else:
            ffill = Ed25519Sha256(
                public_key=base58.b58decode(public_keys[0]))
        return cls(ffill, public_keys, amount=amount)
    else:
        # NOTE: `threshold` is now computed only after `public_keys`
        #       has been validated as a list, so malformed input raises
        #       the descriptive TypeError above instead of a bare
        #       "object has no len()" error.
        threshold = len(public_keys)
        initial_cond = ThresholdSha256(threshold=threshold)
        threshold_cond = reduce(cls._gen_condition, public_keys,
                                initial_cond)
        return cls(threshold_cond, public_keys, amount=amount)
|
Generates a Output from a specifically formed tuple or list.
Note:
If a ThresholdCondition has to be generated where the threshold
is always the number of subconditions it is split between, a
list of the following structure is sufficient:
[(address|condition)*, [(address|condition)*, ...], ...]
Args:
public_keys (:obj:`list` of :obj:`str`): The public key of
the users that should be able to fulfill the Condition
that is being created.
amount (:obj:`int`): The amount locked by the Output.
Returns:
An Output that can be used in a Transaction.
Raises:
TypeError: If `public_keys` is not an instance of `list`.
ValueError: If `public_keys` is an empty list.
|
generate
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _gen_condition(cls, initial, new_public_keys):
    """Generates ThresholdSha256 conditions from a list of new owners.
    Note:
        This method is intended only to be used with a reduce function.
        For a description on how to use this method, see
        :meth:`~.Output.generate`.
    Args:
        initial (:class:`cryptoconditions.ThresholdSha256`):
            A Condition representing the overall root.
        new_public_keys (:obj:`list` of :obj:`str`|str): A list of new
            owners or a single new owner.
    Returns:
        :class:`cryptoconditions.ThresholdSha256`: `initial`, with one
        more subfulfillment attached.
    """
    try:
        threshold = len(new_public_keys)
    except TypeError:
        # Not a sized collection (single key string or a Fulfillment).
        threshold = None
    if isinstance(new_public_keys, list) and len(new_public_keys) > 1:
        # Nested list: build an inner N-of-N threshold condition by
        # reducing over its elements, then attach it below.
        ffill = ThresholdSha256(threshold=threshold)
        reduce(cls._gen_condition, new_public_keys, ffill)
    elif isinstance(new_public_keys, list) and len(new_public_keys) <= 1:
        raise ValueError('Sublist cannot contain single owner')
    else:
        try:
            new_public_keys = new_public_keys.pop()
        except AttributeError:
            pass
        # NOTE: Instead of submitting base58 encoded addresses, a user
        #       of this class can also submit fully instantiated
        #       Cryptoconditions. In the case of casting
        #       `new_public_keys` to a Ed25519Fulfillment with the
        #       result of a `TypeError`, we're assuming that
        #       `new_public_keys` is a Cryptocondition then.
        if isinstance(new_public_keys, Fulfillment):
            ffill = new_public_keys
        else:
            ffill = Ed25519Sha256(
                public_key=base58.b58decode(new_public_keys))
    # Attach the freshly built subfulfillment to the running root.
    initial.add_subfulfillment(ffill)
    return initial
|
Generates ThresholdSha256 conditions from a list of new owners.
Note:
This method is intended only to be used with a reduce function.
For a description on how to use this method, see
:meth:`~.Output.generate`.
Args:
initial (:class:`cryptoconditions.ThresholdSha256`):
A Condition representing the overall root.
new_public_keys (:obj:`list` of :obj:`str`|str): A list of new
owners or a single new owner.
Returns:
:class:`cryptoconditions.ThresholdSha256`:
|
_gen_condition
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def from_dict(cls, data):
    """Deserialize an Output from its dictionary representation.
    Note:
        To pass a serialization cycle multiple times, a
        Cryptoconditions Fulfillment needs to be present in the
        passed-in dictionary, as Condition URIs are not serializable
        anymore.
    Args:
        data (dict): The dict to be transformed.
    Returns:
        :class:`~bigchaindb.common.transaction.Output`
    """
    try:
        ffill = _fulfillment_from_details(data['condition']['details'])
    except KeyError:
        # NOTE: Hashlock condition case -- only the URI is available.
        ffill = data['condition']['uri']
    raw_amount = data['amount']
    try:
        amount = int(raw_amount)
    except ValueError:
        raise AmountError('Invalid amount: %s' % raw_amount)
    return cls(ffill, data['public_keys'], amount)
|
Transforms a Python dictionary to an Output object.
Note:
To pass a serialization cycle multiple times, a
Cryptoconditions Fulfillment needs to be present in the
passed-in dictionary, as Condition URIs are not serializable
anymore.
Args:
data (dict): The dict to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Output`
|
from_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def __init__(self, operation, asset, inputs=None, outputs=None,
             metadata=None, version=None, hash_id=None, tx_dict=None):
    """The constructor allows to create a customizable Transaction.
    Note:
        When no `version` is provided, one is being
        generated by this method.
    Args:
        operation (str): Defines the operation of the Transaction.
        asset (dict): Asset payload for this Transaction.
        inputs (:obj:`list` of :class:`~bigchaindb.common.
            transaction.Input`, optional): Define the assets to spend.
        outputs (:obj:`list` of :class:`~bigchaindb.common.
            transaction.Output`, optional): Define the assets to
            lock.
        metadata (dict): Metadata to be stored along with the
            Transaction.
        version (string): Defines the version number of a Transaction.
        hash_id (string): Hash id of the transaction.
        tx_dict (dict, optional): cached dict form of the transaction,
            if already available.
    """
    if operation not in self.ALLOWED_OPERATIONS:
        raise ValueError('`operation` must be one of {}'.format(
            ', '.join(self.__class__.ALLOWED_OPERATIONS)))
    # Asset payloads for 'CREATE' operations must be None or
    # dicts holding a `data` property. Asset payloads for 'TRANSFER'
    # operations must be dicts holding an `id` property.
    create_asset_ok = (asset is None or
                       (isinstance(asset, dict) and 'data' in asset))
    if operation == self.CREATE and not create_asset_ok:
        raise TypeError(('`asset` must be None or a dict holding a `data` '
                         " property instance for '{}' Transactions".format(operation)))
    if (operation == self.TRANSFER and
            not (isinstance(asset, dict) and 'id' in asset)):
        raise TypeError(('`asset` must be a dict holding an `id` property '
                         'for \'TRANSFER\' Transactions'))
    if outputs and not isinstance(outputs, list):
        raise TypeError('`outputs` must be a list instance or None')
    if inputs and not isinstance(inputs, list):
        raise TypeError('`inputs` must be a list instance or None')
    if metadata is not None and not isinstance(metadata, dict):
        raise TypeError('`metadata` must be a dict or None')
    self.version = self.VERSION if version is None else version
    self.operation = operation
    self.asset = asset
    self.inputs = inputs or []
    self.outputs = outputs or []
    self.metadata = metadata
    self._id = hash_id
    self.tx_dict = tx_dict
|
The constructor allows to create a customizable Transaction.
Note:
When no `version` is provided, one is being
generated by this method.
Args:
operation (str): Defines the operation of the Transaction.
asset (dict): Asset payload for this Transaction.
inputs (:obj:`list` of :class:`~bigchaindb.common.
transaction.Input`, optional): Define the assets to
outputs (:obj:`list` of :class:`~bigchaindb.common.
transaction.Output`, optional): Define the assets to
lock.
metadata (dict): Metadata to be stored along with the
Transaction.
version (string): Defines the version number of a Transaction.
hash_id (string): Hash id of the transaction.
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def unspent_outputs(self):
    """UnspentOutput: The outputs of this transaction, in a data
    structure containing relevant information for storing them in
    a UTXO set, and performing validation.
    """
    # CREATE transactions define a new asset whose id is the tx id;
    # TRANSFER transactions reference an existing asset id.
    if self.operation == self.CREATE:
        self._asset_id = self._id
    elif self.operation == self.TRANSFER:
        self._asset_id = self.asset['id']
    return (
        UnspentOutput(
            transaction_id=self._id,
            output_index=index,
            amount=out.amount,
            asset_id=self._asset_id,
            condition_uri=out.fulfillment.condition_uri,
        )
        for index, out in enumerate(self.outputs)
    )
|
UnspentOutput: The outputs of this transaction, in a data
structure containing relevant information for storing them in
a UTXO set, and performing validation.
|
unspent_outputs
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def spent_outputs(self):
    """Tuple of :obj:`dict`: Inputs of this transaction. Each input
    is represented as a dictionary containing a transaction id and
    output index.
    """
    # Inputs without a `fulfills` link (e.g. CREATE inputs) are skipped.
    return (
        inp.fulfills.to_dict()
        for inp in self.inputs
        if inp.fulfills
    )
|
Tuple of :obj:`dict`: Inputs of this transaction. Each input
is represented as a dictionary containing a transaction id and
output index.
|
spent_outputs
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def create(cls, tx_signers, recipients, metadata=None, asset=None):
    """A simple way to generate a `CREATE` transaction.
    Note:
        This method currently supports the following Cryptoconditions
        use cases:
            - Ed25519
            - ThresholdSha256
        Additionally, it provides support for the following BigchainDB
        use cases:
            - Multiple inputs and outputs.
    Args:
        tx_signers (:obj:`list` of :obj:`str`): A list of keys that
            represent the signers of the CREATE Transaction.
        recipients (:obj:`list` of :obj:`tuple`): A list of
            ([keys],amount) that represent the recipients of this
            Transaction.
        metadata (dict): The metadata to be stored along with the
            Transaction.
        asset (dict): The metadata associated with the asset that will
            be created in this Transaction.
    Returns:
        :class:`~bigchaindb.common.transaction.Transaction`
    """
    # Delegate argument validation; it returns ready-made inputs/outputs.
    inputs, outputs = cls.validate_create(
        tx_signers, recipients, asset, metadata)
    return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)
|
A simple way to generate a `CREATE` transaction.
Note:
This method currently supports the following Cryptoconditions
use cases:
- Ed25519
- ThresholdSha256
Additionally, it provides support for the following BigchainDB
use cases:
- Multiple inputs and outputs.
Args:
tx_signers (:obj:`list` of :obj:`str`): A list of keys that
represent the signers of the CREATE Transaction.
recipients (:obj:`list` of :obj:`tuple`): A list of
([keys],amount) that represent the recipients of this
Transaction.
metadata (dict): The metadata to be stored along with the
Transaction.
asset (dict): The metadata associated with the asset that will
be created in this Transaction.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
create
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def transfer(cls, inputs, recipients, asset_id, metadata=None):
    """A simple way to generate a `TRANSFER` transaction.
    Note:
        Different cases for threshold conditions:
        Combining multiple `inputs` with an arbitrary number of
        `recipients` can yield interesting cases for the creation of
        threshold conditions we'd like to support. The following
        notation is proposed:
        1. The index of a `recipient` corresponds to the index of
           an input:
           e.g. `transfer([input1], [a])`, means `input1` would now be
           owned by user `a`.
        2. `recipients` can (almost) get arbitrary deeply nested,
           creating various complex threshold conditions:
           e.g. `transfer([inp1, inp2], [[a, [b, c]], d])`, means
           `a`'s signature would have a 50% weight on `inp1`
           compared to `b` and `c` that share 25% of the leftover
           weight respectively. `inp2` is owned completely by `d`.
    Args:
        inputs (:obj:`list` of :class:`~bigchaindb.common.transaction.
            Input`): Converted `Output`s, intended to
            be used as inputs in the transfer to generate.
        recipients (:obj:`list` of :obj:`tuple`): A list of
            ([keys],amount) that represent the recipients of this
            Transaction.
        asset_id (str): The asset ID of the asset to be transferred in
            this Transaction.
        metadata (dict): Python dictionary to be stored along with the
            Transaction.
    Returns:
        :class:`~bigchaindb.common.transaction.Transaction`
    """
    # Delegate argument validation; it returns ready-made inputs/outputs.
    inputs, outputs = cls.validate_transfer(
        inputs, recipients, asset_id, metadata)
    return cls(cls.TRANSFER, {'id': asset_id}, inputs, outputs, metadata)
|
A simple way to generate a `TRANSFER` transaction.
Note:
Different cases for threshold conditions:
Combining multiple `inputs` with an arbitrary number of
`recipients` can yield interesting cases for the creation of
threshold conditions we'd like to support. The following
notation is proposed:
1. The index of a `recipient` corresponds to the index of
an input:
e.g. `transfer([input1], [a])`, means `input1` would now be
owned by user `a`.
2. `recipients` can (almost) get arbitrary deeply nested,
creating various complex threshold conditions:
e.g. `transfer([inp1, inp2], [[a, [b, c]], d])`, means
`a`'s signature would have a 50% weight on `inp1`
compared to `b` and `c` that share 25% of the leftover
weight respectively. `inp2` is owned completely by `d`.
Args:
inputs (:obj:`list` of :class:`~bigchaindb.common.transaction.
Input`): Converted `Output`s, intended to
be used as inputs in the transfer to generate.
recipients (:obj:`list` of :obj:`tuple`): A list of
([keys],amount) that represent the recipients of this
Transaction.
asset_id (str): The asset ID of the asset to be transferred in
this Transaction.
metadata (dict): Python dictionary to be stored along with the
Transaction.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
transfer
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def to_inputs(self, indices=None):
    """Converts a Transaction's outputs to spendable inputs.
    Note:
        Takes the Transaction's outputs and derives inputs
        from that can then be passed into `Transaction.transfer` as
        `inputs`.
        A list of integers can be passed to `indices` that
        defines which outputs should be returned as inputs.
        If no `indices` are passed (empty list or None) all
        outputs of the Transaction are returned.
    Args:
        indices (:obj:`list` of int): Defines which
            outputs should be returned as inputs.
    Returns:
        :obj:`list` of :class:`~bigchaindb.common.transaction.
            Input`
    """
    # NOTE: An empty or missing `indices` means "use every output".
    if not indices:
        indices = range(len(self.outputs))
    inputs = []
    for idx in indices:
        out = self.outputs[idx]
        inputs.append(Input(out.fulfillment,
                            out.public_keys,
                            TransactionLink(self.id, idx)))
    return inputs
|
Converts a Transaction's outputs to spendable inputs.
Note:
Takes the Transaction's outputs and derives inputs
from that can then be passed into `Transaction.transfer` as
`inputs`.
A list of integers can be passed to `indices` that
defines which outputs should be returned as inputs.
If no `indices` are passed (empty list or None) all
outputs of the Transaction are returned.
Args:
indices (:obj:`list` of int): Defines which
outputs should be returned as inputs.
Returns:
:obj:`list` of :class:`~bigchaindb.common.transaction.
Input`
|
to_inputs
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def add_input(self, input_):
    """Append a single Input to this Transaction's inputs.
    Args:
        input_ (:class:`~bigchaindb.common.transaction.
            Input`): An Input to be added to the Transaction.
    """
    if isinstance(input_, Input):
        self.inputs.append(input_)
    else:
        raise TypeError('`input_` must be a Input instance')
|
Adds an input to a Transaction's list of inputs.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`): An Input to be added to the Transaction.
|
add_input
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def add_output(self, output):
    """Append a single Output to this Transaction's outputs.
    Args:
        output (:class:`~bigchaindb.common.transaction.
            Output`): An Output to be added to the
            Transaction.
    """
    if isinstance(output, Output):
        self.outputs.append(output)
    else:
        raise TypeError('`output` must be an Output instance or None')
|
Adds an output to a Transaction's list of outputs.
Args:
output (:class:`~bigchaindb.common.transaction.
Output`): An Output to be added to the
Transaction.
|
add_output
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def sign(self, private_keys):
    """Fulfills a previous Transaction's Output by signing Inputs.
    Note:
        This method works only for the following Cryptoconditions
        currently:
            - Ed25519Fulfillment
            - ThresholdSha256
        Furthermore, note that all keys required to fully sign the
        Transaction have to be passed to this method. A subset of all
        will cause this method to fail.
    Args:
        private_keys (:obj:`list` of :obj:`str`): A complete list of
            all private keys needed to sign all Fulfillments of this
            Transaction.
    Returns:
        :class:`~bigchaindb.common.transaction.Transaction`
    """
    # TODO: Signing should be possible with at least one of all private
    #       keys supplied to this method.
    if private_keys is None or not isinstance(private_keys, list):
        raise TypeError('`private_keys` must be a list instance')

    def gen_public_key(private_key):
        # TODO FOR CC: Adjust interface so that this function becomes
        #              unnecessary
        # cc now provides a single method `encode` to return the key
        # in several different encodings.
        # Returned values from cc are always bytestrings, so decode to
        # obtain a python str.
        return private_key.get_verifying_key().encode().decode()

    # NOTE: Map each public key to its private key so inputs can look
    #       up the signing key for any owner.
    key_pairs = {}
    for raw_key in private_keys:
        private_key = PrivateKey(raw_key)
        key_pairs[gen_public_key(private_key)] = private_key

    # Serialize the transaction without signatures; this is the payload
    # every input signs over.
    tx_serialized = Transaction._to_str(
        Transaction._remove_signatures(self.to_dict()))
    for index, input_ in enumerate(self.inputs):
        self.inputs[index] = self._sign_input(
            input_, tx_serialized, key_pairs)
    self._hash()
    return self
|
Fulfills a previous Transaction's Output by signing Inputs.
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256
Furthermore, note that all keys required to fully sign the
Transaction have to be passed to this method. A subset of all
will cause this method to fail.
Args:
private_keys (:obj:`list` of :obj:`str`): A complete list of
all private keys needed to sign all Fulfillments of this
Transaction.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
sign
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _sign_input(cls, input_, message, key_pairs):
    """Signs a single Input.
    Note:
        Dispatches on the concrete fulfillment type; only
        Ed25519Sha256 and ThresholdSha256 are supported.
    Args:
        input_ (:class:`~bigchaindb.common.transaction.
            Input`) The Input to be signed.
        message (str): The message to be signed
        key_pairs (dict): The keys to sign the Transaction with.
    """
    fulfillment = input_.fulfillment
    if isinstance(fulfillment, Ed25519Sha256):
        return cls._sign_simple_signature_fulfillment(
            input_, message, key_pairs)
    if isinstance(fulfillment, ThresholdSha256):
        return cls._sign_threshold_signature_fulfillment(
            input_, message, key_pairs)
    raise ValueError(
        'Fulfillment couldn\'t be matched to '
        'Cryptocondition fulfillment type.')
|
Signs a single Input.
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
|
_sign_input
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):
    """Signs a Ed25519Fulfillment.
    Args:
        input_ (:class:`~bigchaindb.common.transaction.
            Input`) The input to be signed.
        message (str): The message to be signed
        key_pairs (dict): The keys to sign the Transaction with.
    """
    # NOTE: To eliminate the dangers of accidentally signing a condition by
    #       reference, we remove the reference of input_ here
    #       intentionally. If the user of this class knows how to use it,
    #       this should never happen, but then again, never say never.
    input_ = deepcopy(input_)
    public_key = input_.owners_before[0]
    digest = sha3_256(message.encode())
    if input_.fulfills:
        # Bind the signature to the specific output being spent.
        digest.update('{}{}'.format(
            input_.fulfills.txid, input_.fulfills.output).encode())
    try:
        # cryptoconditions makes no assumptions of the encoding of the
        # message to sign or verify. It only accepts bytestrings
        input_.fulfillment.sign(
            digest.digest(),
            base58.b58decode(key_pairs[public_key].encode()))
    except KeyError:
        raise KeypairMismatchException('Public key {} is not a pair to '
                                       'any of the private keys'
                                       .format(public_key))
    return input_
|
Signs a Ed25519Fulfillment.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
|
_sign_simple_signature_fulfillment
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
    """Signs a ThresholdSha256.
    Args:
        input_ (:class:`~bigchaindb.common.transaction.
            Input`) The Input to be signed.
        message (str): The message to be signed
        key_pairs (dict): The keys to sign the Transaction with.
    """
    # Work on a copy so the caller's Input is never mutated.
    input_ = deepcopy(input_)
    # The signing payload is the SHA3-256 digest of the serialized tx...
    message = sha3_256(message.encode())
    if input_.fulfills:
        # ...extended with the spent output's (txid, index) so the
        # signature is bound to that specific output.
        message.update('{}{}'.format(
            input_.fulfills.txid, input_.fulfills.output).encode())
    # Sign once per distinct owner; each owner's key signs every
    # subfulfillment that matches its public key.
    for owner_before in set(input_.owners_before):
        # TODO: CC should throw a KeypairMismatchException, instead of
        #       our manual mapping here
        # TODO FOR CC: Naming wise this is not so smart,
        #              `get_subcondition` in fact doesn't return a
        #              condition but a fulfillment
        # TODO FOR CC: `get_subcondition` is singular. One would not
        #              expect to get a list back.
        ccffill = input_.fulfillment
        subffills = ccffill.get_subcondition_from_vk(
            base58.b58decode(owner_before))
        if not subffills:
            raise KeypairMismatchException('Public key {} cannot be found '
                                           'in the fulfillment'
                                           .format(owner_before))
        try:
            private_key = key_pairs[owner_before]
        except KeyError:
            raise KeypairMismatchException('Public key {} is not a pair '
                                           'to any of the private keys'
                                           .format(owner_before))
        # cryptoconditions makes no assumptions of the encoding of the
        # message to sign or verify. It only accepts bytestrings
        for subffill in subffills:
            subffill.sign(
                message.digest(), base58.b58decode(private_key.encode()))
    return input_
|
Signs a ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
|
_sign_threshold_signature_fulfillment
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _inputs_valid(self, output_condition_uris):
    """Validates an Input against a given set of Outputs.
    Note:
        The number of `output_condition_uris` must be equal to the
        number of Inputs a Transaction has.
    Args:
        output_condition_uris (:obj:`list` of :obj:`str`): A list of
            Outputs to check the Inputs against.
    Returns:
        bool: If all Outputs are valid.
    """
    if len(self.inputs) != len(output_condition_uris):
        raise ValueError('Inputs and '
                         'output_condition_uris must have the same count')
    # Serialize once -- with signatures and id stripped -- so every
    # input is validated against the same signing payload.
    tx_dict = self.tx_dict if self.tx_dict else self.to_dict()
    tx_dict = Transaction._remove_signatures(tx_dict)
    tx_dict['id'] = None
    tx_serialized = Transaction._to_str(tx_dict)
    return all(
        self._input_valid(input_, self.operation, tx_serialized, cond_uri)
        for input_, cond_uri in zip(self.inputs, output_condition_uris)
    )
|
Validates an Input against a given set of Outputs.
Note:
The number of `output_condition_uris` must be equal to the
number of Inputs a Transaction has.
Args:
output_condition_uris (:obj:`list` of :obj:`str`): A list of
Outputs to check the Inputs against.
Returns:
bool: If all Outputs are valid.
|
_inputs_valid
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _input_valid(self, input_, operation, message, output_condition_uri=None):
    """Validates a single Input against a single Output.

    Note:
        In case of a `CREATE` Transaction, this method
        does not validate against `output_condition_uri`.

    Args:
        input_ (:class:`~bigchaindb.common.transaction.
            Input`) The Input to be signed.
        operation (str): The type of Transaction.
        message (str): The fulfillment message (serialized transaction).
        output_condition_uri (str, optional): An Output to check the
            Input against.

    Returns:
        bool: If the Input is valid.
    """
    ccffill = input_.fulfillment
    try:
        # Round-trip through the URI form to get a parsed, verifiable
        # fulfillment; any malformed fulfillment fails validation outright.
        parsed_ffill = Fulfillment.from_uri(ccffill.serialize_uri())
    except (TypeError, ValueError,
            ParsingError, ASN1DecodeError, ASN1EncodeError):
        return False
    if operation == self.CREATE:
        # NOTE: In the case of a `CREATE` transaction, the
        #       output is always valid.
        output_valid = True
    else:
        # For TRANSFER, the fulfillment must match the condition of the
        # Output being spent.
        output_valid = output_condition_uri == ccffill.condition_uri
    # The signed message is the hash of the serialized transaction, mixed
    # with the spent output's (txid, output index) when one is referenced.
    message = sha3_256(message.encode())
    if input_.fulfills:
        message.update('{}{}'.format(
            input_.fulfills.txid, input_.fulfills.output).encode())
    # NOTE: We pass a timestamp to `.validate`, as in case of a timeout
    #       condition we'll have to validate against it
    # cryptoconditions makes no assumptions of the encoding of the
    # message to sign or verify. It only accepts bytestrings
    ffill_valid = parsed_ffill.validate(message=message.digest())
    return output_valid and ffill_valid
|
Validates a single Input against a single Output.
Note:
In case of a `CREATE` Transaction, this method
does not validate against `output_condition_uri`.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
operation (str): The type of Transaction.
message (str): The fulfillment message.
output_condition_uri (str, optional): An Output to check the
Input against.
Returns:
bool: If the Input is valid.
|
_input_valid
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def to_dict(self):
    """Serialize this Transaction into a plain Python dictionary.

    Returns:
        dict: The Transaction as an alternative serialization format.
    """
    serialized_inputs = [input_.to_dict() for input_ in self.inputs]
    serialized_outputs = [output.to_dict() for output in self.outputs]
    return {
        'inputs': serialized_inputs,
        'outputs': serialized_outputs,
        'operation': str(self.operation),
        'metadata': self.metadata,
        'asset': self.asset,
        'version': self.version,
        'id': self._id,
    }
|
Transforms the object to a Python dictionary.
Returns:
dict: The Transaction as an alternative serialization format.
|
to_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def _remove_signatures(tx_dict):
    """Return a copy of a Transaction dict with every fulfillment cleared.

    Args:
        tx_dict (dict): The Transaction to remove all signatures from.

    Returns:
        dict: A deep copy of `tx_dict` whose inputs carry no fulfillments.
    """
    # Work on a deep copy: the caller's dict is only needed to compute the
    # transaction's hash and must not be mutated.
    stripped = deepcopy(tx_dict)
    for input_ in stripped['inputs']:
        # Clearing the entire fulfillment (rather than just a `signature`
        # key) works uniformly across condition types such as
        # ThresholdSha256, which do not expose a `signature` key.
        input_['fulfillment'] = None
    return stripped
|
Takes a Transaction dictionary and removes all signatures.
Args:
tx_dict (dict): The Transaction to remove all signatures from.
Returns:
dict
|
_remove_signatures
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def get_asset_id(cls, transactions):
    """Derive the single asset id shared by a list of :class:`~.Transactions`.

    Useful for checking that the multiple inputs of a transaction all
    relate to the same asset.

    Args:
        transactions (:obj:`list` of :class:`~bigchaindb.common.
            transaction.Transaction`): A list of Transactions.
            Usually input Transactions that should have a matching
            asset ID.

    Returns:
        str: ID of the asset.

    Raises:
        :exc:`AssetIdMismatch`: If the inputs are related to different
            assets.
    """
    if not isinstance(transactions, list):
        transactions = [transactions]

    # A CREATE transaction defines a new asset, so its own id is the asset
    # id; a TRANSFER transaction references it via `asset['id']`.
    asset_ids = {tx.id if tx.operation == tx.CREATE else tx.asset['id']
                 for tx in transactions}

    # All transactions must agree on a single asset id.
    if len(asset_ids) > 1:
        raise AssetIdMismatch(('All inputs of all transactions passed'
                               ' need to have the same asset id'))
    return asset_ids.pop()
|
Get the asset id from a list of :class:`~.Transactions`.
This is useful when we want to check if the multiple inputs of a
transaction are related to the same asset id.
Args:
transactions (:obj:`list` of :class:`~bigchaindb.common.
transaction.Transaction`): A list of Transactions.
Usually input Transactions that should have a matching
asset ID.
Returns:
str: ID of the asset.
Raises:
:exc:`AssetIdMismatch`: If the inputs are related to different
assets.
|
get_asset_id
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def validate_id(tx_body):
    """Check that a transaction's `id` equals the hash of its body.

    Args:
        tx_body (dict): The Transaction to be validated.

    Raises:
        InvalidHash: If the `id` key is missing, or does not match the
            hash of the serialized transaction body.
    """
    # NOTE: Remove reference to avoid side effects
    # tx_body = deepcopy(tx_body)
    # A JSON dump/load round-trip is used instead of deepcopy; it also
    # normalizes the dict to JSON-representable values before hashing.
    tx_body = rapidjson.loads(rapidjson.dumps(tx_body))
    try:
        proposed_tx_id = tx_body['id']
    except KeyError:
        raise InvalidHash('No transaction id found!')
    # The id field is excluded from its own hash computation.
    tx_body['id'] = None
    tx_body_serialized = Transaction._to_str(tx_body)
    valid_tx_id = Transaction._to_hash(tx_body_serialized)
    if proposed_tx_id != valid_tx_id:
        err_msg = ("The transaction's id '{}' isn't equal to "
                   "the hash of its body, i.e. it's not valid.")
        raise InvalidHash(err_msg.format(proposed_tx_id))
|
Validate the transaction ID of a transaction
Args:
tx_body (dict): The Transaction to be transformed.
|
validate_id
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def from_dict(cls, tx, skip_schema_validation=True):
    """Build a Transaction object from its dictionary representation.

    Args:
        tx (dict): The Transaction to be transformed.
        skip_schema_validation (bool): Whether to skip the id and schema
            checks before construction. Defaults to True.

    Returns:
        :class:`~bigchaindb.common.transaction.Transaction`
    """
    # Pick the concrete Transaction subclass for this operation; anything
    # that is not a dict falls back to the CREATE implementation.
    if isinstance(tx, dict):
        operation = tx.get('operation', Transaction.CREATE)
    else:
        operation = Transaction.CREATE
    cls = Transaction.resolve_class(operation)

    if not skip_schema_validation:
        cls.validate_id(tx)
        cls.validate_schema(tx)

    ins = [Input.from_dict(entry) for entry in tx['inputs']]
    outs = [Output.from_dict(entry) for entry in tx['outputs']]
    return cls(tx['operation'], tx['asset'], ins, outs,
               tx['metadata'], tx['version'], hash_id=tx['id'], tx_dict=tx)
|
Transforms a Python dictionary to a Transaction object.
Args:
tx_body (dict): The Transaction to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
from_dict
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def from_db(cls, bigchain, tx_dict_list):
    """Helper method that reconstructs a transaction dict that was returned
    from the database. It checks what asset_id to retrieve, retrieves the
    asset from the asset table and reconstructs the transaction.

    Args:
        bigchain (:class:`~bigchaindb.tendermint.BigchainDB`): An instance
            of BigchainDB used to perform database queries.
        tx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or
            list of transaction dict as returned from the database.

    Returns:
        :class:`~Transaction`
    """
    # Normalize the input to a list, remembering whether to unwrap again.
    return_list = True
    if isinstance(tx_dict_list, dict):
        tx_dict_list = [tx_dict_list]
        return_list = False

    # Index the transactions by id; metadata is reset here and refilled
    # from the metadata table below.
    tx_map = {}
    tx_ids = []
    for tx in tx_dict_list:
        tx.update({'metadata': None})
        tx_map[tx['id']] = tx
        tx_ids.append(tx['id'])

    # Assets are stored in a separate table keyed by transaction id;
    # attach each one back onto its transaction.
    assets = list(bigchain.get_assets(tx_ids))
    for asset in assets:
        if asset is not None:
            tx = tx_map[asset['id']]
            del asset['id']
            tx['asset'] = asset

    # Same for metadata: stored separately, keyed by transaction id.
    tx_ids = list(tx_map.keys())
    metadata_list = list(bigchain.get_metadata(tx_ids))
    for metadata in metadata_list:
        tx = tx_map[metadata['id']]
        tx.update({'metadata': metadata.get('metadata')})

    if return_list:
        tx_list = []
        for tx_id, tx in tx_map.items():
            tx_list.append(cls.from_dict(tx))
        return tx_list
    else:
        tx = list(tx_map.values())[0]
        return cls.from_dict(tx)
|
Helper method that reconstructs a transaction dict that was returned
from the database. It checks what asset_id to retrieve, retrieves the
asset from the asset table and reconstructs the transaction.
Args:
bigchain (:class:`~bigchaindb.tendermint.BigchainDB`): An instance
of BigchainDB used to perform database queries.
tx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or
list of transaction dict as returned from the database.
Returns:
:class:`~Transaction`
|
from_db
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def resolve_class(operation):
    """Map an `operation` string to its Transaction implementation class.

    Unknown operations fall back to the CREATE implementation.
    """
    fallback = Transaction.type_registry.get(Transaction.CREATE)
    return Transaction.type_registry.get(operation, fallback)
|
For the given `tx` based on the `operation` key return its implementation class
|
resolve_class
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/transaction.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/transaction.py
|
Apache-2.0
|
def validate_txn_obj(obj_name, obj, key, validation_fun):
    """Validate the value of `key` in `obj` using `validation_fun`.

    Only performed for the `localmongodb` backend, where document keys
    are subject to MongoDB's key restrictions; other backends are a no-op.

    Args:
        obj_name (str): name for `obj` being validated.
        obj (dict): dictionary object.
        key (str): key to be validated in `obj`.
        validation_fun (function): function used to validate the value
            of `key`.

    Returns:
        None: indicates validation successful

    Raises:
        ValidationError: `validation_fun` will raise exception on failure
    """
    if bigchaindb.config['database']['backend'] == 'localmongodb':
        value = obj.get(key, {})
        if isinstance(value, dict):
            validate_all_keys_in_obj(obj_name, value, validation_fun)
        elif isinstance(value, list):
            validate_all_items_in_list(obj_name, value, validation_fun)
|
Validate value of `key` in `obj` using `validation_fun`.
Args:
obj_name (str): name for `obj` being validated.
obj (dict): dictionary object.
key (str): key to be validated in `obj`.
validation_fun (function): function used to validate the value
of `key`.
Returns:
None: indicates validation successful
Raises:
ValidationError: `validation_fun` will raise exception on failure
|
validate_txn_obj
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/utils.py
|
Apache-2.0
|
def validate_all_keys_in_obj(obj_name, obj, validation_fun):
    """Recursively run `validation_fun` on every key found in `obj`.

    Args:
        obj_name (str): name for `obj` being validated.
        obj (dict): dictionary object.
        validation_fun (function): function used to validate each key;
            called as ``validation_fun(obj_name, key)``.

    Returns:
        None: indicates validation successful

    Raises:
        ValidationError: `validation_fun` will raise this error on failure
    """
    for current_key, current_value in obj.items():
        validation_fun(obj_name, current_key)
        # Descend into nested containers so deeply-nested keys are
        # validated as well.
        if isinstance(current_value, list):
            validate_all_items_in_list(obj_name, current_value, validation_fun)
        elif isinstance(current_value, dict):
            validate_all_keys_in_obj(obj_name, current_value, validation_fun)
|
Validate all (nested) keys in `obj` by using `validation_fun`.
Args:
obj_name (str): name for `obj` being validated.
obj (dict): dictionary object.
validation_fun (function): function used to validate the value
of `key`.
Returns:
None: indicates validation successful
Raises:
ValidationError: `validation_fun` will raise this error on failure
|
validate_all_keys_in_obj
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/utils.py
|
Apache-2.0
|
def validate_all_values_for_key_in_obj(obj, key, validation_fun):
    """Apply `validation_fun` to the value of every (nested) occurrence
    of `key` inside `obj`.

    A matching key's value is handed to `validation_fun` as-is and is not
    descended into, even when it is itself a container.

    Args:
        obj (dict): dictionary object.
        key (str): key whose value is to be validated.
        validation_fun (function): function used to validate the value
            of `key`.

    Raises:
        ValidationError: `validation_fun` will raise this error on failure
    """
    for current_key, current_value in obj.items():
        if current_key == key:
            validation_fun(current_value)
        elif isinstance(current_value, dict):
            validate_all_values_for_key_in_obj(current_value, key, validation_fun)
        elif isinstance(current_value, list):
            validate_all_values_for_key_in_list(current_value, key, validation_fun)
|
Validate value for all (nested) occurrence of `key` in `obj`
using `validation_fun`.
Args:
obj (dict): dictionary object.
key (str): key whose value is to be validated.
validation_fun (function): function used to validate the value
of `key`.
Raises:
ValidationError: `validation_fun` will raise this error on failure
|
validate_all_values_for_key_in_obj
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/utils.py
|
Apache-2.0
|
def validate_transaction_schema(tx):
    """Validate a transaction dict against the JSON schemas.

    TX_SCHEMA_COMMON holds constraints shared by every transaction type;
    the TRANSFER or CREATE schema adds operation-specific constraints.
    """
    _validate_schema(TX_SCHEMA_COMMON, tx)
    operation_schema = (TX_SCHEMA_TRANSFER if tx['operation'] == 'TRANSFER'
                        else TX_SCHEMA_CREATE)
    _validate_schema(operation_schema, tx)
|
Validate a transaction dict.
TX_SCHEMA_COMMON contains properties that are common to all types of
transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
|
validate_transaction_schema
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/common/schema/__init__.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/common/schema/__init__.py
|
Apache-2.0
|
def get_validator_change(cls, bigchain):
    """Return the validator set change as of the most recent block.

    Returns:
        dict: ``{'height': <block_height>, 'validators': <validator_set>}``,
        or None when no block has been committed yet.
    """
    latest = bigchain.get_latest_block()
    if latest is None:
        # No blocks committed yet, hence no validator set to report.
        return None
    return bigchain.get_validator_change(latest['height'])
|
Return the validator set from the most recent approved block
:return: {
'height': <block_height>,
'validators': <validator_set>
}
|
get_validator_change
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/election.py
|
Apache-2.0
|
def get_validators(cls, bigchain, height=None):
    """Return a mapping of validator `public_key` to `voting_power`.

    Args:
        bigchain: BigchainDB instance used to query the validator set.
        height (int, optional): block height to query at; latest if None.
    """
    result = {}
    for entry in bigchain.get_validators(height):
        # NOTE: we assume that Tendermint encodes public key in base64;
        # convert to the representation used as keys throughout bigchaindb.
        raw_key = key_from_base64(entry['public_key']['value'])
        result[public_key_from_ed25519_key(raw_key)] = entry['voting_power']
    return result
|
Return a dictionary of validators with key as `public_key` and
value as the `voting_power`
|
get_validators
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/election.py
|
Apache-2.0
|
def recipients(cls, bigchain):
    """Convert the validator dictionary to a recipient list for `Transaction`."""
    return [([public_key], voting_power)
            for public_key, voting_power
            in cls.get_validators(bigchain).items()]
|
Convert validator dictionary to a recipient list for `Transaction`
|
recipients
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/election.py
|
Apache-2.0
|
def validate(self, bigchain, current_transactions=[]):
    """Validate election transaction

    NOTE:
        * A valid election is initiated by an existing validator.
        * A valid election is one where voters are validators and votes are
          allocated according to the voting power of each validator node.

    Args:
        :param bigchain: (BigchainDB) an instantiated bigchaindb.lib.BigchainDB object.
        :param current_transactions: (list) A list of transactions to be validated along with the election

    Returns:
        Election: a Election object or an object of the derived Election subclass.

    Raises:
        ValidationError: If the election is invalid
    """
    input_conditions = []

    # Reject duplicates, both against committed state and against the
    # other transactions being validated in the same block.
    duplicates = any(txn for txn in current_transactions if txn.id == self.id)
    if bigchain.is_committed(self.id) or duplicates:
        raise DuplicateTransaction('transaction `{}` already exists'
                                   .format(self.id))

    if not self.inputs_valid(input_conditions):
        raise InvalidSignature('Transaction signature is invalid.')

    current_validators = self.get_validators(bigchain)

    # NOTE: Proposer should be a single node
    if len(self.inputs) != 1 or len(self.inputs[0].owners_before) != 1:
        raise MultipleInputsError('`tx_signers` must be a list instance of length one')

    # NOTE: Check if the proposer is a validator.
    [election_initiator_node_pub_key] = self.inputs[0].owners_before
    if election_initiator_node_pub_key not in current_validators.keys():
        raise InvalidProposer('Public key is not a part of the validator set')

    # NOTE: Check if all validators have been assigned votes equal to their voting power
    if not self.is_same_topology(current_validators, self.outputs):
        # Fixed grammar in the error message ("much be" -> "must be").
        raise UnequalValidatorSet('Validator set must be exactly same to the outputs of election')

    return self
|
Validate election transaction
NOTE:
* A valid election is initiated by an existing validator.
* A valid election is one where voters are validators and votes are
allocated according to the voting power of each validator node.
Args:
:param bigchain: (BigchainDB) an instantiated bigchaindb.lib.BigchainDB object.
:param current_transactions: (list) A list of transactions to be validated along with the election
Returns:
Election: a Election object or an object of the derived Election subclass.
Raises:
ValidationError: If the election is invalid
|
validate
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/election.py
|
Apache-2.0
|
def validate_schema(cls, tx):
    """Validate an ELECTION transaction.

    ELECTION extends CREATE, so the common and CREATE schemas apply,
    plus any schema specific to the concrete election type.
    """
    schemas = [TX_SCHEMA_COMMON, TX_SCHEMA_CREATE]
    if cls.TX_SCHEMA_CUSTOM:
        schemas.append(cls.TX_SCHEMA_CUSTOM)
    for schema in schemas:
        _validate_schema(schema, tx)
|
Validate the election transaction. Since `ELECTION` extends `CREATE` transaction, all the validations for
`CREATE` transaction should be inherited
|
validate_schema
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/election.py
|
Apache-2.0
|
def has_concluded(self, bigchain, current_votes=[]):
    """Check if the election can be concluded or not.

    * Elections can only be concluded if the validator set has not changed
      since the election was initiated.
    * Elections can be concluded only if the current votes form a supermajority.

    Custom elections may override this function and introduce additional checks.
    """
    if self.has_validator_set_changed(bigchain):
        return False

    election_pk = self.to_public_key(self.id)
    votes_committed = self.get_commited_votes(bigchain, election_pk)
    votes_current = self.count_votes(election_pk, current_votes)

    # Total voting power allocated when the election was created.
    total_votes = sum(output.amount for output in self.outputs)
    # Conclude exactly once: committed votes alone are still below the 2/3
    # supermajority, but adding this block's votes crosses the threshold.
    if (votes_committed < (2/3) * total_votes) and \
            (votes_committed + votes_current >= (2/3)*total_votes):
        return True

    return False
|
Check if the election can be concluded or not.
* Elections can only be concluded if the validator set has not changed
since the election was initiated.
* Elections can be concluded only if the current votes form a supermajority.
Custom elections may override this function and introduce additional checks.
|
has_concluded
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/election.py
|
Apache-2.0
|
def process_block(cls, bigchain, new_height, txns):
    """Looks for election and vote transactions inside the block, records
    and processes elections.

    Every election is recorded in the database.

    Every vote has a chance to conclude the corresponding election. When
    an election is concluded, the corresponding database record is
    marked as such.

    Elections and votes are processed in the order in which they
    appear in the block. Elections are concluded in the order of
    appearance of their first votes in the block.

    For every election concluded in the block, calls its `on_approval`
    method. The returned value of the last `on_approval`, if any,
    is a validator set update to be applied in one of the following blocks.

    `on_approval` methods are implemented by elections of particular type.
    The method may contain side effects but should be idempotent. To account
    for other concluded elections, if it requires so, the method should
    rely on the database state.
    """
    # elections initiated in this block
    initiated_elections = cls._get_initiated_elections(new_height, txns)

    if initiated_elections:
        bigchain.store_elections(initiated_elections)

    # elections voted for in this block and their votes
    elections = cls._get_votes(txns)

    validator_update = None

    for election_id, votes in elections.items():
        election = bigchain.get_transaction(election_id)
        if election is None:
            # Vote references an unknown election; skip it.
            continue

        if not election.has_concluded(bigchain, votes):
            continue

        # Only the last concluded election's update is kept; see the
        # docstring above for the rationale.
        validator_update = election.on_approval(bigchain, new_height)
        election.store(bigchain, new_height, is_concluded=True)

    return [validator_update] if validator_update else []
|
Looks for election and vote transactions inside the block, records
and processes elections.
Every election is recorded in the database.
Every vote has a chance to conclude the corresponding election. When
an election is concluded, the corresponding database record is
marked as such.
Elections and votes are processed in the order in which they
appear in the block. Elections are concluded in the order of
appearance of their first votes in the block.
For every election concluded in the block, calls its `on_approval`
method. The returned value of the last `on_approval`, if any,
is a validator set update to be applied in one of the following blocks.
`on_approval` methods are implemented by elections of particular type.
The method may contain side effects but should be idempotent. To account
for other concluded elections, if it requires so, the method should
rely on the database state.
|
process_block
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/election.py
|
Apache-2.0
|
def validate_schema(cls, tx):
    """Validate a validator-election VOTE transaction.

    VOTE extends TRANSFER, so the common and TRANSFER schemas apply in
    addition to the vote-specific custom schema.
    """
    for schema in (TX_SCHEMA_COMMON, TX_SCHEMA_TRANSFER, cls.TX_SCHEMA_CUSTOM):
        _validate_schema(schema, tx)
|
Validate the validator election vote transaction. Since `VOTE` extends `TRANSFER`
transaction, all the validations for `CREATE` transaction should be inherited
|
validate_schema
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/elections/vote.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/elections/vote.py
|
Apache-2.0
|
def validate(self, bigchain, current_transactions=[]):
    """Validate a validator-election transaction.

    For details see BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
    """
    current_validators = self.get_validators(bigchain)

    super(ValidatorElection, self).validate(bigchain, current_transactions=current_transactions)

    # NOTE: a single election may not change more than 1/3 of the current
    # total voting power.
    total_power = sum(current_validators.values())
    if self.asset['data']['power'] >= (1/3) * total_power:
        raise InvalidPowerChange('`power` change must be less than 1/3 of total power')

    return self
|
For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
|
validate
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/upsert_validator/validator_election.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/upsert_validator/validator_election.py
|
Apache-2.0
|
def add_routes(app):
    """Register every API section's routes on the given Flask app."""
    for prefix, routes in API_SECTIONS:
        api = Api(app, prefix=prefix)
        for (pattern, resource, *extra_args), kwargs in routes:
            # Accept URLs with and without a trailing slash by default.
            kwargs.setdefault('strict_slashes', False)
            api.add_resource(resource, pattern, *extra_args, **kwargs)
|
Add the routes to an app
|
add_routes
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/routes.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/routes.py
|
Apache-2.0
|
def __init__(self, app, *, options=None):
    """Initialize a new standalone application.

    Args:
        app: A wsgi Python application.
        options (dict): the gunicorn configuration.
    """
    # NOTE(review): attributes are assigned before super().__init__() —
    # the gunicorn base class presumably reads them during its own init;
    # keep this ordering.
    self.options = options or {}
    self.application = app
    super().__init__()
|
Initialize a new standalone application.
Args:
app: A wsgi Python application.
options (dict): the configuration.
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/server.py
|
Apache-2.0
|
def create_app(*, debug=False, threads=1, bigchaindb_factory=None):
    """Return an instance of the Flask application.

    Args:
        debug (bool): a flag to activate the debug mode for the app
            (default: False).
        threads (int): number of threads to use (sizes the connection pool).
        bigchaindb_factory: callable producing BigchainDB instances;
            defaults to :class:`BigchainDB`.

    Return:
        an instance of the Flask application.
    """
    factory = bigchaindb_factory or BigchainDB

    app = Flask(__name__)
    # GET requests must not carry a Content-Type header; strip it early.
    app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)
    CORS(app)
    app.debug = debug
    app.config['bigchain_pool'] = utils.pool(factory, size=threads)
    add_routes(app)
    return app
|
Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
threads (int): number of threads to use
Return:
an instance of the Flask application.
|
create_app
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/server.py
|
Apache-2.0
|
def create_server(settings, log_config=None, bigchaindb_factory=None):
    """Wrap and return an application ready to be run.

    Args:
        settings (dict): a dictionary containing the settings, more info
            here http://docs.gunicorn.org/en/latest/settings.html

    Return:
        an initialized instance of the application.
    """
    # Copy so the caller's settings dict is never mutated.
    settings = copy.deepcopy(settings)

    if not settings.get('workers'):
        settings['workers'] = (multiprocessing.cpu_count() * 2) + 1

    if not settings.get('threads'):
        # Note: Threading is not recommended currently, as the frontend
        # workload is largely CPU bound and parallelisation across Python
        # threads makes it slower.
        settings['threads'] = 1

    settings['custom_log_config'] = log_config

    flask_app = create_app(debug=settings.get('debug', False),
                           threads=settings['threads'],
                           bigchaindb_factory=bigchaindb_factory)
    return StandaloneApplication(flask_app, options=settings)
|
Wrap and return an application ready to be run.
Args:
settings (dict): a dictionary containing the settings, more info
here http://docs.gunicorn.org/en/latest/settings.html
Return:
an initialized instance of the application.
|
create_server
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/server.py
|
Apache-2.0
|
def __call__(self, environ, start_response):
    """Strip Content-Type from GET requests, then delegate to the app."""
    if environ['REQUEST_METHOD'] == 'GET':
        # Some clients send a Content-Type header on GET requests; drop
        # it before the wrapped application sees it.
        missing = object()
        if environ.pop('CONTENT_TYPE', missing) is not missing:
            logger.debug('Remove header "Content-Type" from GET request')
    return self.app(environ, start_response)
|
Run the middleware and then call the original WSGI application.
|
__call__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/strip_content_type_middleware.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/strip_content_type_middleware.py
|
Apache-2.0
|
def _multiprocessing_to_asyncio(in_queue, out_queue, loop):
    """Bridge between a synchronous multiprocessing queue
    and an asynchronous asyncio queue.

    Runs forever; intended to be executed in a separate (daemon) thread.

    Args:
        in_queue (multiprocessing.Queue): input queue
        out_queue (asyncio.Queue): output queue
        loop: the asyncio event loop that owns `out_queue`.
    """
    while True:
        # Blocking get in this thread; hand the value to the loop
        # thread-safely, since asyncio queues are not thread-safe.
        value = in_queue.get()
        loop.call_soon_threadsafe(out_queue.put_nowait, value)
|
Bridge between a synchronous multiprocessing queue
and an asynchronous asyncio queue.
Args:
in_queue (multiprocessing.Queue): input queue
out_queue (asyncio.Queue): output queue
|
_multiprocessing_to_asyncio
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/websocket_server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py
|
Apache-2.0
|
def __init__(self, event_source):
    """Create a new instance.

    Args:
        event_source: a source of events. Elements in the queue
            should be strings.
    """
    self.event_source = event_source
    # Maps subscriber identifiers to their websocket connections.
    self.subscribers = {}
|
Create a new instance.
Args:
event_source: a source of events. Elements in the queue
should be strings.
|
__init__
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/websocket_server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py
|
Apache-2.0
|
async def publish(self):
    """Forward events from the source queue to every subscriber."""
    while True:
        event = await self.event_source.get()

        # POISON_PILL is the shutdown signal for this coroutine.
        if event == POISON_PILL:
            return

        if isinstance(event, str):
            messages = [event]
        elif event.type == EventTypes.BLOCK_VALID:
            messages = [json.dumps(item) for item in eventify_block(event.data)]
        else:
            messages = []

        for message in messages:
            for _, websocket in self.subscribers.items():
                await websocket.send_str(message)
|
Publish new events to the subscribers.
|
publish
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/websocket_server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py
|
Apache-2.0
|
def init_app(event_source, *, loop=None):
    """Build the aiohttp application serving the events websocket.

    Return:
        An aiohttp application.
    """
    dispatcher = Dispatcher(event_source)

    # Run the dispatcher in the background so events start flowing as
    # soon as the loop runs.
    loop.create_task(dispatcher.publish())

    app = web.Application(loop=loop)
    app['dispatcher'] = dispatcher
    app.router.add_get(EVENTS_ENDPOINT, websocket_handler)
    return app
|
Init the application server.
Return:
An aiohttp application.
|
init_app
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/websocket_server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py
|
Apache-2.0
|
def start(sync_event_source, loop=None):
    """Create and start the WebSocket server."""
    loop = loop or asyncio.get_event_loop()

    event_source = asyncio.Queue(loop=loop)

    # Bridge the synchronous multiprocessing queue into the asyncio queue
    # from a daemon thread so it never blocks interpreter shutdown.
    bridge = threading.Thread(target=_multiprocessing_to_asyncio,
                              args=(sync_event_source, event_source, loop),
                              daemon=True)
    bridge.start()

    app = init_app(event_source, loop=loop)
    aiohttp.web.run_app(app,
                        host=config['wsserver']['host'],
                        port=config['wsserver']['port'])
|
Create and start the WebSocket server.
|
start
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/websocket_server.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py
|
Apache-2.0
|
def base_ws_uri():
    """Base websocket URL that is advertised to external clients.

    Useful when the advertised URL must differ from the bind address
    (typically when running behind NAT, firewall, reverse proxy, etc.).
    """
    ws_config = config['wsserver']
    return '{}://{}:{}'.format(ws_config['advertised_scheme'],
                               ws_config['advertised_host'],
                               ws_config['advertised_port'])
|
Base websocket URL that is advertised to external clients.
Useful when the websocket URL advertised to the clients needs to be
customized (typically when running behind NAT, firewall, etc.)
|
base_ws_uri
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/base.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/base.py
|
Apache-2.0
|
def get(self, block_id):
"""API endpoint to get details about a block.
Args:
block_id (str): the id of the block.
Return:
A JSON string containing the data about the block.
"""
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
block = bigchain.get_block(block_id=block_id)
if not block:
return make_error(404)
return block
|
API endpoint to get details about a block.
Args:
block_id (str): the id of the block.
Return:
A JSON string containing the data about the block.
|
get
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/blocks.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/blocks.py
|
Apache-2.0
|
def get(self):
"""API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered when provided a status query parameter:
"valid", "invalid", "undecided".
"""
parser = reqparse.RequestParser()
parser.add_argument('transaction_id', type=str, required=True)
args = parser.parse_args(strict=True)
tx_id = args['transaction_id']
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
blocks = bigchain.get_block_containing_tx(tx_id)
return blocks
|
API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered when provided a status query parameter:
"valid", "invalid", "undecided".
|
get
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/blocks.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/blocks.py
|
Apache-2.0
|
def get_api_v1_info(api_prefix):
"""Return a dict with all the information specific for the v1 of the
api.
"""
websocket_root = base_ws_uri() + EVENTS_ENDPOINT
docs_url = [
'https://docs.bigchaindb.com/projects/server/en/v',
version.__version__,
'/http-client-server-api.html',
]
return {
'docs': ''.join(docs_url),
'transactions': '{}transactions/'.format(api_prefix),
'blocks': '{}blocks/'.format(api_prefix),
'assets': '{}assets/'.format(api_prefix),
'outputs': '{}outputs/'.format(api_prefix),
'streams': websocket_root,
'metadata': '{}metadata/'.format(api_prefix),
'validators': '{}validators'.format(api_prefix),
}
|
Return a dict with all the information specific for the v1 of the
api.
|
get_api_v1_info
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/info.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/info.py
|
Apache-2.0
|
def get(self):
"""API endpoint to retrieve a list of links to transaction
outputs.
Returns:
A :obj:`list` of :cls:`str` of links to outputs.
"""
parser = reqparse.RequestParser()
parser.add_argument('public_key', type=parameters.valid_ed25519,
required=True)
parser.add_argument('spent', type=parameters.valid_bool)
args = parser.parse_args(strict=True)
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
outputs = bigchain.get_outputs_filtered(args['public_key'],
args['spent'])
return [{'transaction_id': output.txid, 'output_index': output.output}
for output in outputs]
|
API endpoint to retrieve a list of links to transaction
outputs.
Returns:
A :obj:`list` of :cls:`str` of links to outputs.
|
get
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/outputs.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/outputs.py
|
Apache-2.0
|
def get(self, tx_id):
"""API endpoint to get details about a transaction.
Args:
tx_id (str): the id of the transaction.
Return:
A JSON string containing the data about the transaction.
"""
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
tx = bigchain.get_transaction(tx_id)
if not tx:
return make_error(404)
return tx.to_dict()
|
API endpoint to get details about a transaction.
Args:
tx_id (str): the id of the transaction.
Return:
A JSON string containing the data about the transaction.
|
get
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/transactions.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/transactions.py
|
Apache-2.0
|
def post(self):
"""API endpoint to push transactions to the Federation.
Return:
A ``dict`` containing the data about the transaction.
"""
parser = reqparse.RequestParser()
parser.add_argument('mode', type=parameters.valid_mode,
default=BROADCAST_TX_ASYNC)
args = parser.parse_args()
mode = str(args['mode'])
pool = current_app.config['bigchain_pool']
# `force` will try to format the body of the POST request even if the
# `content-type` header is not set to `application/json`
tx = request.get_json(force=True)
try:
tx_obj = Transaction.from_dict(tx)
except SchemaValidationError as e:
return make_error(
400,
message='Invalid transaction schema: {}'.format(
e.__cause__.message)
)
except ValidationError as e:
return make_error(
400,
'Invalid transaction ({}): {}'.format(type(e).__name__, e)
)
with pool() as bigchain:
try:
bigchain.validate_transaction(tx_obj)
except ValidationError as e:
return make_error(
400,
'Invalid transaction ({}): {}'.format(type(e).__name__, e)
)
else:
status_code, message = bigchain.write_transaction(tx_obj, mode)
if status_code == 202:
response = jsonify(tx)
response.status_code = 202
return response
else:
return make_error(status_code, message)
|
API endpoint to push transactions to the Federation.
Return:
A ``dict`` containing the data about the transaction.
|
post
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/transactions.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/transactions.py
|
Apache-2.0
|
def get(self):
"""API endpoint to get validators set.
Return:
A JSON string containing the validator set of the current node.
"""
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
validators = bigchain.get_validators()
return validators
|
API endpoint to get validators set.
Return:
A JSON string containing the validator set of the current node.
|
get
|
python
|
bigchaindb/bigchaindb
|
bigchaindb/web/views/validators.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/validators.py
|
Apache-2.0
|
def generate_validators(powers):
"""Generates an arbitrary number of validators with random public keys.
The object under the `storage` key is in the format expected by DB.
The object under the `eleciton` key is in the format expected by
the upsert validator election.
`public_key`, `private_key` are in the format used for signing transactions.
Args:
powers: A list of intergers representing the voting power to
assign to the corresponding validators.
"""
validators = []
for power in powers:
kp = crypto.generate_key_pair()
validators.append({
'storage': {
'public_key': {
'value': key_to_base64(base58.b58decode(kp.public_key).hex()),
'type': 'ed25519-base64',
},
'voting_power': power,
},
'election': {
'node_id': f'node-{random.choice(range(100))}',
'power': power,
'public_key': {
'value': base64.b16encode(base58.b58decode(kp.public_key)).decode('utf-8'),
'type': 'ed25519-base16',
},
},
'public_key': kp.public_key,
'private_key': kp.private_key,
})
return validators
|
Generates an arbitrary number of validators with random public keys.
The object under the `storage` key is in the format expected by DB.
The object under the `eleciton` key is in the format expected by
the upsert validator election.
`public_key`, `private_key` are in the format used for signing transactions.
Args:
powers: A list of intergers representing the voting power to
assign to the corresponding validators.
|
generate_validators
|
python
|
bigchaindb/bigchaindb
|
tests/utils.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/tests/utils.py
|
Apache-2.0
|
def _test_additionalproperties(node, path=''):
"""Validate that each object node has additionalProperties set, so that
objects with junk keys do not pass as valid.
"""
if isinstance(node, list):
for i, nnode in enumerate(node):
_test_additionalproperties(nnode, path + str(i) + '.')
if isinstance(node, dict):
if node.get('type') == 'object':
assert 'additionalProperties' in node, \
('additionalProperties not set at path:' + path)
for name, val in node.items():
_test_additionalproperties(val, path + name + '.')
|
Validate that each object node has additionalProperties set, so that
objects with junk keys do not pass as valid.
|
_test_additionalproperties
|
python
|
bigchaindb/bigchaindb
|
tests/common/test_schema.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/tests/common/test_schema.py
|
Apache-2.0
|
def test_cant_spend_same_input_twice_in_tx(b, alice):
"""Recreate duplicated fulfillments bug
https://github.com/bigchaindb/bigchaindb/issues/1099
"""
from bigchaindb.models import Transaction
from bigchaindb.common.exceptions import DoubleSpend
# create a divisible asset
tx_create = Transaction.create([alice.public_key], [([alice.public_key], 100)])
tx_create_signed = tx_create.sign([alice.private_key])
assert b.validate_transaction(tx_create_signed) == tx_create_signed
b.store_bulk_transactions([tx_create_signed])
# Create a transfer transaction with duplicated fulfillments
dup_inputs = tx_create.to_inputs() + tx_create.to_inputs()
tx_transfer = Transaction.transfer(dup_inputs, [([alice.public_key], 200)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
|
Recreate duplicated fulfillments bug
https://github.com/bigchaindb/bigchaindb/issues/1099
|
test_cant_spend_same_input_twice_in_tx
|
python
|
bigchaindb/bigchaindb
|
tests/db/test_bigchain_api.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/tests/db/test_bigchain_api.py
|
Apache-2.0
|
def get_txs_patched(conn, **args):
"""Patch `get_transactions_filtered` so that rather than return an array
of transactions it returns an array of shims with a to_dict() method
that reports one of the arguments passed to `get_transactions_filtered`.
"""
return [type('', (), {'to_dict': partial(lambda a: a, arg)})
for arg in sorted(args.items())]
|
Patch `get_transactions_filtered` so that rather than return an array
of transactions it returns an array of shims with a to_dict() method
that reports one of the arguments passed to `get_transactions_filtered`.
|
get_txs_patched
|
python
|
bigchaindb/bigchaindb
|
tests/web/test_transactions.py
|
https://github.com/bigchaindb/bigchaindb/blob/master/tests/web/test_transactions.py
|
Apache-2.0
|
def call(self, x, training=None):
"""
Apply random channel-swap augmentation to `x`.
Args:
x (`Tensor`): A batch tensor of 1D (signals) or 2D (spectrograms) data
"""
if training in (None, False):
return x
# figure out input data format
if K.ndim(x) not in (3, 4):
raise ValueError(
'ndim of input tensor x should be 3 (batch signal) or 4 (batch spectrogram),'
'but it is %d' % K.ndim(x)
)
if self.data_format == _CH_LAST_STR:
ch_axis = 3 if K.ndim(x) == 4 else 2
else:
ch_axis = 1
# get swap indices
n_ch = K.int_shape(x)[ch_axis]
if n_ch == 1:
return x
swap_indices = np.random.permutation(n_ch).tolist()
# swap and return
return tf.gather(x, indices=swap_indices, axis=ch_axis)
|
Apply random channel-swap augmentation to `x`.
Args:
x (`Tensor`): A batch tensor of 1D (signals) or 2D (spectrograms) data
|
call
|
python
|
keunwoochoi/kapre
|
kapre/augmentation.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/augmentation.py
|
MIT
|
def _apply_masks_to_axis(self, x, axis, mask_param, n_masks):
"""
Applies a number of masks (defined by the parameter n_masks) to the spectrogram
by the axis provided.
Args:
x (float `Tensor`): A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
axis (int): The axis where the masks will be applied
mask_param (int): The mask param as defined in the original paper, which is the max width of the mask
applied to the specified axis.
n_masks (int): The number of masks to be applied
Returns:
(float `Tensor`): The masked spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on x shape (that is, the input spectrogram).
"""
axis_limit = K.int_shape(x)[axis]
axis_indices = tf.range(axis_limit)
if axis == 0:
axis_indices = tf.reshape(axis_indices, (-1, 1, 1))
elif axis == 1:
axis_indices = tf.reshape(axis_indices, (1, -1, 1))
elif axis == 2:
axis_indices = tf.reshape(axis_indices, (1, 1, -1))
else:
raise NotImplementedError(f"Axis parameter must be one of the following: 0, 1, 2")
# Check if mask_width is greater than axis_limit
if axis_limit < mask_param:
raise ValueError(
"Time and freq axis shapes must be greater than time_mask_param "
"and freq_mask_param respectively"
)
x_repeated = tf.repeat(tf.expand_dims(x, 0), n_masks, axis=0)
axis_limit_repeated = tf.repeat(axis_limit, n_masks, axis=0)
axis_indices_repeated = tf.repeat(tf.expand_dims(axis_indices, 0), n_masks, axis=0)
mask_param_repeated = tf.repeat(mask_param, n_masks, axis=0)
masks = tf.map_fn(
elems=(x_repeated, axis_limit_repeated, axis_indices_repeated, mask_param_repeated),
fn=self._generate_axis_mask,
dtype=(tf.float32, tf.int32, tf.int32, tf.int32),
fn_output_signature=tf.bool,
)
mask = tf.math.reduce_any(masks, 0)
return tf.where(mask, self.mask_value, x)
|
Applies a number of masks (defined by the parameter n_masks) to the spectrogram
by the axis provided.
Args:
x (float `Tensor`): A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
axis (int): The axis where the masks will be applied
mask_param (int): The mask param as defined in the original paper, which is the max width of the mask
applied to the specified axis.
n_masks (int): The number of masks to be applied
Returns:
(float `Tensor`): The masked spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on x shape (that is, the input spectrogram).
|
_apply_masks_to_axis
|
python
|
keunwoochoi/kapre
|
kapre/augmentation.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/augmentation.py
|
MIT
|
def _apply_spec_augment(self, x):
"""
Main method that applies SpecAugment technique by both frequency and
time axis.
Args:
x (float `Tensor`) : A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
Returns:
(float `Tensor`): The spectrogram masked by time and frequency axis. Its shape is (time, freq, ch)
or (ch, time, freq) depending on x shape (that is, the input spectrogram).
"""
if self.data_format == _CH_LAST_STR:
time_axis, freq_axis = 0, 1
else:
time_axis, freq_axis = 1, 2
if self.n_time_masks >= 1:
x = self._apply_masks_to_axis(
x, axis=time_axis, mask_param=self.time_mask_param, n_masks=self.n_time_masks
)
if self.n_freq_masks >= 1:
x = self._apply_masks_to_axis(
x, axis=freq_axis, mask_param=self.freq_mask_param, n_masks=self.n_freq_masks
)
return x
|
Main method that applies SpecAugment technique by both frequency and
time axis.
Args:
x (float `Tensor`) : A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
Returns:
(float `Tensor`): The spectrogram masked by time and frequency axis. Its shape is (time, freq, ch)
or (ch, time, freq) depending on x shape (that is, the input spectrogram).
|
_apply_spec_augment
|
python
|
keunwoochoi/kapre
|
kapre/augmentation.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/augmentation.py
|
MIT
|
def get_window_fn(window_name=None):
"""Return a window function given its name.
This function is used inside layers such as `STFT` to get a window function.
Args:
window_name (None or str): name of window function. On Tensorflow 2.3, there are five windows available in
`tf.signal` (`hamming_window`, `hann_window`, `kaiser_bessel_derived_window`, `kaiser_window`, `vorbis_window`).
"""
if window_name is None:
return tf.signal.hann_window
available_windows = {
'hamming_window': tf.signal.hamming_window,
'hann_window': tf.signal.hann_window,
}
if hasattr(tf.signal, 'kaiser_bessel_derived_window'):
available_windows['kaiser_bessel_derived_window'] = tf.signal.kaiser_bessel_derived_window
if hasattr(tf.signal, 'kaiser_window'):
available_windows['kaiser_window'] = tf.signal.kaiser_window
if hasattr(tf.signal, 'vorbis_window'):
available_windows['vorbis_window'] = tf.signal.vorbis_window
if window_name not in available_windows:
raise NotImplementedError(
'Window name %s is not supported now. Currently, %d windows are'
'supported - %s'
% (
window_name,
len(available_windows),
', '.join([k for k in available_windows.keys()]),
)
)
return available_windows[window_name]
|
Return a window function given its name.
This function is used inside layers such as `STFT` to get a window function.
Args:
window_name (None or str): name of window function. On Tensorflow 2.3, there are five windows available in
`tf.signal` (`hamming_window`, `hann_window`, `kaiser_bessel_derived_window`, `kaiser_window`, `vorbis_window`).
|
get_window_fn
|
python
|
keunwoochoi/kapre
|
kapre/backend.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py
|
MIT
|
def validate_data_format_str(data_format):
"""A function that validates the data format string."""
if data_format not in (_CH_DEFAULT_STR, _CH_FIRST_STR, _CH_LAST_STR):
raise ValueError(
'data_format should be one of {}'.format(
str([_CH_FIRST_STR, _CH_LAST_STR, _CH_DEFAULT_STR])
)
+ ' but we received {}'.format(data_format)
)
|
A function that validates the data format string.
|
validate_data_format_str
|
python
|
keunwoochoi/kapre
|
kapre/backend.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py
|
MIT
|
def magnitude_to_decibel(x, ref_value=1.0, amin=1e-5, dynamic_range=80.0):
"""A function that converts magnitude to decibel scaling.
In essence, it runs `10 * log10(x)`, but with some other utility operations.
Similar to `librosa.power_to_db` with `ref=1.0` and `top_db=dynamic_range`
Args:
x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT.
ref_value (`float`): an input value that would become 0 dB in the result.
For spectrogram magnitudes, ref_value=1.0 usually make the decibel-scaled output to be around zero
if the input audio was in [-1, 1].
amin (`float`): the noise floor of the input. An input that is smaller than `amin`, it's converted to `amin`.
dynamic_range (`float`): range of the resulting value. E.g., if the maximum magnitude is 30 dB,
the noise floor of the output would become (30 - dynamic_range) dB
Returns:
log_spec (`Tensor`): a decibel-scaled version of `x`.
Note:
In many deep learning based application, the input spectrogram magnitudes (e.g., abs(STFT)) are decibel-scaled
(=logarithmically mapped) for a better performance.
Example:
::
input_shape = (2048, 1) # mono signal
model = Sequential()
model.add(kapre.Frame(frame_length=1024, hop_length=512, input_shape=input_shape))
# now the shape is (batch, n_frame=3, frame_length=1024, ch=1)
"""
def _log10(x):
return tf.math.log(x) / tf.math.log(tf.constant(10, dtype=x.dtype))
if K.ndim(x) > 1: # we assume x is batch in this case
max_axis = tuple(range(K.ndim(x))[1:])
else:
max_axis = None
if amin is None:
amin = 1e-5
amin = tf.cast(amin, dtype=x.dtype)
log_spec = 10.0 * _log10(tf.math.maximum(x, amin))
log_spec = log_spec - 10.0 * _log10(tf.math.maximum(amin, ref_value))
log_spec = tf.math.maximum(
log_spec, tf.math.reduce_max(log_spec, axis=max_axis, keepdims=True) - dynamic_range
)
return log_spec
|
A function that converts magnitude to decibel scaling.
In essence, it runs `10 * log10(x)`, but with some other utility operations.
Similar to `librosa.power_to_db` with `ref=1.0` and `top_db=dynamic_range`
Args:
x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT.
ref_value (`float`): an input value that would become 0 dB in the result.
For spectrogram magnitudes, ref_value=1.0 usually make the decibel-scaled output to be around zero
if the input audio was in [-1, 1].
amin (`float`): the noise floor of the input. An input that is smaller than `amin`, it's converted to `amin`.
dynamic_range (`float`): range of the resulting value. E.g., if the maximum magnitude is 30 dB,
the noise floor of the output would become (30 - dynamic_range) dB
Returns:
log_spec (`Tensor`): a decibel-scaled version of `x`.
Note:
In many deep learning based application, the input spectrogram magnitudes (e.g., abs(STFT)) are decibel-scaled
(=logarithmically mapped) for a better performance.
Example:
::
input_shape = (2048, 1) # mono signal
model = Sequential()
model.add(kapre.Frame(frame_length=1024, hop_length=512, input_shape=input_shape))
# now the shape is (batch, n_frame=3, frame_length=1024, ch=1)
|
magnitude_to_decibel
|
python
|
keunwoochoi/kapre
|
kapre/backend.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py
|
MIT
|
def filterbank_mel(
sample_rate, n_freq, n_mels=128, f_min=0.0, f_max=None, htk=False, norm='slaney'
):
"""A wrapper for librosa.filters.mel that additionally does transpose and tensor conversion
Args:
sample_rate (`int`): sample rate of the input audio
n_freq (`int`): number of frequency bins in the input STFT magnitude.
n_mels (`int`): the number of mel bands
f_min (`float`): lowest frequency that is going to be included in the mel filterbank (Hertz)
f_max (`float`): highest frequency that is going to be included in the mel filterbank (Hertz)
htk (bool): whether to use `htk` formula or not
norm: The default, 'slaney', would normalize the the mel weights by the width of the mel band.
Returns:
(`Tensor`): mel filterbanks. Shape=`(n_freq, n_mels)`
"""
filterbank = librosa.filters.mel(
sr=sample_rate,
n_fft=(n_freq - 1) * 2,
n_mels=n_mels,
fmin=f_min,
fmax=f_max,
htk=htk,
norm=norm,
).astype(K.floatx())
return tf.convert_to_tensor(filterbank.T)
|
A wrapper for librosa.filters.mel that additionally does transpose and tensor conversion
Args:
sample_rate (`int`): sample rate of the input audio
n_freq (`int`): number of frequency bins in the input STFT magnitude.
n_mels (`int`): the number of mel bands
f_min (`float`): lowest frequency that is going to be included in the mel filterbank (Hertz)
f_max (`float`): highest frequency that is going to be included in the mel filterbank (Hertz)
htk (bool): whether to use `htk` formula or not
norm: The default, 'slaney', would normalize the the mel weights by the width of the mel band.
Returns:
(`Tensor`): mel filterbanks. Shape=`(n_freq, n_mels)`
|
filterbank_mel
|
python
|
keunwoochoi/kapre
|
kapre/backend.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py
|
MIT
|
def get_stft_magnitude_layer(
input_shape=None,
n_fft=2048,
win_length=None,
hop_length=None,
window_name=None,
pad_begin=False,
pad_end=False,
return_decibel=False,
db_amin=1e-5,
db_ref_value=1.0,
db_dynamic_range=80.0,
input_data_format='default',
output_data_format='default',
name='stft_magnitude',
):
"""A function that returns a stft magnitude layer.
The layer is a `keras.Sequential` model consists of `STFT`, `Magnitude`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model. Necessary only if this melspectrogram layer is
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output melspectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
STFT magnitude represents a linear-frequency spectrum of audio signal and probably the most popular choice
when it comes to audio analysis in general. By using magnitude, this layer discard the phase information,
which is generally known to be irrelevant to human auditory perception.
Note:
For audio analysis (when the output is tag/label/etc), we'd like to recommend to set `return_decibel=True`.
Decibel scaling is perceptually plausible and numerically stable
(related paper: `A Comparison of Audio Signal Preprocessing Methods for Deep Neural Networks on Music Tagging <https://arxiv.org/abs/1709.01922>`_)
Many music, speech, and audio applications have used this log-magnitude STFT, e.g.,
`Learning to Pinpoint Singing Voice from Weakly Labeled Examples <https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/07/315_Paper.pdf>`_,
`Joint Beat and Downbeat Tracking with Recurrent Neural Networks <https://archives.ismir.net/ismir2016/paper/000186.pdf>`_,
and many more.
For audio processing (when the output is audio signal), it might be better to use STFT as it is (`return_decibel=False`).
Example: `Singing voice separation with deep U-Net convolutional networks <https://openaccess.city.ac.uk/id/eprint/19289/>`_.
This is because decibel scaling is has some clipping at the noise floor which is irreversible.
One may use `log(1+X)` instead of `log(X)` to avoid the clipping but it is not included in Kapre at the moment.
Example:
::
input_shape = (2048, 1) # mono signal, audio is channels_last
stft_mag = get_stft_magnitude_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
input_data_format='channels_last', output_data_format='channels_first')
model = Sequential()
model.add(stft_mag)
# now the shape is (batch, ch=1, n_frame=3, n_freq=513) because output_data_format is 'channels_first'
# and the dtype is float
"""
backend.validate_data_format_str(input_data_format)
backend.validate_data_format_str(output_data_format)
stft_kwargs = {}
if input_shape is not None:
stft_kwargs['input_shape'] = input_shape
waveform_to_stft = STFT(
**stft_kwargs,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_name=window_name,
pad_begin=pad_begin,
pad_end=pad_end,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
stft_to_stftm = Magnitude()
layers = [waveform_to_stft, stft_to_stftm]
if return_decibel:
mag_to_decibel = MagnitudeToDecibel(
ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
)
layers.append(mag_to_decibel)
return Sequential(layers, name=name)
|
A function that returns a stft magnitude layer.
The layer is a `keras.Sequential` model consists of `STFT`, `Magnitude`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model. Necessary only if this melspectrogram layer is
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output melspectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
STFT magnitude represents a linear-frequency spectrum of audio signal and probably the most popular choice
when it comes to audio analysis in general. By using magnitude, this layer discard the phase information,
which is generally known to be irrelevant to human auditory perception.
Note:
For audio analysis (when the output is tag/label/etc), we'd like to recommend to set `return_decibel=True`.
Decibel scaling is perceptually plausible and numerically stable
(related paper: `A Comparison of Audio Signal Preprocessing Methods for Deep Neural Networks on Music Tagging <https://arxiv.org/abs/1709.01922>`_)
Many music, speech, and audio applications have used this log-magnitude STFT, e.g.,
`Learning to Pinpoint Singing Voice from Weakly Labeled Examples <https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/07/315_Paper.pdf>`_,
`Joint Beat and Downbeat Tracking with Recurrent Neural Networks <https://archives.ismir.net/ismir2016/paper/000186.pdf>`_,
and many more.
For audio processing (when the output is audio signal), it might be better to use STFT as it is (`return_decibel=False`).
Example: `Singing voice separation with deep U-Net convolutional networks <https://openaccess.city.ac.uk/id/eprint/19289/>`_.
This is because decibel scaling is has some clipping at the noise floor which is irreversible.
One may use `log(1+X)` instead of `log(X)` to avoid the clipping but it is not included in Kapre at the moment.
Example:
::
input_shape = (2048, 1) # mono signal, audio is channels_last
stft_mag = get_stft_magnitude_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
input_data_format='channels_last', output_data_format='channels_first')
model = Sequential()
model.add(stft_mag)
# now the shape is (batch, ch=1, n_frame=3, n_freq=513) because output_data_format is 'channels_first'
# and the dtype is float
|
get_stft_magnitude_layer
|
python
|
keunwoochoi/kapre
|
kapre/composed.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py
|
MIT
|
def get_melspectrogram_layer(
    input_shape=None,
    n_fft=2048,
    win_length=None,
    hop_length=None,
    window_name=None,
    pad_begin=False,
    pad_end=False,
    sample_rate=22050,
    n_mels=128,
    mel_f_min=0.0,
    mel_f_max=None,
    mel_htk=False,
    mel_norm='slaney',
    return_decibel=False,
    db_amin=1e-5,
    db_ref_value=1.0,
    db_dynamic_range=80.0,
    input_data_format='default',
    output_data_format='default',
    name='melspectrogram',
):
    """Build a melspectrogram layer as a `keras.Sequential` model.

    The returned model chains `STFT` -> `Magnitude` -> `ApplyFilterbank` (mel),
    and appends `MagnitudeToDecibel` when `return_decibel=True`.

    Args:
        input_shape (None or tuple of integers): input shape of the model. Necessary only if this
            melspectrogram layer is the first layer of your model
            (see `keras.model.Sequential()` for more details).
        n_fft (int): number of FFT points in `STFT`
        win_length (int): window length of `STFT`
        hop_length (int): hop length of `STFT`
        window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window
            that is used in analysis. Defaults to `hann_window` which uses `tf.signal.hann_window`.
            Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
        pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
        pad_end (bool): whether to pad the input signal at the end in `STFT`.
        sample_rate (int): sample rate of the input audio
        n_mels (int): number of mel bins in the mel filterbank
        mel_f_min (float): lowest frequency of the mel filterbank
        mel_f_max (float): highest frequency of the mel filterbank
        mel_htk (bool): whether to follow the htk mel filterbank formula or not
        mel_norm ('slaney' or int): normalization policy of the mel filterbank triangles
        return_decibel (bool): whether to apply decibel scaling at the end
        db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
        db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
        db_dynamic_range (float): dynamic range of the decibel scaling result.
        input_data_format (str): the audio data format of input waveform batch.
            `'channels_last'` if it's `(batch, time, channels)` and
            `'channels_first'` if it's `(batch, channels, time)`.
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        output_data_format (str): the data format of output melspectrogram.
            `'channels_last'` if you want `(batch, time, frequency, channels)` and
            `'channels_first'` if you want `(batch, channels, time, frequency)`.
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        name (str): name of the returned layer

    Note:
        Melspectrogram is originally developed for speech applications and has been *very* widely
        used for audio signal analysis including music information retrieval. As its mel-axis is a
        non-linear compression of (linear) frequency axis, a melspectrogram can be an efficient choice
        as an input of a machine learning model. We recommend to set `return_decibel=True`.

        **References**:
        `Automatic tagging using deep convolutional neural networks <https://arxiv.org/abs/1606.00298>`_,
        `Deep content-based music recommendation <http://papers.nips.cc/paper/5004-deep-content-based-music-recommen>`_,
        `CNN Architectures for Large-Scale Audio Classification <https://arxiv.org/abs/1609.09430>`_,
        and way too many speech applications.

    Example:
        ::

            input_shape = (2, 2048)  # stereo signal, audio is channels_first
            melgram = get_melspectrogram_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
                n_mels=96, input_data_format='channels_first', output_data_format='channels_last')
            model = Sequential()
            model.add(melgram)
            # now the shape is (batch, n_frame=3, n_mels=96, n_ch=2) because output_data_format is 'channels_last'
            # and the dtype is float
    """
    for data_format in (input_data_format, output_data_format):
        backend.validate_data_format_str(data_format)

    # `input_shape` is forwarded only when given, so this layer can also sit
    # in the middle of a model.
    first_layer_kwargs = {} if input_shape is None else {'input_shape': input_shape}

    stft_layer = STFT(
        **first_layer_kwargs,
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window_name=window_name,
        pad_begin=pad_begin,
        pad_end=pad_end,
        input_data_format=input_data_format,
        output_data_format=output_data_format,
    )

    mel_filterbank_kwargs = dict(
        sample_rate=sample_rate,
        n_freq=n_fft // 2 + 1,
        n_mels=n_mels,
        f_min=mel_f_min,
        f_max=mel_f_max,
        htk=mel_htk,
        norm=mel_norm,
    )
    mel_layer = ApplyFilterbank(
        type='mel', filterbank_kwargs=mel_filterbank_kwargs, data_format=output_data_format
    )

    model_layers = [stft_layer, Magnitude(), mel_layer]
    if return_decibel:
        model_layers.append(
            MagnitudeToDecibel(
                ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
            )
        )

    return Sequential(model_layers, name=name)
|
A function that returns a melspectrogram layer, which is a `keras.Sequential` model consists of
`STFT`, `Magnitude`, `ApplyFilterbank(_mel_filterbank)`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model. Necessary only if this melspectrogram layer
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
sample_rate (int): sample rate of the input audio
n_mels (int): number of mel bins in the mel filterbank
mel_f_min (float): lowest frequency of the mel filterbank
mel_f_max (float): highest frequency of the mel filterbank
mel_htk (bool): whether to follow the htk mel filterbank formula or not
mel_norm ('slaney' or int): normalization policy of the mel filterbank triangles
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output melspectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
Melspectrogram is originally developed for speech applications and has been *very* widely used for audio signal
analysis including music information retrieval. As its mel-axis is a non-linear compression of (linear)
frequency axis, a melspectrogram can be an efficient choice as an input of a machine learning model.
We recommend to set `return_decibel=True`.
**References**:
`Automatic tagging using deep convolutional neural networks <https://arxiv.org/abs/1606.00298>`_,
`Deep content-based music recommendation <http://papers.nips.cc/paper/5004-deep-content-based-music-recommen>`_,
`CNN Architectures for Large-Scale Audio Classification <https://arxiv.org/abs/1609.09430>`_,
`Multi-label vs. combined single-label sound event detection with deep neural networks <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.711.74&rep=rep1&type=pdf>`_,
`Deep Convolutional Neural Networks and Data Augmentation for Environmental Sound Classification <https://arxiv.org/pdf/1608.04363.pdf>`_,
and way too many speech applications.
Example:
::
input_shape = (2, 2048) # stereo signal, audio is channels_first
melgram = get_melspectrogram_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
n_mels=96, input_data_format='channels_first', output_data_format='channels_last')
model = Sequential()
model.add(melgram)
# now the shape is (batch, n_frame=3, n_mels=96, n_ch=2) because output_data_format is 'channels_last'
# and the dtype is float
|
get_melspectrogram_layer
|
python
|
keunwoochoi/kapre
|
kapre/composed.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py
|
MIT
|
def get_log_frequency_spectrogram_layer(
    input_shape=None,
    n_fft=2048,
    win_length=None,
    hop_length=None,
    window_name=None,
    pad_begin=False,
    pad_end=False,
    sample_rate=22050,
    log_n_bins=84,
    log_f_min=None,
    log_bins_per_octave=12,
    log_spread=0.125,
    return_decibel=False,
    db_amin=1e-5,
    db_ref_value=1.0,
    db_dynamic_range=80.0,
    input_data_format='default',
    output_data_format='default',
    name='log_frequency_spectrogram',
):
    """A function that returns a log-frequency STFT layer, which is a `keras.Sequential` model consists of
    `STFT`, `Magnitude`, `ApplyFilterbank(_log_filterbank)`, and optionally `MagnitudeToDecibel`.

    Args:
        input_shape (None or tuple of integers): input shape of the model if this layer
            is the first layer of your model (see `keras.model.Sequential()` for more details)
        n_fft (int): number of FFT points in `STFT`
        win_length (int): window length of `STFT`
        hop_length (int): hop length of `STFT`
        window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
            Defaults to `hann_window` which uses `tf.signal.hann_window`.
            Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
        pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
        pad_end (bool): whether to pad the input signal at the end in `STFT`.
        sample_rate (int): sample rate of the input audio
        log_n_bins (int): number of the bins in the log-frequency filterbank
        log_f_min (float): lowest frequency of the filterbank
        log_bins_per_octave (int): number of bins in each octave in the filterbank
        log_spread (float): spread constant (Q value) in the log filterbank.
        return_decibel (bool): whether to apply decibel scaling at the end
        db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
        db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
        db_dynamic_range (float): dynamic range of the decibel scaling result.
        input_data_format (str): the audio data format of input waveform batch.
            `'channels_last'` if it's `(batch, time, channels)`
            `'channels_first'` if it's `(batch, channels, time)`
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        output_data_format (str): the data format of output mel spectrogram.
            `'channels_last'` if you want `(batch, time, frequency, channels)`
            `'channels_first'` if you want `(batch, channels, time, frequency)`
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        name (str): name of the returned layer

    Note:
        Log-frequency spectrogram is similar to melspectrogram but its frequency axis is perfectly linear to octave scale.
        For some pitch-related applications, a log-frequency spectrogram can be a good choice.

    Example:
        ::

            input_shape = (2048, 2)  # stereo signal, audio is channels_last
            logfreq_stft_mag = get_log_frequency_spectrogram_layer(
                input_shape=input_shape, n_fft=1024, return_decibel=True,
                log_n_bins=84, input_data_format='channels_last', output_data_format='channels_last')
            model = Sequential()
            model.add(logfreq_stft_mag)
            # now the shape is (batch, n_frame=3, n_bins=84, n_ch=2) because output_data_format is 'channels_last'
            # and the dtype is float
    """
    backend.validate_data_format_str(input_data_format)
    backend.validate_data_format_str(output_data_format)

    stft_kwargs = {}
    if input_shape is not None:
        stft_kwargs['input_shape'] = input_shape

    waveform_to_stft = STFT(
        **stft_kwargs,
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window_name=window_name,
        pad_begin=pad_begin,
        pad_end=pad_end,
        input_data_format=input_data_format,
        output_data_format=output_data_format,
    )

    stft_to_stftm = Magnitude()

    # The log filterbank is built by `ApplyFilterbank` itself from these kwargs;
    # a previous revision also called `backend.filterbank_log(...)` here and
    # discarded the result, computing the same filterbank twice. That dead call
    # has been removed.
    kwargs = {
        'sample_rate': sample_rate,
        'n_freq': n_fft // 2 + 1,
        'n_bins': log_n_bins,
        'bins_per_octave': log_bins_per_octave,
        'f_min': log_f_min,
        'spread': log_spread,
    }
    stftm_to_loggram = ApplyFilterbank(
        type='log', filterbank_kwargs=kwargs, data_format=output_data_format
    )

    layers = [waveform_to_stft, stft_to_stftm, stftm_to_loggram]

    if return_decibel:
        mag_to_decibel = MagnitudeToDecibel(
            ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
        )
        layers.append(mag_to_decibel)

    return Sequential(layers, name=name)
|
A function that returns a log-frequency STFT layer, which is a `keras.Sequential` model consists of
`STFT`, `Magnitude`, `ApplyFilterbank(_log_filterbank)`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model if this melspectrogram layer
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin(bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
sample_rate (int): sample rate of the input audio
log_n_bins (int): number of the bins in the log-frequency filterbank
log_f_min (float): lowest frequency of the filterbank
log_bins_per_octave (int): number of bins in each octave in the filterbank
log_spread (float): spread constant (Q value) in the log filterbank.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output mel spectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
Log-frequency spectrogram is similar to melspectrogram but its frequency axis is perfectly linear to octave scale.
For some pitch-related applications, a log-frequency spectrogram can be a good choice.
Example:
::
input_shape = (2048, 2) # stereo signal, audio is channels_last
logfreq_stft_mag = get_log_frequency_spectrogram_layer(
input_shape=input_shape, n_fft=1024, return_decibel=True,
log_n_bins=84, input_data_format='channels_last', output_data_format='channels_last')
model = Sequential()
model.add(logfreq_stft_mag)
# now the shape is (batch, n_frame=3, n_bins=84, n_ch=2) because output_data_format is 'channels_last'
# and the dtype is float
|
get_log_frequency_spectrogram_layer
|
python
|
keunwoochoi/kapre
|
kapre/composed.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py
|
MIT
|
def get_perfectly_reconstructing_stft_istft(
    stft_input_shape=None,
    istft_input_shape=None,
    n_fft=2048,
    win_length=None,
    hop_length=None,
    forward_window_name=None,
    waveform_data_format='default',
    stft_data_format='default',
    stft_name='stft',
    istft_name='istft',
):
    """Return a matched (`STFT`, `InverseSTFT`) layer pair that reconstructs perfectly.

    Args:
        stft_input_shape (tuple): Input shape of single waveform.
            Must specify this if the returned stft layer is going to be used as first layer of a Sequential model.
        istft_input_shape (tuple): Input shape of single STFT.
            Must specify this if the returned istft layer is going to be used as first layer of a Sequential model.
        n_fft (int): Number of FFTs. Defaults to `2048`
        win_length (`int` or `None`): Window length in sample. Defaults to `n_fft`.
        hop_length (`int` or `None`): Hop length in sample between analysis windows. Defaults to `n_fft // 4` following librosa.
        forward_window_name (function or `None`): *Name* of `tf.signal` function that returns a 1D tensor window that is used.
            Defaults to `hann_window` which uses `tf.signal.hann_window`.
            Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
        waveform_data_format (str): The audio data format of waveform batch.
            `'channels_last'` if it's `(batch, time, channels)` and
            `'channels_first'` if it's `(batch, channels, time)`.
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        stft_data_format (str): The data format of STFT.
            `'channels_last'` if you want `(batch, time, frequency, channels)` and
            `'channels_first'` if you want `(batch, channels, time, frequency)`.
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        stft_name (str): name of the returned STFT layer
        istft_name (str): name of the returned ISTFT layer

    Note:
        Without a careful setting, `tf.signal.stft` and `tf.signal.istft` is not perfectly reconstructing.

    Note:
        Imagine `x` --> `STFT` --> `InverseSTFT` --> `y`.
        The length of `x` will be longer than `y` due to the padding at the beginning and the end.
        To compare them, you would need to trim `y` along time axis.
        The formula: if `trim_begin = win_length - hop_length` and `len_signal` is length of `x`,
        `y_trimmed = y[trim_begin: trim_begin + len_signal, :]` (in the case of `channels_last`).

    Example:
        ::

            stft_input_shape = (2048, 2)  # stereo and channels_last
            stft_layer, istft_layer = get_perfectly_reconstructing_stft_istft(
                stft_input_shape=stft_input_shape
            )
            unet = get_unet()  # input: stft (complex value), output: stft (complex value)
            model = Sequential()
            model.add(stft_layer)  # input is waveform
            model.add(unet)
            model.add(istft_layer)  # output is also waveform
    """
    for data_format in (waveform_data_format, stft_data_format):
        backend.validate_data_format_str(data_format)

    # Resolve defaults: full-size window, librosa-style quarter-window hop.
    win_length = n_fft if win_length is None else win_length
    hop_length = win_length // 4 if hop_length is None else hop_length

    # NOTE(review): this condition only checks that win_length/hop_length is
    # even, although the message requires a power of two — confirm which is
    # intended before changing either. Behavior kept as-is.
    if (win_length / hop_length) % 2 != 0:
        raise RuntimeError(
            'The ratio of win_length and hop_length must be power of 2 to get a '
            'perfectly reconstructing stft-istft pair.'
        )

    stft_kwargs = {'input_shape': stft_input_shape} if stft_input_shape is not None else {}
    istft_kwargs = {'input_shape': istft_input_shape} if istft_input_shape is not None else {}

    forward_layer = STFT(
        **stft_kwargs,
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window_name=forward_window_name,
        pad_begin=True,
        pad_end=True,
        input_data_format=waveform_data_format,
        output_data_format=stft_data_format,
        name=stft_name,
    )
    inverse_layer = InverseSTFT(
        **istft_kwargs,
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        forward_window_name=forward_window_name,
        input_data_format=stft_data_format,
        output_data_format=waveform_data_format,
        name=istft_name,
    )

    return forward_layer, inverse_layer
|
A function that returns two layers, stft and inverse stft, which would be perfectly reconstructing pair.
Args:
stft_input_shape (tuple): Input shape of single waveform.
Must specify this if the returned stft layer is going to be used as first layer of a Sequential model.
istft_input_shape (tuple): Input shape of single STFT.
Must specify this if the returned istft layer is going to be used as first layer of a Sequential model.
n_fft (int): Number of FFTs. Defaults to `2048`
win_length (`int` or `None`): Window length in sample. Defaults to `n_fft`.
hop_length (`int` or `None`): Hop length in sample between analysis windows. Defaults to `n_fft // 4` following librosa.
forward_window_name (function or `None`): *Name* of `tf.signal` function that returns a 1D tensor window that is used.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
waveform_data_format (str): The audio data format of waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
stft_data_format (str): The data format of STFT.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
stft_name (str): name of the returned STFT layer
istft_name (str): name of the returned ISTFT layer
Note:
Without a careful setting, `tf.signal.stft` and `tf.signal.istft` is not perfectly reconstructing.
Note:
Imagine `x` --> `STFT` --> `InverseSTFT` --> `y`.
The length of `x` will be longer than `y` due to the padding at the beginning and the end.
To compare them, you would need to trim `y` along time axis.
The formula: if `trim_begin = win_length - hop_length` and `len_signal` is length of `x`,
`y_trimmed = y[trim_begin: trim_begin + len_signal, :]` (in the case of `channels_last`).
Example:
::
stft_input_shape = (2048, 2) # stereo and channels_last
stft_layer, istft_layer = get_perfectly_reconstructing_stft_istft(
stft_input_shape=stft_input_shape
)
unet = get_unet()  # input: stft (complex value), output: stft (complex value)
model = Sequential()
model.add(stft_layer) # input is waveform
model.add(unet)
model.add(istft_layer) # output is also waveform
|
get_perfectly_reconstructing_stft_istft
|
python
|
keunwoochoi/kapre
|
kapre/composed.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py
|
MIT
|
def get_stft_mag_phase(
    input_shape,
    n_fft=2048,
    win_length=None,
    hop_length=None,
    window_name=None,
    pad_begin=False,
    pad_end=False,
    return_decibel=False,
    db_amin=1e-5,
    db_ref_value=1.0,
    db_dynamic_range=80.0,
    input_data_format='default',
    output_data_format='default',
    name='stft_mag_phase',
):
    """Return a model that computes STFT magnitude and phase, concatenated along channels.

    Args:
        input_shape (tuple of integers): input shape of the stft layer.
            Because this mag_phase is based on keras.Functional model, it is required to specify the input shape.
            E.g., (44100, 2) for 44100-sample stereo audio with `input_data_format=='channels_last'`.
        n_fft (int): number of FFT points in `STFT`
        win_length (int): window length of `STFT`
        hop_length (int): hop length of `STFT`
        window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
            Defaults to `hann_window` which uses `tf.signal.hann_window`.
            Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
        pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
        pad_end (bool): whether to pad the input signal at the end in `STFT`.
        return_decibel (bool): whether to apply decibel scaling to the magnitude at the end
        db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
        db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
        db_dynamic_range (float): dynamic range of the decibel scaling result.
        input_data_format (str): the audio data format of input waveform batch.
            `'channels_last'` if it's `(batch, time, channels)` and
            `'channels_first'` if it's `(batch, channels, time)`.
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        output_data_format (str): the data format of the output.
            `'channels_last'` if you want `(batch, time, frequency, channels)` and
            `'channels_first'` if you want `(batch, channels, time, frequency)`.
            Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
        name (str): name of the returned model

    Example:
        ::

            input_shape = (2048, 3)  # 3-channel signal, channels_last
            model = Sequential()
            model.add(
                get_stft_mag_phase(input_shape=input_shape, return_decibel=True, n_fft=1024)
            )
            # now output shape is (batch, n_frame=3, freq=513, ch=6). 6 channels = [3 mag ch; 3 phase ch]
    """
    for data_format in (input_data_format, output_data_format):
        backend.validate_data_format_str(data_format)

    stft_layer = STFT(
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window_name=window_name,
        pad_begin=pad_begin,
        pad_end=pad_end,
        input_data_format=input_data_format,
        output_data_format=output_data_format,
    )

    # Functional graph: one STFT feeds two heads (magnitude and phase).
    waveform_input = keras.Input(shape=input_shape)
    complex_stfts = stft_layer(waveform_input)
    magnitudes = Magnitude()(complex_stfts)
    phases = Phase()(complex_stfts)

    if return_decibel:
        magnitudes = MagnitudeToDecibel(
            ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
        )(magnitudes)

    # Stack the two heads along the channel axis of the chosen output format.
    channel_axis = 1 if output_data_format == _CH_FIRST_STR else 3
    merged = keras.layers.Concatenate(axis=channel_axis)([magnitudes, phases])

    return Model(inputs=waveform_input, outputs=merged, name=name)
|
A function that returns magnitude and phase of input audio.
Args:
input_shape (None or tuple of integers): input shape of the stft layer.
Because this mag_phase is based on keras.Functional model, it is required to specify the input shape.
E.g., (44100, 2) for 44100-sample stereo audio with `input_data_format=='channels_last'`.
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output mel spectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Example:
::
input_shape = (2048, 3) # 3-channel signal, channels_last
model = Sequential()
model.add(
get_stft_mag_phase(input_shape=input_shape, return_decibel=True, n_fft=1024)
)
# now output shape is (batch, n_frame=3, freq=513, ch=6). 6 channels = [3 mag ch; 3 phase ch]
|
get_stft_mag_phase
|
python
|
keunwoochoi/kapre
|
kapre/composed.py
|
https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py
|
MIT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.