## Amazon S3 manager
## Author: Michal Ludvig <michal@logix.cz>
## http://www.logix.cz/michal
## License: GPL Version 2
## Copyright: TGRMN Software and contributors
from __future__ import absolute_import, division
import sys
import datetime
import time
import S3.Utils
class Progress(object):
_stdout = sys.stdout
_last_display = 0
def __init__(self, labels, total_size):
self._stdout = sys.stdout
self.new_file(labels, total_size)
def new_file(self, labels, total_size):
self.labels = labels
self.total_size = total_size
        # Set initial_position to a nonzero value in case
        # we're not counting from 0. For instance when
        # appending to a partially downloaded file.
        # Setting initial_position lets the speed
        # be computed correctly.
self.initial_position = 0
self.current_position = self.initial_position
self.time_start = datetime.datetime.now()
self.time_last = self.time_start
self.time_current = self.time_start
self.display(new_file = True)
def update(self, current_position = -1, delta_position = -1):
self.time_last = self.time_current
self.time_current = datetime.datetime.now()
if current_position > -1:
self.current_position = current_position
elif delta_position > -1:
self.current_position += delta_position
#else:
# no update, just call display()
self.display()
def done(self, message):
self.display(done_message = message)
def output_labels(self):
self._stdout.write(u"%(action)s: '%(source)s' -> '%(destination)s' %(extra)s\n" % self.labels)
self._stdout.flush()
def _display_needed(self):
# We only need to update the display every so often.
if time.time() - self._last_display > 1:
self._last_display = time.time()
return True
return False
def display(self, new_file = False, done_message = None):
"""
        display(new_file = False[/True], done_message = None)
Override this method to provide a nicer output.
"""
if new_file:
self.output_labels()
self.last_milestone = 0
return
if self.current_position == self.total_size:
print_size = S3.Utils.formatSize(self.current_position, True)
if print_size[1] != "": print_size[1] += "B"
timedelta = self.time_current - self.time_start
sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds) / 1000000.0
print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
self._stdout.write("100%% %s%s in %.2fs (%.2f %sB/s)\n" %
(print_size[0], print_size[1], sec_elapsed, print_speed[0], print_speed[1]))
self._stdout.flush()
return
rel_position = (self.current_position * 100) // self.total_size
if rel_position >= self.last_milestone:
# Move by increments of 5.
            # NOTE: to check: this does not appear to do what it looks like it was designed to do
self.last_milestone = (rel_position // 5) * 5
self._stdout.write("%d%% ", self.last_milestone)
self._stdout.flush()
return
class ProgressANSI(Progress):
## http://en.wikipedia.org/wiki/ANSI_escape_code
SCI = '\x1b['
ANSI_hide_cursor = SCI + "?25l"
ANSI_show_cursor = SCI + "?25h"
ANSI_save_cursor_pos = SCI + "s"
ANSI_restore_cursor_pos = SCI + "u"
ANSI_move_cursor_to_column = SCI + "%uG"
ANSI_erase_to_eol = SCI + "0K"
ANSI_erase_current_line = SCI + "2K"
def display(self, new_file = False, done_message = None):
"""
display(new_file = False[/True], done_message = None)
"""
if new_file:
self.output_labels()
self._stdout.write(self.ANSI_save_cursor_pos)
self._stdout.flush()
return
# Only display progress every so often
if not (new_file or done_message) and not self._display_needed():
return
timedelta = self.time_current - self.time_start
sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
if (sec_elapsed > 0):
print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
else:
print_speed = (0, "")
self._stdout.write(self.ANSI_restore_cursor_pos)
self._stdout.write(self.ANSI_erase_to_eol)
self._stdout.write("%(current)s of %(total)s %(percent)3d%% in %(elapsed)ds %(speed).2f %(speed_coeff)sB/s" % {
"current" : str(self.current_position).rjust(len(str(self.total_size))),
"total" : self.total_size,
"percent" : self.total_size and ((self.current_position * 100) // self.total_size) or 0,
"elapsed" : sec_elapsed,
"speed" : print_speed[0],
"speed_coeff" : print_speed[1]
})
if done_message:
self._stdout.write(" %s\n" % done_message)
self._stdout.flush()
class ProgressCR(Progress):
## Uses CR char (Carriage Return) just like other progress bars do.
CR_char = chr(13)
def display(self, new_file = False, done_message = None):
"""
display(new_file = False[/True], done_message = None)
"""
if new_file:
self.output_labels()
return
# Only display progress every so often
if not (new_file or done_message) and not self._display_needed():
return
timedelta = self.time_current - self.time_start
sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
if (sec_elapsed > 0):
print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
else:
print_speed = (0, "")
self._stdout.write(self.CR_char)
output = " %(current)s of %(total)s %(percent)3d%% in %(elapsed)4ds %(speed)7.2f %(speed_coeff)sB/s" % {
"current" : str(self.current_position).rjust(len(str(self.total_size))),
"total" : self.total_size,
"percent" : self.total_size and ((self.current_position * 100) // self.total_size) or 0,
"elapsed" : sec_elapsed,
"speed" : print_speed[0],
"speed_coeff" : print_speed[1]
}
self._stdout.write(output)
if done_message:
self._stdout.write(" %s\n" % done_message)
self._stdout.flush()
class StatsInfo(object):
"""Holding info for stats totals"""
def __init__(self):
self.files = None
self.size = None
self.files_transferred = None
self.size_transferred = None
self.files_copied = None
self.size_copied = None
self.files_deleted = None
self.size_deleted = None
def format_output(self):
outstr = u""
if self.files is not None:
tmp_str = u"Number of files: %d"% self.files
if self.size is not None:
tmp_str += " (%d bytes) "% self.size
outstr += u"\nStats: " + tmp_str
if self.files_transferred:
tmp_str = u"Number of files transferred: %d"% self.files_transferred
if self.size_transferred is not None:
tmp_str += " (%d bytes) "% self.size_transferred
outstr += u"\nStats: " + tmp_str
if self.files_copied:
tmp_str = u"Number of files copied: %d"% self.files_copied
if self.size_copied is not None:
tmp_str += " (%d bytes) "% self.size_copied
outstr += u"\nStats: " + tmp_str
if self.files_deleted:
tmp_str = u"Number of files deleted: %d"% self.files_deleted
if self.size_deleted is not None:
tmp_str += " (%d bytes) "% self.size_deleted
outstr += u"\nStats: " + tmp_str
return outstr
# vim:et:ts=4:sts=4:ai
## Amazon S3 manager
## Author: Michal Ludvig <michal@logix.cz>
## http://www.logix.cz/michal
## License: GPL Version 2
## Copyright: TGRMN Software and contributors
from __future__ import absolute_import
import sys
import hmac
try:
from base64 import encodebytes as encodestring
except ImportError:
# Python 2 support
from base64 import encodestring
from . import Config
from logging import debug
from .BaseUtils import encode_to_s3, decode_from_s3, s3_quote
from .Utils import time_to_epoch, deunicodise, check_bucket_name_dns_support
from .SortedDict import SortedDict
import datetime
from hashlib import sha1, sha256
__all__ = []
def format_param_str(params, always_have_equal=False, limited_keys=None):
"""
Format URL parameters from a params dict and returns
?parm1=val1&parm2=val2 or an empty string if there
are no parameters. Output of this function should
be appended directly to self.resource['uri']
- Set "always_have_equal" to always have the "=" char for a param even when
there is no value for it.
- Set "limited_keys" list to restrict the param string to keys that are
defined in it.
"""
if not params:
return ""
param_str = ""
equal_str = always_have_equal and u'=' or ''
for key in sorted(params.keys()):
if limited_keys and key not in limited_keys:
continue
value = params[key]
if value in (None, ""):
param_str += "&%s%s" % (s3_quote(key, unicode_output=True), equal_str)
else:
param_str += "&%s=%s" % (key, s3_quote(params[key], unicode_output=True))
return param_str and "?" + param_str[1:]
__all__.append("format_param_str")
### AWS Version 2 signing
def sign_string_v2(string_to_sign):
"""Sign a string with the secret key, returning base64 encoded results.
By default the configured secret key is used, but may be overridden as
an argument.
Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
string_to_sign should be utf-8 "bytes".
and returned signature will be utf-8 encoded "bytes".
"""
secret_key = Config.Config().secret_key
signature = encodestring(hmac.new(encode_to_s3(secret_key), string_to_sign, sha1).digest()).strip()
return signature
__all__.append("sign_string_v2")
def sign_request_v2(method='GET', canonical_uri='/', params=None, cur_headers=None):
"""Sign a string with the secret key, returning base64 encoded results.
By default the configured secret key is used, but may be overridden as
an argument.
Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
string_to_sign should be utf-8 "bytes".
"""
# valid sub-resources to be included in sign v2:
SUBRESOURCES_TO_INCLUDE = ['acl', 'lifecycle', 'location', 'logging',
'notification', 'partNumber', 'policy',
'requestPayment', 'torrent', 'uploadId',
'uploads', 'versionId', 'versioning',
'versions', 'website',
                               # Missing from the AWS S3 docs but needed
'delete', 'cors', 'restore']
if cur_headers is None:
cur_headers = SortedDict(ignore_case = True)
access_key = Config.Config().access_key
string_to_sign = method + "\n"
string_to_sign += cur_headers.get("content-md5", "") + "\n"
string_to_sign += cur_headers.get("content-type", "") + "\n"
string_to_sign += cur_headers.get("date", "") + "\n"
for header in sorted(cur_headers.keys()):
if header.startswith("x-amz-"):
string_to_sign += header + ":" + cur_headers[header] + "\n"
if header.startswith("x-emc-"):
string_to_sign += header + ":"+ cur_headers[header] + "\n"
canonical_uri = s3_quote(canonical_uri, quote_backslashes=False, unicode_output=True)
canonical_querystring = format_param_str(params, limited_keys=SUBRESOURCES_TO_INCLUDE)
    # canonical_querystring will be empty if no params are given, otherwise it
    # will start with a "?"
canonical_uri += canonical_querystring
string_to_sign += canonical_uri
debug("SignHeaders: " + repr(string_to_sign))
signature = decode_from_s3(sign_string_v2(encode_to_s3(string_to_sign)))
new_headers = SortedDict(list(cur_headers.items()), ignore_case=True)
new_headers["Authorization"] = "AWS " + access_key + ":" + signature
return new_headers
__all__.append("sign_request_v2")
def sign_url_v2(url_to_sign, expiry):
"""Sign a URL in s3://bucket/object form with the given expiry
time. The object will be accessible via the signed URL until the
AWS key and secret are revoked or the expiry time is reached, even
if the object is otherwise private.
See: http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
"""
return sign_url_base_v2(
bucket = url_to_sign.bucket(),
object = url_to_sign.object(),
expiry = expiry
)
__all__.append("sign_url_v2")
def sign_url_base_v2(**parms):
"""Shared implementation of sign_url methods. Takes a hash of 'bucket', 'object' and 'expiry' as args."""
content_disposition=Config.Config().content_disposition
content_type=Config.Config().content_type
parms['expiry']=time_to_epoch(parms['expiry'])
parms['access_key']=Config.Config().access_key
parms['host_base']=Config.Config().host_base
parms['object'] = s3_quote(parms['object'], quote_backslashes=False, unicode_output=True)
parms['proto'] = 'http'
if Config.Config().signurl_use_https:
parms['proto'] = 'https'
debug("Expiry interpreted as epoch time %s", parms['expiry'])
signtext = 'GET\n\n\n%(expiry)d\n/%(bucket)s/%(object)s' % parms
param_separator = '?'
if content_disposition:
signtext += param_separator + 'response-content-disposition=' + content_disposition
param_separator = '&'
if content_type:
signtext += param_separator + 'response-content-type=' + content_type
param_separator = '&'
debug("Signing plaintext: %r", signtext)
parms['sig'] = s3_quote(sign_string_v2(encode_to_s3(signtext)), unicode_output=True)
debug("Urlencoded signature: %s", parms['sig'])
if check_bucket_name_dns_support(Config.Config().host_bucket, parms['bucket']):
url = "%(proto)s://%(bucket)s.%(host_base)s/%(object)s"
else:
url = "%(proto)s://%(host_base)s/%(bucket)s/%(object)s"
url += "?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s"
url = url % parms
if content_disposition:
url += "&response-content-disposition=" + s3_quote(content_disposition, unicode_output=True)
if content_type:
url += "&response-content-type=" + s3_quote(content_type, unicode_output=True)
return url
def sign(key, msg):
return hmac.new(key, encode_to_s3(msg), sha256).digest()
def getSignatureKey(key, dateStamp, regionName, serviceName):
"""
Input: unicode params
Output: bytes
"""
kDate = sign(encode_to_s3('AWS4' + key), dateStamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
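# For reference, getSignatureKey implements the AWS Signature Version 4 key
# derivation chain documented by AWS (the argument values below are hypothetical):
#
#   kSigning = HMAC(HMAC(HMAC(HMAC(b"AWS4" + secret_key, "20230101"),
#                             "us-east-1"), "s3"), "aws4_request")
#
#   e.g. getSignatureKey(secret_key, "20230101", "us-east-1", "s3")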
def sign_request_v4(method='GET', host='', canonical_uri='/', params=None,
region='us-east-1', cur_headers=None, body=b''):
service = 's3'
if cur_headers is None:
cur_headers = SortedDict(ignore_case = True)
cfg = Config.Config()
access_key = cfg.access_key
secret_key = cfg.secret_key
t = datetime.datetime.utcnow()
amzdate = t.strftime('%Y%m%dT%H%M%SZ')
datestamp = t.strftime('%Y%m%d')
signing_key = getSignatureKey(secret_key, datestamp, region, service)
canonical_uri = s3_quote(canonical_uri, quote_backslashes=False, unicode_output=True)
canonical_querystring = format_param_str(params, always_have_equal=True).lstrip('?')
if type(body) == type(sha256(b'')):
payload_hash = decode_from_s3(body.hexdigest())
else:
payload_hash = decode_from_s3(sha256(encode_to_s3(body)).hexdigest())
canonical_headers = {'host' : host,
'x-amz-content-sha256': payload_hash,
'x-amz-date' : amzdate
}
signed_headers = 'host;x-amz-content-sha256;x-amz-date'
for header in cur_headers.keys():
# avoid duplicate headers and previous Authorization
if header == 'Authorization' or header in signed_headers.split(';'):
continue
canonical_headers[header.strip()] = cur_headers[header].strip()
signed_headers += ';' + header.strip()
# sort headers into a string
canonical_headers_str = ''
for k, v in sorted(canonical_headers.items()):
canonical_headers_str += k + ":" + v + "\n"
canonical_headers = canonical_headers_str
debug(u"canonical_headers = %s" % canonical_headers)
signed_headers = ';'.join(sorted(signed_headers.split(';')))
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
debug('Canonical Request:\n%s\n----------------------' % canonical_request)
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + decode_from_s3(sha256(encode_to_s3(canonical_request)).hexdigest())
signature = decode_from_s3(hmac.new(signing_key, encode_to_s3(string_to_sign), sha256).hexdigest())
authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ',' + 'SignedHeaders=' + signed_headers + ',' + 'Signature=' + signature
new_headers = SortedDict(cur_headers.items())
new_headers.update({'x-amz-date':amzdate,
'Authorization':authorization_header,
'x-amz-content-sha256': payload_hash})
debug("signature-v4 headers: %s" % new_headers)
return new_headers
__all__.append("sign_request_v4")
def checksum_sha256_file(filename, offset=0, size=None):
try:
hash = sha256()
except Exception:
# fallback to Crypto SHA256 module
hash = sha256.new()
with open(deunicodise(filename),'rb') as f:
if size is None:
for chunk in iter(lambda: f.read(8192), b''):
hash.update(chunk)
else:
f.seek(offset)
size_left = size
while size_left > 0:
chunk = f.read(min(8192, size_left))
if not chunk:
break
size_left -= len(chunk)
hash.update(chunk)
return hash
def checksum_sha256_buffer(buffer, offset=0, size=None):
try:
hash = sha256()
except Exception:
# fallback to Crypto SHA256 module
hash = sha256.new()
if size is None:
hash.update(buffer)
else:
hash.update(buffer[offset:offset+size])
    return hash
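# Illustrative only (hypothetical buffer): hashing a slice of a buffer.
#
#   digest = checksum_sha256_buffer(b"hello world", offset=0, size=5).hexdigest()
#   # hashes only b"hello"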
## Amazon S3 - Access Control List representation
## Author: Michal Ludvig <michal@logix.cz>
## http://www.logix.cz/michal
## License: GPL Version 2
## Copyright: TGRMN Software and contributors
from __future__ import absolute_import, print_function
import sys
from . import S3Uri
from .Exceptions import ParameterError
from .BaseUtils import getTreeFromXml, decode_from_s3
from .ACL import GranteeAnonRead
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
PY3 = (sys.version_info >= (3,0))
__all__ = []
class AccessLog(object):
LOG_DISABLED = "<BucketLoggingStatus></BucketLoggingStatus>"
LOG_TEMPLATE = "<LoggingEnabled><TargetBucket></TargetBucket><TargetPrefix></TargetPrefix></LoggingEnabled>"
def __init__(self, xml = None):
if not xml:
xml = self.LOG_DISABLED
self.tree = getTreeFromXml(xml)
self.tree.attrib['xmlns'] = "http://doc.s3.amazonaws.com/2006-03-01"
def isLoggingEnabled(self):
return (self.tree.find(".//LoggingEnabled") is not None)
def disableLogging(self):
        el = self.tree.find(".//LoggingEnabled")
        if el is not None:
            self.tree.remove(el)
def enableLogging(self, target_prefix_uri):
el = self.tree.find(".//LoggingEnabled")
        if el is None:
el = getTreeFromXml(self.LOG_TEMPLATE)
self.tree.append(el)
el.find(".//TargetBucket").text = target_prefix_uri.bucket()
el.find(".//TargetPrefix").text = target_prefix_uri.object()
def targetPrefix(self):
if self.isLoggingEnabled():
target_prefix = u"s3://%s/%s" % (
self.tree.find(".//LoggingEnabled//TargetBucket").text,
self.tree.find(".//LoggingEnabled//TargetPrefix").text)
return S3Uri.S3Uri(target_prefix)
else:
return ""
def setAclPublic(self, acl_public):
le = self.tree.find(".//LoggingEnabled")
if le is None:
raise ParameterError("Logging not enabled, can't set default ACL for logs")
tg = le.find(".//TargetGrants")
if not acl_public:
            if tg is None:
## All good, it's not been there
return
else:
le.remove(tg)
else: # acl_public == True
anon_read = GranteeAnonRead().getElement()
            if tg is None:
tg = ET.SubElement(le, "TargetGrants")
## What if TargetGrants already exists? We should check if
## AnonRead is there before appending a new one. Later...
tg.append(anon_read)
def isAclPublic(self):
raise NotImplementedError()
def __unicode__(self):
return decode_from_s3(ET.tostring(self.tree))
def __str__(self):
if PY3:
# Return unicode
return ET.tostring(self.tree, encoding="unicode")
else:
# Return bytes
return ET.tostring(self.tree)
__all__.append("AccessLog")
if __name__ == "__main__":
log = AccessLog()
print(log)
log.enableLogging(S3Uri.S3Uri(u"s3://targetbucket/prefix/log-"))
print(log)
log.setAclPublic(True)
print(log)
log.setAclPublic(False)
print(log)
log.disableLogging()
print(log)
# vim:et:ts=4:sts=4:ai
import json
from urllib.parse import urlparse
from traitlets import Any
from s3contents.genericmanager import GenericContentsManager, from_dict
from s3contents.ipycompat import Bool, Unicode
from s3contents.s3_fs import S3FS
class S3ContentsManager(GenericContentsManager):
access_key_id = Unicode(
help="S3/AWS access key ID", allow_none=True, default_value=None
).tag(config=True, env="JPY_S3_ACCESS_KEY_ID")
    anon = Bool(help="Connect to S3 anonymously (without credentials)", default_value=False).tag(
        config=True, env="JPY_S3_ANON"
    )
boto3_session = Any(
help="Place to store custom boto3 session (passed to S3_FS) - could be set by init_s3_hook"
)
bucket = Unicode("notebooks", help="Bucket name to store notebooks").tag(
config=True, env="JPY_S3_BUCKET"
)
delimiter = Unicode("/", help="Path delimiter").tag(config=True)
endpoint_url = Unicode(
"https://s3.amazonaws.com", help="S3 endpoint URL"
).tag(config=True, env="JPY_S3_ENDPOINT_URL")
kms_key_id = Unicode(help="KMS ID to use to encrypt workbooks").tag(
config=True, env="JPY_S3_KMS_KEY_ID"
)
init_s3_hook = Any(help="optional hook for init'ing s3").tag(config=True)
prefix = Unicode("", help="Prefix path inside the specified bucket").tag(
config=True, env="JPY_S3_PREFIX"
)
region_name = Unicode("us-east-1", help="Region name").tag(
config=True, env="JPY_S3_REGION_NAME"
)
secret_access_key = Unicode(
help="S3/AWS secret access key", allow_none=True, default_value=None
).tag(config=True, env="JPY_S3_SECRET_ACCESS_KEY")
session_token = Unicode(
help="S3/AWS session token", allow_none=True, default_value=None
).tag(config=True, env="JPY_S3_SESSION_TOKEN")
signature_version = Unicode(help="Signature Version").tag(
config=True, env="JPY_S3_SIGNATURE_VERSION"
)
sse = Unicode(help="Type of server-side encryption to use").tag(
config=True, env="JPY_S3_SSE"
)
s3fs_additional_kwargs = Any(
help="optional dictionary to be appended to s3fs additional kwargs"
).tag(config=True)
def __init__(self, *args, **kwargs):
super(S3ContentsManager, self).__init__(*args, **kwargs)
self.run_init_s3_hook()
self.bucket = validate_bucket(self.bucket, self.log)
self._fs = S3FS(
access_key_id=self.access_key_id,
anon=self.anon,
boto3_session=self.boto3_session,
bucket=self.bucket,
delimiter=self.delimiter,
endpoint_url=self.endpoint_url,
kms_key_id=self.kms_key_id,
log=self.log,
prefix=self.prefix,
region_name=self.region_name,
secret_access_key=self.secret_access_key,
session_token=self.session_token,
signature_version=self.signature_version,
sse=self.sse,
s3fs_additional_kwargs=self.s3fs_additional_kwargs,
)
def run_init_s3_hook(self):
if self.init_s3_hook is not None:
self.init_s3_hook(self)
def _save_notebook(self, model, path):
nb_contents = from_dict(model["content"])
self.check_and_sign(nb_contents, path)
file_contents = json.dumps(model["content"])
self.fs.writenotebook(path, file_contents)
self.validate_notebook_model(model)
return model.get("message")
def validate_bucket(user_bucket, log):
"""Helper function to strip off schemas and keys from the bucket name
Parameters
----------
user_bucket : str
The bucket that the user provided in their jupyter_notebook_config.py
log :
The logger hanging off of GenericContentsManager
Returns
-------
str
The properly parsed bucket out of `user_bucket`
Raises
------
ValueError
* When I'm not sure how to parse out a bucket from the provided input
* When the user provides an empty bucket
"""
if not user_bucket:
raise ValueError(
f"user_bucket function argument is empty: {user_bucket}"
)
log.debug(
f"s3manager.validate_bucket: User provided bucket: {user_bucket}"
)
res = urlparse(user_bucket)
scheme, netloc, path, params, query, fragment = res
if netloc:
bucket = netloc
log.warning(
"s3manager.validate_bucket: "
f"Assuming you meant {bucket} for your bucket. "
f"Using that. Please set bucket={bucket} "
"in your jupyter_notebook_config.py file"
)
return bucket
if scheme or netloc or params or query or fragment:
log.error(
"s3manager.validate_bucket: "
f"Invalid bucket specification: {res}"
)
raise ValueError(f"Invalid bucket specification: {res}")
bucket = path
if "/" not in bucket:
return bucket
bucket, key = bucket.split("/", maxsplit=1)
log.warning(
"s3manager.validate_bucket: "
f"Assuming you meant {bucket} for your bucket name. Don't "
f"include '/' in your bucket name. Removing /{key} "
f"from your bucket name. Please set bucket={bucket} "
"in your jupyter_notebook_config.py file"
)
return bucket
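# Illustrative only (hypothetical inputs): validate_bucket strips schemes and keys,
# so validate_bucket("s3://notebooks/prefix", log) and
# validate_bucket("notebooks/prefix", log) both return "notebooks"
# (with a warning logged about the stripped parts).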
if __name__ == "__main__":
import sys
from jupyterlab.labapp import main
    sys.exit(main())
import base64
import os
import gcsfs
from tornado.web import HTTPError
from s3contents.genericfs import GenericFS, NoSuchFile
from s3contents.ipycompat import Unicode
class GCSFS(GenericFS):
project = Unicode(
help="GCP Project", allow_none=True, default_value=None
).tag(config=True, env="JPYNB_GCS_PROJECT")
token = Unicode(
help="Path to the GCP token", allow_none=True, default_value=None
).tag(config=True, env="JPYNB_GCS_TOKEN_PATH")
region_name = Unicode("us-east-1", help="Region name").tag(
config=True, env="JPYNB_GCS_REGION_NAME"
)
bucket = Unicode("notebooks", help="Bucket name to store notebooks").tag(
config=True, env="JPYNB_GCS_BUCKET"
)
prefix = Unicode("", help="Prefix path inside the specified bucket").tag(
config=True
)
separator = Unicode("/", help="Path separator").tag(config=True)
dir_keep_file = Unicode(
".gcskeep", help="Empty file to create when creating directories"
).tag(config=True)
def __init__(self, log, **kwargs):
super(GCSFS, self).__init__(**kwargs)
self.log = log
token = self.token
if token:
token = os.path.expanduser(self.token)
self.fs = gcsfs.GCSFileSystem(project=self.project, token=token)
self.init()
def init(self):
self.mkdir("")
self.ls("")
        assert self.isdir(""), "The root directory should exist"
# GenericFS methods -----------------------------------------------------------------------------------------------
def ls(self, path):
path_ = self.path(path)
self.log.debug("S3contents.GCSFS: Listing directory: `%s`", path_)
files = self.fs.ls(path_)
return self.remove_prefix(files)
def isfile(self, path):
path_ = self.path(path)
is_file = False
exists = self.fs.exists(path_)
if not exists:
is_file = False
else:
try:
is_file = self.fs.info(path_)["type"] == "file"
except FileNotFoundError:
pass
self.log.debug("S3contents.GCSFS: `%s` is a file: %s", path_, is_file)
return is_file
def isdir(self, path):
        # GCSFS doesn't return exists=True for a directory with no files, so
        # we need to check whether the dir_keep_file exists
is_dir = self.isfile(path + self.separator + self.dir_keep_file)
path_ = self.path(path)
self.log.debug(
"S3contents.GCSFS: `%s` is a directory: %s", path_, is_dir
)
return is_dir
def mv(self, old_path, new_path):
self.log.debug(
"S3contents.GCSFS: Move file `%s` to `%s`", old_path, new_path
)
self.cp(old_path, new_path)
self.rm(old_path)
def cp(self, old_path, new_path):
old_path_, new_path_ = self.path(old_path), self.path(new_path)
self.log.debug(
"S3contents.GCSFS: Coping `%s` to `%s`", old_path_, new_path_
)
if self.isdir(old_path):
old_dir_path, new_dir_path = old_path, new_path
for obj in self.ls(old_dir_path):
old_item_path = obj
new_item_path = old_item_path.replace(
old_dir_path, new_dir_path, 1
)
self.cp(old_item_path, new_item_path)
elif self.isfile(old_path):
self.fs.copy(old_path_, new_path_)
def rm(self, path):
path_ = self.path(path)
self.log.debug("S3contents.GCSFS: Removing: `%s`", path_)
if self.isfile(path):
self.log.debug("S3contents.GCSFS: Removing file: `%s`", path_)
self.fs.rm(path_)
elif self.isdir(path):
self.log.debug("S3contents.GCSFS: Removing directory: `%s`", path_)
dirs = self.fs.walk(path_)
for dir in dirs:
for file in dir[2]:
self.fs.rm(dir[0] + self.separator + file)
def mkdir(self, path):
path_ = self.path(path, self.dir_keep_file)
self.log.debug("S3contents.GCSFS: Making dir (touch): `%s`", path_)
self.fs.touch(path_)
def read(self, path, format):
path_ = self.path(path)
if not self.isfile(path):
raise NoSuchFile(path_)
with self.fs.open(path_, mode="rb") as f:
content = f.read()
if format == "base64":
return base64.b64encode(content).decode("ascii"), "base64"
else:
# Try to interpret as unicode if format is unknown or if unicode
# was explicitly requested.
try:
return content.decode("utf-8"), "text"
except UnicodeError:
if format == "text":
err = "{} is not UTF-8 encoded".format(path_)
self.log.error(err)
raise HTTPError(400, err, reason="bad format")
def lstat(self, path):
path_ = self.path(path)
info = self.fs.info(path_)
ret = {}
if "updated" in info:
ret["ST_MTIME"] = info["updated"]
return ret
def write(self, path, content, format):
path_ = self.path(self.remove_prefix(path))
self.log.debug("S3contents.GCSFS: Writing file: `%s`", path_)
with self.fs.open(path_, mode="wb") as f:
if format == "base64":
b64_bytes = content.encode("ascii")
content_ = base64.b64decode(b64_bytes)
else:
content_ = content.encode("utf8")
f.write(content_)
# Utilities -------------------------------------------------------------------------------------------------------
def strip(self, path):
if isinstance(path, str):
return path.strip(self.separator)
if isinstance(path, (list, tuple)):
return list(map(self.strip, path))
def join(self, *paths):
paths = self.strip(paths)
return self.separator.join(paths)
def get_prefix(self):
"""Full prefix: bucket + optional prefix"""
prefix = self.bucket
if self.prefix:
prefix += self.separator + self.prefix
return prefix
prefix_ = property(get_prefix)
def remove_prefix(self, path):
"""Remove the self.prefix_ (if present) from a path or list of paths"""
path = self.strip(path)
if isinstance(path, str):
path = (
path[len(self.prefix_) :]
if path.startswith(self.prefix_)
else path
)
path = path[1:] if path.startswith(self.separator) else path
return path
if isinstance(path, (list, tuple)):
path = [
p[len(self.prefix_) :] if p.startswith(self.prefix_) else p
for p in path
]
path = [p[1:] if p.startswith(self.separator) else p for p in path]
return path
def path(self, *path):
"""Utility to join paths including the bucket and prefix"""
path = list(filter(None, path))
path = self.remove_prefix(path)
items = [self.prefix_] + path
        return self.join(*items)
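# Illustrative only (assuming bucket="notebooks" and prefix="team"):
#
#   fs.path("dir", "file.ipynb") -> "notebooks/team/dir/file.ipynb"
#   fs.remove_prefix("notebooks/team/dir/file.ipynb") -> "dir/file.ipynb"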
KEY = {
"up": -204,
"down": -206,
"left": -203,
"right": -205,
"shiftUp": 337,
"shiftDown": 336,
"enter": 10,
"space": 32,
"tab": -301,
"shiftTab": -302,
"backspace": -300,
"esc": -1,
"altC": 231,
":": ord(":"),
"?": ord("?"),
}
KEYLIST = {
"move": [
KEY["up"],
KEY["down"],
KEY["shiftUp"],
KEY["shiftDown"],
KEY["esc"],
KEY["left"],
KEY["right"],
KEY["enter"],
],
"number": range(48, 58),
"y": [ord("Y"), ord("y")],
"n": [ord("N"), ord("n")],
}
class KeyHandler:
@staticmethod
def check(key_code, CURRENT):
sect = CURRENT["section"]
if CURRENT["layer"] == "confirm":
if key_code == KEY["left"] or key_code in KEYLIST["y"]:
CURRENT["action"] = "yes"
elif key_code == KEY["right"] or key_code in KEYLIST["n"]:
CURRENT["action"] = "no"
elif key_code == KEY["enter"]:
CURRENT["action"] = "confirm"
elif key_code == KEY["esc"]:
CURRENT["action"] = "cancel"
return
if key_code == KEY["esc"]:
CURRENT["action"] = "esc"
return
if key_code in [KEY["down"], KEY["space"]]:
if key_code == KEY["space"]:
CURRENT["action"] = "select"
CURRENT["kwargs"] = {"row": CURRENT["selectedRow"][sect]}
CURRENT["selectedRow"][sect] += 1
if CURRENT["selectedRow"][sect] >= CURRENT["rows"][sect]:
if key_code == KEY["space"]:
CURRENT["selectedRow"][sect] -= 1
else:
CURRENT["selectedRow"][sect] = 0
CURRENT["scrollTop"][sect] = 0
elif (
CURRENT["selectedRow"][sect] >= CURRENT["rowlimit"]
and CURRENT["selectedRow"][sect]
>= CURRENT["scrollTop"][sect] + CURRENT["rowlimit"]
):
CURRENT["scrollTop"][sect] += 1
elif key_code == KEY["up"]:
CURRENT["selectedRow"][sect] -= 1
if CURRENT["selectedRow"][sect] < 0:
CURRENT["selectedRow"][sect] = CURRENT["rows"][sect] - 1
if CURRENT["rows"][sect] > CURRENT["rowlimit"]:
CURRENT["scrollTop"][sect] = (
CURRENT["rows"][sect] - CURRENT["rowlimit"]
)
if (
CURRENT["rows"][sect] > CURRENT["rowlimit"]
and CURRENT["selectedRow"][sect] < CURRENT["scrollTop"][sect]
):
CURRENT["scrollTop"][sect] = CURRENT["selectedRow"][sect]
elif key_code == KEY["shiftDown"]:
CURRENT["selectedRow"][sect] += 10
if CURRENT["selectedRow"][sect] >= CURRENT["rowlimit"]:
CURRENT["selectedRow"][sect] = 0
elif key_code == KEY["shiftUp"]:
CURRENT["selectedRow"][sect] -= 10
if CURRENT["selectedRow"][sect] < 0:
CURRENT["selectedRow"][sect] = CURRENT["rowlimit"] - 1
if key_code == KEY["enter"]:
CURRENT["action"] = "enter"
return
elif key_code == KEY["?"]:
CURRENT["action"] = "help"
return
elif key_code == KEY["altC"]:
CURRENT["action"] = "copy"
return
elif key_code in [KEY["tab"], KEY["shiftTab"], KEY["left"], KEY["right"]]:
CURRENT["section"] = "remote" if CURRENT["section"] == "local" else "local"
CURRENT["action"] = "tab"
return
if key_code in KEYLIST["move"]:
CURRENT["action"] = "move"
            return
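# Illustrative only: a minimal (hypothetical) CURRENT state dict and one key press.
#
#   CURRENT = {"layer": "list", "section": "local", "action": None, "kwargs": {},
#              "selectedRow": {"local": 0, "remote": 0}, "rows": {"local": 5, "remote": 0},
#              "scrollTop": {"local": 0, "remote": 0}, "rowlimit": 20}
#   KeyHandler.check(KEY["down"], CURRENT)
#   # -> CURRENT["selectedRow"]["local"] == 1 and CURRENT["action"] == "move"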
import errno
from contextlib import contextmanager
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
class FileExpired(IOError):
"""
    Raised when the file content has been changed by a different process after
    the file was opened. Reading the file would then yield invalid or inconsistent output.
    This can also be triggered by outdated file information inside the directory cache.
    In that case ``S3FileSystem.invalidate_cache`` can be used to force an update of
    the file information when opening the file.
"""
def __init__(self, filename: str, e_tag: str):
super().__init__(
errno.EBUSY,
"The remote file corresponding to filename %s and Etag %s no longer exists."
% (filename, e_tag),
)
def title_case(string):
"""
TitleCases a given string.
Parameters
----------
string : underscore separated string
"""
return "".join(x.capitalize() for x in string.split("_"))
class ParamKwargsHelper(object):
"""
Utility class to help extract the subset of keys that an s3 method is
actually using
Parameters
----------
s3 : boto S3FileSystem
"""
_kwarg_cache = {}
def __init__(self, s3):
self.s3 = s3
def _get_valid_keys(self, model_name):
if model_name not in self._kwarg_cache:
model = self.s3.meta.service_model.operation_model(model_name)
valid_keys = (
set(model.input_shape.members.keys())
if model.input_shape is not None
else set()
)
self._kwarg_cache[model_name] = valid_keys
return self._kwarg_cache[model_name]
def filter_dict(self, method_name, d):
model_name = title_case(method_name)
valid_keys = self._get_valid_keys(model_name)
if isinstance(d, SSEParams):
d = d.to_kwargs()
return {k: v for k, v in d.items() if k in valid_keys}
class SSEParams(object):
def __init__(
self,
server_side_encryption=None,
sse_customer_algorithm=None,
sse_customer_key=None,
sse_kms_key_id=None,
):
self.ServerSideEncryption = server_side_encryption
self.SSECustomerAlgorithm = sse_customer_algorithm
self.SSECustomerKey = sse_customer_key
self.SSEKMSKeyId = sse_kms_key_id
def to_kwargs(self):
return {k: v for k, v in self.__dict__.items() if v is not None}
def _get_brange(size, block):
"""
Chunk up a file into zero-based byte ranges
Parameters
----------
size : file size
block : block size
"""
for offset in range(0, size, block):
        yield offset, min(offset + block - 1, size - 1)
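# Illustrative only: zero-based inclusive byte ranges for a 10-byte file in
# 4-byte blocks.
#
#   list(_get_brange(10, 4)) -> [(0, 3), (4, 7), (8, 9)]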
import logging
import requests
from io import BytesIO
from PIL import Image, ExifTags
from boto import s3
from boto.s3.key import Key
log = logging.getLogger(__name__)
class S3ImageResizerException(Exception):
pass
class InvalidParameterException(S3ImageResizerException):
pass
class CantFetchImageException(S3ImageResizerException):
pass
class RTFMException(S3ImageResizerException):
pass
class S3ImageResizer(object):
def __init__(self, s3_conn):
if not s3_conn or 'S3Connection' not in str(type(s3_conn)):
raise InvalidParameterException("Expecting an instance of boto s3 connection")
self.s3_conn = s3_conn
self.image = None
self.exif_tags = {}
def fetch(self, url):
"""Fetch an image and keep it in memory"""
assert url
log.debug("Fetching image at url %s" % url)
res = requests.get(url)
if res.status_code != 200:
raise CantFetchImageException("Failed to load image at url %s" % url)
image = Image.open(BytesIO(res.content))
# Fetch exif tags (if any)
if image._getexif():
tags = dict((ExifTags.TAGS[k].lower(), v) for k, v in list(image._getexif().items()) if k in ExifTags.TAGS)
self.exif_tags = tags
# Make sure Pillow does not ignore alpha channels during conversion
# See http://twigstechtips.blogspot.se/2011/12/python-converting-transparent-areas-in.html
image = image.convert("RGBA")
canvas = Image.new('RGBA', image.size, (255,255,255,255))
canvas.paste(image, mask=image)
self.image = canvas
return self
def orientate(self):
"""Apply exif orientation, if any"""
log.debug("Image has exif tags: %s" % self.exif_tags)
# No exif orientation?
if 'orientation' not in self.exif_tags:
log.info("No exif orientation known for this image")
return self
# If image has an exif rotation, apply it to the image prior to resizing
# See http://stackoverflow.com/questions/4228530/pil-thumbnail-is-rotating-my-image
angle = self.exif_tags['orientation']
log.info("Applying exif orientation %s to image" % angle)
angle_to_degrees = [
# orientation = transformation
lambda i: i,
lambda i: i.transpose(Image.FLIP_LEFT_RIGHT),
lambda i: i.transpose(Image.ROTATE_180),
lambda i: i.transpose(Image.FLIP_TOP_BOTTOM),
lambda i: i.transpose(Image.ROTATE_90).transpose(Image.FLIP_LEFT_RIGHT),
lambda i: i.transpose(Image.ROTATE_270),
lambda i: i.transpose(Image.ROTATE_90).transpose(Image.FLIP_TOP_BOTTOM),
lambda i: i.transpose(Image.ROTATE_90),
]
assert angle >= 1 and angle <= 8
f = angle_to_degrees[angle - 1]
self.image = f(self.image)
return self
def resize(self, width=None, height=None):
"""Resize the in-memory image previously fetched, and
return a clone of self holding the resized image"""
if not width and not height:
raise InvalidParameterException("One of width or height must be specified")
if not self.image:
raise RTFMException("No image loaded! You must call fetch() before resize()")
cur_width = self.image.width
cur_height = self.image.height
if width and height:
to_width = width
to_height = height
elif width:
to_width = width
to_height = int(cur_height * width / cur_width)
elif height:
to_width = int(cur_width * height / cur_height)
to_height = height
# Return a clone of self, loaded with the resized image
clone = S3ImageResizer(self.s3_conn)
log.info("Resizing image from (%s, %s) to (%s, %s)" % (cur_width, cur_height, to_width, to_height))
clone.image = self.image.resize((to_width, to_height), Image.ANTIALIAS)
return clone
def store(self, in_bucket=None, key_name=None, metadata=None, quality=95, public=True):
"""Store the loaded image into the given bucket with the given key name. Tag
it with metadata if provided. Make the Image public and return its url"""
if not in_bucket:
raise InvalidParameterException("No in_bucket specified")
if not key_name:
raise InvalidParameterException("No key_name specified")
if not self.image:
raise RTFMException("No image loaded! You must call fetch() before store()")
if metadata:
if type(metadata) is not dict:
raise RTFMException("metadata must be a dict")
else:
metadata = {}
metadata['Content-Type'] = 'image/jpeg'
log.info("Storing image into bucket %s/%s" % (in_bucket, key_name))
# Export image to a string
sio = BytesIO()
self.image.save(sio, 'JPEG', quality=quality, optimize=True)
contents = sio.getvalue()
sio.close()
# Get the bucket
bucket = self.s3_conn.get_bucket(in_bucket)
# Create a key containing the image. Make it public
k = Key(bucket)
k.key = key_name
k.set_contents_from_string(contents)
k.set_remote_metadata(metadata, {}, True)
if public:
k.set_acl('public-read')
# Return the key's url
return k.generate_url(
method='GET',
expires_in=0,
query_auth=False,
force_http=False
        )
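# Illustrative usage sketch (the url, bucket and key names are hypothetical):
#
#   import boto
#   resizer = S3ImageResizer(boto.connect_s3())
#   url = resizer.fetch('http://example.com/photo.jpg') \
#       .orientate() \
#       .resize(width=200) \
#       .store(in_bucket='my-bucket', key_name='thumbs/photo.jpg')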
from contextlib import contextmanager
import re
from tempfile import NamedTemporaryFile
import boto
__all__ = [
'open',
'BucketNotFoundError',
'KeyNotFoundError',
'UrlParseError',
]
S3_URL_REGEX = r'(?P<scheme>s3:\/\/)(?P<bucket>[^\/]+)\/(?P<key>.+)'
re_s3_url = re.compile(S3_URL_REGEX)
class UrlParseError(Exception):
pass
class KeyNotFoundError(Exception):
pass
class BucketNotFoundError(Exception):
pass
def url_split(s3_url):
"""Split S3 URL and return a tuple of (bucket, key).
S3 URL is expected to be of "s3://<bucket>/<key>" format.
"""
assert isinstance(s3_url, str)
match = re_s3_url.match(s3_url)
if not match:
raise UrlParseError('Error parsing S3 URL: "%s"' % s3_url)
parts = match.groupdict()
return (parts['bucket'], parts['key'])
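# Illustrative only (hypothetical URL):
#
#   url_split("s3://mybucket/path/to/key") -> ("mybucket", "path/to/key")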
@contextmanager
def open(s3_url, mode='r', s3_connection=None, **kwargs):
"""Open S3 url, returning a File Object.
S3 connection:
1. Can be specified directly by `s3_connection`.
2. `boto.connect_s3` will be used supplying all `kwargs`.
- `aws_access_key_id` and `aws_secret_access_key`.
- `profile_name` - recommended.
See:
http://boto.readthedocs.org/en/latest/boto_config_tut.html
"""
connection = s3_connection or boto.connect_s3(**kwargs)
bucket_name, key_name = url_split(s3_url)
try:
bucket = connection.get_bucket(bucket_name)
except boto.exception.S3ResponseError:
raise BucketNotFoundError('Bucket "%s" was not found.' % bucket_name)
f = NamedTemporaryFile()
try:
if 'w' in mode.lower():
s3_key = bucket.new_key(key_name)
yield f
f.seek(0)
s3_key.set_contents_from_file(f)
else:
s3_key = bucket.get_key(key_name)
if not s3_key:
raise KeyNotFoundError('Key "%s" was not found.' % s3_url)
s3_key.get_file(f)
f.seek(0)
yield f
finally:
        f.close()
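# Illustrative usage sketch (bucket, key and profile name are hypothetical).
# Note the yielded file object is a binary-mode NamedTemporaryFile, so reads
# return bytes and writes must be bytes.
#
#   with open("s3://mybucket/data.txt", "r", profile_name="default") as f:
#       print(f.read())
#
#   with open("s3://mybucket/out.txt", "w") as f:
#       f.write(b"hello")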
### About
s3kv is a key-value database backed by S3 object storage. As such, it leverages many of the built-in capabilities of object storage, notably key access policies, legal holds, and retention.
It is primarily intended for use cases such as data-flow caching, configuration management, and secrets management.
### Basic Operations
The following are the basic operations.
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the database with S3 credentials and bucket name
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
# Store data in the database
s3kv.add('key1', 'value1')
s3kv.add('key2', {'name': 'John', 'age': 30})
# Retrieve data from the database
value1 = s3kv.get('key1')
data = s3kv.get('key2')
# Delete a key
s3kv.delete('key1')
# Check if a key exists
if s3kv.exists('key3'):
print("Key 'key3' exists!")
```
### Extended Key Operations: Copy and Merge
In the copy_key method, we use the get_object method to retrieve the value of the source key from the S3 bucket. Then, we use the put_object method to store the same value under the destination key. If the source key exists in the local cache (/tmp/s3kv_cache), we also copy the cached value to the destination key in the cache.
Please note that if the source key does not exist, the method will raise an error. Ensure that the source key exists before calling this method. You can use the key_exists method to check if the source key exists.
Here’s an example of how to use the copy_key method:
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the database with S3 credentials and bucket name
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
data = {"name": "john"}
s3kv.add('source_key',data)
source_key = 'source_key'
destination_key = 'destination_key'
if s3kv.key_exists(source_key):
s3kv.copy_key(source_key, destination_key)
else:
print(f"The source key '{source_key}' does not exist in the S3KV database.")
```
In the merge_keys method, we iterate through the list of source keys, retrieve their values, and merge them into the destination value using the update method. Then, we update the value of the destination key both in the S3 bucket and the local cache. To use this method, you can call it with a list of source keys and the destination key:
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the database with S3 credentials and bucket name
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
data_a = {"data_a": "value_a"}
s3kv.add('source_key_a',data_a,None)
data_b = {"data_b": "value_b"}
s3kv.add('source_key_b',data_b,None)
source_keys = ['source_key_a', 'source_key_b']
destination_key = 'destination_key'
s3kv.merge_keys(source_keys, destination_key)
```
### Cache Operations
The following are the Cache operations.
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the s3kv database with local caching and key indexing
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
# Explicitly clear the cache
s3kv.clear_cache() # Clears the entire local cache
# Clear files older than max_days
max_days = 2
s3kv.clear_old_cache(max_days)
# Clear the cache for a specific key
s3kv.clear_cache_for_key('test')
```
### Tagging Keys
In addition to the existing features, S3KV introduces the ability to tag keys, which provides a convenient and organized way to manage hundreds of configurations or key-value pairs. Tags allow developers to associate metadata with individual keys, making it easier to categorize, search, and apply policies based on specific attributes. For example, in the context of configuration settings, keys related to a particular module or component can be tagged with the module's name, making it easier to manage and retrieve configurations for specific parts of the application.
You can add tags to new objects when you upload them, or you can add them to existing objects. Quoting the AWS S3 Docs -
* You can associate up to 10 tags with an object. Tags that are associated with an object must have unique tag keys.
* A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length. Amazon S3 object tags are internally represented in UTF-16. Note that in UTF-16, characters consume either 1 or 2 character positions.
* The key and values are case sensitive.
**IMPORTANT**
If a key is overwritten any tags added prior will be deleted.
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the s3kv database with local caching and key indexing
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
# Store data in the database with tags
s3kv.tag_key('test_c', tags={'module': 'user_management', 'environment': 'production'})
# Get tags assigned to a specific key
tags = s3kv.get_tags('s3kv/test_c.json')
print(tags)
# Find all keys with a tag key set to a specific value
keys = s3kv.find_keys_by_tag_value('environment','production')
print(keys)
# Tag multiple keys which share a prefix
s3kv.tag_keys_with_prefix('s3kv/test', {'module': 'user_management', 'environment': 'production'})
# Delete a key by tag (delete all keys where the "module" tag == "user_management")
s3kv.delete_by_tag('module', 'user_management')
```
### Listing Keys
The following are the list operations.
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the database with S3 credentials and bucket name
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
# List all keys
keys = s3kv.list_keys()
# List all keys with a specific prefix
s3kv.list_keys_with_prefix('s3kv/test')
# Find all keys with a tag set to a specific value
keys = s3kv.find_keys_by_tag_value('environment','production')
print(keys)
```
### Locking Keys - Retention and Legal Hold
Introducing the ability to apply locks to keys offers enhanced data governance and compliance capabilities. These locks come in two types - retention and legal hold - each serving a specific purpose in ensuring data immutability and compliance with regulations or internal policies.
1. Retention Lock: A retention lock allows users to enforce a specified period during which a key cannot be modified or deleted. This feature is particularly useful for maintaining data integrity and ensuring that critical data remains unchanged for regulatory or compliance reasons. *Once a retention lock is applied to a key, it cannot be altered or deleted until the specified retention period expires.*
2. Legal Hold: On the other hand, a legal hold lock is a more flexible form of locking. *When a legal hold is applied to a key, it prevents the key from being deleted, but allows it to be modified.* The lock remains in effect until explicitly removed, providing a higher level of control over the data lifecycle while still allowing for data updates as needed.
**IMPORTANT**
Key locks require bucket versioning to be enabled on bucket creation.
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the database with S3 credentials and bucket name
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
data = {'name': 'fat_boy_slim'}
s3kv.add('test_retention', data)
# Place a retention lock on a key (object cannot be deleted or
# changed for 365 days)
s3kv.place_retention_lock('test_retention', 365)
# Remove a retention lock
s3kv.remove_retention_lock('test_retention')
# Apply legal hold to a key (object cannot be deleted but can be modified)
s3kv.apply_legal_hold('test_retention')
# Check if a key is under legal hold
if s3kv.is_legal_hold_applied('test_retention'):
print("Key 'test_retention' is under legal hold.")
# Release legal hold on a key
s3kv.release_legal_hold('test_retention')
# Check if a key is under legal hold
if s3kv.is_legal_hold_applied('test_retention'):
print("Key 'test_retention' is under legal hold.")
```
### Utility functions
The following are utility functions
```
# Import the s3kv library
from s3kv import S3KV
import os
# Initialize the database with S3 credentials and bucket name
s3kv = S3KV(os.environ['S3_ENDPOINT_URL'],
os.environ['S3_BUCKET'],
os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'],
enable_local_cache=True)
data = {"dj": "fatboy slim"}
s3kv.add('test', data)
# Get the key size (file size)
size = s3kv.get_key_size('test')
print(size)
# Get the last updated time of the key
last_updated_date = s3kv.get_key_last_updated_time('test')
print(last_updated_date)
```
# s3labeler
s3 - Amazon Simple Storage Service
label - attached to an object and giving information about it
## S3 Object Labeling Tool
[](https://pypi.python.org/pypi/s3labeler/)
[](https://www.python.org/downloads/release/python-360/)
## install
```
pip install s3labeler
```
## cli
```
s3labeler --help
```
```
Usage: ./s3labeler.py option
options:
list-buckets|buckets
list|ls <s3bucket>/<s3object>
label|set <s3bucket>/<s3object> '{"label":"value"}'
delete|del <s3bucket>/<s3object> label
get <s3bucket>/<s3object>
save <s3bucket>/<s3object> destination
upload source <s3bucket>/<s3object>
rekognition <s3bucket>/<s3object>
rekognition <s3bucket>/<s3object> detect-labels
rekognition <s3bucket>/<s3object> words
rekognition <s3bucket>/<s3object> s3tag
rekognition <s3bucket>/<s3object> confidence
rekognition <s3bucket>/<s3object> s3tag confidence top 3
object <s3bucket>/<s3object>
b2sum <s3bucket>/<s3object>
identify|id <s3bucket>/<s3object>
server 8880
--help
--version
```
## run as a service
```
s3labeler server
```
## s3 rest api
#### list all buckets HTTP GET
```
curl http://127.0.0.1:8880/s3/
```
#### list files in a bucket (1000 record limit) HTTP GET
```
curl http://127.0.0.1:8880/s3/<s3bucket>/
```
#### list files in a bucket subdirectory (1000 record limit) HTTP GET
```
curl http://127.0.0.1:8880/s3/<s3bucket>/rekognition/
```
#### list s3objects s3tags HTTP GET
```
curl http://127.0.0.1:8880/s3/<s3bucket>/<s3object>
```
#### view s3object content, specify content-type header HTTP GET
```
curl http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?image=jpeg
```
image/gif image/jpeg image/png image/tiff image/vnd.microsoft.icon image/x-icon image/vnd.djvu image/svg+xml
#### list s3object s3tags HTTP GET (same as default get)
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?tags=s3"
```
#### list file rekognition json HTTP GET
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?tags=rekognition"
```
#### set s3object tag HTTP GET
```
curl -X GET "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?label=tag_name&value=something"
```
#### delete s3object tag HTTP GET
```
curl -X GET "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?delete=tag_name"
```
#### set or update s3object tag HTTP PATCH (single key/value)
```
curl -X PATCH \
-H "Content-Type: application/json" \
-d '{"labeler":"karl"}' \
http://127.0.0.1:8880/s3/<s3bucket>/<s3object>
```
#### delete s3object tag HTTP DELETE
```
curl -X DELETE "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?tag=tag_name"
```
#### set s3object tag set HTTP PUT (Warning: this method overwrites the entire tagset)
```
curl -X PUT \
-H "Content-Type: application/json" \
-d '{"labeler":"karl","image_url":"https://<s3bucket>.s3.us-west-2.amazonaws.com/<s3object>"}' \
http://127.0.0.1:8880/s3/<s3bucket>/<s3object>
```
### HTTP POST
two available endpoints
```
/s3/<s3bucket>/<s3object>
```
and/or
```
/api
```
http post json or form data to s3object endpoint
#### set or update s3object tag HTTP POST (single key/value) s3object endpoint
```
curl -X POST \
-H "Content-Type: application/json" \
-d '{"labeler":"karl"}' \
http://127.0.0.1:8880/s3/<s3bucket>/<s3object>
```
```
<form action="http://127.0.0.1:8880/s3/<s3bucket>/<s3object>" method="POST">
<input type="text" name="label" value="label">
<input type="text" name="value" value="value">
<input type="submit">
</form>
```
http post json or form data to api endpoint
#### set or update s3object tag HTTP POST /api endpoint
```
curl -X POST \
-H "Content-Type: application/json" \
-d '{"s3bucket":"<s3bucket>","s3object":"<s3object>","labeler":"karl"}' \
http://127.0.0.1:8880/api
```
```
<form action="http://127.0.0.1:8880/api" method="POST">
<input type="text" name="s3bucket" value="s3bucket">
<input type="text" name="s3object" value="s3object">
<input type="text" name="label" value="label">
<input type="text" name="value" value="value">
<input type="submit">
</form>
```
---
#### get rekognition json HTTP GET (same as ?tags=rekognition)
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=json"
```
#### get rekognition words HTTP GET
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=words"
```
#### set rekognition words to s3object tag
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=words&save=s3tag"
```
#### get rekognition confidence HTTP GET
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=confidence"
```
#### get rekognition confidence top 3 HTTP GET
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=confidence&top=3"
```
#### get rekognition confidence top 90 percent HTTP GET
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=confidence&top=90&percent=true"
```
#### set rekognition confidence to s3object tag (all confidence words)
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=confidence&save=s3tag"
```
#### set rekognition confidence to s3object tag (top 3 confidence)
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=confidence&top=3&save=s3tag"
```
#### set rekognition confidence to s3object tag (top 90 percent)
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=confidence&top=90&percent=true&save=s3tag"
```
### run image through aws rekognition
#### run image through aws rekognition detect-labels HTTP GET
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=detect-labels"
```
#### run image through aws rekognition detect-labels and save/overwrite with new json HTTP GET
```
curl "http://127.0.0.1:8880/s3/<s3bucket>/<s3object>?rekognition=detect-labels&save=true"
```
---
## run from source
```
python3 src/s3labeler/s3labeler.py
```
## run as python module
```
cd src/ && python3 -m s3labeler
```
---
Image label manager
Run an image through Rekognition only once (to prevent recurring costs), then update the S3 object's tags.
---
tech notes
- aws rekognition detect-labels leverages Amazon's proprietary ML model for image recognition
- image file bytes are not transferred or processed through this interface for rekognition
https://aws.amazon.com/s3/
https://aws.amazon.com/rekognition/
| /s3labeler-1.0.5.tar.gz/s3labeler-1.0.5/README.md | 0.772402 | 0.753852 | README.md | pypi |
# [S3pipe](https://pypi.org/project/sphericalunet/)
Python-based spherical cortical surface processing tools, including spherical resampling, interpolation, parcellation, registration, atlas construction, etc. It provides fast and accurate cortical surface-based data analysis using deep learning techniques.
## Install
Create the conda environment and activate it:
```
conda env create -f environment.yml
conda activate s3env
```
Then install PyTorch from https://pytorch.org/get-started/locally/ with the correct GPU/CPU and CUDA choices, and install ParaView ***5.10.1*** on Ubuntu 20.04.
## Main tools
[**I/O vtk file**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/main/sphericalunet/utils/vtk.py). Python functions for reading and writing .vtk surface files. Example code:
```
from sphericalunet.utils.vtk import read_vtk, write_vtk
surf = read_vtk(file_name)
# some operations to the surface
write_vtk(surf, new_file_name)
```
For MATLAB users, please refer to this [issue](https://github.com/zhaofenqiang/Spherical_U-Net/issues/3#issuecomment-763334969) and this [repository](https://github.com/Zhengwang-Wu/CorticalSurfaceMetric).
[**Layers**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/main/sphericalunet/layers.py) provide basic spherical convolution, pooling, upsampling layers for constructing spherical convolutional neural networks.
[**Models**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/main/sphericalunet/model.py) provide some baseline spherical convolutional neural networks, e.g., [Spherical U-Net](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/6d6c50fc6d20a0d7a2c09669ff1e9e7c78f82007/sphericalunet/model.py#L92), Spherical SegNet, Spherical VGG, Spherical ResNet, Spherical CycleGAN, etc.
[**Resample feature**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/d838ced91b6878d78e81a3350db01fcbb2591286/sphericalunet/utils/interp_numpy.py#L211) on spherical surface to standard icosahedron subdivision spheres. Example code:
```
import numpy as np

from sphericalunet.utils.interp_numpy import resampleSphereSurf
from sphericalunet.utils.utils import get_neighs_order  # assumed location; adjust to your installed version
from sphericalunet.utils.vtk import read_vtk, write_vtk
template_163842 = read_vtk('/media/fenqiang/DATA/unc/Data/Template/sphere_163842.vtk')
neigh_orders_163842 = get_neighs_order('neigh_indices/adj_mat_order_163842_rotated_' + str(0) + '.mat')
data = read_vtk(file)  # "file" is the path to the input .vtk surface
resampled_feat = resampleSphereSurf(data['vertices'], template_163842['vertices'],
np.concatenate((data['sulc'][:,np.newaxis], data['curv'][:,np.newaxis]), axis=1),
neigh_orders=neigh_orders_163842)
surf = {'vertices': template_163842['vertices'],
'faces': template_163842['faces'],
'sulc': resampled_feat[:,0],
'curv': resampled_feat[:,1]}
write_vtk(surf, file.replace('.vtk', '.resample.vtk'))
```
Note if you want to run it on GPU, change `interp_numpy` to `interp_torch`.
[**Resample label**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/d838ced91b6878d78e81a3350db01fcbb2591286/sphericalunet/utils/interp_numpy.py#L211) on spherical surface to standard icosahedron subdivision spheres. Example code:
```
from sphericalunet.utils.vtk import read_vtk, write_vtk, resample_label
template_163842 = read_vtk('/media/ychenp/DATA/unc/Data/Template/sphere_163842.vtk')
surf = read_vtk(file)
resampled_par = resample_label(surf['vertices'], template_163842['vertices'], surf['par_fs_vec'])
```
[**Smooth feature**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/d838ced91b6878d78e81a3350db01fcbb2591286/sphericalunet/utils/vtk.py#L131) on spherical surface.
[**Cortical surface parcellation**](https://github.com/zhaofenqiang/Spherical_U-Net) with trained models based on this package.
[**Deformable cortical surface registration**](https://github.com/zhaofenqiang/spherical-registration) with trained models based on this package.
[**Rigid cortical surface registration**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/main/sphericalunet/utils/initial_rigid_align.py). An example code can be found [here](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/main/example/initialRigidAlignUsingSearch_longleaf.py).
[**Check folded triangles**](https://github.com/zhaofenqiang/SphericalUNetPackage/blob/6d6c50fc6d20a0d7a2c09669ff1e9e7c78f82007/sphericalunet/utils/utils.py#L294), and correct them (not implemented yet...).
## Papers
If you use this code, please cite:
Fenqiang Zhao, et al. [Spherical U-Net on Cortical Surfaces: Methods and Applications](https://link.springer.com/chapter/10.1007/978-3-030-20351-1_67). Information Processing in Medical Imaging (IPMI), 2019.
Fenqiang Zhao, et al. [Spherical Deformable U-Net: Application to Cortical Surface Parcellation and Development Prediction](https://ieeexplore.ieee.org/document/9316936). IEEE Transactions on Medical Imaging, 2021.
Fenqiang Zhao, et al. [S3Reg: Superfast Spherical Surface Registration Based on Deep Learning](https://ieeexplore.ieee.org/document/9389746). IEEE Transactions on Medical Imaging, 2021.
| /s3pipe-1.2.0.tar.gz/s3pipe-1.2.0/README.md | 0.44746 | 0.918845 | README.md | pypi |
import logging
from fastdtw import fastdtw
import librosa
import numpy as np
import scipy
from s3prl_vc.utils.signal import world_extract, extfrm
def calculate_mcd_f0(x, y, fs, f0min, f0max):
"""
x and y must be in range [-1, 1]
"""
# extract ground truth and converted features
gt_feats = world_extract(x, fs, f0min, f0max)
cvt_feats = world_extract(y, fs, f0min, f0max)
# VAD & DTW based on power
gt_mcep_nonsil_pow = extfrm(gt_feats["mcep"], gt_feats["npow"])
cvt_mcep_nonsil_pow = extfrm(cvt_feats["mcep"], cvt_feats["npow"])
_, path = fastdtw(
cvt_mcep_nonsil_pow, gt_mcep_nonsil_pow, dist=scipy.spatial.distance.euclidean
)
twf_pow = np.array(path).T
# MCD using power-based DTW
cvt_mcep_dtw_pow = cvt_mcep_nonsil_pow[twf_pow[0]]
gt_mcep_dtw_pow = gt_mcep_nonsil_pow[twf_pow[1]]
diff2sum = np.sum((cvt_mcep_dtw_pow - gt_mcep_dtw_pow) ** 2, 1)
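# mel-cepstral distortion in dB: (10 / ln 10) * sqrt(2 * sum of squared mcep differences), averaged over frames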
mcd = np.mean(10.0 / np.log(10.0) * np.sqrt(2 * diff2sum), 0)
# VAD & DTW based on f0
gt_nonsil_f0_idx = np.where(gt_feats["f0"] > 0)[0]
cvt_nonsil_f0_idx = np.where(cvt_feats["f0"] > 0)[0]
try:
gt_mcep_nonsil_f0 = gt_feats["mcep"][gt_nonsil_f0_idx]
cvt_mcep_nonsil_f0 = cvt_feats["mcep"][cvt_nonsil_f0_idx]
_, path = fastdtw(
cvt_mcep_nonsil_f0, gt_mcep_nonsil_f0, dist=scipy.spatial.distance.euclidean
)
twf_f0 = np.array(path).T
# f0RMSE, f0CORR using f0-based DTW
cvt_f0_dtw = cvt_feats["f0"][cvt_nonsil_f0_idx][twf_f0[0]]
gt_f0_dtw = gt_feats["f0"][gt_nonsil_f0_idx][twf_f0[1]]
f0rmse = np.sqrt(np.mean((cvt_f0_dtw - gt_f0_dtw) ** 2))
f0corr = scipy.stats.pearsonr(cvt_f0_dtw, gt_f0_dtw)[0]
except ValueError:
logging.warning(
"No nonzero f0 is found. Skip f0rmse f0corr computation and set them to NaN. "
"This might due to unconverge training. Please tune the training time and hypers."
)
f0rmse = np.nan
f0corr = np.nan
# DDUR
# energy-based VAD with librosa
x_trim, _ = librosa.effects.trim(y=x)
y_trim, _ = librosa.effects.trim(y=y)
ddur = float(abs(len(x_trim) - len(y_trim)) / fs)
return mcd, f0rmse, f0corr, ddur | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/evaluate/dtw_based.py | 0.406862 | 0.247996 | dtw_based.py | pypi |
import editdistance as ed
import jiwer
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
ASR_PRETRAINED_MODEL = "facebook/wav2vec2-large-960h-lv60-self"
def load_asr_model(device):
"""Load model"""
print(f"[INFO]: Load the pre-trained ASR by {ASR_PRETRAINED_MODEL}.")
model = Wav2Vec2ForCTC.from_pretrained(ASR_PRETRAINED_MODEL).to(device)
tokenizer = Wav2Vec2Tokenizer.from_pretrained(ASR_PRETRAINED_MODEL)
models = {"model": model, "tokenizer": tokenizer}
return models
def normalize_sentence(sentence):
"""Normalize sentence"""
# Convert all characters to upper.
sentence = sentence.upper()
# Delete punctuations.
sentence = jiwer.RemovePunctuation()(sentence)
# Remove \n, \t, \r, \x0c.
sentence = jiwer.RemoveWhiteSpace(replace_by_space=True)(sentence)
# Remove multiple spaces.
sentence = jiwer.RemoveMultipleSpaces()(sentence)
# Remove white space in two end of string.
sentence = jiwer.Strip()(sentence)
return sentence
def calculate_measures(groundtruth, transcription):
"""Calculate character/word measures (hits, subs, inserts, deletes) for one given sentence"""
groundtruth = normalize_sentence(groundtruth)
transcription = normalize_sentence(transcription)
# cer = ed.eval(transcription, groundtruth) / len(groundtruth)
# c_result = jiwer.compute_measures([c for c in groundtruth if c != " "], [c for c in transcription if c != " "])
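# jiwer returns measure dicts (hits, substitutions, deletions, insertions, ...)
# from which per-sentence CER/WER can be derived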
c_result = jiwer.cer(groundtruth, transcription, return_dict=True)
w_result = jiwer.compute_measures(groundtruth, transcription)
return c_result, w_result, groundtruth, transcription
def transcribe(model, device, wav):
"""Calculate score on one single waveform"""
# preparation
inputs = model["tokenizer"](
wav, sampling_rate=16000, return_tensors="pt", padding="longest"
)
input_values = inputs.input_values.to(device)
attention_mask = inputs.attention_mask.to(device)
# forward
logits = model["model"](input_values, attention_mask=attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = model["tokenizer"].batch_decode(predicted_ids)[0]
return transcription | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/evaluate/asr.py | 0.779154 | 0.344678 | asr.py | pypi |
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Decode directly from upstream features with trained model."""
import argparse
import logging
import os
import time
import numpy as np
import soundfile as sf
import torch
import yaml
from tqdm import tqdm
from s3prl.nn import Featurizer
import s3prl_vc.models
from s3prl_vc.upstream.interface import get_upstream
from s3prl_vc.datasets.datasets import FeatDataset
from s3prl_vc.utils import read_hdf5, write_hdf5
from s3prl_vc.utils.data import pad_list
from s3prl_vc.utils.plot import plot_generated_and_ref_2d, plot_1d
from s3prl_vc.vocoder import Vocoder
def main():
"""Run decoding process."""
parser = argparse.ArgumentParser(
description=("Decode with trained model " "(See detail in bin/decode.py).")
)
parser.add_argument(
"--featdir",
required=True,
type=str,
help=("directory including input feat files."),
)
parser.add_argument(
"--trg-stats",
type=str,
required=True,
help="stats file for target denormalization.",
)
parser.add_argument(
"--outdir",
type=str,
required=True,
help="directory to save generated speech.",
)
parser.add_argument(
"--checkpoint",
type=str,
required=True,
help="checkpoint file to be loaded.",
)
parser.add_argument(
"--config",
default=None,
type=str,
help=(
"yaml format configuration file. if not explicitly provided, "
"it will be searched in the checkpoint directory. (default=None)"
),
)
parser.add_argument(
"--feat_type",
type=str,
default="feats",
help=("feature type. this is used as key name to read h5 featyre files. "),
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="logging level. higher is more logging. (default=1)",
)
args = parser.parse_args()
# set logger
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# load config
if args.config is None:
dirname = os.path.dirname(args.checkpoint)
args.config = os.path.join(dirname, "config.yml")
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update(vars(args))
# setup device
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
# load target stats for denormalization
config["trg_stats"] = {
"mean": torch.from_numpy(read_hdf5(args.trg_stats, "mean")).float().to(device),
"scale": torch.from_numpy(read_hdf5(args.trg_stats, "scale"))
.float()
.to(device),
}
# get dataset
dataset = FeatDataset(
args.featdir,
config,
query="*.h5",
load_fn=lambda x: read_hdf5(x, args.feat_type), # NOQA
return_utt_id=True,
)
logging.info(f"The number of features to be decoded = {len(dataset)}.")
# define upstream model
upstream_model = get_upstream(config["upstream"]).to(device)
upstream_featurizer = Featurizer(upstream_model).to(device)
# get model and load parameters
model_class = getattr(s3prl_vc.models, config["model_type"])
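# the third constructor argument is the resample ratio: number of target mel frames
# per upstream feature frame, i.e. (sampling_rate / hop_size) / (16000 / downsample_rate)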
model = model_class(
upstream_featurizer.output_size,
config["num_mels"],
config["sampling_rate"]
/ config["hop_size"]
* upstream_featurizer.downsample_rate
/ 16000,
config["trg_stats"],
**config["model_params"],
).to(device)
model.load_state_dict(torch.load(args.checkpoint, map_location="cpu")["model"])
model = model.eval().to(device)
logging.info(f"Loaded model parameters from {args.checkpoint}.")
# load vocoder
if config.get("vocoder", False):
vocoder = Vocoder(
config["vocoder"]["checkpoint"],
config["vocoder"]["config"],
config["vocoder"]["stats"],
config["trg_stats"],
device,
)
# start generation
with torch.no_grad():
for items in tqdm(dataset):
utt_id = items["utt_id"]
x = items["feat"]
xs = torch.from_numpy(x).unsqueeze(0).float().to(device)
ilens = torch.LongTensor([x.shape[0]]).to(device)
start_time = time.time()
outs, _ = model(xs, ilens, spk_embs=None)
out = outs[0]
logging.info(
"inference speed = %.1f frames / sec."
% (int(out.size(0)) / (time.time() - start_time))
)
plot_generated_and_ref_2d(
out.cpu().numpy(),
config["outdir"] + f"/plot_mel/{utt_id}.png",
origin="lower",
)
# write feats
if not os.path.exists(os.path.join(config["outdir"], "mel")):
os.makedirs(os.path.join(config["outdir"], "mel"), exist_ok=True)
write_hdf5(
config["outdir"] + f"/mel/{utt_id}.h5",
"mel",
out.cpu().numpy().astype(np.float32),
)
# write waveform
if not os.path.exists(os.path.join(config["outdir"], "wav")):
os.makedirs(os.path.join(config["outdir"], "wav"), exist_ok=True)
y, sr = vocoder.decode(out)
sf.write(
os.path.join(config["outdir"], "wav", f"{utt_id}.wav"),
y.cpu().numpy(),
sr,
"PCM_16",
)
if __name__ == "__main__":
main() | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/bin/decode_downstream.py | 0.72526 | 0.175326 | decode_downstream.py | pypi |
import argparse
import logging
import os
from pathlib import Path
from joblib import Parallel, delayed
import librosa
import matplotlib
import numpy as np
from s3prl_vc.utils.signal import world_extract
from s3prl_vc.utils import find_files
matplotlib.use("Agg") # noqa #isort:skip
import matplotlib.pyplot as plt # noqa isort:skip
def create_histogram(
data, figure_path, range_min=-70, range_max=20, step=10, xlabel="Power [dB]"
):
"""Create histogram
Parameters
----------
data : list,
List of several data sequences
figure_path : str,
Filepath to be output figure
range_min : int, optional,
Minimum range for histogram
Default set to -70
range_max : int, optional,
Maximum range for histogram
Default set to 20
step : int, optional
Step size of label in horizontal axis
Default set to 10
xlabel : str, optional
Label of the horizontal axis
Default set to 'Power [dB]'
"""
# plot histogram
plt.hist(
data,
bins=200,
range=(range_min, range_max),
density=True,
histtype="stepfilled",
)
plt.xlabel(xlabel)
plt.ylabel("Probability")
plt.xticks(np.arange(range_min, range_max, step))
figure_dir = os.path.dirname(figure_path)
if not os.path.exists(figure_dir):
os.makedirs(figure_dir)
plt.savefig(figure_path)
plt.close()
def extract_f0_and_npow(wavf, f0min=40, f0max=500):
"""
F0 and npow extraction
Parameters
----------
wavf : str,
File path of waveform file
Returns
-------
dict :
Dictionary consisting of F0 and npow arrays
"""
x, fs = librosa.load(wavf, sr=None)
return world_extract(x, fs, f0min, f0max)
def main():
dcp = "Create histogram for speaker-dependent configure"
parser = argparse.ArgumentParser(description=dcp)
parser.add_argument("--n_jobs", type=int, default=16, help="# of CPUs")
parser.add_argument(
"--wav_dir", type=str, default=None, help="Directory of wav file"
)
parser.add_argument(
"--scp",
default=None,
type=str,
help="kaldi-style wav.scp file.",
)
parser.add_argument("--figure_dir", type=str, help="Directory for figure output")
args = parser.parse_args()
# set logger
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
f0histogrampath = os.path.join(args.figure_dir, "f0histogram.png")
npowhistogrampath = os.path.join(args.figure_dir, "npowhistogram.png")
if not os.path.exists(f0histogrampath) and not os.path.exists(npowhistogrampath):
# sanity check
assert (args.scp is None and args.wav_dir is not None) or (
args.scp is not None and args.wav_dir is None
), "Please assure only either --scp or --wav_dir is specified."
# get file list
if args.scp is not None:
with open(args.scp, "r") as f:
file_list = [line.split(" ")[1] for line in f.read().splitlines()]
else:
file_list = sorted(find_files(args.wav_dir))
# extract features in parallel
results = Parallel(n_jobs=args.n_jobs)(
[delayed(extract_f0_and_npow)(str(f)) for f in file_list]
)
# parse results
f0s = [r["f0"] for r in results]
npows = [r["npow"] for r in results]
# stack feature vectors
f0s = np.hstack(f0s).flatten()
npows = np.hstack(npows).flatten()
# create a histogram to visualize F0 range of the speaker
create_histogram(
f0s,
f0histogrampath,
range_min=40,
range_max=700,
step=50,
xlabel="Fundamental frequency [Hz]",
)
# create a histogram to visualize npow range of the speaker
create_histogram(
npows,
npowhistogrampath,
range_min=-70,
range_max=20,
step=10,
xlabel="Frame power [dB]",
)
if __name__ == "__main__":
main() | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/bin/create_histogram.py | 0.707405 | 0.337859 | create_histogram.py | pypi |
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Calculate statistics of feature files."""
import argparse
import logging
import os
import numpy as np
import yaml
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from tqdm import tqdm
from s3prl_vc.utils import write_hdf5
from s3prl_vc.datasets.datasets import AudioSCPMelDataset
def main():
"""Run preprocessing process."""
parser = argparse.ArgumentParser(
description=(
"Compute mean and variance of dumped raw features "
"(See detail in bin/compute_statistics.py)."
)
)
parser.add_argument(
"--scp",
default=None,
type=str,
required=True,
help=(
"kaldi-style feats.scp file. "
"you need to specify either feats-scp or rootdir."
),
)
parser.add_argument(
"--config",
type=str,
required=True,
help="yaml format configuration file.",
)
parser.add_argument(
"--dumpdir",
default=None,
type=str,
required=True,
help=(
"directory to save statistics. if not provided, "
"stats will be saved in the above root directory. (default=None)"
),
)
parser.add_argument("--f0", action="store_true", help="calculate f0 statistics")
parser.add_argument(
"--f0_path", default=None, type=str, help="yaml file storing f0 ranges"
)
parser.add_argument(
"--spk", default=None, type=str, help="speaker (for getting the f0 range)"
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="logging level. higher is more logging. (default=1)",
)
args = parser.parse_args()
# set logger
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# load config
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update(vars(args))
# load config
if args.f0:
with open(args.f0_path) as f:
f0_config = yaml.load(f, Loader=yaml.Loader)
f0min = f0_config[args.spk]["f0min"]
f0max = f0_config[args.spk]["f0max"]
# check directory existence
if not os.path.exists(args.dumpdir):
os.makedirs(args.dumpdir)
# get dataset
if args.f0:
dataset = AudioSCPMelDataset(
config,
args.scp,
extract_f0=config.get("use_f0", False),
f0_extractor=config.get("f0_extractor", "world"),
f0_min=f0min,
f0_max=f0max,
log_f0=config.get("log_f0", True),
)
else:
dataset = AudioSCPMelDataset(
config,
args.scp,
)
logging.info(f"The number of files = {len(dataset)}.")
# calculate statistics
scaler = StandardScaler()
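# partial_fit accumulates mean/variance incrementally, so the full feature set
# never needs to be held in memory at once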
for items in tqdm(dataset):
mel = items["mel"]
scaler.partial_fit(mel)
# write statistics
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"mean",
scaler.mean_.astype(np.float32),
)
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"scale",
scaler.scale_.astype(np.float32),
)
if args.f0:
scaler = StandardScaler()
minmaxscaler = MinMaxScaler()
for items in tqdm(dataset):
f0 = items["f0"]
f0 = f0[f0 > 0]
scaler.partial_fit(f0.reshape([-1, 1]))
minmaxscaler.partial_fit(f0.reshape([-1, 1]))
# write statistics
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"lf0_mean",
scaler.mean_.astype(np.float32),
)
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"lf0_scale",
scaler.scale_.astype(np.float32),
)
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"lf0_max",
minmaxscaler.data_max_.astype(np.float32),
)
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"lf0_min",
minmaxscaler.data_min_.astype(np.float32),
)
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"f0_max",
f0max,
)
write_hdf5(
os.path.join(args.dumpdir, "stats.h5"),
"f0_min",
f0min,
)
if __name__ == "__main__":
main() | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/bin/compute_statistics.py | 0.746971 | 0.164047 | compute_statistics.py | pypi |
import torch
def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
"""Make mask tensor containing indices of padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
Tensor: Mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_pad_mask(lengths)
masks = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 1],
[0, 0, 0, 1]],
[[0, 0, 1, 1],
[0, 0, 1, 1]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_pad_mask(lengths, xs, 1)
tensor([[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
>>> make_pad_mask(lengths, xs, 2)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
"""
if length_dim == 0:
raise ValueError("length_dim cannot be 0: {}".format(length_dim))
if not isinstance(lengths, list):
lengths = lengths.long().tolist()
bs = int(len(lengths))
if maxlen is None:
if xs is None:
maxlen = int(max(lengths))
else:
maxlen = xs.size(length_dim)
else:
assert xs is None
assert maxlen >= int(max(lengths))
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
if xs is not None:
assert xs.size(0) == bs, (xs.size(0), bs)
if length_dim < 0:
length_dim = xs.dim() + length_dim
# ind = (:, None, ..., None, :, , None, ..., None)
ind = tuple(
slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
)
mask = mask[ind].expand_as(xs).to(xs.device)
return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of non-padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
ByteTensor: mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 0],
[1, 1, 1, 0]],
[[1, 1, 0, 0],
[1, 1, 0, 0]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_non_pad_mask(lengths, xs, 1)
tensor([[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
>>> make_non_pad_mask(lengths, xs, 2)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
"""
return ~make_pad_mask(lengths, xs, length_dim) | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/layers/utils.py | 0.898689 | 0.784629 | utils.py | pypi |
import numpy as np
import pysptk
import pyworld as pw
from scipy.signal import firwin
from scipy.signal import lfilter
MCEP_DIM = 39
MCEP_ALPHA = 0.466
MCEP_SHIFT = 5
MCEP_FFTL = 1024
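# mel-cepstral analysis settings: cepstral order, all-pass (frequency warping) constant,
# frame shift in ms, and FFT length passed to WORLD/pysptk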
def low_cut_filter(x, fs, cutoff=70):
"""FUNCTION TO APPLY LOW CUT FILTER
Args:
x (ndarray): Waveform sequence
fs (int): Sampling frequency
cutoff (float): Cutoff frequency of low cut filter
Return:
(ndarray): Low cut filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def spc2npow(spectrogram):
"""Calculate normalized power sequence from spectrogram
Parameters
----------
spectrogram : array, shape (T, `fftlen / 2 + 1`)
Array of spectrum envelope
Return
------
npow : array, shape (`T`, `1`)
Normalized power sequence
"""
# frame based processing
npow = np.apply_along_axis(_spvec2pow, 1, spectrogram)
meanpow = np.mean(npow)
npow = 10.0 * np.log10(npow / meanpow)
return npow
def _spvec2pow(specvec):
"""Convert a spectrum envelope into a power
Parameters
----------
specvec : vector, shape (`fftlen / 2 + 1`)
Vector of specturm envelope |H(w)|^2
Return
------
power : scala,
Power of a frame
"""
# set FFT length
fftl2 = len(specvec) - 1
fftl = fftl2 * 2
# specvec is not amplitude spectral |H(w)| but power spectral |H(w)|^2
power = specvec[0] + specvec[fftl2]
for k in range(1, fftl2):
power += 2.0 * specvec[k]
power /= fftl
return power
def extfrm(data, npow, power_threshold=-20):
"""Extract frame over the power threshold
Parameters
----------
data: array, shape (`T`, `dim`)
Array of input data
npow : array, shape (`T`)
Vector of normalized power sequence.
power_threshold : float, optional
Value of power threshold [dB]
Default set to -20
Returns
-------
data: array, shape (`T_ext`, `dim`)
Remaining data after extracting frame
`T_ext` <= `T`
"""
T = data.shape[0]
if T != len(npow):
raise ("Length of two vectors is different.")
valid_index = np.where(npow > power_threshold)
extdata = data[valid_index]
assert extdata.shape[0] <= T
return extdata
def world_extract(x, fs, f0min, f0max):
# scale from [-1, 1] to [-32768, 32767]
x = x * np.iinfo(np.int16).max
x = np.array(x, dtype=np.float64)
x = low_cut_filter(x, fs)
# extract features
f0, time_axis = pw.harvest(
x, fs, f0_floor=f0min, f0_ceil=f0max, frame_period=MCEP_SHIFT
)
sp = pw.cheaptrick(x, f0, time_axis, fs, fft_size=MCEP_FFTL)
ap = pw.d4c(x, f0, time_axis, fs, fft_size=MCEP_FFTL)
mcep = pysptk.sp2mc(sp, MCEP_DIM, MCEP_ALPHA)
npow = spc2npow(sp)
return {
"sp": sp,
"mcep": mcep,
"ap": ap,
"f0": f0,
"npow": npow,
} | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/utils/signal.py | 0.876872 | 0.59884 | signal.py | pypi |
from typing import Tuple
import torch
from s3prl_vc.upstream.ppg_sxliu.nets_utils import make_pad_mask
class UtteranceMVN(torch.nn.Module):
def __init__(
self, norm_means: bool = True, norm_vars: bool = False, eps: float = 1.0e-20,
):
super().__init__()
self.norm_means = norm_means
self.norm_vars = norm_vars
self.eps = eps
def extra_repr(self):
return f"norm_means={self.norm_means}, norm_vars={self.norm_vars}"
def forward(
self, x: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward function
Args:
x: (B, L, ...)
ilens: (B,)
"""
return utterance_mvn(
x,
ilens,
norm_means=self.norm_means,
norm_vars=self.norm_vars,
eps=self.eps,
)
def utterance_mvn(
x: torch.Tensor,
ilens: torch.Tensor = None,
norm_means: bool = True,
norm_vars: bool = False,
eps: float = 1.0e-20,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply utterance mean and variance normalization
Args:
x: (B, T, D), assumed zero padded
ilens: (B,)
norm_means:
norm_vars:
eps:
"""
if ilens is None:
ilens = x.new_full([x.size(0)], x.size(1))
ilens_ = ilens.to(x.device, x.dtype).view(-1, *[1 for _ in range(x.dim() - 1)])
# Zero padding
if x.requires_grad:
x = x.masked_fill(make_pad_mask(ilens, x, 1), 0.0)
else:
x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0)
# mean: (B, 1, D)
mean = x.sum(dim=1, keepdim=True) / ilens_
if norm_means:
x -= mean
if norm_vars:
var = x.pow(2).sum(dim=1, keepdim=True) / ilens_
std = torch.clamp(var.sqrt(), min=eps)
x = x / std
return x, ilens
else:
if norm_vars:
y = x - mean
y.masked_fill_(make_pad_mask(ilens, y, 1), 0.0)
var = y.pow(2).sum(dim=1, keepdim=True) / ilens_
std = torch.clamp(var.sqrt(), min=eps)
x /= std
return x, ilens | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/utterance_mvn.py | 0.946794 | 0.547464 | utterance_mvn.py | pypi |
import librosa
import numpy as np
import torch
from typing import Tuple
from s3prl_vc.upstream.ppg_sxliu.nets_utils import make_pad_mask
class LogMel(torch.nn.Module):
"""Convert STFT to fbank feats
The arguments is same as librosa.filters.mel
Args:
fs: number > 0 [scalar] sampling rate of the incoming signal
n_fft: int > 0 [scalar] number of FFT components
n_mels: int > 0 [scalar] number of Mel bands to generate
fmin: float >= 0 [scalar] lowest frequency (in Hz)
fmax: float >= 0 [scalar] highest frequency (in Hz).
If `None`, use `fmax = fs / 2.0`
htk: use HTK formula instead of Slaney
norm: {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
"""
def __init__(
self,
fs: int = 16000,
n_fft: int = 512,
n_mels: int = 80,
fmin: float = None,
fmax: float = None,
htk: bool = False,
norm=1,
):
super().__init__()
fmin = 0 if fmin is None else fmin
fmax = fs / 2 if fmax is None else fmax
_mel_options = dict(
sr=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax, htk=htk, norm=norm
)
self.mel_options = _mel_options
# Note(kamo): The mel matrix of librosa is different from kaldi.
melmat = librosa.filters.mel(**_mel_options)
# melmat: (D2, D1) -> (D1, D2)
self.register_buffer("melmat", torch.from_numpy(melmat.T).float())
inv_mel = np.linalg.pinv(melmat)
self.register_buffer("inv_melmat", torch.from_numpy(inv_mel.T).float())
def extra_repr(self):
return ", ".join(f"{k}={v}" for k, v in self.mel_options.items())
def forward(
self, feat: torch.Tensor, ilens: torch.Tensor = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
# feat: (B, T, D1) x melmat: (D1, D2) -> mel_feat: (B, T, D2)
mel_feat = torch.matmul(feat, self.melmat)
logmel_feat = (mel_feat + 1e-20).log()
# Zero padding
if ilens is not None:
logmel_feat = logmel_feat.masked_fill(
make_pad_mask(ilens, logmel_feat, 1), 0.0
)
else:
ilens = feat.new_full(
[feat.size(0)], fill_value=feat.size(1), dtype=torch.long
)
return logmel_feat, ilens | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/log_mel.py | 0.94323 | 0.615203 | log_mel.py | pypi |
import logging
from typing import Dict
import numpy as np
import torch
def to_device(m, x):
"""Send tensor into the device of the module.
Args:
m (torch.nn.Module): Torch module.
x (Tensor): Torch tensor.
Returns:
Tensor: Torch tensor located in the same place as torch module.
"""
assert isinstance(m, torch.nn.Module)
device = next(m.parameters()).device
return x.to(device)
def pad_list(xs, pad_value):
"""Perform padding for the list of tensors.
Args:
xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
pad_value (float): Value for padding.
Returns:
Tensor: Padded tensor (B, Tmax, `*`).
Examples:
>>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
>>> x
[tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
>>> pad_list(x, 0)
tensor([[1., 1., 1., 1.],
[1., 1., 0., 0.],
[1., 0., 0., 0.]])
"""
n_batch = len(xs)
max_len = max(x.size(0) for x in xs)
pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)
for i in range(n_batch):
pad[i, :xs[i].size(0)] = xs[i]
return pad
def make_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor. If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor. See the example.
Returns:
Tensor: Mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[0, 0, 0, 0 ,0],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 1],
[0, 0, 0, 1]],
[[0, 0, 1, 1],
[0, 0, 1, 1]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_pad_mask(lengths, xs, 1)
tensor([[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
>>> make_pad_mask(lengths, xs, 2)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
"""
if length_dim == 0:
raise ValueError('length_dim cannot be 0: {}'.format(length_dim))
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
if xs is None:
maxlen = int(max(lengths))
else:
maxlen = xs.size(length_dim)
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
if xs is not None:
assert xs.size(0) == bs, (xs.size(0), bs)
if length_dim < 0:
length_dim = xs.dim() + length_dim
# ind = (:, None, ..., None, :, , None, ..., None)
ind = tuple(slice(None) if i in (0, length_dim) else None
for i in range(xs.dim()))
mask = mask[ind].expand_as(xs).to(xs.device)
return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of non-padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor. If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor. See the example.
Returns:
ByteTensor: mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 0],
[1, 1, 1, 0]],
[[1, 1, 0, 0],
[1, 1, 0, 0]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_non_pad_mask(lengths, xs, 1)
tensor([[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
>>> make_non_pad_mask(lengths, xs, 2)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
"""
return ~make_pad_mask(lengths, xs, length_dim)
def mask_by_length(xs, lengths, fill=0):
"""Mask tensor according to length.
Args:
xs (Tensor): Batch of input tensor (B, `*`).
lengths (LongTensor or List): Batch of lengths (B,).
fill (int or float): Value to fill masked part.
Returns:
Tensor: Batch of masked input tensor (B, `*`).
Examples:
>>> x = torch.arange(5).repeat(3, 1) + 1
>>> x
tensor([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5]])
>>> lengths = [5, 3, 2]
>>> mask_by_length(x, lengths)
tensor([[1, 2, 3, 4, 5],
[1, 2, 3, 0, 0],
[1, 2, 0, 0, 0]])
"""
assert xs.size(0) == len(lengths)
ret = xs.data.new(*xs.size()).fill_(fill)
for i, l in enumerate(lengths):
ret[i, :l] = xs[i, :l]
return ret
def th_accuracy(pad_outputs, pad_targets, ignore_label):
"""Calculate accuracy.
Args:
pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
pad_targets (LongTensor): Target label tensors (B, Lmax, D).
ignore_label (int): Ignore label id.
Returns:
float: Accuracy value (0.0 - 1.0).
"""
pad_pred = pad_outputs.view(
pad_targets.size(0),
pad_targets.size(1),
pad_outputs.size(1)).argmax(2)
mask = pad_targets != ignore_label
numerator = torch.sum(pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
denominator = torch.sum(mask)
return float(numerator) / float(denominator)
def to_torch_tensor(x):
"""Change to torch.Tensor or ComplexTensor from numpy.ndarray.
Args:
x: Inputs. It should be one of numpy.ndarray, Tensor, ComplexTensor, and dict.
Returns:
Tensor or ComplexTensor: Type converted inputs.
Examples:
>>> xs = np.ones(3, dtype=np.float32)
>>> xs = to_torch_tensor(xs)
tensor([1., 1., 1.])
>>> xs = torch.ones(3, 4, 5)
>>> assert to_torch_tensor(xs) is xs
>>> xs = {'real': xs, 'imag': xs}
>>> to_torch_tensor(xs)
ComplexTensor(
Real:
tensor([1., 1., 1.])
Imag;
tensor([1., 1., 1.])
)
"""
# If numpy, change to torch tensor
if isinstance(x, np.ndarray):
if x.dtype.kind == 'c':
# Dynamically importing because torch_complex requires python3
from torch_complex.tensor import ComplexTensor
return ComplexTensor(x)
else:
return torch.from_numpy(x)
# If {'real': ..., 'imag': ...}, convert to ComplexTensor
elif isinstance(x, dict):
# Dynamically importing because torch_complex requires python3
from torch_complex.tensor import ComplexTensor
if 'real' not in x or 'imag' not in x:
raise ValueError("has 'real' and 'imag' keys: {}".format(list(x)))
# Relative importing because of using python3 syntax
return ComplexTensor(x['real'], x['imag'])
# If torch.Tensor, as it is
elif isinstance(x, torch.Tensor):
return x
else:
error = ("x must be numpy.ndarray, torch.Tensor or a dict like "
"{{'real': torch.Tensor, 'imag': torch.Tensor}}, "
"but got {}".format(type(x)))
try:
from torch_complex.tensor import ComplexTensor
except Exception:
# If PY2
raise ValueError(error)
else:
# If PY3
if isinstance(x, ComplexTensor):
return x
else:
raise ValueError(error)
def get_subsample(train_args, mode, arch):
"""Parse the subsampling factors from the training args for the specified `mode` and `arch`.
Args:
train_args: argument Namespace containing options.
mode: one of ('asr', 'mt', 'st')
arch: one of ('rnn', 'rnn-t', 'rnn_mix', 'rnn_mulenc', 'transformer')
Returns:
np.ndarray / List[np.ndarray]: subsampling factors.
"""
if arch == 'transformer':
return np.array([1])
elif mode == 'mt' and arch == 'rnn':
# +1 means input (+1) and layers outputs (train_args.elayer)
subsample = np.ones(train_args.elayers + 1, dtype=int)
logging.warning('Subsampling is not performed for machine translation.')
logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
return subsample
elif (mode == 'asr' and arch in ('rnn', 'rnn-t')) or \
(mode == 'mt' and arch == 'rnn') or \
(mode == 'st' and arch == 'rnn'):
subsample = np.ones(train_args.elayers + 1, dtype=int)
if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
ss = train_args.subsample.split("_")
for j in range(min(train_args.elayers + 1, len(ss))):
subsample[j] = int(ss[j])
else:
logging.warning(
'Subsampling is not performed for vgg*. It is performed in max pooling layers at CNN.')
logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
return subsample
elif mode == 'asr' and arch == 'rnn_mix':
subsample = np.ones(train_args.elayers_sd + train_args.elayers + 1, dtype=int)
if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
ss = train_args.subsample.split("_")
for j in range(min(train_args.elayers_sd + train_args.elayers + 1, len(ss))):
subsample[j] = int(ss[j])
else:
logging.warning(
'Subsampling is not performed for vgg*. It is performed in max pooling layers at CNN.')
logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
return subsample
elif mode == 'asr' and arch == 'rnn_mulenc':
subsample_list = []
for idx in range(train_args.num_encs):
subsample = np.ones(train_args.elayers[idx] + 1, dtype=int)
if train_args.etype[idx].endswith("p") and not train_args.etype[idx].startswith("vgg"):
ss = train_args.subsample[idx].split("_")
for j in range(min(train_args.elayers[idx] + 1, len(ss))):
subsample[j] = int(ss[j])
else:
logging.warning(
'Encoder %d: Subsampling is not performed for vgg*. '
'It is performed in max pooling layers at CNN.', idx + 1)
logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
subsample_list.append(subsample)
return subsample_list
else:
raise ValueError('Invalid options: mode={}, arch={}'.format(mode, arch))
def rename_state_dict(old_prefix: str, new_prefix: str, state_dict: Dict[str, torch.Tensor]):
"""Replace keys of old prefix with new prefix in state dict."""
# need this list not to break the dict iterator
old_keys = [k for k in state_dict if k.startswith(old_prefix)]
if len(old_keys) > 0:
logging.warning(f'Rename: {old_prefix} -> {new_prefix}')
for k in old_keys:
v = state_dict.pop(k)
new_k = k.replace(old_prefix, new_prefix)
state_dict[new_k] = v
def get_activation(act):
"""Return activation function."""
# Lazy load to avoid unused import
from .encoder.swish import Swish
activation_funcs = {
"hardtanh": torch.nn.Hardtanh,
"relu": torch.nn.ReLU,
"selu": torch.nn.SELU,
"swish": Swish,
}
return activation_funcs[act]() | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/nets_utils.py | 0.936489 | 0.780119 | nets_utils.py | pypi |
import logging
import six
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from .e2e_asr_common import get_vgg2l_odim
from .nets_utils import make_pad_mask, to_device
class RNNP(torch.nn.Module):
"""RNN with projection layer module
:param int idim: dimension of inputs
:param int elayers: number of encoder layers
:param int cdim: number of rnn units (resulted in cdim * 2 if bidirectional)
:param int hdim: number of projection units
:param np.ndarray subsample: list of subsampling numbers
:param float dropout: dropout rate
:param str typ: The RNN type
"""
def __init__(self, idim, elayers, cdim, hdim, subsample, dropout, typ="blstm"):
super(RNNP, self).__init__()
bidir = typ[0] == "b"
for i in six.moves.range(elayers):
if i == 0:
inputdim = idim
else:
inputdim = hdim
rnn = torch.nn.LSTM(inputdim, cdim, dropout=dropout, num_layers=1, bidirectional=bidir,
batch_first=True) if "lstm" in typ \
else torch.nn.GRU(inputdim, cdim, dropout=dropout, num_layers=1, bidirectional=bidir, batch_first=True)
setattr(self, "%s%d" % ("birnn" if bidir else "rnn", i), rnn)
# bottleneck layer to merge
if bidir:
setattr(self, "bt%d" % i, torch.nn.Linear(2 * cdim, hdim))
else:
setattr(self, "bt%d" % i, torch.nn.Linear(cdim, hdim))
self.elayers = elayers
self.cdim = cdim
self.subsample = subsample
self.typ = typ
self.bidir = bidir
def forward(self, xs_pad, ilens, prev_state=None):
"""RNNP forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor prev_state: batch of previous RNN states
:return: batch of hidden state sequences (B, Tmax, hdim)
:rtype: torch.Tensor
"""
logging.debug(self.__class__.__name__ + ' input lengths: ' + str(ilens))
elayer_states = []
for layer in six.moves.range(self.elayers):
xs_pack = pack_padded_sequence(xs_pad, ilens, batch_first=True, enforce_sorted=False)
rnn = getattr(self, ("birnn" if self.bidir else "rnn") + str(layer))
rnn.flatten_parameters()
if prev_state is not None and rnn.bidirectional:
prev_state = reset_backward_rnn_state(prev_state)
ys, states = rnn(xs_pack, hx=None if prev_state is None else prev_state[layer])
elayer_states.append(states)
# ys: utt list of frame x cdim x 2 (2: means bidirectional)
ys_pad, ilens = pad_packed_sequence(ys, batch_first=True)
sub = self.subsample[layer + 1]
if sub > 1:
ys_pad = ys_pad[:, ::sub]
ilens = [int(i + 1) // sub for i in ilens]
# (sum _utt frame_utt) x dim
projected = getattr(self, 'bt' + str(layer)
)(ys_pad.contiguous().view(-1, ys_pad.size(2)))
if layer == self.elayers - 1:
xs_pad = projected.view(ys_pad.size(0), ys_pad.size(1), -1)
else:
xs_pad = torch.tanh(projected.view(ys_pad.size(0), ys_pad.size(1), -1))
return xs_pad, ilens, elayer_states # x: utt list of frame x dim
class RNN(torch.nn.Module):
"""RNN module
:param int idim: dimension of inputs
:param int elayers: number of encoder layers
:param int cdim: number of rnn units (resulted in cdim * 2 if bidirectional)
:param int hdim: number of final projection units
:param float dropout: dropout rate
:param str typ: The RNN type
"""
def __init__(self, idim, elayers, cdim, hdim, dropout, typ="blstm"):
super(RNN, self).__init__()
bidir = typ[0] == "b"
self.nbrnn = torch.nn.LSTM(idim, cdim, elayers, batch_first=True,
dropout=dropout, bidirectional=bidir) if "lstm" in typ \
else torch.nn.GRU(idim, cdim, elayers, batch_first=True, dropout=dropout,
bidirectional=bidir)
if bidir:
self.l_last = torch.nn.Linear(cdim * 2, hdim)
else:
self.l_last = torch.nn.Linear(cdim, hdim)
self.typ = typ
def forward(self, xs_pad, ilens, prev_state=None):
"""RNN forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor prev_state: batch of previous RNN states
:return: batch of hidden state sequences (B, Tmax, eprojs)
:rtype: torch.Tensor
"""
logging.debug(self.__class__.__name__ + ' input lengths: ' + str(ilens))
xs_pack = pack_padded_sequence(xs_pad, ilens, batch_first=True)
self.nbrnn.flatten_parameters()
if prev_state is not None and self.nbrnn.bidirectional:
# We assume that when previous state is passed, it means that we're streaming the input
# and therefore cannot propagate backward BRNN state (otherwise it goes in the wrong direction)
prev_state = reset_backward_rnn_state(prev_state)
ys, states = self.nbrnn(xs_pack, hx=prev_state)
# ys: utt list of frame x cdim x 2 (2: means bidirectional)
ys_pad, ilens = pad_packed_sequence(ys, batch_first=True)
# (sum _utt frame_utt) x dim
projected = torch.tanh(self.l_last(
ys_pad.contiguous().view(-1, ys_pad.size(2))))
xs_pad = projected.view(ys_pad.size(0), ys_pad.size(1), -1)
return xs_pad, ilens, states # x: utt list of frame x dim
def reset_backward_rnn_state(states):
"""Sets backward BRNN states to zeroes - useful in processing of sliding windows over the inputs"""
if isinstance(states, (list, tuple)):
for state in states:
state[1::2] = 0.
else:
states[1::2] = 0.
return states
class VGG2L(torch.nn.Module):
"""VGG-like module
:param int in_channel: number of input channels
"""
def __init__(self, in_channel=1, downsample=True):
super(VGG2L, self).__init__()
# CNN layer (VGG motivated)
self.conv1_1 = torch.nn.Conv2d(in_channel, 64, 3, stride=1, padding=1)
self.conv1_2 = torch.nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv2_1 = torch.nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.conv2_2 = torch.nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.in_channel = in_channel
self.downsample = downsample
if downsample:
self.stride = 2
else:
self.stride = 1
def forward(self, xs_pad, ilens, **kwargs):
"""VGG2L forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:return: batch of padded hidden state sequences (B, Tmax // 4, 128 * D // 4) if downsample
:rtype: torch.Tensor
"""
logging.debug(self.__class__.__name__ + ' input lengths: ' + str(ilens))
# x: utt x frame x dim
# xs_pad = F.pad_sequence(xs_pad)
# x: utt x 1 (input channel num) x frame x dim
xs_pad = xs_pad.view(xs_pad.size(0), xs_pad.size(1), self.in_channel,
xs_pad.size(2) // self.in_channel).transpose(1, 2)
# NOTE: max_pool1d ?
xs_pad = F.relu(self.conv1_1(xs_pad))
xs_pad = F.relu(self.conv1_2(xs_pad))
if self.downsample:
xs_pad = F.max_pool2d(xs_pad, 2, stride=self.stride, ceil_mode=True)
xs_pad = F.relu(self.conv2_1(xs_pad))
xs_pad = F.relu(self.conv2_2(xs_pad))
if self.downsample:
xs_pad = F.max_pool2d(xs_pad, 2, stride=self.stride, ceil_mode=True)
if torch.is_tensor(ilens):
ilens = ilens.cpu().numpy()
else:
ilens = np.array(ilens, dtype=np.float32)
if self.downsample:
ilens = np.array(np.ceil(ilens / 2), dtype=np.int64)
ilens = np.array(
np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64).tolist()
# x: utt_list of frame (remove zeropaded frames) x (input channel num x dim)
xs_pad = xs_pad.transpose(1, 2)
xs_pad = xs_pad.contiguous().view(
xs_pad.size(0), xs_pad.size(1), xs_pad.size(2) * xs_pad.size(3))
return xs_pad, ilens, None # no state in this layer
class Encoder(torch.nn.Module):
"""Encoder module
:param str etype: type of encoder network
:param int idim: number of dimensions of encoder network
:param int elayers: number of layers of encoder network
:param int eunits: number of lstm units of encoder network
:param int eprojs: number of projection units of encoder network
:param np.ndarray subsample: list of subsampling numbers
:param float dropout: dropout rate
:param int in_channel: number of input channels
"""
def __init__(self, etype, idim, elayers, eunits, eprojs, subsample, dropout, in_channel=1):
super(Encoder, self).__init__()
typ = etype.lstrip("vgg").rstrip("p")
if typ not in ['lstm', 'gru', 'blstm', 'bgru']:
logging.error("Error: need to specify an appropriate encoder architecture")
if etype.startswith("vgg"):
if etype[-1] == "p":
self.enc = torch.nn.ModuleList([VGG2L(in_channel),
RNNP(get_vgg2l_odim(idim, in_channel=in_channel), elayers, eunits,
eprojs,
subsample, dropout, typ=typ)])
logging.info('Use CNN-VGG + ' + typ.upper() + 'P for encoder')
else:
self.enc = torch.nn.ModuleList([VGG2L(in_channel),
RNN(get_vgg2l_odim(idim, in_channel=in_channel), elayers, eunits,
eprojs,
dropout, typ=typ)])
logging.info('Use CNN-VGG + ' + typ.upper() + ' for encoder')
else:
if etype[-1] == "p":
self.enc = torch.nn.ModuleList(
[RNNP(idim, elayers, eunits, eprojs, subsample, dropout, typ=typ)])
logging.info(typ.upper() + ' with every-layer projection for encoder')
else:
self.enc = torch.nn.ModuleList([RNN(idim, elayers, eunits, eprojs, dropout, typ=typ)])
logging.info(typ.upper() + ' without projection for encoder')
def forward(self, xs_pad, ilens, prev_states=None):
"""Encoder forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor prev_state: batch of previous encoder hidden states (?, ...)
:return: batch of hidden state sequences (B, Tmax, eprojs)
:rtype: torch.Tensor
"""
if prev_states is None:
prev_states = [None] * len(self.enc)
assert len(prev_states) == len(self.enc)
current_states = []
for module, prev_state in zip(self.enc, prev_states):
xs_pad, ilens, states = module(xs_pad, ilens, prev_state=prev_state)
current_states.append(states)
# make mask to remove bias value in padded part
mask = to_device(self, make_pad_mask(ilens).unsqueeze(-1))
return xs_pad.masked_fill(mask, 0.0), ilens, current_states
def encoder_for(args, idim, subsample):
"""Instantiates an encoder module given the program arguments
:param Namespace args: The arguments
:param int or List of integer idim: dimension of input, e.g. 83, or
List of dimensions of inputs, e.g. [83,83]
:param List or List of List subsample: subsample factors, e.g. [1,2,2,1,1], or
List of subsample factors of each encoder. e.g. [[1,2,2,1,1], [1,2,2,1,1]]
:rtype torch.nn.Module
:return: The encoder module
"""
num_encs = getattr(args, "num_encs", 1) # use getattr to keep compatibility
if num_encs == 1:
# compatible with single encoder asr mode
return Encoder(args.etype, idim, args.elayers, args.eunits, args.eprojs, subsample, args.dropout_rate)
elif num_encs >= 1:
enc_list = torch.nn.ModuleList()
for idx in range(num_encs):
enc = Encoder(args.etype[idx], idim[idx], args.elayers[idx], args.eunits[idx], args.eprojs, subsample[idx],
args.dropout_rate[idx])
enc_list.append(enc)
return enc_list
else:
raise ValueError("Number of encoders needs to be more than one. {}".format(num_encs)) | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoders.py | 0.902528 | 0.467028 | encoders.py | pypi |
import copy
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import torch
from torch_complex.tensor import ComplexTensor
from s3prl_vc.upstream.ppg_sxliu.log_mel import LogMel
from s3prl_vc.upstream.ppg_sxliu.stft import Stft
class DefaultFrontend(torch.nn.Module):
"""Conventional frontend structure for ASR
Stft -> WPE -> MVDR-Beamformer -> Power-spec -> Mel-Fbank -> CMVN
"""
def __init__(
self,
fs: Union[int, str] = 16000,
n_fft: int = 1024,
win_length: int = 800,
hop_length: int = 160,
center: bool = True,
pad_mode: str = "reflect",
normalized: bool = False,
onesided: bool = True,
n_mels: int = 80,
        fmin: Optional[int] = None,
        fmax: Optional[int] = None,
htk: bool = False,
norm=1,
        frontend_conf: Optional[dict] = None,  # originally get_default_kwargs(Frontend)
kaldi_padding_mode=False,
downsample_rate: int = 1,
):
super().__init__()
if isinstance(fs, str):
fs = int(fs)
self.downsample_rate = downsample_rate
# Deepcopy (In general, dict shouldn't be used as default arg)
frontend_conf = copy.deepcopy(frontend_conf)
self.stft = Stft(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=center,
pad_mode=pad_mode,
normalized=normalized,
onesided=onesided,
kaldi_padding_mode=kaldi_padding_mode,
)
        if frontend_conf is not None:
            # NOTE: ``Frontend`` (the speech-enhancement module) is not imported
            # in this trimmed file, so frontend_conf must remain None unless that
            # class is made available here.
            self.frontend = Frontend(idim=n_fft // 2 + 1, **frontend_conf)
else:
self.frontend = None
self.logmel = LogMel(
fs=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax, htk=htk, norm=norm,
)
self.n_mels = n_mels
def output_size(self) -> int:
return self.n_mels
def forward(
self, input: torch.Tensor, input_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
# 1. Domain-conversion: e.g. Stft: time -> time-freq
input_stft, feats_lens = self.stft(input, input_lengths)
assert input_stft.dim() >= 4, input_stft.shape
# "2" refers to the real/imag parts of Complex
assert input_stft.shape[-1] == 2, input_stft.shape
# Change torch.Tensor to ComplexTensor
# input_stft: (..., F, 2) -> (..., F)
input_stft = ComplexTensor(input_stft[..., 0], input_stft[..., 1])
# 2. [Option] Speech enhancement
if self.frontend is not None:
assert isinstance(input_stft, ComplexTensor), type(input_stft)
# input_stft: (Batch, Length, [Channel], Freq)
input_stft, _, mask = self.frontend(input_stft, feats_lens)
# 3. [Multi channel case]: Select a channel
if input_stft.dim() == 4:
# h: (B, T, C, F) -> h: (B, T, F)
if self.training:
# Select 1ch randomly
ch = np.random.randint(input_stft.size(2))
input_stft = input_stft[:, :, ch, :]
else:
# Use the first channel
input_stft = input_stft[:, :, 0, :]
# 4. STFT -> Power spectrum
# h: ComplexTensor(B, T, F) -> torch.Tensor(B, T, F)
input_power = input_stft.real ** 2 + input_stft.imag ** 2
# 5. Feature transform e.g. Stft -> Log-Mel-Fbank
# input_power: (Batch, [Channel,] Length, Freq)
# -> input_feats: (Batch, Length, Dim)
input_feats, _ = self.logmel(input_power, feats_lens)
# NOTE(sx): pad
max_len = input_feats.size(1)
if self.downsample_rate > 1 and max_len % self.downsample_rate != 0:
padding = self.downsample_rate - max_len % self.downsample_rate
# print("Logmel: ", input_feats.size())
input_feats = torch.nn.functional.pad(input_feats, (0, 0, 0, padding),
"constant", 0)
# print("Logmel(after padding): ",input_feats.size())
feats_lens[torch.argmax(feats_lens)] = max_len + padding
return input_feats, feats_lens | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/frontend.py | 0.87312 | 0.298351 | frontend.py | pypi |
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Common functions for ASR."""
import argparse
import editdistance
import json
import logging
import numpy as np
import six
import sys
from itertools import groupby
def end_detect(ended_hyps, i, M=3, D_end=np.log(1 * np.exp(-10))):
"""End detection.
    Described in Eq. (50) of S. Watanabe et al.,
"Hybrid CTC/Attention Architecture for End-to-End Speech Recognition"
:param ended_hyps:
:param i:
:param M:
:param D_end:
:return:
"""
if len(ended_hyps) == 0:
return False
count = 0
best_hyp = sorted(ended_hyps, key=lambda x: x['score'], reverse=True)[0]
for m in six.moves.range(M):
        # get ended_hyps whose length is i - m
hyp_length = i - m
hyps_same_length = [x for x in ended_hyps if len(x['yseq']) == hyp_length]
if len(hyps_same_length) > 0:
best_hyp_same_length = sorted(hyps_same_length, key=lambda x: x['score'], reverse=True)[0]
if best_hyp_same_length['score'] - best_hyp['score'] < D_end:
count += 1
if count == M:
return True
else:
return False
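# In words: with the default D_end = log(exp(-10)) = -10, decoding is declared
# finished once, for each of the M most recent hypothesis lengths (i, i-1, ...,
# i-M+1), the best ended hypothesis of that length scores more than 10 below
# the overall best ended hypothesis, i.e. longer candidates stopped improving.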
# TODO(takaaki-hori): add different smoothing methods
def label_smoothing_dist(odim, lsm_type, transcript=None, blank=0):
"""Obtain label distribution for loss smoothing.
:param odim:
:param lsm_type:
:param blank:
:param transcript:
:return:
"""
if transcript is not None:
with open(transcript, 'rb') as f:
trans_json = json.load(f)['utts']
if lsm_type == 'unigram':
assert transcript is not None, 'transcript is required for %s label smoothing' % lsm_type
labelcount = np.zeros(odim)
for k, v in trans_json.items():
ids = np.array([int(n) for n in v['output'][0]['tokenid'].split()])
            # to avoid an error when there is no text in an utterance
if len(ids) > 0:
labelcount[ids] += 1
            # NOTE: len(transcript) is the length of the file path string; the
            # intended count is presumably the number of utterances, len(trans_json).
            labelcount[odim - 1] = len(transcript)  # count <eos>
labelcount[labelcount == 0] = 1 # flooring
labelcount[blank] = 0 # remove counts for blank
labeldist = labelcount.astype(np.float32) / np.sum(labelcount)
else:
logging.error(
"Error: unexpected label smoothing type: %s" % lsm_type)
sys.exit()
return labeldist
def get_vgg2l_odim(idim, in_channel=3, out_channel=128, downsample=True):
"""Return the output size of the VGG frontend.
:param in_channel: input channel size
:param out_channel: output channel size
:return: output size
:rtype int
"""
idim = idim / in_channel
if downsample:
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 1st max pooling
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 2nd max pooling
    return int(idim) * out_channel  # number of channels
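# Worked example (illustrative): idim=83 mel bins, in_channel=1, out_channel=128
# -> 83 / 1 = 83; ceil(83 / 2) = 42 after the 1st pooling; ceil(42 / 2) = 21
# after the 2nd; output size = 21 * 128 = 2688.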
class ErrorCalculator(object):
"""Calculate CER and WER for E2E_ASR and CTC models during training.
    :param list char_list: list of characters / tokens
    :param str sym_space: space symbol
    :param str sym_blank: blank symbol
    :param bool report_cer: whether to report CER
    :param bool report_wer: whether to report WER
    :param str trans_type: transcription type ("char" joins tokens without spaces)
    """
def __init__(self, char_list, sym_space, sym_blank, report_cer=False, report_wer=False,
trans_type="char"):
"""Construct an ErrorCalculator object."""
super(ErrorCalculator, self).__init__()
self.report_cer = report_cer
self.report_wer = report_wer
self.trans_type = trans_type
self.char_list = char_list
self.space = sym_space
self.blank = sym_blank
self.idx_blank = self.char_list.index(self.blank)
if self.space in self.char_list:
self.idx_space = self.char_list.index(self.space)
else:
self.idx_space = None
def __call__(self, ys_hat, ys_pad, is_ctc=False):
"""Calculate sentence-level WER/CER score.
:param torch.Tensor ys_hat: prediction (batch, seqlen)
:param torch.Tensor ys_pad: reference (batch, seqlen)
:param bool is_ctc: calculate CER score for CTC
:return: sentence-level WER score
:rtype float
:return: sentence-level CER score
:rtype float
"""
cer, wer = None, None
if is_ctc:
return self.calculate_cer_ctc(ys_hat, ys_pad)
elif not self.report_cer and not self.report_wer:
return cer, wer
seqs_hat, seqs_true = self.convert_to_char(ys_hat, ys_pad)
if self.report_cer:
cer = self.calculate_cer(seqs_hat, seqs_true)
if self.report_wer:
wer = self.calculate_wer(seqs_hat, seqs_true)
return cer, wer
def calculate_cer_ctc(self, ys_hat, ys_pad):
"""Calculate sentence-level CER score for CTC.
:param torch.Tensor ys_hat: prediction (batch, seqlen)
:param torch.Tensor ys_pad: reference (batch, seqlen)
:return: average sentence-level CER score
:rtype float
"""
cers, char_ref_lens = [], []
for i, y in enumerate(ys_hat):
y_hat = [x[0] for x in groupby(y)]
y_true = ys_pad[i]
seq_hat, seq_true = [], []
for idx in y_hat:
idx = int(idx)
if idx != -1 and idx != self.idx_blank and idx != self.idx_space:
seq_hat.append(self.char_list[int(idx)])
for idx in y_true:
idx = int(idx)
if idx != -1 and idx != self.idx_blank and idx != self.idx_space:
seq_true.append(self.char_list[int(idx)])
if self.trans_type == "char":
hyp_chars = "".join(seq_hat)
ref_chars = "".join(seq_true)
else:
hyp_chars = " ".join(seq_hat)
ref_chars = " ".join(seq_true)
if len(ref_chars) > 0:
cers.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
cer_ctc = float(sum(cers)) / sum(char_ref_lens) if cers else None
return cer_ctc
def convert_to_char(self, ys_hat, ys_pad):
"""Convert index to character.
:param torch.Tensor seqs_hat: prediction (batch, seqlen)
:param torch.Tensor seqs_true: reference (batch, seqlen)
:return: token list of prediction
:rtype list
:return: token list of reference
:rtype list
"""
seqs_hat, seqs_true = [], []
for i, y_hat in enumerate(ys_hat):
y_true = ys_pad[i]
eos_true = np.where(y_true == -1)[0]
eos_true = eos_true[0] if len(eos_true) > 0 else len(y_true)
            # To avoid a spuriously higher WER than the one obtained from
            # decoding, the eos position from y_true is used to truncate y_hat,
            # because y_hat is not padded with -1.
seq_hat = [self.char_list[int(idx)] for idx in y_hat[:eos_true]]
seq_true = [self.char_list[int(idx)] for idx in y_true if int(idx) != -1]
# seq_hat_text = "".join(seq_hat).replace(self.space, ' ')
seq_hat_text = " ".join(seq_hat).replace(self.space, ' ')
seq_hat_text = seq_hat_text.replace(self.blank, '')
# seq_true_text = "".join(seq_true).replace(self.space, ' ')
seq_true_text = " ".join(seq_true).replace(self.space, ' ')
seqs_hat.append(seq_hat_text)
seqs_true.append(seq_true_text)
return seqs_hat, seqs_true
def calculate_cer(self, seqs_hat, seqs_true):
"""Calculate sentence-level CER score.
:param list seqs_hat: prediction
:param list seqs_true: reference
:return: average sentence-level CER score
:rtype float
"""
char_eds, char_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_chars = seq_hat_text.replace(' ', '')
ref_chars = seq_true_text.replace(' ', '')
char_eds.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
return float(sum(char_eds)) / sum(char_ref_lens)
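    # Worked example (illustrative): hyp "helo wrld" vs. ref "hello world"
    # -> with spaces removed, "helowrld" vs. "helloworld": edit distance = 2
    # (two insertions), ref length = 10, so CER = 2 / 10 = 0.2.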
def calculate_wer(self, seqs_hat, seqs_true):
"""Calculate sentence-level WER score.
:param list seqs_hat: prediction
:param list seqs_true: reference
:return: average sentence-level WER score
:rtype float
"""
word_eds, word_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_words = seq_hat_text.split()
ref_words = seq_true_text.split()
word_eds.append(editdistance.eval(hyp_words, ref_words))
word_ref_lens.append(len(ref_words))
return float(sum(word_eds)) / sum(word_ref_lens)
class ErrorCalculatorTrans(object):
"""Calculate CER and WER for transducer models.
Args:
decoder (nn.Module): decoder module
args (Namespace): argument Namespace containing options
report_cer (boolean): compute CER option
report_wer (boolean): compute WER option
"""
def __init__(self, decoder, args, report_cer=False, report_wer=False):
"""Construct an ErrorCalculator object for transducer model."""
super(ErrorCalculatorTrans, self).__init__()
self.dec = decoder
recog_args = {'beam_size': args.beam_size,
'nbest': args.nbest,
'space': args.sym_space,
'score_norm_transducer': args.score_norm_transducer}
self.recog_args = argparse.Namespace(**recog_args)
self.char_list = args.char_list
self.space = args.sym_space
self.blank = args.sym_blank
self.report_cer = args.report_cer
self.report_wer = args.report_wer
def __call__(self, hs_pad, ys_pad):
"""Calculate sentence-level WER/CER score for transducer models.
Args:
hs_pad (torch.Tensor): batch of padded input sequence (batch, T, D)
ys_pad (torch.Tensor): reference (batch, seqlen)
Returns:
(float): sentence-level CER score
(float): sentence-level WER score
"""
cer, wer = None, None
if not self.report_cer and not self.report_wer:
return cer, wer
batchsize = int(hs_pad.size(0))
batch_nbest = []
for b in six.moves.range(batchsize):
if self.recog_args.beam_size == 1:
nbest_hyps = self.dec.recognize(hs_pad[b], self.recog_args)
else:
nbest_hyps = self.dec.recognize_beam(hs_pad[b], self.recog_args)
batch_nbest.append(nbest_hyps)
ys_hat = [nbest_hyp[0]['yseq'][1:] for nbest_hyp in batch_nbest]
seqs_hat, seqs_true = self.convert_to_char(ys_hat, ys_pad.cpu())
if self.report_cer:
cer = self.calculate_cer(seqs_hat, seqs_true)
if self.report_wer:
wer = self.calculate_wer(seqs_hat, seqs_true)
return cer, wer
def convert_to_char(self, ys_hat, ys_pad):
"""Convert index to character.
Args:
ys_hat (torch.Tensor): prediction (batch, seqlen)
ys_pad (torch.Tensor): reference (batch, seqlen)
Returns:
(list): token list of prediction
(list): token list of reference
"""
seqs_hat, seqs_true = [], []
for i, y_hat in enumerate(ys_hat):
y_true = ys_pad[i]
eos_true = np.where(y_true == -1)[0]
eos_true = eos_true[0] if len(eos_true) > 0 else len(y_true)
seq_hat = [self.char_list[int(idx)] for idx in y_hat[:eos_true]]
seq_true = [self.char_list[int(idx)] for idx in y_true if int(idx) != -1]
seq_hat_text = "".join(seq_hat).replace(self.space, ' ')
seq_hat_text = seq_hat_text.replace(self.blank, '')
seq_true_text = "".join(seq_true).replace(self.space, ' ')
seqs_hat.append(seq_hat_text)
seqs_true.append(seq_true_text)
return seqs_hat, seqs_true
def calculate_cer(self, seqs_hat, seqs_true):
"""Calculate sentence-level CER score for transducer model.
Args:
seqs_hat (torch.Tensor): prediction (batch, seqlen)
seqs_true (torch.Tensor): reference (batch, seqlen)
Returns:
(float): average sentence-level CER score
"""
char_eds, char_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_chars = seq_hat_text.replace(' ', '')
ref_chars = seq_true_text.replace(' ', '')
char_eds.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
return float(sum(char_eds)) / sum(char_ref_lens)
def calculate_wer(self, seqs_hat, seqs_true):
"""Calculate sentence-level WER score for transducer model.
Args:
seqs_hat (torch.Tensor): prediction (batch, seqlen)
seqs_true (torch.Tensor): reference (batch, seqlen)
Returns:
(float): average sentence-level WER score
"""
word_eds, word_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_words = seq_hat_text.split()
ref_words = seq_true_text.split()
word_eds.append(editdistance.eval(hyp_words, ref_words))
word_ref_lens.append(len(ref_words))
return float(sum(word_eds)) / sum(word_ref_lens) | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/e2e_asr_common.py | 0.624752 | 0.417628 | e2e_asr_common.py | pypi |
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""ConvolutionModule definition."""
from torch import nn
class ConvolutionModule(nn.Module):
"""ConvolutionModule in Conformer model.
:param int channels: channels of cnn
    :param int kernel_size: kernel size of cnn
"""
def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
"""Construct an ConvolutionModule object."""
super(ConvolutionModule, self).__init__()
        # kernel_size should be an odd number for 'SAME' padding
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(
channels,
2 * channels,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.depthwise_conv = nn.Conv1d(
channels,
channels,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
groups=channels,
bias=bias,
)
self.norm = nn.BatchNorm1d(channels)
self.pointwise_conv2 = nn.Conv1d(
channels,
channels,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.activation = activation
def forward(self, x):
"""Compute convolution module.
:param torch.Tensor x: (batch, time, size)
:return torch.Tensor: convoluted `value` (batch, time, d_model)
"""
# exchange the temporal dimension and the feature dimension
x = x.transpose(1, 2)
# GLU mechanism
        x = self.pointwise_conv1(x)  # (batch, 2*channel, time)
        x = nn.functional.glu(x, dim=1)  # (batch, channel, time)
# 1D Depthwise Conv
x = self.depthwise_conv(x)
x = self.activation(self.norm(x))
x = self.pointwise_conv2(x)
return x.transpose(1, 2) | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoder/convolution.py | 0.96944 | 0.536009 | convolution.py | pypi |
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Subsampling layer definition."""
import logging
import torch
from .embedding import PositionalEncoding
class Conv2dSubsampling(torch.nn.Module):
"""Convolutional 2D subsampling (to 1/4 length or 1/2 length).
:param int idim: input dim
:param int odim: output dim
    :param float dropout_rate: dropout rate
:param torch.nn.Module pos_enc: custom position encoding layer
"""
def __init__(self, idim, odim, dropout_rate, pos_enc=None,
subsample_by_2=False,
):
"""Construct an Conv2dSubsampling object."""
super(Conv2dSubsampling, self).__init__()
self.subsample_by_2 = subsample_by_2
if subsample_by_2:
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, odim, kernel_size=5, stride=1, padding=2),
torch.nn.ReLU(),
torch.nn.Conv2d(odim, odim, kernel_size=4, stride=2, padding=1),
torch.nn.ReLU(),
)
self.out = torch.nn.Sequential(
torch.nn.Linear(odim * (idim // 2), odim),
pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),
)
else:
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, odim, kernel_size=4, stride=2, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(odim, odim, kernel_size=4, stride=2, padding=1),
torch.nn.ReLU(),
)
self.out = torch.nn.Sequential(
torch.nn.Linear(odim * (idim // 4), odim),
pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),
)
def forward(self, x, x_mask):
"""Subsample x.
:param torch.Tensor x: input tensor
:param torch.Tensor x_mask: input mask
:return: subsampled x and mask
:rtype Tuple[torch.Tensor, torch.Tensor]
"""
x = x.unsqueeze(1) # (b, c, t, f)
x = self.conv(x)
b, c, t, f = x.size()
x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
if x_mask is None:
return x, None
if self.subsample_by_2:
return x, x_mask[:, :, ::2]
else:
return x, x_mask[:, :, ::2][:, :, ::2]
def __getitem__(self, key):
"""Subsample x.
When reset_parameters() is called, if use_scaled_pos_enc is used,
return the positioning encoding.
"""
if key != -1:
raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
return self.out[key]
class Conv2dNoSubsampling(torch.nn.Module):
"""Convolutional 2D without subsampling.
:param int idim: input dim
:param int odim: output dim
    :param float dropout_rate: dropout rate
:param torch.nn.Module pos_enc: custom position encoding layer
"""
def __init__(self, idim, odim, dropout_rate, pos_enc=None):
"""Construct an Conv2dSubsampling object."""
super().__init__()
logging.info("Encoder does not do down-sample on mel-spectrogram.")
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, odim, kernel_size=5, stride=1, padding=2),
torch.nn.ReLU(),
torch.nn.Conv2d(odim, odim, kernel_size=5, stride=1, padding=2),
torch.nn.ReLU(),
)
self.out = torch.nn.Sequential(
torch.nn.Linear(odim * idim, odim),
pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),
)
def forward(self, x, x_mask):
"""Subsample x.
:param torch.Tensor x: input tensor
:param torch.Tensor x_mask: input mask
:return: subsampled x and mask
:rtype Tuple[torch.Tensor, torch.Tensor]
"""
x = x.unsqueeze(1) # (b, c, t, f)
x = self.conv(x)
b, c, t, f = x.size()
x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
if x_mask is None:
return x, None
return x, x_mask
def __getitem__(self, key):
"""Subsample x.
When reset_parameters() is called, if use_scaled_pos_enc is used,
return the positioning encoding.
"""
if key != -1:
raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
return self.out[key]
class Conv2dSubsampling6(torch.nn.Module):
"""Convolutional 2D subsampling (to 1/6 length).
:param int idim: input dim
:param int odim: output dim
    :param float dropout_rate: dropout rate
"""
def __init__(self, idim, odim, dropout_rate):
"""Construct an Conv2dSubsampling object."""
super(Conv2dSubsampling6, self).__init__()
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, odim, 3, 2),
torch.nn.ReLU(),
torch.nn.Conv2d(odim, odim, 5, 3),
torch.nn.ReLU(),
)
self.out = torch.nn.Sequential(
torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim),
PositionalEncoding(odim, dropout_rate),
)
def forward(self, x, x_mask):
"""Subsample x.
:param torch.Tensor x: input tensor
:param torch.Tensor x_mask: input mask
:return: subsampled x and mask
:rtype Tuple[torch.Tensor, torch.Tensor]
"""
x = x.unsqueeze(1) # (b, c, t, f)
x = self.conv(x)
b, c, t, f = x.size()
x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
if x_mask is None:
return x, None
return x, x_mask[:, :, :-2:2][:, :, :-4:3]
class Conv2dSubsampling8(torch.nn.Module):
"""Convolutional 2D subsampling (to 1/8 length).
:param int idim: input dim
:param int odim: output dim
    :param float dropout_rate: dropout rate
"""
def __init__(self, idim, odim, dropout_rate):
"""Construct an Conv2dSubsampling object."""
super(Conv2dSubsampling8, self).__init__()
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, odim, 3, 2),
torch.nn.ReLU(),
torch.nn.Conv2d(odim, odim, 3, 2),
torch.nn.ReLU(),
torch.nn.Conv2d(odim, odim, 3, 2),
torch.nn.ReLU(),
)
self.out = torch.nn.Sequential(
torch.nn.Linear(odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim),
PositionalEncoding(odim, dropout_rate),
)
def forward(self, x, x_mask):
"""Subsample x.
:param torch.Tensor x: input tensor
:param torch.Tensor x_mask: input mask
:return: subsampled x and mask
:rtype Tuple[torch.Tensor, torch.Tensor]
"""
x = x.unsqueeze(1) # (b, c, t, f)
x = self.conv(x)
b, c, t, f = x.size()
x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
if x_mask is None:
return x, None
return x, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2] | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoder/subsampling.py | 0.967387 | 0.618593 | subsampling.py | pypi |
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder definition."""
import logging
import torch
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from espnet.nets.pytorch_backend.conformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.nets_utils import get_activation
from espnet.nets.pytorch_backend.transducer.vgg import VGG2L
from espnet.nets.pytorch_backend.transformer.attention import (
MultiHeadedAttention, # noqa: H301
RelPositionMultiHeadedAttention, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding, # noqa: H301
ScaledPositionalEncoding, # noqa: H301
RelPositionalEncoding, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import Conv1dLinear
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import MultiLayeredConv1d
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling
class Encoder(torch.nn.Module):
"""Conformer encoder module.
:param int idim: input dim
    :param int attention_dim: dimension of attention
:param int attention_heads: the number of heads of multi head attention
:param int linear_units: the number of units of position-wise feed forward
:param int num_blocks: the number of decoder blocks
:param float dropout_rate: dropout rate
:param float attention_dropout_rate: dropout rate in attention
:param float positional_dropout_rate: dropout rate after adding positional encoding
:param str or torch.nn.Module input_layer: input layer type
:param bool normalize_before: whether to use layer_norm before the first block
:param bool concat_after: whether to concat attention layer's input and output
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied. i.e. x -> x + att(x)
    :param str positionwise_layer_type: "linear", "conv1d", or "conv1d-linear"
:param int positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
    :param str pos_enc_layer_type: encoder positional encoding layer type
    :param str selfattention_layer_type: encoder self-attention layer type
:param str activation_type: encoder activation function type
:param bool macaron_style: whether to use macaron style for positionwise layer
:param bool use_cnn_module: whether to use convolution module
    :param int cnn_module_kernel: kernel size of convolution module
:param int padding_idx: padding_idx for input_layer=embed
"""
def __init__(
self,
idim,
attention_dim=256,
attention_heads=4,
linear_units=2048,
num_blocks=6,
dropout_rate=0.1,
positional_dropout_rate=0.1,
attention_dropout_rate=0.0,
input_layer="conv2d",
normalize_before=True,
concat_after=False,
positionwise_layer_type="linear",
positionwise_conv_kernel_size=1,
macaron_style=False,
pos_enc_layer_type="abs_pos",
selfattention_layer_type="selfattn",
activation_type="swish",
use_cnn_module=False,
cnn_module_kernel=31,
padding_idx=-1,
):
"""Construct an Encoder object."""
super(Encoder, self).__init__()
activation = get_activation(activation_type)
if pos_enc_layer_type == "abs_pos":
pos_enc_class = PositionalEncoding
elif pos_enc_layer_type == "scaled_abs_pos":
pos_enc_class = ScaledPositionalEncoding
elif pos_enc_layer_type == "rel_pos":
assert selfattention_layer_type == "rel_selfattn"
pos_enc_class = RelPositionalEncoding
else:
raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
if input_layer == "linear":
self.embed = torch.nn.Sequential(
torch.nn.Linear(idim, attention_dim),
torch.nn.LayerNorm(attention_dim),
torch.nn.Dropout(dropout_rate),
pos_enc_class(attention_dim, positional_dropout_rate),
)
elif input_layer == "conv2d":
self.embed = Conv2dSubsampling(
idim,
attention_dim,
dropout_rate,
pos_enc_class(attention_dim, positional_dropout_rate),
)
elif input_layer == "vgg2l":
self.embed = VGG2L(idim, attention_dim)
elif input_layer == "embed":
self.embed = torch.nn.Sequential(
torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx),
pos_enc_class(attention_dim, positional_dropout_rate),
)
elif isinstance(input_layer, torch.nn.Module):
self.embed = torch.nn.Sequential(
input_layer,
pos_enc_class(attention_dim, positional_dropout_rate),
)
elif input_layer is None:
self.embed = torch.nn.Sequential(
pos_enc_class(attention_dim, positional_dropout_rate)
)
else:
raise ValueError("unknown input_layer: " + input_layer)
self.normalize_before = normalize_before
if positionwise_layer_type == "linear":
positionwise_layer = PositionwiseFeedForward
positionwise_layer_args = (
attention_dim,
linear_units,
dropout_rate,
activation,
)
elif positionwise_layer_type == "conv1d":
positionwise_layer = MultiLayeredConv1d
positionwise_layer_args = (
attention_dim,
linear_units,
positionwise_conv_kernel_size,
dropout_rate,
)
elif positionwise_layer_type == "conv1d-linear":
positionwise_layer = Conv1dLinear
positionwise_layer_args = (
attention_dim,
linear_units,
positionwise_conv_kernel_size,
dropout_rate,
)
else:
raise NotImplementedError("Support only linear or conv1d.")
if selfattention_layer_type == "selfattn":
logging.info("encoder self-attention layer type = self-attention")
encoder_selfattn_layer = MultiHeadedAttention
encoder_selfattn_layer_args = (
attention_heads,
attention_dim,
attention_dropout_rate,
)
elif selfattention_layer_type == "rel_selfattn":
assert pos_enc_layer_type == "rel_pos"
encoder_selfattn_layer = RelPositionMultiHeadedAttention
encoder_selfattn_layer_args = (
attention_heads,
attention_dim,
attention_dropout_rate,
)
else:
raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type)
convolution_layer = ConvolutionModule
convolution_layer_args = (attention_dim, cnn_module_kernel, activation)
self.encoders = repeat(
num_blocks,
lambda lnum: EncoderLayer(
attention_dim,
encoder_selfattn_layer(*encoder_selfattn_layer_args),
positionwise_layer(*positionwise_layer_args),
positionwise_layer(*positionwise_layer_args) if macaron_style else None,
convolution_layer(*convolution_layer_args) if use_cnn_module else None,
dropout_rate,
normalize_before,
concat_after,
),
)
if self.normalize_before:
self.after_norm = LayerNorm(attention_dim)
def forward(self, xs, masks):
"""Encode input sequence.
:param torch.Tensor xs: input tensor
:param torch.Tensor masks: input mask
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
xs, masks = self.embed(xs, masks)
else:
xs = self.embed(xs)
xs, masks = self.encoders(xs, masks)
if isinstance(xs, tuple):
xs = xs[0]
if self.normalize_before:
xs = self.after_norm(xs)
return xs, masks | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoder/encoder.py | 0.959705 | 0.428652 | encoder.py | pypi |
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Positonal Encoding Module."""
import math
import torch
def _pre_hook(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
"""Perform pre-hook in load_state_dict for backward compatibility.
Note:
We saved self.pe until v.0.5.2 but we have omitted it later.
Therefore, we remove the item "pe" from `state_dict` for backward compatibility.
"""
k = prefix + "pe"
if k in state_dict:
state_dict.pop(k)
class PositionalEncoding(torch.nn.Module):
"""Positional encoding.
:param int d_model: embedding dim
:param float dropout_rate: dropout rate
:param int max_len: maximum input length
:param reverse: whether to reverse the input position
"""
def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
"""Construct an PositionalEncoding object."""
super(PositionalEncoding, self).__init__()
self.d_model = d_model
self.reverse = reverse
self.xscale = math.sqrt(self.d_model)
self.dropout = torch.nn.Dropout(p=dropout_rate)
self.pe = None
self.extend_pe(torch.tensor(0.0).expand(1, max_len))
self._register_load_state_dict_pre_hook(_pre_hook)
def extend_pe(self, x):
"""Reset the positional encodings."""
if self.pe is not None:
if self.pe.size(1) >= x.size(1):
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
pe = torch.zeros(x.size(1), self.d_model)
if self.reverse:
position = torch.arange(
x.size(1) - 1, -1, -1.0, dtype=torch.float32
).unsqueeze(1)
else:
position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, self.d_model, 2, dtype=torch.float32)
* -(math.log(10000.0) / self.d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.pe = pe.to(device=x.device, dtype=x.dtype)
def forward(self, x: torch.Tensor):
"""Add positional encoding.
Args:
x (torch.Tensor): Input. Its shape is (batch, time, ...)
Returns:
torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
"""
self.extend_pe(x)
x = x * self.xscale + self.pe[:, : x.size(1)]
return self.dropout(x)
class ScaledPositionalEncoding(PositionalEncoding):
"""Scaled positional encoding module.
See also: Sec. 3.2 https://arxiv.org/pdf/1809.08895.pdf
"""
def __init__(self, d_model, dropout_rate, max_len=5000):
"""Initialize class.
:param int d_model: embedding dim
:param float dropout_rate: dropout rate
:param int max_len: maximum input length
"""
super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
self.alpha = torch.nn.Parameter(torch.tensor(1.0))
def reset_parameters(self):
"""Reset parameters."""
self.alpha.data = torch.tensor(1.0)
def forward(self, x):
"""Add positional encoding.
Args:
x (torch.Tensor): Input. Its shape is (batch, time, ...)
Returns:
torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
"""
self.extend_pe(x)
x = x + self.alpha * self.pe[:, : x.size(1)]
return self.dropout(x)
class RelPositionalEncoding(PositionalEncoding):
"""Relitive positional encoding module.
See : Appendix B in https://arxiv.org/abs/1901.02860
:param int d_model: embedding dim
:param float dropout_rate: dropout rate
:param int max_len: maximum input length
"""
def __init__(self, d_model, dropout_rate, max_len=5000):
"""Initialize class.
:param int d_model: embedding dim
:param float dropout_rate: dropout rate
:param int max_len: maximum input length
"""
super().__init__(d_model, dropout_rate, max_len, reverse=True)
def forward(self, x):
"""Compute positional encoding.
Args:
x (torch.Tensor): Input. Its shape is (batch, time, ...)
Returns:
torch.Tensor: x. Its shape is (batch, time, ...)
torch.Tensor: pos_emb. Its shape is (1, time, ...)
"""
self.extend_pe(x)
x = x * self.xscale
pos_emb = self.pe[:, : x.size(1)]
return self.dropout(x), self.dropout(pos_emb) | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoder/embedding.py | 0.96569 | 0.422564 | embedding.py | pypi |
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Layer modules for FFT block in FastSpeech (Feed-forward Transformer)."""
import torch
class MultiLayeredConv1d(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed
    to replace the position-wise feed-forward network
    in a Transformer block, which is introduced in
`FastSpeech: Fast, Robust and Controllable Text to Speech`_.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize MultiLayeredConv1d module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(MultiLayeredConv1d, self).__init__()
self.w_1 = torch.nn.Conv1d(
in_chans,
hidden_chans,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
)
self.w_2 = torch.nn.Conv1d(
hidden_chans,
in_chans,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, ..., in_chans).
Returns:
            Tensor: Batch of output tensors (B, ..., in_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
class Conv1dLinear(torch.nn.Module):
"""Conv1D + Linear for Transformer block.
A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize Conv1dLinear module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(Conv1dLinear, self).__init__()
self.w_1 = torch.nn.Conv1d(
in_chans,
hidden_chans,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
)
self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, ..., in_chans).
Returns:
            Tensor: Batch of output tensors (B, ..., in_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x)) | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoder/multi_layer_conv.py | 0.968096 | 0.636268 | multi_layer_conv.py | pypi |
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Multi-Head Attention layer definition."""
import math
import numpy
import torch
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an MultiHeadedAttention object."""
super(MultiHeadedAttention, self).__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.attn = None
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query, key, value):
"""Transform query, key and value.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:return torch.Tensor transformed query, key and value
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2) # (batch, head, time1, d_k)
k = k.transpose(1, 2) # (batch, head, time2, d_k)
v = v.transpose(1, 2) # (batch, head, time2, d_k)
return q, k, v
def forward_attention(self, value, scores, mask):
"""Compute attention context vector.
:param torch.Tensor value: (batch, head, time2, size)
:param torch.Tensor scores: (batch, head, time1, time2)
:param torch.Tensor mask: (batch, 1, time2) or (batch, time1, time2)
:return torch.Tensor transformed `value` (batch, time1, d_model)
weighted by the attention score (batch, time1, time2)
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
min_value = float(
numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
)
scores = scores.masked_fill(mask, min_value)
self.attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self, query, key, value, mask):
"""Compute 'Scaled Dot Product Attention'.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor mask: (batch, 1, time2) or (batch, time1, time2)
:return torch.Tensor: attention output (batch, time1, d_model)
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
        # linear transformation for positional encoding
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable biases are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu=False):
"""Compute relative positinal encoding.
:param torch.Tensor x: (batch, time, size)
:param bool zero_triu: return the lower triangular part of the matrix
"""
zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, query, key, value, pos_emb, mask):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor pos_emb: (batch, time1, size)
:param torch.Tensor mask: (batch, time1, time2)
:return torch.Tensor: attention output (batch, time1, d_model)
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
# compute matrix b and matrix d
# (batch, head, time1, time2)
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(
self.d_k
) # (batch, head, time1, time2)
return self.forward_attention(v, scores, mask) | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoder/attention.py | 0.966898 | 0.658445 | attention.py | pypi |
import torch
class VGG2L(torch.nn.Module):
"""VGG2L module for transformer-transducer encoder."""
def __init__(self, idim, odim):
"""Construct a VGG2L object.
Args:
idim (int): dimension of inputs
odim (int): dimension of outputs
"""
super(VGG2L, self).__init__()
self.vgg2l = torch.nn.Sequential(
torch.nn.Conv2d(1, 64, 3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(64, 64, 3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d((3, 2)),
torch.nn.Conv2d(64, 128, 3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d((2, 2)),
)
self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim)
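    # Net effect of the two max-pooling layers in __init__: the time axis is
    # subsampled by 3 * 2 = 6 and the frequency axis by 2 * 2 = 4, which is why
    # the output projection sees 128 * ((idim // 2) // 2) features per frame.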
def forward(self, x, x_mask):
"""VGG2L forward for x.
Args:
            x (torch.Tensor): input tensor (B, T, idim)
x_mask (torch.Tensor): (B, 1, T)
Returns:
            x (torch.Tensor): output tensor (B, sub(T), attention_dim)
x_mask (torch.Tensor): (B, 1, sub(T))
"""
x = x.unsqueeze(1)
x = self.vgg2l(x)
b, c, t, f = x.size()
x = self.output(x.transpose(1, 2).contiguous().view(b, t, c * f))
if x_mask is None:
return x, None
else:
x_mask = self.create_new_mask(x_mask, x)
return x, x_mask
def create_new_mask(self, x_mask, x):
"""Create a subsampled version of x_mask.
Args:
x_mask (torch.Tensor): (B, 1, T)
x (torch.Tensor): (B, sub(T), attention_dim)
Returns:
x_mask (torch.Tensor): (B, 1, sub(T))
"""
x_t1 = x_mask.size(2) - (x_mask.size(2) % 3)
x_mask = x_mask[:, :, :x_t1][:, :, ::3]
x_t2 = x_mask.size(2) - (x_mask.size(2) % 2)
x_mask = x_mask[:, :, :x_t2][:, :, ::2]
return x_mask | /s3prl_vc-0.2.0.tar.gz/s3prl_vc-0.2.0/s3prl_vc/upstream/ppg_sxliu/encoder/vgg.py | 0.965053 | 0.636014 | vgg.py | pypi |
<p align="center">
<img src="https://raw.githubusercontent.com/s3prl/s3prl/main/file/S3PRL-logo.png" width="900"/>
<br>
<br>
<a href="./LICENSE.txt"><img alt="Apache License 2.0" src="https://raw.githubusercontent.com/s3prl/s3prl/main/file/license.svg" /></a>
<a href="https://creativecommons.org/licenses/by-nc/4.0/"><img alt="CC_BY_NC License" src="https://img.shields.io/badge/License-CC%20BY--NC%204.0-lightgrey.svg" /></a>
<a href="https://github.com/s3prl/s3prl/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/s3prl/s3prl/actions/workflows/ci.yml/badge.svg?branch=main&event=push"></a>
<a href="#development-pattern-for-contributors"><img alt="Codecov" src="https://img.shields.io/badge/contributions-welcome-brightgreen.svg"></a>
<a href="https://github.com/s3prl/s3prl/issues"><img alt="Bitbucket open issues" src="https://img.shields.io/github/issues/s3prl/s3prl"></a>
</p>
## Notice for pull requests
Please first discuss your feature request with us on the issue page before implementing the actual pull request, so that we can agree on how to achieve the functionality. If the details were not discussed beforehand, it is very likely that we will not accept the pull request, due to the difficulty of maintenance.
## Environment compatibilities [](https://github.com/s3prl/s3prl/actions/workflows/ci.yml)
We support the following environments. The test cases are run with **[tox](./tox.ini)** locally and on **[GitHub Actions](.github/workflows/ci.yml)**:
| Env | versions |
| --- | --- |
| os | `ubuntu-18.04`, `ubuntu-20.04` |
| python | `3.7`, `3.8`, `3.9`, `3.10` |
| pytorch | `1.8.1`, `1.9.1`, `1.10.2`, `1.11.0`, `1.12.1` |
## What's New
* *Mar 2022*: Introduce [**SUPERB-SG**](https://arxiv.org/abs/2203.06849), see [Speech Translation](./s3prl/downstream/speech_translation), [Out-of-domain ASR](./s3prl/downstream/ctc/), [Voice Conversion](./s3prl/downstream/a2o-vc-vcc2020/), [Speech Separation](./s3prl/downstream/separation_stft/) and [Speech Enhancement](./s3prl/downstream/enhancement_stft/) for more info.
* *Nov 2021*: Introduce [**S3PRL-VC**](https://arxiv.org/abs/2110.06280), see [Any-to-one](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream/a2o-vc-vcc2020) and [Any-to-any](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream/a2a-vc-vctk) docs for more info.
* *Oct 2021*: Support [**DistilHuBERT**](https://arxiv.org/abs/2110.01900), see [docs](./s3prl/upstream/distiller/README.md) for more info
* *Sep 2021:* We host a *challenge* in [*AAAI workshop: The 2nd Self-supervised Learning for Audio and Speech Processing*](https://aaai-sas-2022.github.io/)! See [**SUPERB official site**](https://superbbenchmark.org/) for the challenge details and the [**SUPERB documentation**](./s3prl/downstream/docs/superb.md) in this toolkit!
* *Aug 2021:* We now have a tutorial that introduces our toolkit, you can **[watch it on Youtube](https://youtu.be/PkMFnS6cjAc)**!
* *July 2021:* We are now working on packaging s3prl and reorganizing the file structure in **v0.3**. Please consider using the stable **v0.2.0** for now. We will test and release **v0.3** before August.
* *June 2021:* Support [**SUPERB:** **S**peech processing **U**niversal **PER**formance **B**enchmark](https://arxiv.org/abs/2105.01051), submitted to Interspeech 2021. Use the tag **superb-interspeech2021** or **v0.2.0**.
* *June 2021:* Support extracting multiple hidden states from the SSL pretrained models
* *Jan 2021:* Readme updated with detailed instructions on how to use our latest version!
* *Dec 2020:* We are migrating to a newer version for a more general, flexible, and scalable codebase. See the introduction below for more information! The legacy version can be accessed via the tag **v0.1.0**.
## Introduction and Usages
This is an open source toolkit called **s3prl**, which stands for **S**elf-**S**upervised **S**peech **P**re-training and **R**epresentation **L**earning.
Self-supervised speech pre-trained models are called **upstream** in this toolkit, and are utilized in various **downstream** tasks.
The toolkit has **three major usages**:
### Pretrain
- Pretrain upstream models, including Mockingjay, Audio ALBERT and TERA.
- Document: [**pretrain/README.md**](./s3prl/pretrain/README.md)
### Upstream
- Easily load most of the existing upstream models with pretrained weights in a unified I/O interface.
- Pretrained models are registered through **torch.hub**, which means you can use these models in your own project by one-line plug-and-play without depending on this toolkit's coding style.
- Document: [**upstream/README.md**](./s3prl/upstream/README.md)
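A minimal plug-and-play sketch (the upstream name, input format, and output key below are illustrative; check the upstream documentation for the exact interface):

```python
import torch

# Load a registered upstream model by name through torch.hub
model = torch.hub.load("s3prl/s3prl", "wav2vec2")
# Inputs are a list of variable-length 16 kHz waveform tensors
wavs = [torch.randn(160000) for _ in range(4)]
with torch.no_grad():
    # Returns a dict of representations, e.g. layer-wise hidden states
    states = model(wavs)["hidden_states"]
```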
### Downstream
- Utilize upstream models in lots of downstream tasks
- Benchmark upstream models with [**SUPERB Benchmark**](./s3prl/downstream/docs/superb.md)
- Document: [**downstream/README.md**](./s3prl/downstream/README.md)
Below is an **intuitive illustration** on how this toolkit may help you:
\
\
<img src="https://raw.githubusercontent.com/s3prl/s3prl/main/file/S3PRL-interface.png" width="900"/>
\
\
Feel free to use or modify our toolkit in your research. Here is a [list of papers using our toolkit](#used-by). Any question, bug report or improvement suggestion is welcome through [opening up a new issue](https://github.com/s3prl/s3prl/issues).
If you find this toolkit helpful to your research, please do consider citing [our papers](#citation), thanks!
## Installation
1. **Python** >= 3.6
2. Install **sox** on your OS
3. Install s3prl: [Read doc](https://s3prl.github.io/s3prl/tutorial/installation.html#) or `pip install -e ".[all]"`
4. (Optional) Some upstream models require special dependencies. If you encounter error with a specific upstream model, you can look into the `README.md` under each `upstream` folder. E.g., `upstream/pase/README.md`
## Development pattern for contributors
1. [Create a personal fork](https://help.github.com/articles/fork-a-repo/) of the [main S3PRL repository](https://github.com/andi611/Self-Supervised-Speech-Pretraining-and-Representation-Learning) in GitHub.
2. Make your changes in a named branch different from `master`, e.g. you create a branch `new-awesome-feature`.
3. Contact us if you have any questions during development.
4. [Generate a pull request](https://help.github.com/articles/creating-a-pull-request/) through the Web interface of GitHub.
5. Please verify that your code is free of basic mistakes, we appreciate any contribution!
## Reference Repositories
* [Pytorch](https://github.com/pytorch/pytorch), Pytorch.
* [Audio](https://github.com/pytorch/audio), Pytorch.
* [Kaldi](https://github.com/kaldi-asr/kaldi), Kaldi-ASR.
* [Transformers](https://github.com/huggingface/transformers), Hugging Face.
* [PyTorch-Kaldi](https://github.com/mravanelli/pytorch-kaldi), Mirco Ravanelli.
* [fairseq](https://github.com/pytorch/fairseq), Facebook AI Research.
* [CPC](https://github.com/facebookresearch/CPC_audio), Facebook AI Research.
* [APC](https://github.com/iamyuanchung/Autoregressive-Predictive-Coding), Yu-An Chung.
* [VQ-APC](https://github.com/s3prl/VQ-APC), Yu-An Chung.
* [NPC](https://github.com/Alexander-H-Liu/NPC), Alexander-H-Liu.
* [End-to-end-ASR-Pytorch](https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch), Alexander-H-Liu
* [Mockingjay](https://github.com/andi611/Mockingjay-Speech-Representation), Andy T. Liu.
* [ESPnet](https://github.com/espnet/espnet), Shinji Watanabe
* [speech-representations](https://github.com/awslabs/speech-representations), aws lab
* [PASE](https://github.com/santi-pdp/pase), Santiago Pascual and Mirco Ravanelli
* [LibriMix](https://github.com/JorisCos/LibriMix), Joris Cosentino and Manuel Pariente
## License
The majority of the S3PRL Toolkit is licensed under the Apache License version 2.0; however, all files authored by Facebook, Inc. (which carry an explicit copyright statement at the top) are licensed under CC-BY-NC.
## Used by
<details><summary>List of papers that used our toolkit (Feel free to add your own paper by making a pull request)</summary><p>
### Self-Supervised Pretraining
+ [Mockingjay: Unsupervised Speech Representation Learning with Deep Bidirectional Transformer Encoders (Liu et al., 2020)](https://arxiv.org/abs/1910.12638)
```
@article{mockingjay,
title={Mockingjay: Unsupervised Speech Representation Learning with Deep Bidirectional Transformer Encoders},
ISBN={9781509066315},
url={http://dx.doi.org/10.1109/ICASSP40776.2020.9054458},
DOI={10.1109/icassp40776.2020.9054458},
journal={ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
publisher={IEEE},
author={Liu, Andy T. and Yang, Shu-wen and Chi, Po-Han and Hsu, Po-chun and Lee, Hung-yi},
year={2020},
month={May}
}
```
+ [TERA: Self-Supervised Learning of Transformer Encoder Representation for Speech (Liu et al., 2020)](https://arxiv.org/abs/2007.06028)
```
@misc{tera,
title={TERA: Self-Supervised Learning of Transformer Encoder Representation for Speech},
author={Andy T. Liu and Shang-Wen Li and Hung-yi Lee},
year={2020},
eprint={2007.06028},
archivePrefix={arXiv},
primaryClass={eess.AS}
}
```
+ [Audio ALBERT: A Lite BERT for Self-supervised Learning of Audio Representation (Chi et al., 2020)](https://arxiv.org/abs/2005.08575)
```
@inproceedings{audio_albert,
title={Audio ALBERT: A Lite BERT for Self-supervised Learning of Audio Representation},
author={Po-Han Chi and Pei-Hung Chung and Tsung-Han Wu and Chun-Cheng Hsieh and Shang-Wen Li and Hung-yi Lee},
year={2020},
booktitle={SLT 2020},
}
```
### Explanability
+ [Understanding Self-Attention of Self-Supervised Audio Transformers (Yang et al., 2020)](https://arxiv.org/abs/2006.03265)
```
@inproceedings{understanding_sat,
author={Shu-wen Yang and Andy T. Liu and Hung-yi Lee},
title={{Understanding Self-Attention of Self-Supervised Audio Transformers}},
year=2020,
booktitle={Proc. Interspeech 2020},
pages={3785--3789},
doi={10.21437/Interspeech.2020-2231},
url={http://dx.doi.org/10.21437/Interspeech.2020-2231}
}
```
### Adversarial Attack
+ [Defense for Black-box Attacks on Anti-spoofing Models by Self-Supervised Learning (Wu et al., 2020)](https://arxiv.org/abs/2006.03214), code for computing LNSR: [utility/observe_lnsr.py](https://github.com/s3prl/s3prl/blob/master/utility/observe_lnsr.py)
```
@inproceedings{mockingjay_defense,
author={Haibin Wu and Andy T. Liu and Hung-yi Lee},
title={{Defense for Black-Box Attacks on Anti-Spoofing Models by Self-Supervised Learning}},
year=2020,
booktitle={Proc. Interspeech 2020},
pages={3780--3784},
doi={10.21437/Interspeech.2020-2026},
url={http://dx.doi.org/10.21437/Interspeech.2020-2026}
}
```
+ [Adversarial Defense for Automatic Speaker Verification by Cascaded Self-Supervised Learning Models (Wu et al., 2021)](https://arxiv.org/abs/2102.07047)
```
@misc{asv_ssl,
title={Adversarial defense for automatic speaker verification by cascaded self-supervised learning models},
author={Haibin Wu and Xu Li and Andy T. Liu and Zhiyong Wu and Helen Meng and Hung-yi Lee},
year={2021},
eprint={2102.07047},
archivePrefix={arXiv},
primaryClass={eess.AS}
}
```
### Voice Conversion
+ [S2VC: A Framework for Any-to-Any Voice Conversion with Self-Supervised Pretrained Representations (Lin et al., 2021)](https://arxiv.org/abs/2104.02901)
```
@misc{s2vc,
title={S2VC: A Framework for Any-to-Any Voice Conversion with Self-Supervised Pretrained Representations},
author={Jheng-hao Lin and Yist Y. Lin and Chung-Ming Chien and Hung-yi Lee},
year={2021},
eprint={2104.02901},
archivePrefix={arXiv},
primaryClass={eess.AS}
}
```
### Benchmark and Evaluation
+ [SUPERB: Speech processing Universal PERformance Benchmark (Yang et al., 2021)](https://arxiv.org/abs/2105.01051)
```
@misc{superb,
title={SUPERB: Speech processing Universal PERformance Benchmark},
author={Shu-wen Yang and Po-Han Chi and Yung-Sung Chuang and Cheng-I Jeff Lai and Kushal Lakhotia and Yist Y. Lin and Andy T. Liu and Jiatong Shi and Xuankai Chang and Guan-Ting Lin and Tzu-Hsien Huang and Wei-Cheng Tseng and Ko-tik Lee and Da-Rong Liu and Zili Huang and Shuyan Dong and Shang-Wen Li and Shinji Watanabe and Abdelrahman Mohamed and Hung-yi Lee},
year={2021},
eprint={2105.01051},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
+ [Utilizing Self-supervised Representations for MOS Prediction (Tseng et al., 2021)](https://arxiv.org/abs/2104.03017)
```
@misc{ssr_mos,
title={Utilizing Self-supervised Representations for MOS Prediction},
author={Wei-Cheng Tseng and Chien-yu Huang and Wei-Tsung Kao and Yist Y. Lin and Hung-yi Lee},
year={2021},
eprint={2104.03017},
archivePrefix={arXiv},
primaryClass={eess.AS}
}
```
</p></details>
## Citation
If you find this toolkit useful, please consider citing following papers.
- If you use our pre-training scripts, or the downstream tasks considered in *TERA* and *Mockingjay*, please consider citing the following:
```
@misc{tera,
title={TERA: Self-Supervised Learning of Transformer Encoder Representation for Speech},
author={Andy T. Liu and Shang-Wen Li and Hung-yi Lee},
year={2020},
eprint={2007.06028},
archivePrefix={arXiv},
primaryClass={eess.AS}
}
```
```
@article{mockingjay,
title={Mockingjay: Unsupervised Speech Representation Learning with Deep Bidirectional Transformer Encoders},
ISBN={9781509066315},
url={http://dx.doi.org/10.1109/ICASSP40776.2020.9054458},
DOI={10.1109/icassp40776.2020.9054458},
journal={ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
publisher={IEEE},
author={Liu, Andy T. and Yang, Shu-wen and Chi, Po-Han and Hsu, Po-chun and Lee, Hung-yi},
year={2020},
month={May}
}
```
- If you use our organized upstream interface and features, or the *SUPERB* downstream benchmark, please consider citing the following:
```
@inproceedings{yang21c_interspeech,
author={Shu-wen Yang and Po-Han Chi and Yung-Sung Chuang and Cheng-I Jeff Lai and Kushal Lakhotia and Yist Y. Lin and Andy T. Liu and Jiatong Shi and Xuankai Chang and Guan-Ting Lin and Tzu-Hsien Huang and Wei-Cheng Tseng and Ko-tik Lee and Da-Rong Liu and Zili Huang and Shuyan Dong and Shang-Wen Li and Shinji Watanabe and Abdelrahman Mohamed and Hung-yi Lee},
title={{SUPERB: Speech Processing Universal PERformance Benchmark}},
year=2021,
booktitle={Proc. Interspeech 2021},
pages={1194--1198},
doi={10.21437/Interspeech.2021-1775}
}
```
| /s3prl-0.4.10.tar.gz/s3prl-0.4.10/README.md | 0.853333 | 0.784526 | README.md | pypi |
# [s3recon](https://s3recon.readthedocs.io/en/latest/)
[](https://pypi.org/project/s3recon/)
[](https://pypi.org/project/s3recon/)
[](https://pepy.tech/project/s3recon)
[](https://s3recon.readthedocs.io/en/latest/?badge=latest)
Amazon S3 bucket finder and crawler.
<br>
<a href="https://github.com/clarketm/s3recon">
<p align="center"><img width="40%" src="https://raw.githubusercontent.com/clarketm/s3recon/master/recon.jpeg" /></p>
</a>
[Check out the s3recon docs](https://s3recon.readthedocs.io/en/latest/)
## Installation
> **NOTE**: s3recon requires python version **>=3.6**
```bash
$ pip install s3recon
```
## Usage
```text
usage: s3recon [-h] [-o file] [-d] [-p] [-t seconds] [-v] [-c num] word_list [word_list ...]
positional arguments:
word_list read words from one or more <word-list> files
optional arguments:
-h, --help show this help message and exit
-o file, --output file write output to <file>
-d, --db write output to database
-p, --public only include 'public' buckets in the output
-t seconds, --timeout seconds http request timeout in <seconds> (default: 30)
-v, --version show program's version number and exit
-c num, --concurrency num maximum <num> of concurrent requests (default: # of lcpus)
```
## Example 1: Output to a json file
#### 1. Download a word-list.
The [SecLists](https://github.com/danielmiessler/SecLists) repository has a multitude of word-lists to choose from. For this example, let's download the sample word-list included in this repository.
```bash
$ curl -sSfL -o "word-list.txt" "https://raw.githubusercontent.com/clarketm/s3recon/master/data/words.txt"
```
#### 2. Run `s3recon`.
Execute `s3recon` using the `word-list.txt` file and output the `public` S3 buckets to a json file named `results.json`.
```bash
$ s3recon "word-list.txt" -o "results.json" --public
- PRIVATE https://s3.sa-east-1.amazonaws.com/test-lyft
- PRIVATE https://s3.ap-south-1.amazonaws.com/test.amazon
+ PUBLIC https://walmart-dev.s3.us-east-1.amazonaws.com
- PRIVATE https://s3.ap-southeast-1.amazonaws.com/apple-prod
- PRIVATE https://walmart.s3.ap-southeast-1.amazonaws.com
...
```
#### 3. Inspect the results.
Check the `results.json` output file to view the S3 buckets you have discovered!
```bash
$ cat "results.json"
```
```json
{
"public": {
"total": 12,
"hits": [
"https://walmart-dev.s3.us-east-1.amazonaws.com",
"https://apple-production.s3.ap-southeast-1.amazonaws.com",
...
]
}
}
```
> **Note:** to include `private` buckets in the results omit the `-p, --public` flag from the command.
#### 4. Crawl the results.
Enumerate the static files located in each bucket and record the findings.
> Coming soon!
## Example 2: Output to a MongoDB database
#### 1. Download a word-list.
The [SecLists](https://github.com/danielmiessler/SecLists) repository has a multitude of word-lists to choose from. For this example, let's download the sample word-list included in this repository.
```bash
$ curl -sSfL -o "word-list.txt" "https://raw.githubusercontent.com/clarketm/s3recon/master/data/words.txt"
```
#### 2. Start an instance of MongoDB
```text
$ docker run --name "mongo" -p 27017:27017 -v "mongodb_data:/data/db" -v "mongodb_config:/data/configdb" -d mongo
```
#### 3. Run `s3recon`.
Execute `s3recon` using the `word-list.txt` file and output to MongoDB instance.
```bash
$ s3recon "word-list.txt" --db
- PRIVATE https://s3.sa-east-1.amazonaws.com/test-lyft
- PRIVATE https://s3.ap-south-1.amazonaws.com/test.amazon
+ PUBLIC https://walmart-dev.s3.us-east-1.amazonaws.com
- PRIVATE https://s3.ap-southeast-1.amazonaws.com/apple-prod
- PRIVATE https://walmart.s3.ap-southeast-1.amazonaws.com
...
```
#### 4. Inspect the results.
Check the MongoDB database: `s3recon` collection: `hits` to view the S3 buckets you have discovered!
```bash
$ mongo "s3recon" --quiet --eval 'db.hits.find({}, {"url": 1, "access": 1, "_id": 0}).limit(5)'
```
```json
{ "url" : "https://s3.us-east-2.amazonaws.com/apple", "access" : "private" }
{ "url" : "https://s3.us-west-1.amazonaws.com/microsoft-dev", "access" : "private" }
{ "url" : "https://s3.us-west-1.amazonaws.com/dev-microsoft", "access" : "private" }
{ "url" : "https://s3.us-east-2.amazonaws.com/amazon", "access" : "private" }
{ "url" : "https://s3.us-east-1.amazonaws.com/dev-amazon", "access" : "private" }
```
#### 5. Crawl the results.
Enumerate the static files located in each bucket and record the findings.
> Coming soon!
## FAQ
#### Q: How do I configure this utility?
#### A:
`s3recon` can be configured using a yaml configuration file located in either the current working directory (e.g. `./s3recon.yml`) or your home directory (e.g. `~/s3recon.yml`).
The following is the list of configurable values:
```yaml
# s3recon.yml
database: { host: "0.0.0.0", ... }
separators: ["-", "_", "."]
environments: ["", "backup", "backups", ...]
regions: ["ap-northeast-1", "ap-northeast-2", ...]
```
> To see the full list of configurable values (and their **defaults**) please refer to the [s3recon.yml](https://github.com/clarketm/s3recon/blob/master/s3recon/s3recon.yml) file in this repository.
#### Q: How do I customize the AWS regions used in the recon?
#### A:
The AWS *regions* can be altered by setting the `regions` array in your `s3recon.yml` configuration file.
```yaml
# s3recon.yml
regions: [ "us-west-2", ...]
```
#### Q: How do I customize the environment values used in the recon?
#### A:
The *environments* are modifiers permuted with each item of the *word-list* (and the *separator*) to construct the bucket name used in each request.
The value can be altered by setting the `environments` array in your `s3recon.yml` configuration file.
For example, to only search lines from the word-list *verbatim* (i.e. without modification) you can set this value to an empty array.
```yaml
# s3recon.yml
environments: []
```
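For intuition, the permutation roughly works like the sketch below (an illustration only, not s3recon's actual code; the word list and names here are made up):
```python
from itertools import product

# Hypothetical values mirroring an s3recon.yml configuration
words = ["walmart", "apple"]        # read from the word-list file
separators = ["-", "_", "."]
environments = ["", "dev", "backup"]

candidates = set()
for word, sep, env in product(words, separators, environments):
    if env:
        # environment joined on either side of the word
        candidates.add(f"{word}{sep}{env}")
        candidates.add(f"{env}{sep}{word}")
    else:
        # an empty environment means the word is tried verbatim
        candidates.add(word)

print(sorted(candidates))  # e.g. ['apple', 'apple-backup', ..., 'dev.walmart']
```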
#### Q: How do I customize the MongoDB host and port?
#### A:
The database *host* and *port* can be configured by altering the `database` map in your `s3recon.yml` configuration file.
For example, `host` and `port` can be set directly inside the `database` map:
```yaml
# s3recon.yml
database: {
host: "0.0.0.0",
port: 27017
}
```
#### Q: How do I use a database other than MongoDB?
#### A:
Sorry, at the moment only MongoDB is supported.
## Going Forward
- [ ] Create `crawl` command to crawl public/private buckets found in `find` stage.
- [ ] Separate out `find` and `crawl` as subcommands.
- [x] Store discovered buckets in a NoSQL database.
## Disclaimer
This tool is distributed for educational and security purposes. I take no responsibility and assume no liability for the manner in which this tool is used.
## License
MIT © [**Travis Clarke**](https://blog.travismclarke.com/)
| /s3recon-1.4.0.tar.gz/s3recon-1.4.0/README.md | 0.630685 | 0.941761 | README.md | pypi |
import os
import sys
import argparse
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
def set_public_redirect(key, redirect_location):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
(originally from boto)
"""
headers = {'x-amz-website-redirect-location': redirect_location,
'x-amz-acl': 'public-read',
}
response = key.bucket.connection.make_request('PUT', key.bucket.name,
key.name, headers)
if response.status == 200:
return True
else:
raise key.provider.storage_response_error(
response.status, response.reason, response.read())
def clean_key_name(key_name, remove_slash=True, index="index.html"):
"""Remove slash from beginning and append index to end of key"""
if remove_slash and key_name.startswith("/"):
key_name = key_name[1:]
if key_name.endswith("/"):
key_name = key_name + index
return key_name
def redirect_pairs(f, **kwargs):
"""Generates pairs (key, redirect) in given file"""
for i, line in enumerate(f):
line = line.strip()
# ignore blank lines and comments
if not line or line.startswith("#"):
continue
words = line.split()
if len(words) < 2:
sys.stderr.write(
"Ignoring line {0}, missing redirect field\n".format(i+1))
continue
key, loc = clean_key_name(words[0], **kwargs), words[1]
if not loc.startswith(('/', 'http://', 'https://')):
sys.stderr.write(
"Ignoring line {0}, invalid redirect: {1}\n".format(i+1, loc))
continue
if len(words) > 2:
sys.stderr.write(
"Ignoring extra fields in line {0}: {1}\n".format(
i+1, " ".join(words[2:])))
yield key, loc
def upload_redirects(redirects, bucket, remote_keys, dry=False):
"""Pop redirects from remote_keys and upload"""
for local_key, location in redirects:
exists = bool(local_key in remote_keys)
if exists:
key = remote_keys.pop(local_key)
else:
key = bucket.new_key(local_key)
# don't re-upload identical redirects
if exists and location == key.get_redirect():
continue
if not dry:
set_public_redirect(key, location)
print "{2:<6} {0} {1}".format(
local_key, location, "update" if exists else "new")
def sync_redirects(redirects, bucket, delete=False, dry=False):
"""Do the syncing"""
remote_keys = {key.name: key for key in bucket.list()}
upload_redirects(redirects, bucket, remote_keys, dry=dry)
if delete:
for key in remote_keys.values():
# assume all size-non-zero keys aren't redirects to save requests
redirect = key.get_redirect() if key.size == 0 else None
if redirect is None:
continue
if not dry:
key.delete()
print "delete {0} {1}".format(key.name, redirect)
def connection(filename="~/.awssecret"):
"""Creates a connection to S3 using data stored in text file"""
with open(os.path.expanduser(filename)) as f:
access, secret = [s.strip() for s in f.readlines()[:2]]
return S3Connection(access, secret)
def main():
"""Command line interface"""
parser = argparse.ArgumentParser(
description="Sync a list of redirects to an Amazon S3 bucket",
epilog="Amazon access key and secret key should be stored on\
the first and second lines of the key file.")
parser.add_argument("redirects",
help="file containing list of key names and redirect locations,\
separated by a space and listed one per line",
type=argparse.FileType('r'))
parser.add_argument("bucket", help="name of Amazon S3 bucket")
parser.add_argument("-d", "--delete", action="store_true",
help="also delete all redirects not listed in redirects file")
parser.add_argument("-n", "--dry-run", action="store_true",
help="display changes to be made without actually making them")
parser.add_argument("--key", metavar="file", default="~/.awssecret",
type=str, help="use specified key file (default ~/.awssecret)")
args = parser.parse_args()
try:
conn = connection(filename=args.key)
    except (IOError, ValueError):
sys.exit('Unable to read key file: {0}'.format(args.key))
try:
bucket = conn.get_bucket(args.bucket)
except S3ResponseError as err:
return '{status} {reason}\n{error_message}'.format(**err.__dict__)
if args.dry_run: print "This is a dry run"
sync_redirects(redirects=redirect_pairs(args.redirects),
bucket=bucket,
delete=args.delete,
dry=args.dry_run)
if __name__ == "__main__":
sys.exit(main()) | /s3redirect-0.1.2.zip/s3redirect-0.1.2/s3redirect.py | 0.4436 | 0.178992 | s3redirect.py | pypi |
<span><img src="https://img.shields.io/github/workflow/status/gabriel-oana/s3select_plus/Tests">
<img src="https://img.shields.io/github/languages/top/gabriel-oana/s3select_plus">
<img src="https://img.shields.io/pypi/pyversions/s3select-plus">
<img src="https://img.shields.io/pypi/v/s3select-plus">
<img src="https://img.shields.io/badge/linting-pylint-green">
[](https://pepy.tech/project/s3select-plus)
<img src="https://img.shields.io/pypi/dm/s3select_plus?label=pypi%20downloads">
[](https://codecov.io/gh/gabriel-oana/s3select_plus)
</span>
# S3 Select Plus
- [S3 Select Plus](#s3-select-plus)
+ [1. Description](#1-description)
+ [2 Features](#2-features)
+ [3. Installation](#3-installation)
+ [4. Usage](#4-usage)
- [4.1 Basic](#41-basic)
- [4.2 Running with an "extra function"](#42-running-with-an--extra-function-)
- [4.3 Running with SequentialEngine](#43-running-with-sequentialengine)
- [4.4 Show statistics](#44-show-statistics)
- [4.5 Serialization](#45-serialization)
* [4.5.1 JSON Serialization](#451-json-serialization)
* [4.5.2 CSV Serialization](#452-csv-serialization)
* [4.5.3 Parquet Serialization](#453-parquet-serialization)
* [4.5.4 Indirect Serialization](#454-indirect-serialization)
+ [5. Development](#5-development)
- [5.1 Creating a parallel engine with a different S3 client implementation](#51-creating-a-parallel-engine-with-a-different-s3-client-implementation)
### 1. Description
Utility package to query multiple S3 objects using S3 Select.
More information on AWS S3 Select: https://docs.aws.amazon.com/AmazonS3/latest/userguide/selecting-content-from-objects.html
### 2 Features
* Engine modes:
* Parallel - each file gets queried in a separate process
* Sequential - all files get queried sequentially
* User defined query engine (more on this below)
* Cost estimation (before query) and calculation (after query)
* Possibility to add user defined functions at process level (useful for in-flight transformations)
* Support for formats: JSON, CSV and Parquet files
* Support for compressions: GZIP, BZIP
* Support for Input and Output Serialization using SerializerTypes or a dictionary config.
* Support for user defined SQL Query
#### 2.1 Future versions:
* Scan Range functionality exposed
* SSE functionality exposed
* Ability to select profile for S3 connections
#### 2.2 Performance
Parallelising the per-file queries makes querying many files much faster.
The performance test below was run on an 8-core CPU over many small files of different types (network transfer speeds ignored):
```text
| case | engine | file_type | files | total_size | columns | responses | time_taken_sec | cost |
|--------+------------+-------------+---------+--------------+-----------+-------------+------------------+-------------|
| 1 | sequential | json | 200 | 100 MB | 1 | 200 | 42.53 | 0.000232982 |
| 2 | parallel | json | 200 | 100 MB | 1 | 200 | 5.79 | 0.000232982 |
| 3 | sequential | json | 200 | 100 MB | 5 | 200 | 44.71 | 0.000256532 |
| 4 | parallel | json | 200 | 100 MB | 5 | 200 | 6.81 | 0.000256532 |
| 5 | sequential | csv | 100 | 100 MB | 1 | 100 | 26.54 | 0.000247681 |
| 6 | parallel | csv | 100 | 100 MB | 1 | 100 | 7.14 | 0.000247681 |
| 7 | sequential | csv | 100 | 100 MB | 5 | 100 | 38.26 | 0.000373264 |
| 8 | parallel | csv | 100 | 100 MB | 5 | 100 | 16.2 | 0.000373264 |
| 9 | sequential | parquet | 100 | 100 MB | 1 | 100 | 43.79 | 7.39638e-05 |
| 10 | parallel | parquet | 100 | 100 MB | 1 | 100 | 21.05 | 7.39638e-05 |
| 11 | sequential | parquet | 100 | 100 MB | 5 | 100 | 64.34 | 0.00034574 |
| 12 | parallel | parquet | 100 | 100 MB | 5 | 100 | 28.34 | 0.00034574 |
| 13 | sequential | json | 2000 | 1 GB | 1 | 2000 | 552.23 | 0.00232982 |
| 14 | parallel | json | 2000 | 1 GB | 1 | 2000 | 330.23 | 0.00232982 |
```
### 3. Installation
```shell
pip3 install s3select_plus
```
### 4. Usage
#### 4.1 Basic
By default, this is a parallel process querying JSON files.
```python
from select_plus import SSP
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix'
)
est_cost = ssp.estimate_cost()
print(f'Estimated Cost: ${format(est_cost, "f")}')
# The guard below is required when executing with the ParallelEngine,
# because each file is queried in a separate process and Python's
# "multiprocessing" package requires this guard.
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s'
)
print(result.payload)
```
#### 4.2 Running with an "extra function"
The "extra function" can be defined to do extra steps for each result from a single SQL query
in a process. For example, if one needs to do some processing or transformation of the results
before all the results are combined into the final result.
The "extra function" also supports "extra function arguments" to be passed to the function.
```python
from select_plus import SSP
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix'
)
def transform(response, arg1, arg2):
# Assuming the response from the query looks like: {"column1": 1}
# response = {"column": 1}
response['new_column'] = arg1
response['newer_column'] = arg2
# This function must always return something
return response
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s',
extra_func=transform,
extra_func_args={
"arg1": 1,
"arg2": 2
}
)
print(result.payload)
```
#### 4.3 Running with SequentialEngine
```python
from select_plus import SSP, SequentialEngine
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix',
engine=SequentialEngine
)
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s'
)
print(result.payload)
```
#### 4.4 Show statistics
```python
from select_plus import SSP, ParallelEngine
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix',
engine=ParallelEngine
)
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s'
)
print(result.payload)
print(result.stats.cost) # dollars
print(result.stats.bytes_processed)
print(result.stats.bytes_returned)
print(result.stats.bytes_scanned)
print(result.stats.files_processed)
```
#### 4.5 Serialization
The ```select_object_content``` S3 API call will expect some serialization parameters depending on the types of files that are queried.
Serialization works by default with all engine types.
A full list of all the parameters accepted by the serializers is described here:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.select_object_content
##### 4.5.1 JSON Serialization
To query JSON files with specific types and parameters, one can use the pre-defined Serialization objects:
```python
from select_plus import SSP
from select_plus.serializers import CompressionTypes, InputSerialization, OutputSerialization, JSONInputSerialization, JSONOutputSerialization
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix'
)
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s',
input_serialization=InputSerialization(
compression_type=CompressionTypes.gzip,
json=JSONInputSerialization(
Type='DOCUMENT'
)
),
output_serialization=OutputSerialization(
json=JSONOutputSerialization(
record_delimiter='\n'
)
)
)
print(result.payload)
```
##### 4.5.2 CSV Serialization
To query CSV files with specific types and parameters, one can use the pre-defined Serialization objects:
```python
from select_plus import SSP
from select_plus.serializers import CompressionTypes, InputSerialization, OutputSerialization, CSVInputSerialization, CSVOutputSerialization
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix'
)
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s',
input_serialization=InputSerialization(
compression_type=CompressionTypes.gzip,
csv=CSVInputSerialization(
file_header_info='USE'
)
),
output_serialization=OutputSerialization(
csv=CSVOutputSerialization(
record_delimiter='\n'
)
)
)
print(result.payload)
```
##### 4.5.3 Parquet Serialization
To query Parquet files with specific types and parameters, one can use the pre-defined Serialization objects:
```python
from select_plus import SSP
from select_plus.serializers import CompressionTypes, InputSerialization, OutputSerialization, CSVOutputSerialization
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix'
)
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s',
input_serialization=InputSerialization(
compression_type=CompressionTypes.gzip,
parquet={}
),
output_serialization=OutputSerialization(
csv=CSVOutputSerialization(
record_delimiter='\n'
)
)
)
print(result.payload)
```
##### 4.5.4 Indirect Serialization
One can instead supply plain dictionaries for the serialization types, which is useful when the serialization settings live in a configuration file, for example.
```python
from select_plus import SSP
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix'
)
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s',
input_serialization={
"CompressionType": 'GZIP',
"JSON": {
"Type": "LINES"
}
},
output_serialization={
"JSON": {
'RecordDelimiter': '\n'
}
}
)
print(result.payload)
```
### 5. Development
#### 5.1 Creating a parallel engine with a different S3 client implementation
One downside of this package is that an S3 client cannot be passed as an input to the main call.
The reason is that boto3 clients cannot be pickled, so each process must initialise its own client.
To circumvent this problem, one can create their own engine and implement their own S3 client (or resource) inside it.
```python
from select_plus import SSP, BaseEngine
from select_plus.src.aws.s3 import S3
class MyCustomEngine(BaseEngine):
def execute(self, sql_query: str, extra_func: callable = None, extra_func_args: dict = None) -> list:
pass
def _make_func_args(self, sql_query: str, extra_func: callable, extra_func_args: dict):
"""
Gets a list of all keys to be processed
"""
s3 = S3(client='my-custom-client') # This is where you can customize your own S3 client. Even change the entire S3 functionality.
s3_keys = s3.list_objects(bucket_name=self.bucket_name, prefix=self.prefix)
keys = s3_keys['keys']
func_args = [(key, sql_query, extra_func, extra_func_args) for key in keys]
return func_args
def _make_func(self, key: str, sql_query: str, extra_func: callable, extra_func_args: dict):
"""
Performs the SQL query against one single Key.
This process runs as a single process.
As result, all boto initialization must happen inside this function.
It also applies any extra functions added by the user.
"""
s3 = S3() # This is where you can customize your own S3 client. Even change the entire S3 functionality.
response = s3.select(bucket_name=self.bucket_name, key=key, sql_string=sql_query)
if extra_func:
response = self._apply_extra_func(response, extra_func, extra_func_args)
return response
def _wrapper_func(self, args):
"""
        This is a wrapper for the function to be executed inside the thread.
        The reason why this exists is that tqdm doesn't work with "pool.starmap",
        so one cannot pass multiple parameters to the function directly.
"""
result = self._make_func(*args)
return result
@staticmethod
def _apply_extra_func(response: dict, extra_func, extra_func_args):
"""
A user has the possibility of adding an additional function at each thread level to process each chunk of data
before it merges the results from all threads.
Allow the function to access only the payload but not the statistics.
        This way, the cost can be computed in the compilation of the results after the processes have ended.
"""
block_response = {
"stats": response['stats'],
"payload": None
}
func_response = extra_func(response['payload'], **extra_func_args)
block_response['payload'] = func_response
return block_response
ssp = SSP(
bucket_name='bucket-name',
prefix='s3-key-prefix',
engine=MyCustomEngine
)
if __name__ == '__main__':
result = ssp.select(
threads=8,
sql_query='SELECT * FROM s3object[*] s'
)
print(result.payload)
```
Similarly, one can develop new engines. For example using Dask or PySpark. | /s3select_plus-1.0.3.tar.gz/s3select_plus-1.0.3/README.md | 0.693265 | 0.90813 | README.md | pypi |
from typing import Optional, Union
from select_plus.src.engine.base_engine import BaseEngine
from select_plus.src.utils.cost import Cost
from select_plus.src.models.models import EngineResults, EngineResultsStats, InputSerialization, OutputSerialization
class EngineWrapper:
def execute(
self,
sql_query: str,
extra_func: Optional[callable],
extra_func_args: Optional[dict],
engine: BaseEngine,
input_serialization: Union[InputSerialization, dict],
output_serialization: Union[OutputSerialization, dict]
) -> EngineResults:
dict_input_serialization = self.deserialize(input_serialization)
dict_output_serialization = self.deserialize(output_serialization)
response = engine.execute(
sql_query=sql_query,
extra_func=extra_func,
extra_func_args=extra_func_args,
input_serialization=dict_input_serialization,
output_serialization=dict_output_serialization
)
compiled_result = self._compile_results(response)
return compiled_result
@staticmethod
def deserialize(obj) -> dict:
"""
To allow users to have the serializers as either a defined class or a dictionary, this is required to convert
the input / output serializers into dicts to be passed to the S3 select function
"""
if isinstance(obj, OutputSerialization):
return obj.as_dict()
elif isinstance(obj, InputSerialization):
return obj.as_dict()
else:
return obj
@staticmethod
def _compile_results(response: list) -> EngineResults:
cost = Cost()
payload = []
bytes_scanned = 0
bytes_processed = 0
bytes_returned = 0
files_processed = 0
for record in response:
payload.append(record['payload'])
bytes_scanned += record['stats']['bytes_scanned']
bytes_processed += record['stats']['bytes_processed']
bytes_returned += record['stats']['bytes_returned']
files_processed += 1
cost = cost.compute_block(data_scanned=bytes_scanned,
data_returned=bytes_returned,
files_requested=files_processed)
model = EngineResults(
payload=payload,
stats=EngineResultsStats(
cost=cost,
files_processed=files_processed,
bytes_scanned=bytes_scanned,
bytes_returned=bytes_returned,
bytes_processed=bytes_processed
)
)
return model | /s3select_plus-1.0.3.tar.gz/s3select_plus-1.0.3/select_plus/src/engine/engine.py | 0.91501 | 0.214177 | engine.py | pypi |
from abc import ABC, abstractmethod
from typing import Optional, Union
import boto3
from select_plus.src.aws.s3 import S3
from select_plus.src.models.models import InputSerialization, OutputSerialization
class BaseEngine(ABC):
def __init__(self, bucket_name: str, prefix: str, threads: int, verbose: bool):
self.bucket_name = bucket_name
self.prefix = prefix
self.verbose = verbose
self.threads = threads
@abstractmethod
def execute(self,
sql_query: str,
input_serialization: Union[InputSerialization, dict],
output_serialization: Union[OutputSerialization, dict],
extra_func: Optional[callable] = None,
extra_func_args: Optional[dict] = None,
s3_client: Optional[boto3.session.Session.client] = None
):
raise NotImplementedError
def select_s3(self,
key: str,
sql_query: str,
input_serialization: dict,
output_serialization: dict,
extra_func: callable,
extra_func_args: Optional[dict],
s3_client: Optional[boto3.session.Session.client] = None
):
s3 = S3(client=s3_client)
response = s3.select(bucket_name=self.bucket_name, key=key, sql_string=sql_query,
input_serialization=input_serialization, output_serialization=output_serialization)
if extra_func:
response = self._apply_extra_func(response, extra_func, extra_func_args)
return response
@staticmethod
def _apply_extra_func(response: dict, extra_func, extra_func_args):
"""
A user has the possibility of adding an additional function at each thread level to process each chunk of data
before it merges the results from all threads.
Allow the function to access only the payload but not the statistics.
        This way, the cost can be computed in the compilation of the results after the processes have ended.
"""
block_response = {
"stats": response['stats'],
"payload": None
}
if extra_func_args:
func_response = extra_func(response['payload'], **extra_func_args)
else:
func_response = extra_func(response['payload'])
block_response['payload'] = func_response
return block_response | /s3select_plus-1.0.3.tar.gz/s3select_plus-1.0.3/select_plus/src/engine/base_engine.py | 0.900308 | 0.160398 | base_engine.py | pypi |
import json
from typing import Optional, Union
from dataclasses import dataclass
@dataclass
class EngineResultsStats:
cost: float
files_processed: int
bytes_scanned: int
bytes_returned: int
bytes_processed: int
@dataclass
class EngineResults:
payload: list
stats: EngineResultsStats
    @property
    def payload_dict(self) -> list:
        # Each payload item is a string of newline-delimited JSON records.
        # json.loads is safer than eval and, unlike eval, handles JSON
        # literals such as true/false/null.
        payload_dict = []
        for item in self.payload:
            for line in item.splitlines():
                if line:
                    payload_dict.append(json.loads(line))
        return payload_dict
@property
def payload_csv(self):
# CSV Parser
payload_csv = []
for item in self.payload:
sub_item = item.split('\n')
for row in sub_item:
if len(row) > 0:
payload_csv.append(row.split(','))
return payload_csv
@dataclass
class CompressionTypes:
none: str = 'NONE'
gzip: str = 'GZIP'
bzip2: str = 'BZIP2'
@dataclass
class CSVInputSerialization:
file_header_info: Optional[str] = None
comments: Optional[str] = None
quote_escape_character: Optional[str] = None
record_delimiter: Optional[str] = None
field_delimiter: Optional[str] = None
quote_character: Optional[str] = None
allow_quoted_record_delimiter: Optional[bool] = False
def __post_init__(self):
# Validate the file_header_info
if self.file_header_info:
if self.file_header_info not in ['USE', 'IGNORE', 'NONE']:
raise RuntimeError('CSV Input Serialization field file_header_info must be USE | IGNORE | NONE')
def as_dict(self) -> dict:
params = {}
if self.file_header_info:
params['FileHeaderInfo'] = self.file_header_info
if self.comments:
params['Comments'] = self.comments
if self.quote_escape_character:
params['QuoteEscapeCharacter'] = self.quote_escape_character
if self.record_delimiter:
params['RecordDelimiter'] = self.record_delimiter
if self.field_delimiter:
params['FieldDelimiter'] = self.field_delimiter
if self.quote_character:
params['QuoteCharacter'] = self.quote_character
if self.allow_quoted_record_delimiter:
params['AllowQuotedRecordDelimiter'] = self.allow_quoted_record_delimiter
return params
@dataclass
class JSONInputSerialization:
Type: Optional[str] = None
def __post_init__(self):
# Validate the file_header_info
if self.Type:
if self.Type not in ['LINES', 'DOCUMENT']:
                raise RuntimeError('JSON Input Serialization field Type must be LINES | DOCUMENT')
def as_dict(self) -> dict:
params = {}
if self.Type:
params['Type'] = self.Type
return params
@dataclass
class InputSerialization:
compression_type: Union[Optional[CompressionTypes], str] = CompressionTypes.none
csv: Union[Optional[CSVInputSerialization], dict] = None
json: Union[Optional[JSONInputSerialization], dict] = None
parquet: dict = None
def __post_init__(self):
        # Exactly one input type must be set: CSV, JSON or PARQUET
param_list = [self.csv, self.json, self.parquet]
items_in_param_list = sum(x is not None for x in param_list)
if items_in_param_list != 1:
raise RuntimeError('InputSerializationError: Only one of the inputs must be selected: csv, json or parquet')
def as_dict(self) -> dict:
params = {}
if self.compression_type:
params['CompressionType'] = self.compression_type
if self.csv:
params['CSV'] = self.csv.as_dict() if isinstance(self.csv, CSVInputSerialization) else self.csv
if self.json:
params['JSON'] = self.json.as_dict() if isinstance(self.json, JSONInputSerialization) else self.json
if self.parquet is not None:
params['Parquet'] = {}
return params
@dataclass
class CSVOutputSerialization:
quote_fields: Optional[str] = 'ASNEEDED'
quote_escape_character: Optional[str] = None
record_delimiter: Optional[str] = None
field_delimiter: Optional[str] = None
quote_character: Optional[str] = None
def __post_init__(self):
# Validate the quote_fields
if self.quote_fields:
if self.quote_fields not in ['ALWAYS', 'ASNEEDED']:
raise RuntimeError('CSV Output Serialization field quote_fields must be ALWAYS | ASNEEDED')
def as_dict(self):
params = {}
if self.quote_fields:
params['QuoteFields'] = self.quote_fields
        if self.quote_escape_character:
            params['QuoteEscapeCharacter'] = self.quote_escape_character
if self.record_delimiter:
params['RecordDelimiter'] = self.record_delimiter
if self.field_delimiter:
params['FieldDelimiter'] = self.field_delimiter
if self.quote_character:
params['QuoteCharacter'] = self.quote_character
return params
@dataclass
class JSONOutputSerialization:
record_delimiter: Optional[str] = None
def as_dict(self):
params = {}
if self.record_delimiter:
params['RecordDelimiter'] = self.record_delimiter
return params
@dataclass()
class OutputSerialization:
csv: Union[Optional[CSVOutputSerialization], dict] = None
json: Union[Optional[JSONOutputSerialization], dict] = None
def __post_init__(self):
        # Exactly one output type must be set: CSV or JSON
param_list = [self.csv, self.json]
items_in_param_list = sum(x is not None for x in param_list)
if items_in_param_list != 1:
raise RuntimeError('OutputSerializationError: Only one of the inputs must be selected: csv or json')
def as_dict(self):
params = {}
if self.csv:
params['CSV'] = self.csv.as_dict() if isinstance(self.csv, CSVOutputSerialization) else self.csv
if self.json:
params['JSON'] = self.json.as_dict() if isinstance(self.json, JSONOutputSerialization) else self.json
return params | /s3select_plus-1.0.3.tar.gz/s3select_plus-1.0.3/select_plus/src/models/models.py | 0.875421 | 0.235207 | models.py | pypi |
## S3 select

Example query run on 10GB of GZIP compressed JSON data (>60GB uncompressed)
### Motivation
[Amazon S3 select](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference-select.html) is one of the coolest features AWS released in 2018. Its benefits are:
1) Very fast and low on network utilization as it allows you to return only a subset of the file contents from S3 using limited SQL select query. Since filtering of the data takes place on AWS machine where S3 file resides, network data transfer can be significantly limited depending on query issued.
2) It is lightweight on the client side because all filtering is done on the machine where the S3 data is located
3) It's [cheap](https://aws.amazon.com/s3/pricing/#Request_pricing_.28varies_by_region.29) at $0.002 per GB scanned and $0.0007 per GB returned<br>
For more details about S3 select see this [presentation](https://www.youtube.com/watch?v=uxcyoc6uaLM).<p>
Unfortunately, the S3 select API call is limited to a single file on S3 and its syntax is quite cumbersome, making it very impractical for daily usage. These and other flaws are what this s3select command intends to fix.
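For comparison, a single-object query through the raw boto3 API looks roughly like the sketch below (the bucket, key and query are hypothetical; note the event-stream response the caller has to unpack):
<pre>
import boto3

s3 = boto3.client("s3")
resp = s3.select_object_content(
    Bucket="my-bucket",                 # hypothetical
    Key="logs/events.json.gz",          # hypothetical
    ExpressionType="SQL",
    Expression="SELECT s.name FROM s3object[*] s",
    InputSerialization={"CompressionType": "GZIP", "JSON": {"Type": "LINES"}},
    OutputSerialization={"JSON": {}},
)
for event in resp["Payload"]:
    if "Records" in event:
        print(event["Records"]["Payload"].decode("utf-8"), end="")
</pre>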
### Features at a glance
Most important features:
1) Queries all files beneath given S3 prefix(es)
2) The whole process is multi-threaded and fast (a scan of 1.1TB of data stored in 20,000 files takes 5 minutes). Threads don't slow down the client much, as the heavy lifting is done on AWS.
3) The compression of the file is automatically inferred for you by picking GZIP or plain text depending on file extension.
4) Real-time execution progress display.
5) The exact cost of the query returned for each run.
6) Ability to only count records matching the filter in a fast and efficient manner.
7) You can easily limit the number of results returned while still keeping multi-threaded execution.
8) Failed requests are properly handled and repeated if they are retriable (e.g. throttled calls).
### Installation and Upgrade
s3select is developed in Python and uses [pip](http://www.pip-installer.org/en/latest/).<p>
The easiest way to install/upgrade s3select is to use `pip` in a `virtualenv`:
<pre>$ pip install -U s3select</pre>
or, if you are not installing in a `virtualenv`, to install/upgrade globally:
<pre>$ sudo pip install -U s3select</pre>
or for your user:
<pre>$ pip install --user -U s3select</pre>
### Authentication
s3select uses the same authentication and endpoint configuration as [aws-cli](https://github.com/aws/aws-cli#getting-started). If the aws command is working on your machine, there is no need for any additional configuration.
### Example usage
First get some help:
<pre>
$ s3select -h
usage: s3select [-h] [-w WHERE] [-d FIELD_DELIMITER] [-D RECORD_DELIMITER]
[-l LIMIT] [-v] [-c] [-H] [-o OUTPUT_FIELDS] [-t THREAD_COUNT]
[--profile PROFILE] [-M MAX_RETRIES]
prefixes [prefixes ...]
s3select makes s3 select querying API much easier and faster
positional arguments:
prefixes S3 prefix (or more) beneath which all files are
queried
optional arguments:
-h, --help show this help message and exit
-w WHERE, --where WHERE
WHERE part of the SQL query
-d FIELD_DELIMITER, --field_delimiter FIELD_DELIMITER
Field delimiter to be used for CSV files. If specified
CSV parsing will be used. By default we expect JSON
input
-D RECORD_DELIMITER, --record_delimiter RECORD_DELIMITER
Record delimiter to be used for CSV files. If
specified CSV parsing will be used. By default we
expect JSON input
-l LIMIT, --limit LIMIT
Maximum number of results to return
-v, --verbose Be more verbose
-c, --count Only count records without printing them to stdout
-H, --with_filename Output s3 path of a filename that contained the match
-o OUTPUT_FIELDS, --output_fields OUTPUT_FIELDS
What fields or columns to output
-t THREAD_COUNT, --thread_count THREAD_COUNT
How many threads to use when executing s3_select api
requests. Default of 150 seems to be on safe side. If
you increase this there is a chance you'll need also
to increase nr of open files on your OS
--profile PROFILE Use a specific AWS profile from your credential file.
-M MAX_RETRIES, --max_retries MAX_RETRIES
Maximum number of retries per queried S3 object in
case API request fails
</pre>
It's always useful to peek at the first few lines of the input files to figure out their contents:
<pre>
$ s3select -l 3 s3://testing.bucket/json_example/
{"name":"Gilbert","wins":[["straight","7♣"],["one pair","10♥"]]}
{"name":"Alexa","wins":[["two pair","4♠"],["two pair","9♠"]]}
{"name":"May","wins":[]}</pre>
It's JSON. Great - that's s3select's default format. Let's get a subset of its data:
<pre>
$ s3select -l 3 -w "s.name LIKE '%Gil%'" -o "s.wins" s3://testing.bucket/json_example
{"wins":[["straight","7♣"],["one pair","10♥"]]}
</pre>
What if the input is not in JSON:
<pre>
$ s3select -l 3 s3://testing.bucket/csv_example
Exception caught when querying csv_example/example.csv: An error occurred (JSONParsingError) when calling the SelectObjectContent operation: Error parsing JSON file. Please check the file and try again.
</pre>
The exception means the input isn't parsable JSON. Let's switch to a CSV file delimited with `,`; you can specify any other delimiter character, and an often-used one is `TAB`, specified with `\\t`:
<pre>
$ s3select -l 3 -d , s3://testing.bucket/csv_example
Gilbert,straight,7♣,one pair,10♥
Alexa,two pair,4♠,two pair,9♠
May,,,,
</pre>
Since utilising the first line of CSV as a header isn't yet supported we'll select a subset of data using column enumeration:
<pre>
$ s3select -l 3 -d , -w "s._1 LIKE '%i%'" -o "s._2" s3://testing.bucket/csv_example
straight
three of a kind
</pre>
If you are interested in pricing for your requests, add `-v` to increase verbosity which will include pricing information at the end:
<pre>
$ s3select -v -c s3://testing.bucket/10G_sample
Files processed: 77/77 Records matched: 5696395 Bytes scanned: 21 GB
Cost for data scanned: $0.02
Cost for data returned: $0.00
Cost for SELECT requests: $0.00
Total cost: $0.02
</pre>
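For intuition, totals like these can be approximated from the per-GB rates quoted at the top of this README (a rough sketch; actual AWS pricing varies by region and changes over time):
<pre>
def estimate_select_cost(gb_scanned, gb_returned):
    """Rough S3 Select cost estimate using the rates quoted earlier."""
    return gb_scanned * 0.002 + gb_returned * 0.0007

print("$%.4f" % estimate_select_cost(10.0, 1.0))  # $0.0207
</pre>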
### License
Distributed under the MIT license. See `LICENSE` for more information.
| /s3select-0.0.14.tar.gz/s3select-0.0.14/README.md | 0.797793 | 0.689489 | README.md | pypi |
from botocore.client import BaseClient
def multipart_upload_to_s3(local_file_path: str, s3: BaseClient, bucket_name: str, s3_prefix: str):
"""
Uploads a local file to S3 using multipart upload.
Args:
local_file_path (str): The path to the local file to be uploaded.
s3 (BaseClient): An instance of the boto3 S3 client or resource.
        bucket_name (str): The name of the S3 bucket.
s3_prefix (str): The prefix to use for S3 object keys.
"""
# Initialize multipart upload
response = s3.create_multipart_upload(Bucket=bucket_name, Key=s3_prefix)
upload_id = response['UploadId']
# Calculate part size
part_size = 5 * 1024 * 1024 # 5 MB
# Prepare parts
with open(local_file_path, 'rb') as file:
parts = []
part_number = 1
while True:
data = file.read(part_size)
if not data:
break
part_response = s3.upload_part(
Bucket=bucket_name,
Key=s3_prefix,
PartNumber=part_number,
UploadId=upload_id,
Body=data
)
parts.append({'PartNumber': part_number, 'ETag': part_response['ETag']})
part_number += 1
# Complete multipart upload
s3.complete_multipart_upload(
Bucket=bucket_name,
Key=s3_prefix,
UploadId=upload_id,
MultipartUpload={'Parts': parts}
)
def multipart_download_from_s3(local_file_path: str, s3: BaseClient, bucket_name: str, s3_prefix: str, total_size: int) -> None:
"""
Downloads an object from S3 using multipart download.
Args:
local_file_path (str): The desired path for the downloaded file.
s3 (BaseClient): An instance of the boto3 S3 client or resource.
bucket_name (str): The name of the S3 bucket.
        s3_prefix (str): The key of the object to be downloaded.
        total_size (int): The total size of the object in bytes.
"""
if isinstance(s3, BaseClient):
s3_client = s3
else:
raise ValueError("s3 must be an instance of boto3 S3 client or resource")
    # total_size is passed in, but could also be derived via head_object:
    # response = s3_client.head_object(Bucket=bucket_name, Key=s3_prefix)
    # total_size = response['ContentLength']
# Calculate part size
part_size = 5 * 1024 * 1024 # 5 MB
# Download parts
with open(local_file_path, 'wb') as file:
        # Ceiling division so a trailing partial part is not skipped
        num_parts = (total_size + part_size - 1) // part_size
        for part_number in range(1, num_parts + 1):
start_byte = (part_number - 1) * part_size
end_byte = min(part_number * part_size, total_size) - 1
response = s3_client.get_object(
Bucket=bucket_name,
Key=s3_prefix,
Range=f"bytes={start_byte}-{end_byte}"
)
file.write(response['Body'].read()) | /s3sync_util-2.3.2-py3-none-any.whl/s3sync_util/commands/multipart.py | 0.782663 | 0.156201 | multipart.py | pypi |
import os
import boto3
from botocore.exceptions import BotoCoreError, NoCredentialsError
def get_total_upload_size(directory:str, exclude_list:list) -> int:
"""Calculate the total size of files in a directory for upload, excluding specified files.
Args:
directory (str): The directory to calculate the upload size for.
exclude_list (list): List of items to exclude from the upload size calculation.
Returns:
int: Total size of files in bytes.
"""
total_size: int = 0
for root, dirs, files in os.walk(directory):
dirs[:] = [d for d in dirs if d not in exclude_list]
for file in files:
if file not in exclude_list:
file_path = os.path.join(root, file)
total_size += os.path.getsize(file_path)
return total_size
def get_total_download_size(s3_bucket:str, s3_prefix:str, exclude_list:list) -> int:
    """Calculate the total size of objects to be downloaded from an S3 bucket and prefix.

    Args:
        s3_bucket (str): The name of the S3 bucket.
        s3_prefix (str): The prefix to filter S3 objects.
        exclude_list (list): List of items to exclude.

    Returns:
        int: Total size of objects in bytes.
    """
    total_size: int = 0
    try:
        s3 = boto3.client('s3')
        # Paginate so prefixes with more than 1000 objects are fully counted
        paginator = s3.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=s3_bucket, Prefix=s3_prefix):
            for obj in page.get('Contents', []):
                s3_key = obj['Key']
                if not any(item in s3_key for item in exclude_list):
                    total_size += obj['Size']
    except (BotoCoreError, NoCredentialsError) as e:
        print(f"Error occurred: {e}")
    return total_size
def format_size(size_in_bytes:int) -> str:
if size_in_bytes < 1024:
return f"{size_in_bytes} bytes"
elif size_in_bytes < 1024 * 1024:
return f"{size_in_bytes / 1024:.2f} KB"
elif size_in_bytes < 1024 * 1024 * 1024:
return f"{size_in_bytes / (1024 * 1024):.2f} MB"
else:
return f"{size_in_bytes / (1024 * 1024 * 1024):.2f} GB" | /s3sync_util-2.3.2-py3-none-any.whl/s3sync_util/commands/size.py | 0.668772 | 0.237598 | size.py | pypi |
# S3Synchrony
_Created by Sevan Brodjian for Ameren at the Innovation Center @ UIUC_
This package provides a service for synchronizing file creations, deletions, and modifications across users on an AWS S3 prefix. Support also exists for easily expanding to other database systems.
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install s3synchrony.
```bash
pip install s3synchrony
```
## Requirements
S3Synchrony relies on Python 3 and the following packages to operate:
- hashlib
- datetime
- pandas
- boto3
- botocore
- pyperclip
## Usage
S3Synchrony comes with three primary functions, which can be called as follows:
```python
import s3synchrony as s3s
# returns a list of data platforms currently supported
s3s.get_supported_platforms()
# prompts user to synchronize all detected changes in the local and remote repositories
s3s.smart_sync(platform="S3", aws_bkt="bucket_name", aws_prfx="prfx_path")
# prompts user to remove all synchronization support on the local and remote repositories
s3s.reset_all(platform="S3", aws_bkt="bucket_name", aws_prfx="prfx_path")
```
## The Data Folder
When using S3Synchrony, you are synchronizing all of the data stored in a local directory with the data stored in an S3 directory. The S3 directory is referenced through an AWS bucket, an AWS prefix, and the necessary credentials to access said prefix. The local directory to be used can be a relative or full path, and by default will be a subdirectory named "Data" stored in the same working directory.
- Project Folder
- Data
- code, etc.
## smart_sync
```python
def smart_sync(platform="S3", **kwargs):
"""Perform all necessary steps to synchronize a local repository with a remote repo.
Notes:
Keyword arguments are dependent on platform selection.
"""
if(platform in _supported_platforms):
connection = _supported_platforms[platform](**kwargs)
else:
connection = baseconn.DataPlatformConnection(**kwargs)
connection.intro_message()
connection.establish_connection()
connection.synchronize()
connection.close_message()
```
The smart_sync function is the premier work of this package, and will perform all of the data synchronization for you. This function will check the passed platform name, and reference a self-contained list of supported platforms to instantiate the proper class. This list of supported platforms can be accessed via a call to get_supported_platforms().
Each connection type will require a different set of keyword arguments. For S3, the minimum arguments are "aws_bkt" and "aws_prfx". Please check the class docstrings for each connection type for more information.
All platform classes should be children of the DataPlatformConnection class, which is an interface with all necessary public functions. For S3, a folder named .S3 will be created within your data folder. This .S3 folder will contain CSVs used for monitoring data changes and text files for storing small bits of information.
- **versions.csv:** Contains the state of data stored on s3
- **versionsLocal.csv:** Contains the state of data stored locally
- **deletedS3.csv:** Contains all files deleted from S3
- **deletedLocal.csv:** Contains all files deleted locally
- **ignores3.txt:** Contains a list of file paths to be ignored entirely
- **user_name.txt:** Contains the name attached to your file modifications
- **aws.txt:** Contains credentials used to access the AWS prefix
Using these CSVs, S3Synchrony can determine what files you have newly created, deleted, and modified. It will then prompt you to upload these changes to S3. Once you have done so, it will upload new CSVs as needed. After downloading these new CSVs, your collaborative peers will be prompted to download your own changes as well as upload their own.
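The comparison itself boils down to hashing local files and diffing against the recorded state. A minimal sketch of the idea (a hypothetical helper, not s3synchrony's actual implementation; it assumes the versions CSV has been loaded into a dict mapping relative paths to MD5 hashes):
```python
import hashlib
import os

def classify_changes(data_dir, known):
    """known: {relative_path: md5_hex} loaded from versionsLocal.csv."""
    created, modified, present = [], [], set()
    for root, _, files in os.walk(data_dir):
        for name in files:
            path = os.path.join(root, name)
            rel = os.path.relpath(path, data_dir)
            with open(path, "rb") as f:
                digest = hashlib.md5(f.read()).hexdigest()
            present.add(rel)
            if rel not in known:
                created.append(rel)
            elif known[rel] != digest:
                modified.append(rel)
    deleted = [rel for rel in known if rel not in present]
    return created, modified, deleted
```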
In addition, a tmp folder will be utilised within the .S3 folder. This tmp folder contains downloaded files from S3 that are used to compute certain CSVs.
## Deletions
When deleting files, the user will be prompted to confirm their deletions. Files that are deleted locally will simply be removed. Files deleted from S3, however, will simply be moved into a "deleted" subfolder of the .S3 folder on S3.
## Logs
When there are any issues with a file being uploaded or downloaded, an error message will be printed and that file will be skipped. A log will then be created and saved locally inside of the "logs" subfolder of the local .S3 folder.
## reset_all
```python
def reset_all(platform="S3", **kwargs):
"""Reset local and remote directories to original state.
Notes:
Keyword arguments are dependent on platform selection.
"""
if(platform in _supported_platforms):
connection = _supported_platforms[platform](**kwargs)
else:
connection = baseconn.DataPlatformConnection(**kwargs)
connection.intro_message()
connection.establish_connection()
if connection.reset_confirm():
connection.reset_local()
connection.reset_remote()
connection.close_message()
```
Resetting all S3Synchrony services is as simple as deleting the .S3 folders contained locally and on S3. Once these are deleted, synchronization cannot occur until they are recreated, which can be done by simply making a new call to S3Synchrony.
Before resetting, however, a call to reset_confirm **must** occur. The user will then be prompted to confirm that they would like their .S3 folders removed.
## License
[GNU GPLv3](https://www.gnu.org/licenses/)
| /s3synchrony-0.1.2.tar.gz/s3synchrony-0.1.2/README.md | 0.603348 | 0.803251 | README.md | pypi |
from s3transfer.tasks import Task
from s3transfer.tasks import SubmissionTask
class DeleteSubmissionTask(SubmissionTask):
"""Task for submitting tasks to execute an object deletion."""
def _submit(self, client, request_executor, transfer_future, **kwargs):
"""
:param client: The client associated with the transfer manager
:type config: s3transfer.manager.TransferConfig
:param config: The transfer config associated with the transfer
manager
:type osutil: s3transfer.utils.OSUtil
:param osutil: The os utility associated to the transfer manager
:type request_executor: s3transfer.futures.BoundedExecutor
:param request_executor: The request executor associated with the
transfer manager
:type transfer_future: s3transfer.futures.TransferFuture
:param transfer_future: The transfer future associated with the
transfer request that tasks are being submitted for
"""
call_args = transfer_future.meta.call_args
self._transfer_coordinator.submit(
request_executor,
DeleteObjectTask(
transfer_coordinator=self._transfer_coordinator,
main_kwargs={
'client': client,
'bucket': call_args.bucket,
'key': call_args.key,
'extra_args': call_args.extra_args,
},
is_final=True
)
)
class DeleteObjectTask(Task):
def _main(self, client, bucket, key, extra_args):
"""
:param client: The S3 client to use when calling DeleteObject
:type bucket: str
:param bucket: The name of the bucket.
:type key: str
:param key: The name of the object to delete.
:type extra_args: dict
:param extra_args: Extra arguments to pass to the DeleteObject call.
"""
client.delete_object(Bucket=bucket, Key=key, **extra_args) | /s3transfer-meiqia-0.1.10.tar.gz/s3transfer-meiqia-0.1.10/s3transfer/delete.py | 0.76074 | 0.207175 | delete.py | pypi |
from botocore.compat import six
from s3transfer.compat import accepts_kwargs
from s3transfer.exceptions import InvalidSubscriberMethodError
class BaseSubscriber(object):
"""The base subscriber class
It is recommended that all subscriber implementations subclass and then
    override the subscription methods (i.e. on_{subscribe_type}() methods).
"""
VALID_SUBSCRIBER_TYPES = [
'queued',
'progress',
'done'
]
def __new__(cls, *args, **kwargs):
cls._validate_subscriber_methods()
return super(BaseSubscriber, cls).__new__(cls)
@classmethod
def _validate_subscriber_methods(cls):
for subscriber_type in cls.VALID_SUBSCRIBER_TYPES:
subscriber_method = getattr(cls, 'on_' + subscriber_type)
if not six.callable(subscriber_method):
raise InvalidSubscriberMethodError(
'Subscriber method %s must be callable.' %
subscriber_method)
if not accepts_kwargs(subscriber_method):
raise InvalidSubscriberMethodError(
'Subscriber method %s must accept keyword '
'arguments (**kwargs)' % subscriber_method)
def on_queued(self, future, **kwargs):
"""Callback to be invoked when transfer request gets queued
This callback can be useful for:
* Keeping track of how many transfers have been requested
* Providing the expected transfer size through
future.meta.provide_transfer_size() so a HeadObject would not
need to be made for copies and downloads.
:type future: s3transfer.futures.TransferFuture
:param future: The TransferFuture representing the requested transfer.
"""
pass
def on_progress(self, future, bytes_transferred, **kwargs):
"""Callback to be invoked when progress is made on transfer
This callback can be useful for:
* Recording and displaying progress
:type future: s3transfer.futures.TransferFuture
:param future: The TransferFuture representing the requested transfer.
:type bytes_transferred: int
:param bytes_transferred: The number of bytes transferred for that
invocation of the callback. Note that a negative amount can be
provided, which usually indicates that an in-progress request
needed to be retried and thus progress was rewound.
"""
pass
def on_done(self, future, **kwargs):
"""Callback to be invoked once a transfer is done
This callback can be useful for:
* Recording and displaying whether the transfer succeeded or
failed using future.result()
* Running some task after the transfer completed like changing
the last modified time of a downloaded file.
:type future: s3transfer.futures.TransferFuture
:param future: The TransferFuture representing the requested transfer.
"""
pass | /s3transfer-meiqia-0.1.10.tar.gz/s3transfer-meiqia-0.1.10/s3transfer/subscribers.py | 0.798226 | 0.19475 | subscribers.py | pypi |
# s3tup
Python package that offers configuration management and deployment for Amazon S3 through simple declarative yaml files.
## Why?
Because writing custom scripts for configuring and deploying to S3 through boto was a major pain. Though tools like s3sync exist, they lack robust options for configuration and you often still need some customization or outside scripting to get them to do exactly what you want.
With s3tup configuration is straightforward. It uses etags to only upload and delete the files that need to be changed, just like many other tools around, but also supports syncing configurations to files that you've already uploaded, making your configurations truly declarative.
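The etag comparison works because, for objects uploaded in a single PUT, S3's ETag is simply the hex MD5 of the object body, so deciding whether a file needs uploading requires no download at all. A minimal sketch of the idea (assumes non-multipart uploads; multipart ETags follow a different scheme):
```python
import hashlib

def needs_upload(local_path, remote_etag):
    """Compare a local file's MD5 against its S3 ETag (quotes stripped)."""
    with open(local_path, "rb") as f:
        local_md5 = hashlib.md5(f.read()).hexdigest()
    return local_md5 != remote_etag.strip('"')
```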
## Installation
Install via pip:
$ pip install s3tup
Install from source:
$ git clone git://github.com/HeyImAlex/s3tup.git
$ cd s3tup
$ python setup.py
## Usage
S3tup can be used as a command line tool or a python library. Just write out a config file (the following sets up a simple website):
```yaml
# config.yml
---
- bucket: example-bucket
rsync: /path/to/your/website
key_config:
- canned_acl: public-read
reduced_redundancy: true
- patterns: ['static/*']
cache_control: 'max-age=32850000'
website: |
<WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
<IndexDocument>
<Suffix>index.html</Suffix>
</IndexDocument>
<ErrorDocument>
<Key>404</Key>
</ErrorDocument>
</WebsiteConfiguration>
```
Set your AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env vars and then run:
$ s3tup config.yml
Easy as that. The configuration file can be as simple or robust as you need, and there are a couple examples in the repo to help you out.
With the `--rsync` option, your deployments will only change what needs to be changed, and with `--dryrun` you can preview your changes before you actually commit to making them.
Alternatively you can use s3tup as a library within python.
```python
from s3tup.connection import Connection
from s3tup.bucket import Bucket
conn = Connection()
b = Bucket(conn, 'test-bucket')
b.canned_acl = 'public-read'
b.sync()
```
Documentation here is lacking at the moment, but I'm working on it (and the source is a short read).
## Config File
The s3tup configuration file is plain yaml. The base is a list of bucket configurations which are defined below. An example configuration is available [here](https://github.com/HeyImAlex/s3tup/blob/master/examples/complete.yml) to help you and I'll try and keep it as up to date as possible. Because s3tup is just a thin wrapper over the S3 REST api, the best way to understand what all of these options actually do is to consult the [online documentation for S3](http://docs.aws.amazon.com/AmazonS3/latest/API/APIRest.html).
**Note**: Setting an option to `None` and not setting it at all are not the same thing. For many fields `None` will assert that the configuration option is not set at all.
#### Bucket Configuration
The bucket configuration is a dict that contains, predictably, the configuration options for the bucket named by the required field `bucket`. All other fields are optional.
field | default | description
:---- | :------ | :----------
bucket | required | The target bucket name.
region | '' | The region that the bucket is in. Valid values: EU, eu-west-1, us-west-1, us-west-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, sa-east-1, empty string (for the US Classic Region). Note that a bucket's region cannot change; s3tup will raise an exception if the bucket already exists and the regions don't match.
canned_acl | | The [canned acl](http://docs.aws.amazon.com/AmazonS3/latest/dev/ACLOverview.html#CannedACL) of the bucket. Valid values: private, public-read, public-read-write, authenticated-read, bucket-owner-read, bucket-owner-full-control.
website | | The website configuration of the bucket. Valid values: Either a string xml website configuration (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html) page) or `None`, which will delete the website configuration for this bucket altogether.
acl | | The acl set on this bucket. Valid values: Either a string xml acl (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTacl.html) page) or `None`, which will set the default acl on the bucket.
cors | | The cors configuration of the bucket. Valid values: Either a string xml cors configuration (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTcors.html) page) or `None`, which will delete the cors configuration for this bucket altogether.
lifecycle | | The lifecycle configuration of the bucket. Valid values: Either a string xml lifecycle configuration (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) page) or `None`, which will delete the lifecycle configuration for this bucket altogether.
logging | | The logging configuration of the bucket. Valid values: Either a string xml logging configuration (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) page) or `None`, which will delete the logging configuration for this bucket altogether.
notification | | The notification configuration of the bucket. Valid values: Either a string xml notification configuration (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTnotification.html) page) or `None`, which will delete the notification configuration for this bucket altogether.
policy | | The policy set on this bucket. Valid values: Either a string json policy (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/dev/AccessPolicyLanguage_HowToWritePolicies.html) page) or `None`, which will delete the policy from this bucket altogether.
tagging | | The tagging configuration of the bucket. Valid values: Either a string xml tagging configuration (detailed on [this](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTtagging.html) page) or `None`, which will delete all tags from this bucket.
versioning | | Boolean value that says whether to enable or suspend versioning. Note: Once versioning is enabled on a bucket it cannot be disabled, only suspended! Any bucket that has ever had versioning enabled cannot have a lifecycle configuration set!
key_config | | Takes a list of key configuration dicts and applies them to all of the applicable keys in the bucket. See section Key Configuration for details.
rsync | | Takes either an rsync configuration dict or a list of them and "rsyncs" a folder with the bucket. See section Rsync Configuration for details.
redirects | [ ] | Takes a list of [key, redirect location] pairs and will create a zero byte object at `key` that redirects to whatever redirect location you specify.
#### Key Configuration
The key configuration field allows you to define key configurations that apply to all keys matched by your matcher fields. These configurations are applied in the order that they appear, and conflicting fields will be overwritten by whichever configuration was applied last. The bucket configuration takes a list of key configurations, so you can have as many as you like. Keep in mind that many of these options are not idempotent; if you already have configuration set on an S3 key, s3tup will overwrite it when it syncs.
field | default | description
:---- | :------ | :----------
matcher fields | | See section Matcher Fields below.
reduced_redundancy | False | Boolean option to use reduced redundancy storage.
encrypted | False | Boolean option to use server side encryption.
canned_acl | | The [canned acl](http://docs.aws.amazon.com/AmazonS3/latest/dev/ACLOverview.html#CannedACL) for the key.
acl | | String xml acl policy for this key.
cache_control | None | String value of the cache-control header.
content_disposition | None | String value of the content-disposition header.
content_encoding | None | String value of the content-encoding header. S3tup will not guess content encoding.
content_language | None | String value of the content-language header.
content_type | None | String value of the content-type header. If not explicitly set, s3tup will make a best guess based on the extension.
expires | None | String value of the expires header.
metadata | { } | Dict of metadata headers to set on the key.
#### Rsync Configuration
The rsync field allows you to "rsync" a local folder with an S3 bucket. All keys that are uploaded are configured by any present key configurations. Remember that the rsync configuration definition contains the matcher fields, and any *local paths* (relative to the synced directory) that are not matched will not be rsynced. This is helpful for ignoring certain files or folders during rsync (and basically emulates the include/exclude/rinclude/rexclude options of s3cmd's sync). The matching process is run on the local pathname relative to src.
field | default | description
:---- | :------ | :----------
matcher fields | | See section Matcher Fields below.
src | required | Relative or absolute path to folder to rsync. Trailing slash is not important.
dest | '' | Optional, allows you to rsync with a specific folder on S3.
delete | False | Option to delete keys present in the bucket that are not present locally. Other rsyncs and redirects will override this if there are conflicts.
#### Matcher Fields
Both the key and rsync configuration definitions contain these optional fields to constrain which keys they act upon. These are intended to function as intuitively as possible, but in the name of explicitness:
If none of these fields are present, all keys are matched. If neither `patterns` nor `regexes` is present, all keys are matched except those matched by `ignore_patterns` or `ignore_regexes`. If either `patterns` or `regexes` is present, only keys that match `patterns` or `regexes`, and are not matched by `ignore_patterns` or `ignore_regexes`, are matched. A Python sketch of this logic follows the table below.
Remember to always pass a list in!
field | default | description
:---- | :------ | :----------
patterns | None | List of Unix-style patterns to include
ignore_patterns | None | List of Unix-style patterns to exclude
regexes | None | List of regex patterns to include
ignore_regexes | None | List of regex patterns to exclude
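To make these rules concrete, here is a rough Python sketch of the matching decision (a hypothetical helper for illustration, not s3tup's actual implementation):
```python
import fnmatch
import re

def is_matched(key, patterns=None, regexes=None,
               ignore_patterns=None, ignore_regexes=None):
    # Exclusions always win.
    if any(fnmatch.fnmatch(key, p) for p in (ignore_patterns or [])):
        return False
    if any(re.search(r, key) for r in (ignore_regexes or [])):
        return False
    # No include filters at all: every remaining key matches.
    if patterns is None and regexes is None:
        return True
    # Otherwise the key must hit at least one include pattern or regex.
    return (any(fnmatch.fnmatch(key, p) for p in (patterns or []))
            or any(re.search(r, key) for r in (regexes or [])))
```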
## Cli
positional arguments:
* **config** - relative or absolute path to the config file
optional arguments:
- **-h, --help** - show this help message and exit
- **--dryrun** - show what will happen when s3tup runs without actually running s3tup
- **--rsync** - only upload and delete modified and removed keys. no key syncing, no redirecting, no bucket configuring.
- **-c** <concurrency> - the number of concurrent requests you'd like to make. anything below one runs linearly. defaults to 5.
- **-v, --verbose** - increase output verbosity
- **-q, --quiet** - silence all output
- **--access_key_id** <access_key_id> - your aws access key id
- **--secret_access_key** <secret_access_key> - your aws secret access key
## TODO
This project is in early development and still has plenty of work before I can confidently say that it's production ready. However, it's slowly getting there.
* Need to gracefully handle sync of objects > 5GB
* Larger test suite
* Implement requester pays
* Implement mfa delete
| /s3tup-0.1.0.tar.gz/s3tup-0.1.0/README.md | 0.461017 | 0.718792 | README.md | pypi |
import sys
import time
import threading
import contextlib
import Queue
from multiprocessing import pool
try:
import cStringIO
StringIO = cStringIO
except ImportError:
import StringIO
def data_collector(iterable, def_buf_size=5242880):
''' Buffers n bytes of data
Args:
iterable: could be a list, generator or string
def_buf_size: number of bytes to buffer, default is 5mb
Returns:
A generator object
'''
buf = ''
for data in iterable:
buf += data
if len(buf) >= def_buf_size:
output = buf[:def_buf_size]
buf = buf[def_buf_size:]
yield output
if len(buf) > 0:
yield buf
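# Illustrative check (added sketch, not part of the original module):
# data_collector re-chunks an iterable of byte strings into parts of at
# least def_buf_size bytes, as the S3 multipart API requires for every
# part but the last. Sanity check with a tiny buffer size:
def _demo_data_collector():
    parts = list(data_collector(['ab', 'cd', 'e'], def_buf_size=3))
    assert parts == ['abc', 'de']  # full-size parts first, remainder last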
def upload_part(upload_func, progress_cb, part_no, part_data):
num_retries = 5
def _upload_part(retries_left=num_retries):
try:
with contextlib.closing(StringIO.StringIO(part_data)) as f:
f.seek(0)
# Only build a progress callback when one was supplied; otherwise pass
# None through so the upload skips the callback machinery entirely.
cb = (lambda c, t: progress_cb(part_no, c, t)) if progress_cb else None
upload_func(f, part_no, cb=cb, num_cb=100)
except Exception, exc:
retries_left -= 1
if retries_left > 0:
return _upload_part(retries_left=retries_left)
else:
return threading.ThreadError(repr(threading.current_thread()) + ' ' + repr(exc))
return _upload_part()
def upload(bucket, aws_access_key, aws_secret_key,
iterable, key, progress_cb=None,
threads=5, replace=False, secure=True,
connection=None):
''' Upload data to s3 using the s3 multipart upload API.
Args:
bucket: name of s3 bucket
aws_access_key: aws access key
aws_secret_key: aws secret key
iterable: The data to upload. Each 'part' in the list
will be uploaded in parallel. Each part must be at
least 5242880 bytes (5mb).
key: the name of the key to create in the s3 bucket
progress_cb: will be called with (part_no, uploaded, total)
each time a progress update is available.
threads: the number of threads to use while uploading.
replace: will replace the key in s3 if set to true. (Default is false)
secure: use ssl when talking to s3. (Default is true)
connection: used for testing
'''
if not connection:
from boto.s3 import connection
c = connection.S3Connection(aws_access_key, aws_secret_key, is_secure=secure)
b = c.get_bucket(bucket)
if not replace and b.lookup(key):
raise Exception('s3 key ' + key + ' already exists')
multipart_obj = b.initiate_multipart_upload(key)
err_queue = Queue.Queue()
lock = threading.Lock()
upload.counter = 0
try:
tpool = pool.ThreadPool(processes=threads)
def check_errors():
try:
exc = err_queue.get(block=False)
except Queue.Empty:
pass
else:
raise exc
def waiter():
while upload.counter >= threads:
check_errors()
time.sleep(0.1)
def cb(err):
if err: err_queue.put(err)
with lock: upload.counter -= 1
args = [multipart_obj.upload_part_from_file, progress_cb]
for part_no, part in enumerate(iterable, 1):
tpool.apply_async(upload_part, args + [part_no, part], callback=cb)
with lock: upload.counter += 1
waiter()
tpool.close()
tpool.join()
# Check for thread errors before completing the upload,
# sometimes an error can be left unchecked until we
# get to this point.
check_errors()
multipart_obj.complete_upload()
except:
multipart_obj.cancel_upload()
tpool.terminate()
raise
def cli():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-b', '--bucket', dest='bucket',
help='the s3 bucket to upload to')
parser.add_option('-k', '--key', dest='key',
help='the name of the key to create in the bucket')
parser.add_option('-K', '--aws_key', dest='aws_key',
help='aws access key')
parser.add_option('-s', '--aws_secret', dest='aws_secret',
help='aws secret key')
parser.add_option('-d', '--data', dest='data',
help='the data to upload to s3 -- if left blank will be read from STDIN')
parser.add_option('-t', '--threads', dest='threads', default=5, type='int',
help='number of threads to use while uploading in parallel')
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
if not options.bucket:
parser.error('bucket not provided')
if not options.key:
parser.error('key not provided')
if not options.aws_key:
parser.error('aws access key not provided')
if not options.aws_secret:
parser.error('aws secret key not provided')
data = sys.stdin if not options.data else [options.data]
def cb(part_no, uploaded, total):
print part_no, uploaded, total
upload(options.bucket, options.aws_key, options.aws_secret, data_collector(data), options.key,
progress_cb=cb, replace=True, threads=options.threads)
if __name__ == '__main__':
cli() | /s3upload-0.2.4.tar.gz/s3upload-0.2.4/s3upload.py | 0.402744 | 0.208682 | s3upload.py | pypi |
import logging
import sys
import boto3
import argparse
import pathlib
import os
import dotenv
import fnmatch
# Define the logger
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def separate_arguments(string: str) -> list[str]:
"""
Converts a comma-separated string into a list of strings.
Args:
string (str): A comma-separated string.
Returns:
list[str]: A list of strings obtained by splitting the input string by commas.
"""
if ',' in string:
return [s.strip() for s in string.split(',') if s.strip()]
else:
return [string]
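# Illustrative check (added sketch, not part of the original module):
# comma-separated input becomes a trimmed list, and a bare value is
# wrapped in a single-element list.
def _demo_separate_arguments():
    assert separate_arguments('a, b,,c') == ['a', 'b', 'c']
    assert separate_arguments('single') == ['single']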
def parse_args(sys_args):
"""
Parses command-line arguments for the script.
Args:
sys_args (list[str]): Command-line arguments passed to the script.
Returns:
argparse.Namespace: An object containing the parsed command-line arguments.
"""
parser = argparse.ArgumentParser(description='Upload files to an S3 bucket.')
parser.add_argument('--bucket_name', required=True, help='the name of the S3 bucket')
parser.add_argument('--region', default='eu-west-1', help='region')
parser.add_argument('--upload_prefix', default='', type=str, help='prefix which will be used for uploading to S3 bucket')
parser.add_argument('--upload_prefix_config_file', default='', type=str, help='config file to load the upload prefix from, e.g. output_path.txt (default: none)')
parser.add_argument('--source_dir', type=str, default='.', help='the relative path of the directory containing the files for upload (default: current directory)')
parser.add_argument('--include', default=['*'], type=separate_arguments, help='comma-separated file patterns to include in the upload (default: *)')
parser.add_argument('--exclude', default=[], type=separate_arguments, help='comma-separated file patterns to exclude from the upload (default: none)')
args = parser.parse_args()
return args
def upload_file(s3, bucket_name: str, file_path: str, key: str) -> None:
"""
Uploads a file to an AWS S3 bucket using the regular (single-request) upload method.
Args:
s3: A boto3 S3 client.
bucket_name (str): The name of the S3 bucket to upload the file to.
file_path (str): The local file path of the file to upload.
key (str): The S3 object key to use for the uploaded file.
"""
s3.upload_file(str(file_path), bucket_name, key)
def upload_files_to_s3(bucket_name : str, region: str, files: list[pathlib.Path], upload_prefix : str, source_path: pathlib.Path) ->None:
"""
Uploads each file in the given list to an AWS S3 bucket.
Args:
bucket_name (str): The name of the S3 bucket to upload the files to.
files (list[pathlib.Path]): A list of file paths to upload.
upload_prefix (str): The S3 object key prefix to use for the uploaded files.
source_path (pathlib.Path): The path of the directory containing the files for upload.
"""
dotenv.load_dotenv()
ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY_ID')
SECRET_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
for file_path in files:
key = (pathlib.Path(upload_prefix).joinpath(file_path.relative_to(source_path))).as_posix()
logging.info(f'Uploading {file_path} to S3 bucket {bucket_name} with key {key}')
upload_file(s3, bucket_name, file_path, key)
logging.info(f'Uploading finished to: https://s3.console.aws.amazon.com/s3/buckets/{bucket_name}?region={region}&prefix={key}')
def construct_source_path_for_upload (source_dir : str) -> pathlib.Path:
"""
Constructs the absolute path for the source directory of files to be uploaded.
Args:
source_dir (str): The relative path of the directory containing the files for upload.
Returns:
pathlib.Path: The absolute path of the directory containing the files for upload.
"""
return pathlib.Path.cwd().joinpath(source_dir)
def construct_upload_prefix (upload_prefix: str, output_path_config : pathlib.Path) -> str:
"""
Constructs the final upload prefix for the files in the AWS S3 bucket.
Args:
upload_prefix (str): A string representing the upload prefix.
output_path_config (pathlib.Path): A pathlib.Path object representing the path to the output_path config file.
Returns:
str: The final upload prefix for the files in the AWS S3 bucket.
"""
# we can expect that we will find output path in config file called output_path.txt
output_path_config = pathlib.Path.cwd().joinpath(output_path_config)
if upload_prefix:
return upload_prefix
elif output_path_config.is_file():
logging.info(f'Loading upload prefix from config file: {output_path_config}')
return output_path_config.read_text(encoding="utf-8")
else:
return ''
def is_excluded(file_path: pathlib.Path, exclude_patterns: list[str]) -> bool:
"""
Determines whether a given file path should be excluded from the upload based on a list of exclude patterns.
Args:
file_path (pathlib.Path): The file path to check for exclusion.
exclude_patterns (list[str]): A list of file patterns to exclude from the upload.
Returns:
bool: True if the file should be excluded, False otherwise.
"""
# Iterate over each exclusion pattern
for pattern in exclude_patterns:
# Check if the file path matches the exclusion pattern
if fnmatch.fnmatch(file_path, pattern):
return True
# Check if the relative path (from the file path's parent) matches the exclusion pattern
# This helps handle exclusion patterns with subdirectories correctly
elif fnmatch.fnmatch(file_path.relative_to(file_path.parent), pattern):
return True
# Check if the file path matches the exclusion pattern with ** wildcard for any number of subdirectories
elif fnmatch.fnmatch(file_path, f"**/{pattern}"):
return True
# If none of the exclusion patterns match, return False (the file is not excluded)
return False
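# Illustrative check (added sketch, not part of the original module):
# '*.log' excludes a nested log file via the fnmatch checks above,
# while a source file passes through.
def _demo_is_excluded():
    assert is_excluded(pathlib.Path('build/app.log'), ['*.log'])
    assert not is_excluded(pathlib.Path('src/app.py'), ['*.log'])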
def get_files_matching_pattern(source_path: pathlib.Path, pattern: str, exclude_patterns: list[str]) -> set[pathlib.Path]:
"""
Retrieves a set of files in the source directory that match a given pattern.
Args:
source_path (pathlib.Path): The path of the directory containing the files for upload.
pattern (str): The file pattern to match.
exclude_patterns (list[str]): A list of file patterns to exclude from the upload.
Returns:
set[pathlib.Path]: A set of file paths that match the given pattern.
"""
files = set()
logging.info(f'Searching for files to upload in {source_path} ...')
for file_path in source_path.rglob(pattern):
if file_path.is_file() and not is_excluded(file_path, exclude_patterns):
logging.info(f'File complies with include pattern: {file_path}.')
files.add(file_path)
return files
# Define the function to get the list of files to upload
def get_files_to_upload(source_path : pathlib.Path, include_patterns : list[str], exclude_patterns: list[str]) -> set[pathlib.Path]:
"""
Retrieves a set of files in the source directory that match the include patterns.
Args:
source_path (pathlib.Path): The path of the directory containing the files for upload.
include_patterns (list[str]): A list of file patterns to include in the upload.
exclude_patterns (list[str]): A list of file patterns to exclude from the upload.
Returns:
set[pathlib.Path]: A set of file paths that match the include patterns.
"""
files = set()
for include_pattern in include_patterns:
files.update(get_files_matching_pattern(source_path, include_pattern, exclude_patterns))
logging.info(f'Found {len(files)} files to upload.')
return files
def main(bucket_name: str, region: str, upload_prefix: str, upload_prefix_config_file: str, source_dir: str, include_pattern: list[str], exclude_pattern: list[str]) -> None:
"""
Main function that uploads files to an AWS S3 bucket.
Args:
bucket_name (str): The name of the S3 bucket to upload the files to.
region (str): The AWS region, used when logging the S3 console URL.
upload_prefix (str): The S3 object key prefix to use for the uploaded files.
upload_prefix_config_file (str): The path to the output_path config file containing the upload prefix.
source_dir (str): The relative path of the directory containing the files for upload.
include_pattern (list[str]): File patterns to include in the upload.
exclude_pattern (list[str]): File patterns to exclude from the upload.
"""
logging.info('Starting S3 upload')
logging.info(f'Bucket name: {bucket_name}')
logging.info(f'Region: {region}')
logging.info(f'source directory for upload: {source_dir}')
logging.info(f'Include pattern: {include_pattern}')
logging.info(f'Exclude pattern: {exclude_pattern}')
# Get path to data for upload
source_path : pathlib.Path = construct_source_path_for_upload(source_dir)
# Get the list of files to upload
files: set = get_files_to_upload(source_path, include_pattern, exclude_pattern)
# create upload prefix
final_upload_prefix : str = construct_upload_prefix(upload_prefix, upload_prefix_config_file)
logging.info(f'Upload directory(prefix): {final_upload_prefix}')
# Upload files
upload_files_to_s3(bucket_name, region, files, final_upload_prefix, source_path)
logging.info(f'Finished uploading all {len(files)} files to S3. https://s3.console.aws.amazon.com/s3/buckets/{bucket_name}?region={region}&prefix={final_upload_prefix}/')
if __name__ == "__main__":
parsed_args = parse_args(sys.argv[1:])
main(parsed_args.bucket_name, parsed_args.region, parsed_args.upload_prefix, parsed_args.upload_prefix_config_file, parsed_args.source_dir, parsed_args.include, parsed_args.exclude) | /s3uploader_ci_cd-1.0.4-py3-none-any.whl/s3uploader/s3uploader.py | 0.658637 | 0.193623 | s3uploader.py | pypi |
import re
from urllib.parse import urlparse
def style(url):
""" Determine 'style' of a given S3 url
>>> style("s3://my-bucket/my-key/")
's3'
>>> style("s3://user@my-bucket/my-key/")
's3-credential'
>>> style("https://my-bucket.s3.amazonaws.com/my-key/")
'bucket-in-netloc'
>>> style("https://s3.amazonaws.com/my-bucket/my-key/")
'bucket-in-path'
"""
o = urlparse(url)
if o.scheme == 's3':
if '@' in o.netloc:
return 's3-credential'
else:
return 's3'
if re.search(r'^s3[.-](\w{2}-\w{4,9}-\d\.)?amazonaws\.com', o.netloc):
return 'bucket-in-path'
if re.search(r'\.s3[.-](\w{2}-\w{4,9}-\d\.)?amazonaws\.com', o.netloc):
return 'bucket-in-netloc'
raise ValueError(f'Unknown url style: {url}')
def build_url(url_type, bucket, key=None, region=None, credential_name=None):
""" Construct an S3 URL
Args:
url_type: one of 's3', 's3-credential', 'bucket-in-path', 'bucket-in-netloc'
bucket: S3 bucket name
key: Key within bucket (optional)
region: S3 region name (optional)
credential_name: user/credential name to use in S3 scheme url (optional)
Returns
(string) S3 URL
"""
if url_type == 's3':
credential = f'{credential_name}@' if credential_name else ""
return f's3://{credential}{bucket}/{key or ""}'
if url_type == 'bucket-in-path':
return f'https://s3{"-" if region else ""}{region or ""}.amazonaws.com/{bucket}/{key}'
if url_type == 'bucket-in-netloc':
return f'https://{bucket}.s3.amazonaws.com/{key}'
raise ValueError(f'Invalid url_type: {url_type}')
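# Illustrative round-trip (added sketch, not part of the original module):
# a URL built with build_url parses back to the same bucket and key.
# parse_url is defined further down; the name resolves at call time.
def _demo_build_parse_roundtrip():
    url = build_url('bucket-in-netloc', 'my-bucket', key='my-key')
    assert url == 'https://my-bucket.s3.amazonaws.com/my-key'
    assert parse_url(url) == {'bucket': 'my-bucket', 'key': 'my-key'}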
def parse_s3_credential_url(url):
""" Parse S3 scheme url containing a user/credential name
>>> parse_s3_url("s3://user@my-bucket/my-key")
{'bucket': 'my-bucket', 'key': 'my-key/', 'credential_name': 'user'}
"""
o = urlparse(url)
cred_name, bucket = o.netloc.split('@')
key = o.path if o.path[0] != '/' else o.path[1:]
return {'bucket': bucket, 'key': key, 'credential_name': cred_name}
def parse_s3_url(url):
""" Parse S3 scheme url
>>> parse_s3_url("s3://my-bucket/my-key")
{'bucket': 'my-bucket', 'key': 'my-key'}
"""
o = urlparse(url)
bucket = o.netloc
key = o.path if o.path[0] != '/' else o.path[1:]
return {'bucket': bucket, 'key': key}
def parse_bucket_in_path_url(url):
""" Parse url with bucket name path
>>> parse_bucket_in_path_url("https://s3-eu-west-1.amazonaws.com/my-bucket/my-key/")
{'bucket': 'my-bucket', 'key': 'my-key/'}
"""
path = urlparse(url).path
bucket = path.split('/')[1]
key = '/'.join(path.split('/')[2:])
return {'bucket': bucket, 'key': key}
def parse_bucket_in_netloc_url(url):
""" Parse url with bucket name in host/netloc
>>> parse_bucket_in_netloc_url("https://my-bucket.s3.amazonaws.com/my-key/")
{'bucket': 'my-bucket', 'key': 'my-key/'}
"""
o = urlparse(url)
bucket = o.netloc.split('.')[0]
key = o.path if o.path[0] != '/' else o.path[1:]
return {'bucket': bucket, 'key': key}
def parse_url(url):
url_style = style(url)
if url_style == 's3-credential':
return parse_s3_credential_url(url)
if url_style == 's3':
return parse_s3_url(url)
if url_style == 'bucket-in-path':
return parse_bucket_in_path_url(url)
if url_style == 'bucket-in-netloc':
return parse_bucket_in_netloc_url(url) | /s3urls-0.0.3.tar.gz/s3urls-0.0.3/s3urls.py | 0.689096 | 0.194884 | s3urls.py | pypi |
from typing import Any, Callable, Dict, Optional, Tuple, Union
from loguru import logger
from Xlib import X
from Xlib.display import Display
from Xlib.protocol.event import KeyPress
from s3wm_core.key_combination import KeyCombination
keycode_mapping: Dict[Tuple[int, int], KeyCombination] = {}
def kill_wm(wm: Any) -> None:
"""Close WM process.
:param wm: an S3WM instance. (Used Any to avoid circular deps)
"""
wm.display.close()
exit(0) # noqa: WPS421
def init_keymap(display: Display) -> None:
"""
Sends requests to the X Server to listen for specific key events.
When a registered key combination is pressed, the application receives the corresponding event.
:param display: Used for keysym-to-keycode translation.
"""
from s3wm_core.wm_config import combinations # noqa: WPS433
for button in {1, 3}: # noqa: WPS335
display.screen().root.grab_button(
button=button,
modifiers=X.Mod1Mask,
owner_events=True,
event_mask=X.ButtonPressMask,
pointer_mode=X.GrabModeAsync,
keyboard_mode=X.GrabModeAsync,
confine_to=X.NONE,
cursor=X.NONE,
)
# Registering all keybindings to X11.
for combination in combinations:
# Getting code for key on current keyboard.
codes = {code for code, index in display.keysym_to_keycodes(combination.key)}
for code in codes:
logger.debug(combination)
display.screen().root.grab_key(
code,
combination.modifiers,
1,
X.GrabModeAsync,
X.GrabModeAsync,
)
keycode_mapping[(code, combination.modifiers)] = combination
def get_key_action( # noqa: WPS234
key_event: KeyPress,
) -> Optional[Union[Callable[..., Any], str]]:
"""
Function to get the action defined in `combinations` by keypress event.
:param key_event: event generated by X11.
:return: Action associated with keypress event.
"""
key = key_event.detail
modifiers = key_event.state
combination = keycode_mapping.get((key, modifiers))
if combination:
return combination.action
return None | /s3wm-0.2.3.tar.gz/s3wm-0.2.3/s3wm_core/keymap.py | 0.846546 | 0.172904 | keymap.py | pypi |
from typing import Any, Optional
from loguru import logger
from Xlib.error import XError
from Xlib.X import BadWindow, CurrentTime, RevertToParent
from Xlib.xobject.drawable import Window
from s3wm_core.s3screen import S3screen
from s3wm_core.utils import get_window_geometry
from s3wm_core.x_models import WindowGeometry, XWindowAttributes, XWMState
class S3window(object):
"""Main window abstraction for S3WM."""
def __init__(
self,
window: Window,
screen: S3screen,
parent: Optional[Window] = None,
):
self.parent = parent
self.window = window
self.screen = screen
@property
def id(self) -> int:
"""
X11 resource ID allocated for this window.
:return: unique window ID.
"""
return int(self.window.id)
@property
def is_root(self) -> bool:
"""
True if this window is the root window of the display.
:return: boolean
"""
return bool(self.id == self.screen.root_window.id)
@property
def geom(self) -> Optional[WindowGeometry]:
"""
Get window geometry.
:return: Window geometry
"""
return get_window_geometry(self.window)
@property
def attributes(self) -> Optional[XWindowAttributes]:
"""
Return X11 window attributes.
:return: window attributes.
"""
try:
attrs = self.window.get_attributes()
return XWindowAttributes.from_orm(attrs)
except XError as err:
logger.debug(f"Can't get window attributes. Cause: {err}")
return None
@property
def wm_state(self) -> Optional[XWMState]:
"""
X11 Window startup state.
:return: current window wm_state.
"""
try:
wm_state = self.window.get_wm_state()
if not wm_state:
return None
except XError as err:
logger.debug(f"Can't get window state. Cause: {err}")
return None
return XWMState(wm_state.state)
@wm_state.setter
def wm_state(self, new_state: XWMState) -> None:
"""
Update wm_state.
:param new_state: new wm state.
"""
self.window.set_wm_state(icon=0, state=new_state.value)
def get_transient(self) -> Optional["S3window"]:
"""
Get transient window for current.
:return: transient window if any.
"""
try:
transient = self.window.get_wm_transient_for()
if not transient:
return None
return S3window(transient, self.screen)
except BadWindow as bwerr:
logger.debug(f"Can't get transient. Cause: {bwerr}")
return None
def map(self) -> None:
"""Maps window in X11."""
self.window.map()
def unmap(self) -> None:
"""Unmap window in X11."""
self.window.unmap()
def focus(self) -> None:
"""Set focus to window."""
self.window.set_input_focus(
RevertToParent,
CurrentTime,
)
def resize(self, width: int, height: int, percents: bool = False) -> None:
"""
Resize window.
If percents mode is on, width and height are treated as percentages
relative to the current screen geometry.
:param width: new width
:param height: new height
:param percents: whether to interpret width and height as percentages.
"""
win_width = width
win_height = height
if percents:
screen_geom = self.screen.geom
win_width = (screen_geom.width * width) // 100
win_height = (screen_geom.height * height) // 100
self.window.configure(
width=win_width,
height=win_height,
)
def move(self, x: int, y: int) -> None: # noqa: WPS111
"""
Move window on the screen.
:param x: top left corner x coordinate.
:param y: top left corner y coordinate.
"""
self.window.configure(
x=x,
y=y,
)
def destroy(self) -> None:
"""Kill window from X11."""
self.window.destroy()
def __str__(self) -> str:
return f"<S3Window {self.id}>"
def __eq__(self, other: Any) -> bool:
if not isinstance(other, S3window):
return False
return self.id == other.id | /s3wm-0.2.3.tar.gz/s3wm-0.2.3/s3wm_core/s3window.py | 0.93556 | 0.151875 | s3window.py | pypi |
import re
import subprocess
from typing import Optional, Tuple
from loguru import logger
from Xlib.protocol.display import Screen
from Xlib.xobject.drawable import Window
from s3wm_core.x_models import WindowGeometry
def get_window_class(window: Window) -> str:
"""
Get window wm_class.
Fetch the WM_CLASS window property of the window WINDOW and return
the class part of the property. Return empty string if class is not
retrieved.
:param window: target window.
:returns: window wm_class string.
"""
try:
cmd, window_cls = window.get_wm_class()
except Exception as exc:
logger.error(f"Can't get window class. Cause: {exc}")
return ""
if window_cls is not None:
return str(window_cls)
return ""
def get_window_geometry(window: Window) -> Optional[WindowGeometry]:
"""
Obtain the geometry and attributes of the X11 window.
:param window: target window.
:returns: WindowGeomerty wrapper around X11 geometry type.
"""
try:
return WindowGeometry.from_orm(window.get_geometry())
except Exception as exc:
logger.exception(exc)
logger.error(f"Can't get window geometry. Cause: {exc}")
return None
def get_screen_size(screen: Screen) -> Tuple[int, int]:
"""
Get current screen size.
Return the dimension (WIDTH, HEIGHT) of the current screen as a
tuple in pixels. If xrandr command exists and either DP (DisplayPort)
or HDMI output is active, return its dimensions instead of the current X11 display
size.
:param screen: current screen.
:returns: Width and height of current screen.
"""
width, height = screen.width_in_pixels, screen.height_in_pixels
output = subprocess.getoutput("xrandr --current")
# pick the first connected DP-* or HDMI-* output reported by xrandr
match = re.search(r"(DP-?\d|HDMI-?\d) connected (\d+)x(\d+)", output)
if match:
width = int(match.group(2))
height = int(match.group(3))
logger.debug(f"get_screen_size -> w:{width} h:{height}")
return width, height
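# Illustrative check (added sketch, not part of the original module):
# the pattern above applied to a typical xrandr output line.
def _demo_xrandr_pattern():
    match = re.search(
        r"(DP-?\d|HDMI-?\d) connected (\d+)x(\d+)",
        "DP-1 connected 2560x1440+0+0 597mm x 336mm",
    )
    assert match is not None
    assert (int(match.group(2)), int(match.group(3))) == (2560, 1440)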
def get_usable_screen_size(screen: Screen) -> Tuple[int, int]:
"""
Get usable dimensions of the current screen.
Return the dimension (WIDTH, HEIGHT) of the usable screen area
(i.e., the area of the current screen excluding the area used by a
status monitor such as, for example, xpymon).
:param screen: current screen.
:returns: Width and height of current screen.
"""
width, height = get_screen_size(screen)
logger.debug(f"get_usable_screen_size -> w:{width} h:{height}")
return width, height | /s3wm-0.2.3.tar.gz/s3wm-0.2.3/s3wm_core/utils.py | 0.87938 | 0.269434 | utils.py | pypi |
import datetime
import s4.clarity.utils
import six
NUMERIC = "Numeric"
STRING = "String"
TEXT = "Text"
URI = "URI"
DATE = "Date"
DATETIME = "Datetime"
BOOLEAN = "Boolean"
ALL_TYPES = [NUMERIC, STRING, TEXT, URI, DATE, DATETIME, BOOLEAN]
def clarity_string_to_obj(typename, string):
"""
Convert field value string to Python object.
:type typename: str
:type string: str
:rtype: object
"""
if string is None:
return None
if typename in [STRING, TEXT, URI]:
return string
elif typename == NUMERIC:
return float(string)
elif typename == DATE:
return s4.clarity.utils.str_to_date(string)
elif typename == DATETIME:
return s4.clarity.utils.str_to_datetime(string)
elif typename == BOOLEAN:
return string == 'true'
else:
raise Exception("Unknown type '%s'" % typename)
def obj_to_clarity_string(obj):
"""
Convert Python object to field value string.
:rtype: str
"""
if obj is None:
return ""
elif isinstance(obj, six.string_types):
return obj
elif isinstance(obj, bool):
# Check bool before numerics, as bool is a subtype of int
return "true" if obj else "false"
elif isinstance(obj, (float, int)):
return repr(obj)
elif isinstance(obj, datetime.date):
return s4.clarity.utils.date_to_str(obj)
elif isinstance(obj, datetime.datetime):
return s4.clarity.utils.datetime_to_str(obj)
else:
raise Exception("Unknown object type '%s'" % type(obj).__name__)
def clarity_typename_to_python_typename(clarity_typename):
"""
Convert Clarity typename to the Python typename
:type clarity_typename: str
:rtype: str
"""
if clarity_typename == NUMERIC:
return "number"
elif clarity_typename in [STRING, TEXT, URI]:
return "str"
elif clarity_typename == DATE:
return "datetime.date"
elif clarity_typename == DATETIME:
return "datetime"
elif clarity_typename == BOOLEAN:
return "bool"
else:
raise Exception("Unknown clarity typename '%s'" % clarity_typename) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/types.py | 0.57678 | 0.198142 | types.py | pypi |
from collections import defaultdict
from ._internal import ClarityElement
class IOMapsMixin(ClarityElement):
"""
Parse the StepDetails or Process object,
https://www.genologics.com/files/permanent/API/latest/rest.version.steps.limsid.details.html#GET
https://www.genologics.com/files/permanent/API/latest/rest.version.processes.html#GET
to prepare a list of inputs and outputs for each step/process.
:ivar list[IOMap] iomaps:
:ivar list[Artifact] inputs:
:ivar list[Artifact] outputs:
:ivar list[Artifact] shared_outputs:
:ivar dict[Artifact, list[Artifact]] input_keyed_lookup:
:ivar dict[Artifact, list[Artifact]] output_keyed_lookup:
"""
IOMAPS_XPATH = None
IOMAPS_OUTPUT_TYPE_ATTRIBUTE = None
def __init__(self, *args, **kwargs):
super(IOMapsMixin, self).__init__(*args, **kwargs)
self.xml_root # force population of xml_root, which will initialize lists
def _init_lists(self):
self.input_keyed_lookup = {}
self.output_keyed_lookup = defaultdict(list)
self.iomaps = []
shared_output_set = set()
io_map_nodes = self.xml_findall(self.IOMAPS_XPATH)
shared_result_file_type = self._get_iomaps_shared_result_file_type()
for io_map_node in io_map_nodes:
input_artifact, output_artifact, artifact_type, generation_type = self._get_node_artifacts(io_map_node)
# If we have not seen this input yet, store it to the input lookup dict.
# This step builds up our input artifact list and, if there are no per-artifact outputs
# this is the only place that inputs are recorded.
if input_artifact not in self.input_keyed_lookup:
self.input_keyed_lookup[input_artifact] = []
if output_artifact is not None:
# Remove all shared result files
if generation_type == "PerAllInputs" and artifact_type == shared_result_file_type:
shared_output_set.add(output_artifact)
else:
# Save the output to its input lookup
self.input_keyed_lookup[input_artifact].append(output_artifact)
# Save the input to the output's lookup
self.output_keyed_lookup[output_artifact].append(input_artifact)
# If any of the input lists have more than one item we are in a pooling step.
# There are no steps that will have multiple inputs AND multiple outputs.
is_pooling = any(len(inputs) > 1 for inputs in self.output_keyed_lookup.values())
if is_pooling:
# We are pooling so map multiple inputs to a single output
self.iomaps = [IOMap(input_artifacts, [output_artifact]) for output_artifact, input_artifacts in
self.output_keyed_lookup.items()]
else:
# Regular mapping, allow for one to one or replicate generation
self.iomaps = [IOMap([input_artifact], output_artifacts) for input_artifact, output_artifacts in
self.input_keyed_lookup.items()]
# Prepare our artifact lists
self.inputs = list(self.input_keyed_lookup)
self.outputs = list(self.output_keyed_lookup)
self.shared_outputs = list(shared_output_set)
def _get_iomaps_shared_result_file_type(self):
"""
Get the name of the shared result file type that is used in iomap output link nodes
:rtype: str
:return: the name
"""
raise Exception("Classes using the IOMapsMixin must override the _get_iomaps_shared_result_file_type method.")
def _get_node_artifacts(self, io_map_node):
"""
Returns the input and output artifacts for an iomap node, as well as the
artifact type and generation type of the output.
:rtype: tuple(Artifact, Artifact, str, str)
"""
# There will always be an input artifact
input_node = io_map_node.find('input')
input_artifact = self.lims.artifacts.from_link_node(input_node)
output_node = io_map_node.find('output')
if output_node is None:
return input_artifact, None, None, None
output_artifact = self.lims.artifacts.from_link_node(output_node)
artifact_type = output_node.get(self.IOMAPS_OUTPUT_TYPE_ATTRIBUTE)
generation_type = output_node.get("output-generation-type")
return input_artifact, output_artifact, artifact_type, generation_type
def iomaps_input_keyed(self):
"""
:return: a mapping of input -> outputs.
:rtype: dict[Artifact, list[Artifact]]
"""
return self.input_keyed_lookup
def iomaps_output_keyed(self):
"""
:return: a mapping of output -> inputs.
:rtype: dict[Artifact, list[Artifact]]
"""
return self.output_keyed_lookup
@property
def xml_root(self):
return super(IOMapsMixin, self).xml_root
@xml_root.setter
def xml_root(self, root_node):
super(IOMapsMixin,type(self)).xml_root.__set__(self, root_node)
if root_node is not None:
# initialize lists from new xml
self._init_lists()
class IOMap(object):
"""
:ivar inputs: list[Artifact]
:ivar outputs: list[Artifact]
"""
def __init__(self, inputs, outputs):
self.inputs = inputs
self.outputs = outputs
@property
def output(self):
"""
:type: Artifact
:raise Exception: If there are multiple output artifacts
"""
if len(self.outputs) > 1:
raise Exception("Too many outputs (%d) to get single output" % len(self.outputs))
return self.outputs[0]
@property
def input(self):
"""
:type: Artifact
:raise Exception: If there are multiple input artifacts
"""
if len(self.inputs) > 1:
raise Exception("Too many inputs (%d) to get single input" % len(self.inputs))
return self.inputs[0] | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/iomaps.py | 0.850189 | 0.362885 | iomaps.py | pypi |
from ._internal import WrappedXml, ClarityElement, FieldsMixin
from s4.clarity._internal.props import subnode_link, subnode_property, subnode_element
from s4.clarity import types, lazy_property
class ContainerDimension(WrappedXml):
is_alpha = subnode_property("is-alpha", types.BOOLEAN)
offset = subnode_property("offset", types.NUMERIC) # type: float
size = subnode_property("size", types.NUMERIC) # type: float
@lazy_property
def dimension_range(self):
"""
List of the labels for the given dimension
:return: list[int]|list[str]
"""
# Cast these to integers from floats to avoid deprecation warnings from range.
start = int(self.offset)
end = int(self.offset + self.size)
if not self.is_alpha:
return list(range(start, end))
else:
return list(map(chr, range(65 + start, 65 + end)))
def as_index(self, label):
if label.isdigit():
return int(label) - int(self.offset)
else:
return ord(label[0]) - 65 - int(self.offset)
def as_label(self, index):
if self.is_alpha:
return chr(65 + index + int(self.offset))
else:
return str(index + int(self.offset))
class ContainerType(ClarityElement):
"""
A class to handle container types, with helper functions to create and encode well positions
For the purposes of this class, the y-dimension corresponds to the rows and the x-dimension to the columns; wells are addressed as "Y:X", i.e. row:column.
"""
UNIVERSAL_TAG = "{http://genologics.com/ri/containertype}container-type"
is_tube = subnode_property("is-tube", types.BOOLEAN) # type: bool
x_dimension = subnode_element(ContainerDimension, "x-dimension") # type: ContainerDimension
y_dimension = subnode_element(ContainerDimension, "y-dimension") # type: ContainerDimension
def well_to_rc(self, well):
"""
Converts a Clarity well position to the zero based index of the row and column.
Example::
'B:4' -> (1, 3)
:param well: A Clarity formatted well position
:type well: str
:return: The zero based index of the row and the column.
:rtype: tuple[int]
"""
location_pieces = well.split(":")
return self.y_dimension.as_index(location_pieces[0]), self.x_dimension.as_index(location_pieces[1])
def rc_to_well(self, rc):
"""
Converts a zero based index of the row and column to a Clarity well position.
Example::
(1, 3) -> 'B:4'
:param rc: The zero based index of the row and the column.
:type rc: tuple[int]
:return: A Clarity formatted well position
:rtype: str
"""
return "%s:%s" % (self.y_dimension.as_label(rc[0]), self.x_dimension.as_label(rc[1]))
def row_major_order_wells(self):
"""
Returns wells in the container type in row major order.
This will return the wells ordered: ["y1:x1", "y1:x2", "y1:x3", [...], "y1,xn", "y2:x1", "y2:x2", [...]]
Unavailable wells are omitted.
:rtype: list[str]
"""
ordered_wells = []
for y in self.y_dimension.dimension_range:
for x in self.x_dimension.dimension_range:
well_name = "%s:%s" % (str(y), str(x))
if well_name not in self.unavailable_wells:
ordered_wells.append(well_name)
return ordered_wells
def column_major_order_wells(self):
"""
Returns wells in the container type in column major order.
This will return the wells ordered: ["y1:x1", "y2:x1", "y3:x1", [...], "yn:x1", "y1:x2", "y2:x2", [...]]
Unavailable wells are omitted.
:rtype: list[str]
"""
ordered_wells = []
for x in self.x_dimension.dimension_range:
for y in self.y_dimension.dimension_range:
well_name = "%s:%s" % (str(y), str(x))
if well_name not in self.unavailable_wells:
ordered_wells.append(well_name)
return ordered_wells
@lazy_property
def unavailable_wells(self):
"""
:type: set[str]
"""
unavailable_well_nodes = self.xml_findall("unavailable-well")
return set(node.text for node in unavailable_well_nodes)
@lazy_property
def total_capacity(self):
"""
:type: int
"""
return len(self.x_dimension.dimension_range) * len(self.y_dimension.dimension_range) - len(self.unavailable_wells)
def row_order_wells(self):
"""
:deprecated: use :class:`ContainerType.row_major_order_wells()` instead.
"""
return self.row_major_order_wells()
def column_order_wells(self):
"""
:deprecated: use :class:`ContainerType.column_major_order_wells()` instead.
"""
return self.column_major_order_wells()
def x_dimension_range(self):
"""
:deprecated: use :class:`ContainerType.x_dimension.dimension_range` instead
"""
return self.x_dimension.dimension_range
def y_dimension_range(self):
"""
:deprecated: use :class:`ContainerType.y_dimension.dimension_range` instead
"""
return self.y_dimension.dimension_range
class Container(FieldsMixin, ClarityElement):
UNIVERSAL_TAG = "{http://genologics.com/ri/container}container"
ATTACH_TO_NAME = "Container"
container_type = subnode_link(ContainerType, "type", attributes=('name', 'uri'))
occupied_wells = subnode_property("occupied-wells", typename=types.NUMERIC, readonly=True)
state = subnode_property("state", typename=types.STRING, readonly=True)
@property
def type_name(self):
"""
Read-only shortcut to containertype name, which we know without doing another GET.
:type: str
"""
typenode = self.xml_find('./type')
return typenode.get('name')
@property
def placements(self):
"""
Dict of string "Y:X" -> Artifacts.
:type: dict[str, Artifact]
"""
return self.xml_all_as_dict("placement",
lambda n: n.find("value").text, # key
lambda n: self.lims.artifacts.from_link_node(n) # value
)
def artifact_at(self, well):
"""
:param well: String matching "Y:X" where Y is the row index and X is the column index.
The string may use letters or numbers depending on the container type.
:type well: str
:rtype: Artifact or None
"""
try:
return self.placements[well]
except KeyError:
raise KeyError("Container '%s' has no artifact at '%s'." % (self.name, well)) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/container.py | 0.841598 | 0.456046 | container.py | pypi |
import logging
from ._internal import FieldsMixin, ClarityElement
from ._internal.props import subnode_property, subnode_link
from s4.clarity.project import Project
from s4.clarity.artifact import Artifact
from s4.clarity import ETree
from s4.clarity import types
log = logging.getLogger(__name__)
class Sample(FieldsMixin, ClarityElement):
UNIVERSAL_TAG = "{http://genologics.com/ri/sample}sample"
# special tag used for creation posts
CREATION_TAG = "{http://genologics.com/ri/sample}samplecreation"
ATTACH_TO_NAME = "Sample"
date_received = subnode_property("date-received", types.DATE)
date_completed = subnode_property("date-completed", types.DATE)
project = subnode_link(Project, "project")
artifact = subnode_link(Artifact, "artifact")
@property
def is_control(self):
"""
:type: bool
"""
return self.xml_find('./control-type') is not None
def set_location(self, container, row, col):
"""
Sets this artifact's location (usually for sample creation) with
the given row and column, in the given container.
:param container: The Sample's container
:type container: s4.clarity.Container
:param row: The well position row.
:type row: str or int
:param col: The well position column
:type col: str or int
:deprecated: Use set_location_coords or set_location_well
"""
log.warning("Deprecated call: sample.set_location. Use set_location_coords or set_location_well.")
return self.set_location_coords(container, row, col)
def set_location_coords(self, container, row, col):
"""
Sets this artifact's location (usually for sample creation) with
the given row and column, in the given container.
Equivalent to set_location_well with the string "<row>:<col>".
:param container: The Sample's container
:type container: s4.clarity.Container
:param row: The well position row.
:type row: str or int
:param col: The well position column
:type col: str or int
"""
return self.set_location_well(container, '{0}:{1}'.format(row, col))
def set_location_well(self, container, well):
""""
Sets this artifact's location (usually for sample creation) with
the given well location, in the given container.
:param container: The Sample's container
:type container: s4.clarity.Container
:param well: The well position in the form "<row>:<col>"
:type well: str
"""
location_node = self.make_subelement_with_parents("./location")
ETree.SubElement(location_node, 'value').text = well
# attach container node, which must have the uri
ETree.SubElement(location_node, 'container', {'uri': container.uri}) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/sample.py | 0.755547 | 0.156943 | sample.py | pypi |
import logging
from collections import defaultdict
from s4.clarity import ETree
log = logging.getLogger(__name__)
ACTION_ASSIGN = "assign"
ACTION_UNASSIGN = "unassign"
class Router(object):
"""
Class allowing routing of multiple artifacts to a given workflow or stage
"""
def __init__(self, lims):
self.lims = lims
self.routing_dict = defaultdict(lambda:defaultdict(set))
def clear(self):
"""
Clears the routing node and the routing dict.
"""
self.routing_dict = defaultdict(lambda:defaultdict(set))
def remove(self, artifact_or_artifacts):
"""
Remove given artifact or artifacts from the routing dict.
No error is raised if the artifact is not found.
"""
for artifact in self._normalize_as_list(artifact_or_artifacts):
for routes in self.routing_dict.values():
for artifact_set in routes.values():
artifact_set.discard(artifact)
def assign(self, workflow_or_stage_uri, artifact_or_artifacts):
"""
Stages an artifact or multiple artifacts to be assigned to a workflow_or_stage_uri.
:type workflow_or_stage_uri: str
:param workflow_or_stage_uri: The uri of either a workflow or workflow-stage. If a workflow uri is provided,
the artifacts will be queued to the first stage. Otherwise, they will be queued to the specific workflow-stage.
:type artifact_or_artifacts: s4.clarity.Artifact | list[s4.clarity.Artifact]
"""
self._update_routing_dict(ACTION_ASSIGN, workflow_or_stage_uri, artifact_or_artifacts)
def unassign(self, workflow_or_stage_uri, artifact_or_artifacts):
"""
Stages an artifact or multiple artifacts to be unassigned from a workflow_or_stage_uri.
:type workflow_or_stage_uri: str
:param workflow_or_stage_uri: The uri of either a workflow or workflow-stage. If a workflow uri is provided,
the artifacts will be removed from any stages of that workflow. Otherwise, they will be removed from the
specified workflow stage.
:type artifact_or_artifacts: s4.clarity.Artifact | list[s4.clarity.Artifact]
"""
self._update_routing_dict(ACTION_UNASSIGN, workflow_or_stage_uri, artifact_or_artifacts)
def _update_routing_dict(self, action, uri, artifact_or_artifacts):
"""
Stages an artifact or multiple artifacts to be assigned to /unassigned from a workflow_or_stage_uri.
:type action: str
:param action: either "assign" or "unassign"
:type uri: str
:param uri: The uri of either a workflow or workflow-stage.
"""
self.routing_dict[action][uri].update(
self._normalize_as_list(artifact_or_artifacts)
)
def commit(self):
"""
Generates the routing XML for workflow/stage assignment/unassignment and posts it.
"""
routing_node = self._create_routing_node()
self.lims.request("post", self.lims.root_uri + "/route/artifacts", routing_node)
def _create_routing_node(self):
"""
Generates the XML for workflow/stage assignment/unassignment
"""
routing_node = ETree.Element("{http://genologics.com/ri/routing}routing")
for action, routes in self.routing_dict.items():
for workflow_or_stage_uri, artifact_set in routes.items():
if artifact_set:
assign_node = self._add_action_subnode(routing_node, action, workflow_or_stage_uri)
# create an artifact assign node for each samples
for artifact in artifact_set:
ETree.SubElement(assign_node, "artifact", {"uri": artifact.uri})
return routing_node
def _add_action_subnode(self, routing_node, action, workflow_or_stage_uri):
"""
Generates a ElementTree.SubElement according to the action (assign / unassign) and the workflow/stage uri
"""
if '/stages/' in workflow_or_stage_uri:
assign_node = ETree.SubElement(routing_node, action, {'stage-uri': workflow_or_stage_uri})
else:
assign_node = ETree.SubElement(routing_node, action, {'workflow-uri': workflow_or_stage_uri})
return assign_node
def _normalize_as_list(self, artifact_or_artifacts):
"""
Returns a list of artifacts from one or many artifacts
"""
if not isinstance(artifact_or_artifacts, (list, set)):
artifacts = [artifact_or_artifacts]
else:
artifacts = artifact_or_artifacts
return artifacts | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/routing.py | 0.839339 | 0.286045 | routing.py | pypi |
import logging
log = logging.getLogger(__name__)
ROW_ORDER = "row"
COLUMN_ORDER = "column"
def column_order_sort_keys(artifact):
"""
Provide container position sort-keys for the sorted function based on column-major order
Usage example::
sorted_outputs = sorted(self.outputs, key=column_order_sort_keys)
:type artifact: s4.clarity.Artifact
:rtype: tuple(str|int, str|int)
"""
location_pieces = _split_location_pieces(artifact.location_value)
return location_pieces[1], location_pieces[0]
def row_order_sort_keys(artifact):
"""
Provide container position sort-keys for the sorted function based on row-major order
Usage example::
sorted_outputs = sorted(self.outputs, key=row_order_sort_keys)
:type artifact: s4.clarity.Artifact
:rtype: tuple(str|int, str|int)
"""
location_pieces = _split_location_pieces(artifact.location_value)
return location_pieces[0], location_pieces[1]
def _split_location_pieces(location_value):
location_pieces = location_value.split(":")
return [_parse_location_piece(piece) for piece in location_pieces]
def _parse_location_piece(location_piece):
if location_piece.isdigit():
return int(location_piece)
else:
return location_piece
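# Illustrative check (added sketch, not part of the original module):
# numeric pieces are parsed to ints so that rows/columns sort
# numerically ('10' after '9') rather than lexicographically.
def _demo_split_location_pieces():
    assert _split_location_pieces('B:12') == ['B', 12]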
def place_plate_to_plate_match_wells(step, input_container, output_container):
"""
Places samples in the input_container in the output_container at
the same well location.
Plates do not have to be the same dimensions, but artifacts placed
at invalid wells will not be accepted by Clarity.
To submit these changes you will need to call step.placements.post_and_parse() afterwards.
:param step: The step that the placement is being done for
:param input_container: Container with artifacts to place in the output container
:param output_container: Container that will be populated with artifacts.
"""
for io_map in step.details.iomaps:
if io_map.input.container == input_container:
step.placements.create_placement(io_map.output, output_container, io_map.input.location_value)
def auto_place_artifacts(step, artifacts, order=ROW_ORDER):
"""
Places the artifacts provided, in the order provided, to selected_containers in the step.
:type step: Step
:type artifacts: list[Artifact]
:type order: str
"""
step.placements.clear_placements()
output_iterator = iter(artifacts)
number_outputs = len(artifacts)
number_placed_outputs = 0
# Note: This will not create new containers, only use the ones currently provided.
containers = step.placements.selected_containers
for container in containers:
if order == ROW_ORDER:
wells = container.container_type.row_major_order_wells()
elif order == COLUMN_ORDER:
wells = container.container_type.column_major_order_wells()
else:
raise Exception("Auto Place Error - Unrecognized order type '%s'" % order)
for well in wells:
output = next(output_iterator, None)
if output is None:
break
log.debug("Placing %s in well %s of container %s", output, well, container.name)
step.placements.create_placement(output, container, well)
number_placed_outputs += 1
# We ran out of either outputs or wells; the check below determines which.
if number_placed_outputs == number_outputs:
# Submit and return
step.placements.post_and_parse()
step.refresh()
return
# we're out of wells, continue to next container
# we're out of containers
raise Exception("Auto Place Error - Insufficient containers for artifacts.") | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/steputils/placement_utils.py | 0.821832 | 0.356783 | placement_utils.py | pypi |
import logging
log = logging.getLogger(__name__)
def copy_from_input_to_output(step, udf_names):
"""
Copies a set of UDFs from the inputs of a step to its outputs.
- Supply a list of UDFs if the source and destination names are the same.
- Supply a dictionary (source name to destination name) if they differ.
If the UDF is not present on the source, it is skipped.
Will throw an exception if there is more than one input per output.
:type step: s4.clarity.Step
:type udf_names: list[str]|dict[str:str]
"""
pairs = _keyed_io_maps_to_array(step.details.iomaps_input_keyed())
return _copy(pairs, udf_names)
def copy_from_output_to_input(step, udf_names):
"""
Copies a set of UDFs from the outputs of a step to its inputs, one to one.
- Supply a list of UDFs if the source and destination names are the same.
- Supply a dictionary (source name to destination name) if they differ.
If the UDF is not present on the source, it is skipped.
Will throw an exception if there is more than one input per output or more than one output per input.
:type step: s4.clarity.Step
    :type udf_names: list[str]|dict[str, str]
"""
pairs = _keyed_io_maps_to_array(step.details.iomaps_output_keyed())
return _copy(pairs, udf_names)
def _keyed_io_maps_to_array(keyed_io_maps):
"""
Prepares an array of keyed artifact to value artifacts
    :param keyed_io_maps: dict mapping each key Artifact to a list of related Artifacts (dict[Artifact, list[Artifact]])
:return: list[(Artifact, Artifact)]
"""
pairs = []
for key_artifact in keyed_io_maps:
for related_artifact in keyed_io_maps[key_artifact]:
pairs.append((key_artifact, related_artifact))
return pairs
def copy_from_input_to_sample(step, udf_names):
"""
Copies a set of UDFs from the inputs of a step to each input's sample.
- Supply a list of UDFs if the source and destination names are the same.
- Supply a dictionary (source name to destination name) if they differ.
If the UDF is not present on the source, it is skipped.
:type step: s4.clarity.Step
    :type udf_names: list[str]|dict[str, str]
"""
pairs = [
(artifact, artifact.sample) for artifact in step.details.inputs
]
return _copy(pairs, udf_names)
def copy_from_output_to_sample(step, udf_names):
"""
Copies a set of UDFs from the outputs of a step to each output's sample.
- Supply a list of UDFs if the source and destination names are the same.
- Supply a dictionary (source name to destination name) if they differ.
If the UDF is not present on the source, it is skipped.
:type step: s4.clarity.Step
    :type udf_names: list[str]|dict[str, str]
"""
pairs = [
(artifact, artifact.sample) for artifact in step.details.outputs
]
return _copy(pairs, udf_names)
def copy_from_sample_to_input(step, udf_names):
"""
Copies a set of UDFs to the inputs of a step from each input's sample.
- Supply a list of UDFs if the source and destination names are the same.
- Supply a dictionary (source name to destination name) if they differ.
If the UDF is not present on the source, it is skipped.
:type step: s4.clarity.Step
    :type udf_names: list[str]|dict[str, str]
"""
pairs = [
(artifact.sample, artifact) for artifact in step.details.inputs
]
return _copy(pairs, udf_names)
def copy_from_sample_to_output(step, udf_names):
"""
Copies a set of UDFs to the outputs of a step from each output's sample.
- Supply a list of UDFs if the source and destination names are the same.
- Supply a dictionary (source name to destination name) if they differ.
If the UDF is not present on the source, it is skipped.
:type step: s4.clarity.Step
    :type udf_names: list[str]|dict[str, str]
"""
pairs = [
(artifact.sample, artifact) for artifact in step.details.outputs
]
return _copy(pairs, udf_names)
def _copy(pairs, udf_names):
"""
Does the copy operation.
:type pairs: list[(Artifact, Artifact)]
    :type udf_names: list[str]|dict[str, str]
"""
for source, destination in pairs:
        if isinstance(udf_names, dict):
for (source_udf, destination_udf) in udf_names.items():
if source_udf in source:
log.info("Copying UDF '%s' on %s to UDF '%s' on %s", source_udf, source, destination_udf, destination)
destination[destination_udf] = source[source_udf]
else:
for udf_name in udf_names:
if udf_name in source:
log.info("Copying UDF '%s' from %s to %s", udf_name, source, destination)
destination[udf_name] = source[udf_name] | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/steputils/copyudfs.py | 0.7586 | 0.547827 | copyudfs.py | pypi |
from __future__ import print_function
import logging
from collections import defaultdict
log = logging.getLogger(__name__)
def set_next_actions(epp, default_action=None, controls_action=None, failed_qc_action=None, action_func=None):
"""
:type epp: s4.clarity.scripts.StepEPP
:type action_func: (s4.clarity.Artifact) -> str
    :param failed_qc_action: Applied to any sample or control that has failed QC.
    :param controls_action: Applied to controls. If this is a QC step, only controls which have passed QC.
    :param action_func: Called with each artifact; must return an action (string).
        If failed_qc_action or controls_action is also set, action_func will only
        be called for artifacts which are not already caught by those actions.
        If action_func is None, or returns None for an artifact, the default action is used.
    :param default_action: If None, an appropriate action is calculated (e.g. next step, or complete protocol).
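    Example (an illustrative sketch from inside a StepEPP subclass; the UDF name
    and the "repeat" action value are assumptions):
        def action_for(artifact):
            return "repeat" if artifact.get("Needs Rework") else None
        set_next_actions(self, failed_qc_action="remove", action_func=action_for)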
"""
actions_list = epp.step.actions.next_actions
auto_default_action, next_step_uri = _default_action_and_uri_for_step(epp.step)
if default_action is None:
default_action = auto_default_action
log.debug("default action: %s" % default_action)
changed = False
need_full_artifact_fetch = action_func is not None or \
failed_qc_action is not None or \
controls_action is not None
if need_full_artifact_fetch:
artifact_uris = [action["artifact-uri"] for action in actions_list]
epp.step.lims.artifacts.batch_get(artifact_uris)
for action_for_artifact in actions_list:
artifact = epp.step.lims.artifact_from_uri(action_for_artifact["artifact-uri"])
if controls_action is not None and artifact.is_control:
new_action = controls_action
elif failed_qc_action is not None and artifact.qc_failed():
new_action = failed_qc_action
elif action_func is not None:
new_action = action_func(artifact) or default_action
else:
new_action = default_action
if new_action is None:
# only possible if default_action is None
log.info("SKIPPING %s: taking no action (leaving in step)", artifact)
continue
current_action = action_for_artifact.get("action")
if current_action is None or current_action != new_action:
action_for_artifact["action"] = new_action
if new_action == "nextstep":
action_for_artifact["step-uri"] = next_step_uri
elif "step-uri" in action_for_artifact:
del action_for_artifact["step-uri"]
log.info("CHANGED %s: %s" % (artifact, new_action))
changed = True
else:
log.info("UNCHANGED %s: %s" % (artifact, current_action))
if changed:
epp.step.actions.next_actions = actions_list
epp.step.actions.commit()
print("Next Actions Set Successfully")
def route_to_next_protocol(step, artifacts_to_route):
"""
Queues the given artifacts directly to the first step of the next protocol.
NOTE: Artifacts *must* be in-progress in the current step, or an exception will be thrown.
:type step: step.Step
:type artifacts_to_route: list[s4.clarity.Artifact]
"""
# figure out how many workflow stages need to be skipped
current_protocol_step_count = step.configuration.protocol.number_of_steps
protocol_step_index = step.configuration.protocol_step_index
steps_to_skip = int(current_protocol_step_count - protocol_step_index + 1)
stages_to_artifacts = get_current_workflow_stages(step, artifacts_to_route)
for current_stage, artifact_list in stages_to_artifacts.items():
new_stage_index = int(current_stage.index) + steps_to_skip
if new_stage_index > len(current_stage.workflow.stages):
# This is the last protocol, so don't route the artifact.
log.info("Artifacts with LIMS IDs \"%s\" are in the last protocol. Will not route." % ", ".join(artifact.limsid for artifact in artifact_list))
continue
new_stage_to_route_to = current_stage.workflow.stages[new_stage_index]
new_stage_to_route_to.enqueue(artifact_list)
def get_current_workflow_stages(step, artifacts):
"""
Given artifacts in a currently running step, finds their current workflow stages.
:returns: a dict mapping workflow stages to lists of artifacts which are currently in them.
:rtype: dict[Stage, list[Artifact]]
"""
iomaps_output_keyed = step.details.iomaps_output_keyed()
stage_to_artifacts = defaultdict(list)
for artifact in artifacts:
# Make sure to get the workflow stages from the input, as it may not be the artifact we're actually routing
inputs = iomaps_output_keyed.get(artifact)
if inputs:
workflow_stages = get_artifact_workflow_stages_for_current_step(step, inputs[0])
else:
workflow_stages = get_artifact_workflow_stages_for_current_step(step, artifact)
for workflow_stage in workflow_stages:
stage_to_artifacts[workflow_stage].append(artifact)
return stage_to_artifacts
def route_to_stage_by_name(step, artifacts_to_route, target_stage_name,
name_matches_base_name=lambda name, requested: name == requested):
"""
Queues the given artifacts to the first stage in the artifact's workflow with the given name.
NOTE: Artifacts *must* be in-progress in the current step, or an exception will be thrown.
Optionally takes a name comparison function to use. Defaults to exact name matching.
:type step: step.Step
:type artifacts_to_route: list[s4.clarity.Artifact]
:type target_stage_name: str
:type name_matches_base_name: (str, str) -> bool
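    Example (an illustrative sketch; the stage name is a placeholder):
        failed = [a for a in step.details.inputs if a.qc_failed()]
        route_to_stage_by_name(step, failed, "Repeat Library Prep",
                               name_matches_base_name=lambda name, req: name.startswith(req))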
"""
if len(artifacts_to_route) == 0:
return
stages_to_artifacts = get_current_workflow_stages(step, artifacts_to_route)
for current_stage, artifact_list in stages_to_artifacts.items():
found_stage = False
workflow = current_stage.workflow
for workflow_stage in workflow.stages:
if name_matches_base_name(workflow_stage.name, target_stage_name):
workflow_stage.enqueue(artifact_list)
found_stage = True
break
if not found_stage:
raise Exception("Unable to route artifacts (%s) to stage '%s' -- match not found in workflow." %
(artifact_list, target_stage_name))
def get_artifact_workflow_stages_for_current_step(step, artifact):
workflow_stages = [stage_history.stage
for stage_history in artifact.workflow_stages
if stage_history.status == "IN_PROGRESS"
and stage_history.stage.step.uri == step.configuration.uri]
if not workflow_stages:
# The artifact is not in progress at the current step, so we can't determine which stage to route to.
raise Exception("Can not retrieve an in-progress workflow stage for artifact '%s' in step '%s'." %
(artifact.name, step.name))
return workflow_stages
def _default_action_and_uri_for_step(step):
stepconf = step.configuration
transitions = stepconf.transitions
if transitions is None or len(transitions) == 0:
if stepconf.protocol_step_index == stepconf.protocol.number_of_steps:
action = "complete"
else:
action = "leave"
next_step_uri = None
else:
action = "nextstep"
next_step_uri = transitions[0]["next-step-uri"]
return action, next_step_uri | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/steputils/actions.py | 0.748812 | 0.192786 | actions.py | pypi |
import abc
import s4.clarity
from .genericscript import GenericScript
class DerivedSampleAutomation(GenericScript):
"""
A script run from the Project Dashboard screen.
:ivar LIMS lims: The Clarity object to perform operations against.
:ivar list[Artifact] artifacts: The list of Artifacts that the script applies to, loaded from the provided LIMS Ids.
:param map options: A map of the values of the supplied command line arguments. The default keys available are:
`username`, `password`, `api_uri`, and `derived_sample_ids`.
*Usage:*
Implement process_derived_samples(), which must return a string to display success status to the user.
Optionally:
add_arguments(argparser) # To add more arguments. Don't forget to call super.
Add to the end of your file:
if __name__ == '__main__':
YourSubClass.main()
Example Clarity automation string. Contains an example of additional user input that would require an override of
add_arguments to add the -x arg. *Note* that all userinput args are strings:
``python <script_name>.py -u {username} -p {password} -a '{baseURI}v2' -d {derivedSampleLuids} -x {userinput:input_x}``
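    Example subclass (an illustrative sketch; the UDF name is a placeholder):
        class FlagForReview(DerivedSampleAutomation):
            def process_derived_samples(self):
                for artifact in self.artifacts:
                    artifact["Flagged For Review"] = True
                self.lims.artifacts.batch_update(self.artifacts)
                return "Flagged %d derived samples for review." % len(self.artifacts)
        if __name__ == '__main__':
            FlagForReview.main()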
"""
__metaclass__ = abc.ABCMeta
def __init__(self, options):
super(DerivedSampleAutomation, self).__init__(options)
self.lims = s4.clarity.LIMS(options.api_uri, options.username, options.password, options.dry_run)
self.artifacts = self.lims.artifacts.batch_get_from_limsids(options.derived_sample_ids)
@classmethod
def add_arguments(cls, argparser):
super(DerivedSampleAutomation, cls).add_arguments(argparser)
argparser.add_argument(
'-u', '--username', type=str, help='Clarity LIMS username', required=True
)
argparser.add_argument(
'-p', '--password', type=str, help='Clarity LIMS password', required=True
)
argparser.add_argument(
'-a', '--api-uri', type=str, help='URI of Clarity LIMS (ending in /api/v2)', required=True
)
argparser.add_argument(
'-d', '--derived-sample-ids', type=str, nargs='+', help='LIMS IDs of derived samples (artifacts)'
)
def run(self):
success_message = self.process_derived_samples()
if not success_message:
raise Exception("The process_derived_samples method must return a string message to use to report success.")
self.final_summary = success_message
@abc.abstractmethod
def process_derived_samples(self):
"""
Implement this to perform the work required. Method *must* return a summary success string, as it's used to
display the user-facing message on script completion.
:return: Message to report success to the user
:rtype: str
:raise: Exception to report failure
"""
        raise NotImplementedError("Call to abstract method 'process_derived_samples'.") | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/scripts/derived_sample_automation.py | 0.795261 | 0.294171 | derived_sample_automation.py | pypi |
from .stepepp import StepEPP
import logging
import re
log = logging.getLogger(__name__)
class TriggeredStepEPP(StepEPP):
""" TriggeredStepEPP acts as an EPP with multiple entry points, to allow the developer to group all scripts
associated with a step together in a single file. A script implementing this class is intended to be called
multiple times in the same step at different stages, with the Action parameter determining which method is called.
Choices for the Action parameter are automatically generated from all class methods starting with ``on_``. The
Action value is generated by taking the method name, trimming the ``on_``, and transforming the rest of the name to
Pascal-case. For example, an EPP String containing ``-a TestThing`` would attempt to execute a method named
``on_test_thing``.
Usage:
``python <script.py> -u {username} -p {password} --step-uri {stepURI:v2} -l {compoundOutputFileLuid#} -a <Action>``
Our suggested implementation when creating method names is to mirror Clarity's language for scripts triggered
on step transitions, and having button-triggered scripts follow the button label, as shown here:
    ==================== =================================================
    Action Parameter     EPP Method Name
    ==================== =================================================
    BeginningOfStep      on_beginning_of_step
    EndOfStep            on_end_of_step
    PlacementEnter       on_placement_enter
    PlacementExit        on_placement_exit
    PoolingEnter         on_pooling_enter
    PoolingExit          on_pooling_exit
    AddReagentsEnter     on_add_reagents_enter
    AddReagentsExit      on_add_reagents_exit
    RecordDetailsEnter   on_record_details_enter
    RecordDetailsExit    on_record_details_exit
    CalculateQc          on_calculate_qc (Record Details buttons example)
    ==================== =================================================
    Ultimately, though, as long as the ``on_`` naming rules are followed, the pattern used in your
implementation is up to you.
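    Example (an illustrative sketch):
        class MyStepScript(TriggeredStepEPP):
            def on_record_details_enter(self):
                log.info("Entering Record Details for step %s", self.step.limsid)
            def on_end_of_step(self):
                log.info("Step %s is complete", self.step.limsid)
        if __name__ == '__main__':
            MyStepScript.main()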
"""
triggered_step_actions = {}
@classmethod
def add_triggered_step_actions(cls):
action_handler_names = [method for method in dir(cls) if method.startswith('on_')]
for action_handler_name in action_handler_names:
cls.triggered_step_actions[cls._generate_action(action_handler_name)] = getattr(cls, action_handler_name)
@classmethod
    def _generate_action(cls, action_handler_name):
        # Converts snake-case to Pascal-case and removes the leading ``on``
        # eg. ``on_record_details_enter`` would be transformed to ``RecordDetailsEnter``
        # Note: the leading "on" is sliced off rather than lstrip'd, since
        # lstrip('on') strips the *characters* 'o' and 'n', not the prefix.
        return re.sub('_.', lambda x: x.group()[1].upper(), action_handler_name)[2:]
@classmethod
def add_arguments(cls, argparser):
super(TriggeredStepEPP, cls).add_arguments(argparser)
cls.add_triggered_step_actions()
argparser.add_argument("-a", "--action", choices=list(cls.triggered_step_actions), required=True)
def run(self):
log.info("Handling Action '%s'" % self.options.action)
action_arg = self.triggered_step_actions.get(self.options.action)
if action_arg:
action_arg(self) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/scripts/triggered_step_epp.py | 0.647018 | 0.317506 | triggered_step_epp.py | pypi |
from collections import defaultdict
def get_parent_artifacts(lims, artifacts):
"""
Helper method to get the parent artifacts keyed to the supplied artifacts
:param LIMS lims:
:param list[Artifact] artifacts: The artifacts to get parent artifacts for
:rtype: dict[Artifact, list[Artifact]]
"""
artifact_to_parent_artifacts = defaultdict(list)
artifacts_to_batch_fetch = []
for artifact in artifacts:
if artifact.parent_step:
            # Collect every input whose iomap produced this artifact
            # (this covers pooled inputs and replicates)
            artifact_to_parent_artifacts[artifact] = [
                input_artifact
                for iomap in artifact.parent_step.details.iomaps
                for input_artifact in iomap.inputs
                if any(output.limsid == artifact.limsid for output in iomap.outputs)
            ]
artifacts_to_batch_fetch += artifact_to_parent_artifacts[artifact]
else:
# Without a parent_step, we've reached the end of the artifact history
artifact_to_parent_artifacts[artifact] = []
if artifact_to_parent_artifacts:
lims.artifacts.batch_fetch(set(artifacts_to_batch_fetch))
return artifact_to_parent_artifacts
def get_udfs_from_artifacts_or_ancestors(lims, artifacts_to_get_udf_from, required_udfs=None, optional_udfs=None):
"""
    Walks the genealogy of each artifact in artifacts_to_get_udf_from and gets the value of each requested UDF from
    the artifact itself, or from its first available ancestor that has a value for that UDF.
    NOTE: The method will stop the search upon reaching any pooling step.
    :param LIMS lims:
    :param list[Artifact] artifacts_to_get_udf_from: the list of artifacts whose ancestors should be inspected for
        the UDFs. Passed down recursively until all artifacts have been satisfied.
    :param list[str] required_udfs: The list of UDFs that *must* be found. An exception will be raised otherwise.
    :param list[str] optional_udfs: The list of UDFs that *can* be found, but do not need to be.
    :rtype: dict[s4.clarity.Artifact, dict[str, str]]
    :raises Exception: if values can not be retrieved for all required_udfs for all of the provided artifacts
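    Example (illustrative; the UDF name is a placeholder):
        values = get_udfs_from_artifacts_or_ancestors(
            lims, step.details.inputs, required_udfs=["Library Size"])
        sizes = {artifact.name: udf_map["Library Size"]
                 for artifact, udf_map in values.items()}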
"""
if not required_udfs and not optional_udfs:
raise Exception("The get_udfs_from_artifacts_or_ancestors method must be called with at least one "
"of the required_udfs or optional_udfs parameters.")
required_udfs = required_udfs or []
optional_udfs = optional_udfs or []
# Assemble the dictionaries for the internal methods
ancestor_artifact_to_original_artifact = {}
original_artifact_to_udfs = {}
for artifact in artifacts_to_get_udf_from:
ancestor_artifact_to_original_artifact[artifact] = [artifact]
original_artifact_to_udfs[artifact] = {}
for name in (required_udfs + optional_udfs):
original_artifact_to_udfs[artifact][name] = artifact.get(name, None)
artifacts_to_udfs = _get_udfs_from_ancestors_internal(
lims, ancestor_artifact_to_original_artifact, original_artifact_to_udfs)
if required_udfs:
_validate_required_ancestor_udfs(artifacts_to_udfs, required_udfs)
return artifacts_to_udfs
def _validate_required_ancestor_udfs(artifacts_to_udfs, required_udfs):
"""
Validates that all items in the artifacts_to_udfs dict have values for the required_udfs
:type artifacts_to_udfs: dict[s4.clarity.Artifact, dict[str, str]]
:type required_udfs: list[str]
    :raises Exception: if any artifact is missing any of the required_udfs
"""
artifacts_missing_udfs = set()
missing_udfs = set()
for artifact, udf_name_to_value in artifacts_to_udfs.items():
for required_udf in required_udfs:
if udf_name_to_value.get(required_udf) in ["", None]:
artifacts_missing_udfs.add(artifact.name)
missing_udfs.add(required_udf)
if artifacts_missing_udfs:
raise Exception("Could not get required values for udf(s) '%s' from ancestors of artifact(s) '%s'." %
("', '".join(missing_udfs), "', '".join(artifacts_missing_udfs)))
def _get_udfs_from_ancestors_internal(lims, current_artifacts_to_original_artifacts, original_artifacts_to_udfs):
"""
Recursive method that gets parent artifacts, and searches them for any udfs that have not yet been filled in
:type lims: s4.clarity.LIMS
:type current_artifacts_to_original_artifacts: dict[s4.clarity.Artifact: list[s4.clarity.Artifact]]
:param current_artifacts_to_original_artifacts: dict of the currently inspected artifact to the original artifact.
:type original_artifacts_to_udfs: dict[s4.clarity.Artifact, dict[str, str]]
:param original_artifacts_to_udfs: dict of the original artifacts to their ancestors' UDF values, which will
get filled in over the recursive calls of this method.
:rtype: dict[s4.clarity.Artifact, dict[str, Any]]
"""
current_artifacts = list(current_artifacts_to_original_artifacts)
current_artifacts_to_parent_artifacts = get_parent_artifacts(lims, list(current_artifacts_to_original_artifacts))
# Initialize the 'next to search' dict
next_search_artifacts_to_original_artifacts = defaultdict(list)
for current_artifact in current_artifacts:
if not current_artifacts_to_parent_artifacts[current_artifact]:
# The end of the genealogy has been reached for this artifact
continue
if current_artifact.parent_step.pooling is not None:
# Stop looking when we reach a step with pooled inputs, as ancestor artifacts would likely contain multiple
# values for the UDFs in question
continue
        # We can now take the single parent artifact with confidence, since the pooling check above validated it
current_artifact_parent = current_artifacts_to_parent_artifacts[current_artifact][0]
for original_artifact in current_artifacts_to_original_artifacts[current_artifact]:
continue_searching = False
for udf_name, udf_value in original_artifacts_to_udfs[original_artifact].items():
# Don't overwrite values that have already been found
if udf_value is not None:
continue
found_value = current_artifact_parent.get(udf_name, None)
if found_value is None:
continue_searching = True
continue
original_artifacts_to_udfs[original_artifact][udf_name] = found_value
if continue_searching:
next_search_artifacts_to_original_artifacts[current_artifact_parent].append(original_artifact)
if next_search_artifacts_to_original_artifacts:
return _get_udfs_from_ancestors_internal(lims, next_search_artifacts_to_original_artifacts, original_artifacts_to_udfs)
return original_artifacts_to_udfs | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/utils/artifact_ancestry.py | 0.897316 | 0.425665 | artifact_ancestry.py | pypi |
from s4.clarity import ETree
import logging
import inspect
from six import string_types
from abc import ABCMeta, abstractmethod
try:
from collections.abc import MutableSequence, MutableMapping
except ImportError:
from collections import MutableSequence, MutableMapping
from s4.clarity import types
log = logging.getLogger(__name__)
class _clarity_property(object):
"""
Abstract class to hold code common to each type of property.
Inheritors must call _ensure_settable in __set__.
"""
__metaclass__ = ABCMeta
def __init__(self, property_name, readonly=False, property_type_str=None):
"""
:type property_name: str
:type readonly: bool
"""
self.property_name = property_name
self.readonly = readonly
self.__doc__ = "The value of the XML property '%s'" % property_name
if property_type_str:
self.__doc__ += "\n\n:type: %s" % property_type_str
self.__name__ = property_name
self.__module__ = _prop_defining_module()
@abstractmethod
def __get__(self, instance, owner):
"""
Returns the value of this property.
:param instance: Instance is the instance that the attribute was accessed through that provides the backing xml data.
:param owner: The class of the owning object.
"""
return None
def __set__(self, instance, value):
"""
:param instance: Instance is the instance that the attribute was accessed through that provides the backing xml data.
:param value: The value to set.
"""
# this does nothing, but in the case of an always-readonly class, we call this to
# ensure we are properly raising an error.
self._ensure_settable(instance)
def _ensure_settable(self, instance):
if self.readonly:
raise AttributeError("%s.%s is a read-only property." % (instance, self.property_name))
class attribute_property(_clarity_property):
"""
Creates a property that is backed against a xml attribute on the root element of a WrappedXml object.
ex:
<root_node demo_attribute='demo value' />
class RootElementWrapper(WrappedXml):
demo_attribute = attribute_property("demo_attribute")
    root.demo_attribute == 'demo value'
"""
def __init__(self, property_name, typename=types.STRING, readonly=False):
"""
:type typename: str
:param typename: one of s4.clarity.types, default STRING.
"""
prop_typename = types.clarity_typename_to_python_typename(typename)
super(attribute_property, self).__init__(property_name, readonly, prop_typename)
self.typename = typename
def __get__(self, instance, owner):
"""
:type instance: s4.clarity._internal.element.WrappedXml
"""
string_value = instance.xml_root.attrib.get(self.property_name)
return types.clarity_string_to_obj(self.typename, string_value)
def __set__(self, instance, value):
"""
Setting the value to None will cause the attribute to be deleted.
:param instance:
:type instance: s4.clarity._internal.element.WrappedXml
:param value:
"""
self._ensure_settable(instance)
if value is None:
instance.xml_root.attrib.pop(self.property_name, None)
else:
string_value = types.obj_to_clarity_string(value)
instance.xml_root.set(self.property_name, string_value)
class subnode_property(_clarity_property):
"""
Creates a property with a simple value that is backed against a xml sub-element owned by the root node.
ex:
<root_element>
<sub_element>Sub Element Value</sub_element>
</root_element>
class RootElementWrapper(WrappedXml):
sub_element = subnode_property("sub_element")
root.sub_element == 'Sub Element Value'
"""
def __init__(self, property_name, typename=types.STRING, readonly=False):
"""
:param property_name: The name of the xml element to use as a backing value.
:type property_name: str
:param typename: one of s4.clarity.types, default STRING
:type typename: str
:param readonly: When set to True an AttributeError will be thrown if this property is written to.
:type readonly: bool
"""
prop_typename = types.clarity_typename_to_python_typename(typename)
super(subnode_property, self).__init__(property_name, readonly, prop_typename)
self.typename = typename
def __get__(self, instance, owner):
"""
:param instance: The instance that the attribute was accessed through, or None when the attribute is accessed through the owner.
:type instance: s4.clarity._internal.element.WrappedXml
:param owner: The owner of the class
:type owner: Type
"""
string_value = instance.get_node_text(self.property_name)
return types.clarity_string_to_obj(self.typename, string_value)
def __set__(self, instance, value):
"""
Setting the value to None will cause the subnode (self.prop_name) to be deleted.
:type instance: s4.clarity._internal.element.WrappedXml
:raises AttributeError: If this is a read-only property.
"""
self._ensure_settable(instance)
if value is None:
instance.remove_subnode(self.property_name)
else:
string_value = types.obj_to_clarity_string(value)
instance.set_subnode_text(self.property_name, string_value)
class subnode_link(_clarity_property):
"""
Creates a property that is backed by a subnode link data structure, which is a
node with the 'limsid' and 'uri' attributes. This property will return a ClarityElement
for the data structure backed against the data from the uri.
ex:
<root_node>
<sample_node limsid='1234' uri='https://qalocal/api/v2/samples/1234' />
</root_node>
class RootNodeWrapper(WrappedXml):
sample = subnode_link(Sample, "sample_node")
root.sample == <Sample limsid='1234'>
"""
def __init__(self, element_type, property_name, readonly=False, attributes=('limsid', 'uri')):
"""
:type element_type: Type[s4.clarity._internal.element.ClarityElement]
:type property_name: str
:type attributes: tuple[str]
:param readonly: When set to True an AttributeError will be thrown if this property is written to.
:type readonly: bool
"""
super(subnode_link, self).__init__(property_name, readonly)
self.element_type = element_type
self.link_attributes = attributes
self.__doc__ = """The linked `{0}` from the '{1}' subnode
:type: {0}""".format(element_type.__name__, property_name)
def __get__(self, instance, owner):
"""
:type instance: s4.clarity._internal.element.WrappedXml
:rtype: s4.clarity._internal.element.ClarityElement
"""
return instance.lims.factory_for(self.element_type).from_link_node(instance.xml_find("./" + self.property_name))
def __set__(self, instance, value):
"""
:type instance: s4.clarity._internal.element.WrappedXml
:type value: s4.clarity._internal.element.ClarityElement
"""
self._ensure_settable(instance)
# a link is of the form:
# <project limsid="SWI1" uri="https://qalocal/api/v2/projects/SWI1"/>
node = instance.get_or_create_subnode(self.property_name)
attribs = {}
for attrname in self.link_attributes:
if hasattr(value, attrname):
attrvalue = getattr(value, attrname)
if attrvalue is not None:
attribs[attrname] = attrvalue
if node is None:
ETree.SubElement(instance.xml_root, self.property_name, attribs)
else:
for k, v in attribs.items():
node.set(k, v)
class subnode_links(_clarity_property):
"""
Creates a property that backs against a number of elements with the subnode link data structure,
which is a node with the 'limsid' and 'uri' attributes. This node will return a list of ClarityElements
similar to the output of subnode_link.
ex:
<root_node>
<link_node limsid='1234' uri='https://qalocal/api/v2/samples/1234' />
<link_node limsid='1235' uri='https://qalocal/api/v2/samples/1235' />
<link_node limsid='1236' uri='https://qalocal/api/v2/samples/1236' />
</root_node>
class RootElementWrapper(WrappedXml):
        sub_element = subnode_links(Sample, "link_node")
root.sub_element == [<Sample limsid='1234'>, <Sample limsid='1235'>, <Sample limsid='1236'>]
"""
def __init__(self, element_type, property_name, container_name=None):
"""
:type element_type: Type[s4.clarity._internal.element.ClarityElement]
:type property_name: str
:type container_name: str
"""
super(subnode_links, self).__init__(property_name, readonly=True)
self.element_type = element_type
if container_name is None:
self.link_path = "./{0}".format(property_name)
else:
self.link_path = "./{0}/{1}".format(container_name, property_name)
self.__doc__ = """The linked `{0}` objects from the '{1}' subnodes
:type: list[{0}]""".format(element_type.__name__, property_name)
def __get__(self, instance, owner):
"""
:type instance: s4.clarity._internal.element.WrappedXml
:rtype: s4.clarity._internal.element.ClarityElement
"""
return instance.lims.factory_for(self.element_type).from_link_nodes(instance.xml_findall(self.link_path))
class subnode_element(_clarity_property):
"""
    Creates a property that backs against a subnode containing a data structure that can be represented by
    an object derived from WrappedXml.
ex:
<root_node>
<sub_node>
<node_one>value 1</node_one>
<node_two>value 2</node_two>
</sub_node>
</root_node>
class SubNode(WrappedXml):
node_one = subnode_property("node_one")
node_two = subnode_property("node_two")
class RootNodeWrapper(WrappedXml):
sub_element = subnode_element(SubNode, "sub_node")
root.sub_element.node_one == "value 1"
"""
def __init__(self, element_class, property_name, readonly=False):
"""
:type element_class: Type[s4.clarity._internal.element.WrappedXml]
:type property_name: str
:param readonly: When set to True an AttributeError will be thrown if this property is written to.
:type readonly: bool
"""
super(subnode_element, self).__init__(property_name, readonly)
self.element_class = element_class
self.__doc__ = """The element `{0}` from subnode '{1}'
:type: {0}""".format(element_class.__name__, property_name)
def __get__(self, instance, owner):
"""
:type instance: s4.clarity._internal.element.WrappedXml
:rtype: s4.clarity._internal.element.WrappedXml
"""
        # ToDo: This should be cached so that we return the same object each time, otherwise root.sub_element != root.sub_element
return self.element_class(instance.lims, instance.get_or_create_subnode(self.property_name))
def __set__(self, instance, value):
"""
:type instance: s4.clarity._internal.element.WrappedXml
:type value: s4.clarity._internal.element.WrappedXml
"""
self._ensure_settable(instance)
        node = instance.xml_find(self.property_name)
        # Compare against None explicitly: an element with no children is falsy in ElementTree
        if node is not None:
            instance.xml_root.remove(node)
instance.xml_root.append(value.xml_root)
class subnode_element_list(_clarity_property):
"""
    Creates a property that backs onto a list of subnodes containing data structures that can be represented by
    objects derived from WrappedXml.
ex:
<root_node>
<sub_nodes>
<sub_node>
<node_one>value 1</node_one>
<node_two>value 2</node_two>
</sub_node>
<sub_node>
<node_one>value 3</node_one>
<node_two>value 4</node_two>
</sub_node>
<sub_node>
<node_one>value 5</node_one>
<node_two>value 6</node_two>
</sub_node>
</sub_nodes>
</root_node>
class SubNode(WrappedXml):
node_one = subnode_property("node_one")
node_two = subnode_property("node_two")
class RootElementWrapper(WrappedXml):
        sub_element = subnode_element_list(SubNode, "sub_nodes", "sub_node")
root.sub_element == [<SubNode>, <SubNode>, <SubNode>]
root.sub_element[0].node_one == "value 1"
root.sub_element[2].node_two == "value 6"
"""
def __init__(self, element_class, property_name, list_item_property_name, readonly=False):
"""
:param element_class: The class
:type element_class: Type[s4.clarity._internal.element.WrappedXml]
:param property_name: The name of the xml element that contains the sub items to be used as backing values for this list.
:type property_name: str
        :param list_item_property_name: The name of the xml elements that will be used as backing values for individual items in this list.
:type list_item_property_name: str
:param readonly: When set to True an AttributeError will be thrown if this property is written to.
:type readonly: bool
"""
super(subnode_element_list, self).__init__(property_name, readonly)
self.element_class = element_class
self.list_item_property_name = list_item_property_name
self.__doc__ = """`{0}` items from the '{1}' subnodes
:type: list[{0}]""".format(element_class.__name__, list_item_property_name)
def __get__(self, instance, owner):
"""
:param instance: The owning object.
:type instance: s4.clarity._internal.element.WrappedXml
:rtype: list[s4.clarity._internal.element.WrappedXml]
"""
return _ClarityWrappedXmlList(self.element_class, instance, self.property_name, self.list_item_property_name, self.readonly)
def __set__(self, instance, value):
"""
:type instance: s4.clarity._internal.element.WrappedXml
:type value: list[s4.clarity._internal.element.WrappedXml]
"""
self._ensure_settable(instance)
nodes = instance.xml_findall(self.property_name)
if nodes:
[instance.xml_root.remove(node) for node in nodes]
[instance.xml_root.append(item.xml_root) for item in value]
class _ClarityWrappedXmlList(MutableSequence):
def __init__(self, element_class, node_instance, property_name, list_item_property_name, readonly=False):
"""
Represents a list of sub nodes as an object list.
:param element_class: The class used to represent nodes as objects.
:type element_class: WrappedXml derived class
:param node_instance: The owning WrappedXml object that contains the backing xml.
:type node_instance: WrappedXml derived
:param property_name: The name of the xml element that contains the list of nodes.
:type property_name: str
:param list_item_property_name: The name of the xml element that represents the nodes.
:type list_item_property_name: str
:param readonly: When set to True an AttributeError will be thrown if this property is written to.
:type readonly: bool
"""
self._element_class = element_class
self._parent_element = node_instance
self._property_name = property_name
self._list_property_name = list_item_property_name
self._read_only = readonly
self._inner_list = self._get_list_nodes()
def _get_list_nodes(self):
root_element = self._get_list_root_element()
if root_element is None:
return []
sub_nodes = root_element.findall(self._list_property_name)
return [self._element_class(self._parent_element.lims, node) for node in sub_nodes]
def _ensure_settable(self):
if self._read_only:
raise AttributeError("%s.%s is a read-only property." % (self._parent_element, self._list_property_name))
def __len__(self):
return len(self._inner_list)
def __delitem__(self, index):
# Verify we can modify this structure
self._ensure_settable()
# Remove the element from xml
element = self._inner_list[index]
root_element = self._get_list_root_element()
if root_element is not None:
root_element.remove(element.xml_root)
# Remove the element from the in memory list
del self._inner_list[index]
def insert(self, index, value):
# Verify we can modify this structure
self._ensure_settable()
# Add the item to be a child of the list root element
root_element = self._get_or_create_list_root_element()
root_element.insert(index, value.xml_root)
# Insert into the in memory list
self._inner_list.insert(index, value)
def __setitem__(self, index, value):
# Verify we can modify this structure
self._ensure_settable()
# Update the backing xml
root_element = self._get_or_create_list_root_element()
root_element[index] = value.xml_root
# Modify the in memory list
self._inner_list.__setitem__(index, value)
def __getitem__(self, index):
return self._inner_list.__getitem__(index)
def _get_list_root_element(self):
"""
Shortcut to the wrapper element for the list.
:return: The Etree element that is described in the _property_name xpath, or None
:rtype: ETree.Element
"""
return self._parent_element.xml_find(self._property_name)
def _get_or_create_list_root_element(self):
"""
Shortcut to the wrapper element for the list. If the element does not exist it will
be created
:return: The Etree element that is described in the _property_name xpath.
:rtype: ETree.Element
"""
return self._parent_element.get_or_create_subnode(self._property_name)
class _ClarityLiteralDict(MutableMapping):
"""
:type top_node: ETree.Element
"""
def __init__(self, top_node, subnode_name, name_attribute, value_attribute):
self.top_node = top_node
self.subnode_name = subnode_name
self.name_attribute = name_attribute
self.value_attribute = value_attribute
def __iter__(self):
# Return an iterator of all keys, like a dict
all_keys = map(self._get_node_key, self._get_all_nodes())
return iter(all_keys)
def __len__(self):
return len(self._get_all_nodes())
def __delitem__(self, key):
node = self._node_for(key)
if node is None:
raise KeyError
self.top_node.remove(node)
def __setitem__(self, key, value):
node = self._node_for(key)
if node is None:
node = ETree.SubElement(self.top_node, self.subnode_name, {self.name_attribute: key})
node.set(self.value_attribute, value)
def __getitem__(self, key):
node = self._node_for(key)
if node is None:
raise KeyError
return node.get(self.value_attribute)
def _node_for(self, key):
nodes = self._get_all_nodes()
for n in nodes:
if self._get_node_key(n) == key:
return n
return None
def _get_node_key(self, node):
return node.get(self.name_attribute)
def _get_all_nodes(self):
return self.top_node.findall('./' + self.subnode_name)
class subnode_property_literal_dict(subnode_property):
"""
If there is a dictionary represented in xml with key/value subnodes this will turn it into a dictionary.
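    ex (an illustrative sketch; node and attribute names are placeholders):
    <root_node>
        <entries>
            <entry name="key one" value="value one"/>
            <entry name="key two" value="value two"/>
        </entries>
    </root_node>
    class RootNodeWrapper(WrappedXml):
        entries = subnode_property_literal_dict("entries", "entry")
    root.entries["key one"] == "value one"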
"""
def __init__(self, prop_name, subprop_name, name_attribute='name', value_attribute='value', readonly=False):
"""
:param prop_name:
:param subprop_name:
:param name_attribute:
:param value_attribute:
:param readonly: When set to True an AttributeError will be thrown if this property is written to.
:type readonly: bool
"""
super(subnode_property_literal_dict, self).__init__(prop_name, readonly=readonly)
self.subprop_name = subprop_name
self.name_attribute = name_attribute
self.value_attribute = value_attribute
self._dict = None
def _get_or_make_dict(self, instance):
node = instance.xml_find('./' + self.property_name)
# we check node in case we've gotten a new xml tree and still have the old dict.
if self._dict is not None and self._dict.top_node == node:
return self._dict
if node is None:
node = ETree.SubElement(instance.xml_root, self.property_name)
return _ClarityLiteralDict(node, self.subprop_name, self.name_attribute, self.value_attribute)
def __get__(self, instance, owner):
"""
:type instance: s4.clarity._internal.element.ClarityElement
:rtype: dict
"""
return self._get_or_make_dict(instance)
def __set__(self, instance, new_dict):
"""
:type instance: s4.clarity._internal.element.ClarityElement
:type new_dict: dict
"""
self._ensure_settable(instance)
        # Note: .update() merges new_dict into the existing dict; keys that are absent from new_dict are left in place
our_dict = self._get_or_make_dict(instance)
our_dict.update(new_dict)
class subnode_property_dict(subnode_property):
"""
Takes a subnode that contains a dictionary, searching by property name.
The content of the element is turned into the dictionary.
"""
def __init__(self, property_name, as_attributes=(), readonly=False):
"""
:param property_name:
:param as_attributes:
:param readonly: When set to True an AttributeError will be thrown if this property is written to.
:type readonly: bool
"""
super(subnode_property_dict, self).__init__(property_name, readonly=readonly)
self.as_attributes = as_attributes
def __get__(self, instance, owner):
"""
:type instance: s4.clarity._internal.element.ClarityElement
:rtype: dict
"""
node = instance.xml_find('./' + self.property_name)
return self._node_into_dict(node)
def _node_into_dict(self, node):
if node.text is None or node.text.strip() == "":
d = dict()
for sub in node:
key = sub.tag
value = self._node_into_dict(sub)
if key in d:
value_already = d[key]
if type(value_already) == list:
value_already.append(value)
else:
d[key] = [value_already, value]
else:
d[key] = value
for attrname in self.as_attributes:
key = attrname
value = node.get(key)
if value is not None:
d[key] = value
return d
else:
return node.text
@staticmethod
def _value_to_string(value):
if value is None:
raise Exception("Can't serialize None as XML value")
        elif isinstance(value, string_types) or type(value) == int:
            # simple types which we know are ok
            # (isinstance is used for strings; string_types is a tuple, so a
            # type(value) membership test against it would never match)
            return str(value)
elif type(value) == bool:
return "true" if value else "false"
else:
log.warning("stringifying value %r for XML: %s", value, str(value))
return str(value)
def _dict_into_node(self, instance, value, parent, name, node=None):
        if node is None:
            node = parent.find('./' + name)
            # Compare against None explicitly: an element with no children is falsy in
            # ElementTree, so the "find(...) or SubElement(...)" idiom would create a
            # duplicate node whenever the existing node is empty.
            if node is None:
                node = ETree.SubElement(parent, name)
if type(value) == dict:
for k, v in value.items():
if k in self.as_attributes:
node.set(k, self._value_to_string(v))
else:
self._dict_into_node(instance, v, node, k)
elif type(value) == list:
for subvalue in value:
# call self again, but value = subvalue, prechosen node
self._dict_into_node(instance, subvalue, parent, name, node)
# get a new node
node = ETree.SubElement(parent, name)
# release the last node in the list
parent.remove(node)
else:
node.text = self._value_to_string(value)
def __set__(self, instance, value):
"""
:type instance: s4.clarity._internal.element.ClarityElement
:type value: dict[str, any]
"""
self._ensure_settable(instance)
self._dict_into_node(instance, value, instance.xml_root, self.property_name)
class subnode_property_list_of_dicts(subnode_property_dict):
def __init__(self, property_name, as_attributes=(), order_by=None):
super(subnode_property_list_of_dicts, self).__init__(property_name, as_attributes)
self.__doc__ = "Retrieves the value of the property '%s'\n\n:type: list[dict]" % property_name
self.order_by = order_by
def __get__(self, instance, owner):
"""
:type instance: s4.clarity._internal.element.ClarityElement
:rtype: list[dict]
"""
nodes = instance.xml_findall('./' + self.property_name)
the_list = [self._node_into_dict(node) for node in nodes]
if self.order_by is None:
return the_list
else:
return sorted(the_list, key=self.order_by)
def __set__(self, instance, the_list):
"""
:type instance: s4.clarity._internal.element.ClarityElement
:type the_list: list[dict[str, any]]
"""
self._ensure_settable(instance)
if self.order_by is not None:
last_key = None
for item in the_list:
this_key = self.order_by(item)
if last_key is not None and this_key < last_key:
raise Exception("List given for %s should be ordered by %s, but value %r is out of order" %
(self.property_name, self.order_by, this_key),
item, last_key, this_key)
last_key = this_key
xpath = './' + self.property_name
split_path = xpath.split("/")
parent_path = "/".join(split_path[:-1]) + "/"
parent = instance.xml_find(parent_path)
node_name = split_path[-1]
for matching_node in [n for n in list(parent) if n.tag.lower() == node_name.lower()]:
parent.remove(matching_node)
self._dict_into_node(instance, the_list, parent, node_name)
def _prop_defining_module():
frame = inspect.currentframe()
modulename = None
while modulename is None or modulename == __name__:
modulename = frame.f_locals.get('__module__')
frame = frame.f_back
if frame is None:
break
# frames are special and are not properly reference-counted, so must be deleted explicitly
del frame
return modulename | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/_internal/props.py | 0.842701 | 0.188679 | props.py | pypi |
from typing import List, Iterable, Tuple
from six.moves.urllib.parse import urlencode
from s4.clarity import ClarityException
from s4.clarity import ETree
import re
from .element import ClarityElement
class NoMatchingElement(ClarityException):
pass
class MultipleMatchingElements(ClarityException):
pass
class BatchFlags(int):
NONE = 0
BATCH_CREATE = 1
BATCH_GET = 2
BATCH_UPDATE = 4
QUERY = 8
BATCH_ALL = 15 # all options, or'd
class ElementFactory(object):
"""
Provides access to a Clarity API endpoint. Implements conversion between XML and ClarityElement
as well as caching and network services.
:type lims: LIMS
:type element_class: classobj
:type batch_flags: s4.clarity.BatchFlags
"""
_params_re = re.compile(r'\?.*$')
@staticmethod
def _strip_params(string):
return ElementFactory._params_re.sub('', string)
def __init__(self, lims, element_class, batch_flags=None, request_path=None, name_attribute="name"):
"""
:type lims: LIMS
:type element_class: classobj
:type batch_flags: BatchFlags or None
:type request_path: str
:param request_path: for example, '/configuration/workflows'.
when not specified, uses '/<plural of element name>'.
:type name_attribute: str
:param name_attribute: if not "name", provide this to adjust behaviour of 'get_by_name'.
"""
self.lims = lims
self.element_class = element_class
self.name_attribute = name_attribute
self.batch_flags = batch_flags or BatchFlags.NONE
self._plural_name = self.element_class.__name__.lower() + "s"
if request_path is None:
request_path = "/" + self._plural_name
self.uri = lims.root_uri + request_path
self._cache = dict()
lims.factories[element_class] = self
def new(self, **kwargs):
# type: (**str) -> ClarityElement
"""
Create a new ClarityElement pre-populated with the provided values.
This object has yet to be persisted to Clarity.
:param kwargs: Key/Value list of attribute name/value pairs to initialize the element with.
:return: A new ClarityElement, pre-populated with provided values.
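        Example (an illustrative sketch; the attribute names and values are
        placeholders, and required attributes vary by element type):
            sample = lims.samples.new(name="Demo Sample", project=some_project)
            lims.samples.add(sample)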
"""
# creating some types requires using special tag, ie samples
# are created by posting a 'samplecreation' element, not a 'sample'
el_tag = getattr(self.element_class, 'CREATION_TAG', self.element_class.UNIVERSAL_TAG)
# create xml_root, call class constructor
new_xml_root = ETree.Element(el_tag)
new_obj = self.element_class(self.lims, xml_root=new_xml_root)
# set attributes from kwargs to new_object
for k, v in kwargs.items():
setattr(new_obj, k, v)
return new_obj
def add(self, element):
# type: (ClarityElement) -> ClarityElement
"""
Add an element to the Factory's internal cache and persist it back to Clarity.
:type element: ClarityElement
:rtype: ClarityElement
"""
element.post_and_parse(self.uri)
self._cache[element.uri] = element
return element
def delete(self, element):
# type: (ClarityElement) -> None
"""
Delete an element from the Factory's internal cache and delete it from Clarity.
:type element: ClarityElement
"""
self.lims.request('delete', element.uri)
del self._cache[element.uri]
def can_batch_get(self):
# type: () -> bool
"""
Indicates if Clarity will allow batch get requests.
"""
return self.batch_flags & BatchFlags.BATCH_GET
def can_batch_update(self):
# type: () -> bool
"""
Indicates if Clarity will allow batch updates.
"""
return self.batch_flags & BatchFlags.BATCH_UPDATE
def can_batch_create(self):
# type: () -> bool
"""
Indicates if Clarity will allow batch record creation.
"""
return self.batch_flags & BatchFlags.BATCH_CREATE
def can_query(self):
# type: () -> bool
"""
Indicates if Clarity will allow the user to submit queries.
"""
return self.batch_flags & BatchFlags.QUERY
def from_link_node(self, xml_node):
# type: (ETree.Element) -> ClarityElement
"""
Will return the ClarityElement described by the link node.
Link nodes are any xml element with the following attributes
<element uri='...' name='...' limsid='...' />
"""
if xml_node is None:
return None
obj = self.get(xml_node.get("uri"), name=xml_node.get("name"), limsid=xml_node.get("limsid"))
return obj
def from_link_nodes(self, xml_nodes):
# type: (List[ETree.Element]) -> List[ClarityElement]
"""
Will return the ClarityElements described by the link nodes.
Link nodes are any xml element with the following attributes
<element uri='...' name='...' limsid='...' />
"""
objs = []
for xml_node in xml_nodes:
obj = self.from_link_node(xml_node)
if obj is not None:
objs.append(obj)
return objs
def from_limsid(self, limsid, force_full_get=False):
# type: (str, bool) -> ClarityElement
"""
Returns the ClarityElement with the specified limsid.
"""
uri = self.uri + "/" + limsid
return self.get(uri, limsid=limsid, force_full_get=force_full_get)
def get_by_name(self, name):
# type: (str) -> ClarityElement
"""
Queries for a ClarityElement that is described by the unique name.
An exception is raised if there is no match or more than one match.
:raises NoMatchingElement: if no match
:raises MultipleMatchingElements: if multiple matches
"""
matches = self.query(**{self.name_attribute: name})
if len(matches) == 0:
raise NoMatchingElement("No %s found with name '%s'" % (self.element_class.__name__, name))
elif len(matches) > 1:
raise MultipleMatchingElements("More than one %s found with name '%s'" % (self.element_class.__name__, name))
return matches[0]
def get(self, uri, force_full_get=False, name=None, limsid=None):
# type: (str, bool, str, str) -> ClarityElement
"""
Returns the cached ClarityElement described by the provide uri. If the
element does not exist a new cache entry will be created with the provided
name and limsid.
If force_full_get is true, and the object is not fully retrieved it will be refreshed.
"""
uri = self._strip_params(uri)
if uri in self._cache:
obj = self._cache[uri]
else:
obj = self.element_class(self.lims, uri=uri, name=name, limsid=limsid)
self._cache[uri] = obj
if force_full_get and not obj.is_fully_retrieved():
obj.refresh()
return obj
def post(self, element):
# type: (ClarityElement) -> None
"""
Posts the current state of the ClarityElement back to Clarity.
"""
element.post_and_parse(self.uri)
def batch_fetch(self, elements):
# type: (Iterable[ClarityElement]) -> List[ClarityElement]
"""
Updates the content of all ClarityElements with the current state from Clarity.
Syntactic sugar for batch_get([e.uri for e in elements])
:return: A list of the elements returned by the query.
"""
return self.batch_get([e.uri for e in elements])
def batch_get_from_limsids(self, limsids):
# type: (Iterable[str]) -> List[ClarityElement]
"""
Return a list of ClarityElements for a given list of limsids
:param limsids: A list of Clarity limsids
:return: A list of the elements returned by the query.
"""
return self.batch_get([self.uri + "/" + limsid for limsid in limsids])
def batch_get(self, uris, prefetch=True):
# type: (Iterable[str], bool) -> List[ClarityElement]
"""
Queries Clarity for a list of uris described by their REST API endpoint.
If this query can be made as a single request it will be done that way.
:param uris: A List of uris
:param prefetch: Force load full content for each element.
:return: A list of the elements returned by the query.
"""
if not uris:
return [] # just return an empty list if there were no uris
if self.can_batch_get():
links_root = ETree.Element("{http://genologics.com/ri}links")
n_queries = 0
querying_now = set()
for uri in uris:
uri = self._strip_params(uri)
if uri in querying_now:
# already covered
continue
obj = self._cache.get(uri)
if prefetch and (obj is None or not obj.is_fully_retrieved()):
link = ETree.SubElement(links_root, "link")
link.set("uri", uri)
link.set("rel", self._plural_name)
querying_now.add(uri)
n_queries += 1
if n_queries > 0:
result_root = self.lims.request('post', self.uri + "/batch/retrieve", links_root)
result_nodes = result_root.findall('./' + self.element_class.UNIVERSAL_TAG)
for node in result_nodes:
uri = node.get("uri")
uri = self._strip_params(uri)
old_obj = self._cache.get(uri)
if old_obj is not None:
old_obj.xml_root = node
else:
new_obj = self.element_class(self.lims, uri=uri, xml_root=node)
self._cache[uri] = new_obj
return [self._cache[uri] for uri in uris]
else:
return [self.get(uri, force_full_get=prefetch) for uri in uris]
def _query_uri_and_tag(self):
# type: () -> Tuple[str, str]
"""
Return the uri and tag to use for queries. This can be overridden by subclasses when the
        query uri doesn't follow the usual rule. Currently this is used to support queries for steps
which are mapped to queries against processes and then mapped back to steps.
Parse uri and tag from UNIVERSAL_TAG
:return: Factory endpoint URI and tag. ex: ('http://genologics.com/ri/step', 'step')
"""
return self.uri, self.element_class.UNIVERSAL_TAG.split('}', 2)[1]
def all(self, prefetch=True):
# type: (bool) -> List[ClarityElement]
"""
Queries Clarity for all ClarityElements associated with the Factory.
:param prefetch: Force load full content for each element.
:return: List of ClarityElements returned by Clarity.
"""
return self.query(prefetch)
def query(self, prefetch=True, **params):
# type: (bool, **str) -> List[ClarityElement]
"""
Queries Clarity for ClarityElements associated with the Factory.
The query will be made with the provided parameters encoded in the url.
For the specific parameters to pass and the expected values please see the
Clarity REST API.
Some of the expected parameters contain the '-' character, in which
case the dictionary syntax of this call will need to be used.
Inline parameter names::
query(singlevaluename='single value', multivaluename=['A', 'B', 'C'])
Dictionary of parameters::
query(prefetch=True, ** {
'single-value-name': 'single value',
'multi-value-name': ['A', 'B', 'C']
})
:param params: Query parameters to pass to clarity.
:param prefetch: Force load full content for each element.
:return: A list of the elements returned by the query.
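        Example (an illustrative sketch; 'projectname' is assumed to be a valid
        query parameter for the samples endpoint, per the Clarity REST API):
            samples = lims.samples.query(projectname='Demo Project')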
"""
if not self.can_query():
raise Exception("Can't query for %s" % self.element_class.__name__)
uri, tag = self._query_uri_and_tag()
query_uri = uri + "?" + urlencode(params, doseq=True)
elements = []
while query_uri:
links_root = self.lims.request('get', query_uri)
link_nodes = links_root.findall('./' + tag)
elements += self.from_link_nodes(link_nodes)
next_page_node = links_root.findall('./next-page')
if next_page_node:
query_uri = next_page_node[0].get('uri')
else:
query_uri = None
if prefetch:
self.batch_fetch(elements)
return elements
def query_uris(self, **params):
# type: (**str) -> List[str]
"""
For backwards compatibility, use query() instead.
Does a query and returns the URIs of the results.
:param params: Query parameters to pass to clarity.
"""
return [e.uri for e in self.query(False, **params)]
def batch_update(self, elements):
# type: (Iterable[ClarityElement]) -> None
"""
        Persists the ClarityElements back to Clarity. Will perform
this action as a single query if possible.
:param elements: All ClarityElements to save the state of.
:raises ClarityException: if Clarity returns an exception as XML
"""
if not elements:
return
if self.can_batch_update():
details_root = ETree.Element(self.batch_tag)
for el in elements:
details_root.append(el.xml_root)
self.lims.request('post', self.uri + "/batch/update", details_root)
else:
for el in elements:
self.lims.request('post', el.uri, el.xml_root)
def batch_create(self, elements):
# type: (Iterable[ClarityElement]) -> List[ClarityElement]
"""
Creates new records in Clarity for each element and returns these new records as ClarityElements.
If this operation can be performed in a single network operation it will be.
:param elements: A list of new ClarityElements that have not been persisted to Clarity yet.
:return: New ClarityElement records from Clarity, created with the data supplied to the method.
:raises ClarityException: if Clarity returns an exception as XML
"""
if not elements:
return []
if self.can_batch_create():
details_root = ETree.Element(self.batch_tag)
for el in elements:
details_root.append(el.xml_root)
links = self.lims.request('post', self.uri + "/batch/create", details_root)
return self.from_link_nodes(links)
else:
objects = []
for el in elements:
new_obj = self.element_class(
self.lims,
xml_root=self.lims.request('post', el.uri, el.xml_root)
)
self._cache[new_obj.uri] = new_obj
objects.append(new_obj)
return objects
def batch_refresh(self, elements):
# type: (Iterable[ClarityElement]) -> None
"""
Loads the current state of the elements from Clarity. Any changes made
to these artifacts that have not been pushed to Clarity will be lost.
:param elements: All ClarityElements to update from Clarity.
"""
# Clear the existing state on the elements; this will force a refresh when
# they are queried, even though they are currently in the cache
self.batch_invalidate(elements)
# Now force load a new copy of the artifact state
self.batch_fetch(elements)
def batch_invalidate(self, elements):
# type: (Iterable[ClarityElement]) -> None
"""
Clears the current local state for all elements.
:param elements: The ClarityElements that are to have their current state cleared.
"""
for element in elements:
element.invalidate()
@property
def batch_tag(self):
return re.sub("}.*$", "}details", self.element_class.UNIVERSAL_TAG) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/_internal/factory.py | 0.880579 | 0.236759 | factory.py | pypi |
import collections
from six import string_types
from . import ClarityElement
from s4.clarity import ETree, types
from .lazy_property import lazy_property
from s4.clarity.types import obj_to_clarity_string, clarity_string_to_obj
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
FIELD_TAG = "{http://genologics.com/ri/userdefined}field"
class FieldsMixin(ClarityElement):
# most elements put fields in '.', some are in './fields'.
# must start with "./", or be a single period.
FIELDS_XPATH = "."
ATTACH_TO_NAME = None
@lazy_property
def fields(self):
""":type: dict[str, object]"""
if self.FIELDS_XPATH == ".":
fields_node = self.xml_root
else:
fields_node = self.xml_find(self.FIELDS_XPATH)
if fields_node is None:
fields_node = self.make_subelement_with_parents(self.FIELDS_XPATH)
return FieldsDict(fields_node)
def get(self, name, default=None):
"""
Get a UDF, if it exists.
(Non-exception version of []).
:type name: str
:param default: returned if the item is not present
:rtype: str or int or bool or datetime.datetime or float
"""
if not isinstance(name, string_types):
raise Exception("Non-string UDF names are invalid for Clarity elements.")
return self.fields.get(name, default)
def get_raw(self, name, default=None):
"""
Get a UDF as a string, if it exists.
:type name: str
:param default: returned if the item is not present
:rtype: str
"""
if not isinstance(name, string_types):
raise Exception("Non-string UDF names are invalid for Clarity elements.")
return self.fields.get_raw(name, default)
def get_formatted_number_string(self, name, default=None):
"""
Get a Numeric UDF formatted to the correct precision, if the UDF exists.
:type name: str
:type default: str
:param default: returned if the item is not present
:rtype: str
"""
if not isinstance(name, string_types):
raise Exception("Non-string UDF names are invalid for Clarity elements.")
raw_value = self.fields.get(name)
if raw_value is None: # Compare against None, so as not to lose values of 0
return default
udf = self.get_udf_config(name)
if udf.field_type != types.NUMERIC:
raise Exception("'%s' can not be used with get_with_precision, as it is non-numeric." % name)
# udf.precision will be None if 0 after Clarity 4.2.17
return '{0:.{prec}f}'.format(raw_value, prec=int(udf.precision or 0))
def get_udf_config(self, name):
"""
Get the underlying UDF configuration associated with the field
:param name: name of the field
:rtype: s4.clarity.configuration.Udf
"""
return self.lims.udfs.get_by_name(name, self._get_attach_to_key())
def _get_attach_to_key(self):
"""
Get the attach-to-name and attach-to-category properties for fetching the field's matching UDF object.
Default implementation depends on the ATTACH_TO_NAME property being defined, and returns an empty category
:rtype: str,str
:return: a tuple of the attach-to-name and attach-to-category properties associated with the element type's UDFs
"""
if not self.ATTACH_TO_NAME:
raise Exception("Classes using the FieldsMixin must either provide a ATTACH_TO_NAME value, or override the "
"get_element_attach_to_values method.")
return self.ATTACH_TO_NAME, ""
def __contains__(self, item):
try:
self.__getitem__(item)
return True
except KeyError:
return False
# delegate to fields
def __getitem__(self, item):
if not isinstance(item, string_types):
raise Exception("Non-string UDF names are invalid for Clarity elements.", type(item))
try:
return self.fields.__getitem__(item)
except KeyError:
raise KeyError("No UDF '%s' defined on %s." % (item, self))
# delegate to fields
def __setitem__(self, key, value):
if not isinstance(key, string_types):
raise Exception("Non-string UDF names are invalid for Clarity elements.")
return self.fields.__setitem__(key, value)
def __delitem__(self, key):
if not isinstance(key, string_types):
raise Exception("Non-string UDF names are invalid for Clarity elements.")
return self.fields.__delitem__(key)
def __iter__(self):
return self.fields.__iter__()
@property
def xml_root(self):
return super(FieldsMixin, self).xml_root
@xml_root.setter
def xml_root(self, root_node):
"""
NOTE: setting xml_root directly bypasses dirty object tracking.
"""
super(FieldsMixin,type(self)).xml_root.__set__(self, root_node)
if root_node is not None:
# wipe our fields cache
self.__dict__.pop('fields', None)
# comma decimal mark workaround
# This works around the Clarity issue with non-english locales, where numeric values from Clarity
# are output by Clarity with commas as the decimal mark, but Clarity cannot accept them as input.
for subnode in root_node.findall(self.FIELDS_XPATH + '/' + FIELD_TAG):
if subnode.get('type') == types.NUMERIC:
subnode.text = subnode.text.replace(',', '.')
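# Hedged usage sketch: elements mixing in FieldsMixin expose UDFs through
# dict-style access (the element and field names are illustrative):
#
#     sample['Priority'] = 'High'            # set a UDF
#     priority = sample.get('Priority')      # typed read, None if absent
#     raw = sample.get_raw('Concentration')  # untranslated string value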
class FieldsDict(MutableMapping):
"""
:type _real_dict: dict[str, ETree.Element]
:type _root_node: ETree.Element
"""
def __init__(self, fields_node):
"""
:type fields_node: ETree.Element
"""
d = {}
for subnode in fields_node.findall('./' + FIELD_TAG):
d[subnode.get("name")] = subnode
self._real_dict = d
self._value_cache = {}
self._root_node = fields_node
def __len__(self):
return len(self._real_dict)
def __setitem__(self, key, value):
"""
NOTE: The value should already be the correct type.
:type key: str
:type value: object
"""
field_node = self._get_or_create_node(key)
field_node.text = obj_to_clarity_string(value)
self._value_cache[field_node] = value
def _get_or_create_node(self, key):
"""
Get a field XML node, or create and append it to the XML fields node if it doesn't exist.
:type key: str
:rtype: ETree.Element
"""
field_node = self._real_dict.get(key)
if field_node is None:
field_node = ETree.SubElement(self._root_node, FIELD_TAG)
field_node.set('name', key)
self._real_dict[key] = field_node
return field_node
def __getitem__(self, key):
"""
Returns a field (UDF) value.
The return type can be any of bool, str, float, datetime.
:rtype: object
:type key: str
"""
field_node = self._real_dict[key]
return self._node_python_value(field_node)
def get_raw(self, key, default=None):
"""
Returns an untranslated string value of a UDF.
:type key: str
:type default: str or None
:rtype: str or None
"""
field_node = self._real_dict.get(key)
return field_node.text if field_node is not None else default
def _node_python_value(self, field_node):
if field_node in self._value_cache:
return self._value_cache[field_node]
else:
field_type = field_node.get('type')
value = clarity_string_to_obj(field_type, field_node.text)
self._value_cache[field_node] = value
return value
def get_type(self, key):
"""
Returns the type of a Clarity field.
:type key: str
:returns: the Clarity field type string (see s4.clarity.types)
:rtype: str
"""
field_node = self._real_dict.get(key)
if field_node is None:
return None
else:
return field_node.get('type')
def __delitem__(self, key):
"""
Delete an item in the field dictionary by setting it to None, which will send the empty string
as a value to Clarity.
"""
self.__setitem__(key, None)
def __iter__(self):
"""Return an iterator over field names."""
return self._real_dict.__iter__()
def itervalues(self):
"""Return an iterator over deserialized field values."""
for key in self._real_dict:
node = self._real_dict[key]
yield self._node_python_value(node)
def iteritems(self):
"""Return an iterator over (field name, field value (deserialized)) pairs."""
for key in self._real_dict:
node = self._real_dict[key]
value = self._node_python_value(node)
yield (key, value)
def __contains__(self, x):
return self._real_dict.__contains__(x) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/_internal/fields.py | 0.791902 | 0.202246 | fields.py | pypi |
from s4.clarity import ETree
from s4.clarity._internal import ClarityElement
from s4.clarity._internal.props import subnode_property, attribute_property
from s4.clarity import types
class Udf(ClarityElement):
UNIVERSAL_TAG = "{http://genologics.com/ri/configuration}udfconfig"
CREATION_TAG = "{http://genologics.com/ri/configuration}field"
# Alternate name to avoid collision with built-in 'type'
field_type = attribute_property("type") # type: str
attach_to_name = subnode_property("attach-to-name") # type: str
show_in_lablink = subnode_property("show-in-lablink", types.BOOLEAN) # type: bool
allow_non_preset_values = subnode_property("allow-non-preset-values", types.BOOLEAN) # type: bool
first_preset_is_default_value = subnode_property("first-preset-is-default-value", types.BOOLEAN) # type: bool
show_in_tables = subnode_property("show-in-tables", types.BOOLEAN) # type: bool
is_editable = subnode_property("is-editable", types.BOOLEAN) # type: bool
is_deviation = subnode_property("is-deviation", types.BOOLEAN) # type: bool
is_controlled_vocabulary = subnode_property("is-controlled-vocabulary", types.BOOLEAN) # type: bool
is_required = subnode_property("is-required", types.BOOLEAN) # type: bool
attach_to_category = subnode_property("attach-to-category") # type: str
# Only valid for Numeric types
min_value = subnode_property("min-value", types.NUMERIC) # type: float
max_value = subnode_property("max-value", types.NUMERIC) # type: float
precision = subnode_property("precision", types.NUMERIC) # type: float
@property
def presets(self):
"""
:type: list
"""
preset_nodes = self.xml_root.findall('preset')
return [types.clarity_string_to_obj(self.field_type, preset_node.text) for preset_node in preset_nodes]
def add_preset(self, new_preset_value):
"""
Add a new preset value to the end of the list. Ignores values that are already present.
:type new_preset_value: str|unicode|int|float|datetime.date|bool
:param new_preset_value: the preset value to add, with a type appropriate to the UDF. The value is not
validated to be the correct type.
"""
preset = self._find_preset_by_value(new_preset_value)
if preset is not None:
return
self._add_preset_internal(new_preset_value)
def remove_preset(self, preset_value):
"""
Remove a preset value from the list.
:type preset_value: str|unicode|int|float|datetime.date|bool
:param preset_value: the preset value to remove, with a type appropriate to the UDF. The value is
not validated to be the correct type.
"""
preset = self._find_preset_by_value(preset_value)
if preset is not None:
self.xml_root.remove(preset)
def set_default_preset(self, default_preset_value):
"""
Sets a preset value as the default (puts it first in the list). Adds the value if it isn't already present.
:type default_preset_value: str|unicode|int|float|datetime.date|bool
:param default_preset_value: the new default preset value, with a type appropriate to the UDF. The value is
not validated to be the correct type.
:raises Exception: if the udf's first-preset-is-default property is currently false
"""
if not self.first_preset_is_default_value:
raise Exception("Setting the default value will have no effect, as first-preset-is-default-value is false.")
current_preset_nodes = self.xml_findall('preset')
# Initialize the new list of presets with the default
new_preset_values = [types.obj_to_clarity_string(default_preset_value)]
for preset in current_preset_nodes:
# Only grab values other than the new default, in case it was already in there
if types.clarity_string_to_obj(self.field_type, preset.text) != default_preset_value:
new_preset_values.append(preset.text)
self.xml_root.remove(preset)
for preset_value in new_preset_values:
self._add_preset_internal(preset_value)
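# Hedged usage sketch, assuming `lims.udfs.get_by_name` returns a Udf as used
# by FieldsMixin.get_udf_config (the UDF and preset names are illustrative;
# persisting the change back to Clarity is not shown):
#
#     udf = lims.udfs.get_by_name('Priority', 'Sample')
#     udf.add_preset('Urgent')
#     udf.set_default_preset('Normal')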
def _find_preset_by_value(self, preset_value):
all_presets = self.xml_root.findall("preset")
for preset in all_presets:
if types.clarity_string_to_obj(self.field_type, preset.text) == preset_value:
return preset
def _add_preset_internal(self, preset_value):
preset_node = ETree.SubElement(self.xml_root, 'preset')
preset_node.text = types.obj_to_clarity_string(preset_value) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/configuration/udf.py | 0.761538 | 0.227459 | udf.py | pypi |
import logging
from s4.clarity._internal.element import ClarityElement, WrappedXml
from s4.clarity.reagent_kit import ReagentKit
from s4.clarity.control_type import ControlType
from s4.clarity._internal.props import subnode_property_list_of_dicts, subnode_property, subnode_property_literal_dict, attribute_property, subnode_element_list
from s4.clarity import types, lazy_property
log = logging.getLogger(__name__)
class Protocol(ClarityElement):
UNIVERSAL_TAG = "{http://genologics.com/ri/protocolconfiguration}protocol"
properties = subnode_property_literal_dict('protocol-properties', 'protocol-property')
index = attribute_property("index", typename=types.NUMERIC)
@lazy_property
def steps(self):
"""
:type: list[StepConfiguration]
"""
return [StepConfiguration(self, n) for n in self.xml_findall('./steps/step')]
def _step_node(self, name):
for n in self.xml_findall('./steps/step'):
if n.get('name') == name:
return n
return None
def step_from_id(self, stepid):
"""
:rtype: StepConfiguration or None
"""
for step in self.steps:
if step.uri.split('/')[-1] == stepid:
return step
return None
def step(self, name):
"""
:rtype: StepConfiguration or None
"""
for step in self.steps:
if step.name == name:
return step
return None
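# Hedged usage sketch: navigating a protocol's steps by name, assuming a
# Protocol instance is already in hand (the step name is illustrative):
#
#     step = protocol.step('PCR Setup')
#     if step is not None:
#         print(step.protocol_step_index)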
@property
def number_of_steps(self):
"""
:type: int
"""
return len(self.steps)
class ProtocolStepField(WrappedXml):
name = attribute_property("name")
style = attribute_property("style")
attach_to = attribute_property("attach-to")
class StepConfiguration(ClarityElement):
UNIVERSAL_TAG = "{http://genologics.com/ri/protocolconfiguration}step"
def __init__(self, protocol, node):
"""
:type protocol: Protocol
"""
super(StepConfiguration, self).__init__(protocol.lims, uri=None, xml_root=node)
self.protocol = protocol
properties = subnode_property_literal_dict('step-properties', 'step-property')
protocol_step_index = subnode_property("protocol-step-index", types.NUMERIC)
queue_fields = subnode_element_list(ProtocolStepField, "queue-fields", "queue-field")
step_fields = subnode_element_list(ProtocolStepField, "step-fields", "step-field")
sample_fields = subnode_element_list(ProtocolStepField, "sample-fields", "sample-field")
triggers = subnode_property_list_of_dicts('epp-triggers/epp-trigger', as_attributes=[
'status', 'point', 'type', 'name'
])
transitions = subnode_property_list_of_dicts('transitions/transition', as_attributes=[
'name', 'sequence', 'next-step-uri'
], order_by=lambda x: int(x.get('sequence')))
def refresh(self):
"""
:raise Exception: Unable to refresh step directly, use protocol
"""
# FIXME?
raise Exception("Unable to refresh step directly, use protocol")
def put_and_parse(self, alternate_uri=None):
self.protocol.put_and_parse(alternate_uri)
def post_and_parse(self, alternate_uri=None):
self.protocol.post_and_parse(alternate_uri)
@lazy_property
def process_type(self):
"""
:type: ProcessType
"""
pt_display_name = self.get_node_text('process-type')
results = self.lims.process_types.query(displayname=pt_display_name)
if results:
return results[0]
else:
raise Exception("Process type '%s' not found in Clarity", pt_display_name)
@lazy_property
def required_reagent_kits(self):
"""
:type: list[ReagentKit]
"""
reagent_kits = self.xml_findall("./required-reagent-kits/reagent-kit")
return [ReagentKit(self.lims, p.get("uri")) for p in reagent_kits]
@lazy_property
def permitted_control_types(self):
"""
:type: list[ControlType]
"""
control_types = self.xml_findall("./permitted-control-types/control-type")
return [ControlType(self.lims, p.get("uri")) for p in control_types]
@lazy_property
def permitted_containers(self):
"""
:type: ContainerType
"""
container_types = self.xml_findall("./permitted-containers/container-type")
# container-type (type generic-type-link) has no uri attribute. find the container by name
# beware if your lims has multiple containers with the same name
ret = self.lims.container_types.query(name=[c.text for c in container_types])
if len(container_types) != len(ret): # can len(types) > len(ret)?
log.warning(
"The number of container types found differs from the number "
"specified in the step config. Do multiple container types "
"share the same name?"
)
return ret
@lazy_property
def permitted_instrument_types(self):
"""
:type: List[str]
"""
instrument_type_nodes = self.xml_findall("./permitted-instrument-types/instrument-type")
return [node.text for node in instrument_type_nodes]
@lazy_property
def queue(self):
"""
:type: Queue
"""
return self.lims.queues.from_limsid(self.limsid) | /s4-clarity-1.5.0.tar.gz/s4-clarity-1.5.0/s4/clarity/configuration/protocol.py | 0.582966 | 0.173148 | protocol.py | pypi |
__all__ = ["get_telecope_years", "load_atmosphere"]
# Cell
import healpy as hp
import numpy as np
from pathlib import Path
import logging as log
from .core import (
get_telescope,
get_observing_efficiency,
base_folder,
simulations_observing_efficiency,
mapmaking_naming,
read_instrument_model,
)
from .noise import get_thinfp
# Cell
def get_telecope_years(config, site, channel):
"""Compute the number of telescope/years in the CMB-S4 configuration
config : dict
CMB-S4 telescopes configuration,
generally loaded from a TOML file
site : str
'Pole' or 'Chile', case doesn't matter
channel : str
Channel tag, e.g. 'MFHS1'
"""
telescope_years = 0
for telescope_name, telescope_config in config["telescopes"][
get_telescope(channel)
].items():
if telescope_config["site"].lower() == site.lower():
has_band = telescope_config.get(channel[:-1], 0) > 0
telescope_years += has_band * telescope_config.get(
"years", config["experiment"]["total_experiment_length_years"]
)
return telescope_years
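# Hedged usage sketch (assumes a TOML configuration like the s4_design.toml
# mentioned in the CLI module; the file name and channel are illustrative):
#
#     import toml
#     config = toml.load('s4_design.toml')
#     years = get_telecope_years(config, 'Pole', 'MFHS1')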
# Cell
def load_atmosphere(config, site, channel, realization=0, raw=False):
"""Load foreground maps for a channel
Parameters
----------
config : dict
CMB-S4 configuration,
generally loaded from a TOML file
site : str
'Pole' or 'Chile', case doesn't matter
channel : str
Channel tag, e.g. 'MFHS1'
realization : int
Choose one of the available 8 realizations
raw : bool
If True, return the simulated map unscaled, with missing pixels set to UNSEEN
Returns
-------
output_map : numpy array
Output map with all emissions combined, uses nan for missing pixels
"""
telescope = get_telescope(channel)
channel_noP = channel.replace("P", "")
map_filename = (
Path(f"{realization:08d}")
/ f"{site.lower()}_atmosphere_{telescope}_{channel_noP}_{mapmaking_naming[telescope]}"
)
log.info(f"Reading {map_filename}")
atmosphere_map = hp.read_map(
Path(base_folder) / map_filename, (0, 1, 2), dtype=None, verbose=False
)
if raw:
atmosphere_map[atmosphere_map == 0] = hp.UNSEEN
return atmosphere_map
atmosphere_map[atmosphere_map == hp.UNSEEN] = np.nan
atmosphere_map[atmosphere_map == 0] = np.nan
# input map is 10 days at 100% efficiency
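# noise-like atmosphere averages down as 1/sqrt(observing time), hence the
# square-root re-weighting below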
atmosphere_map *= np.sqrt(
10
* simulations_observing_efficiency[site.lower()][channel]
/ (
365.25
* get_observing_efficiency(
config["experiment"]["observing_efficiency"], site, telescope, channel
)
)
)
atmosphere_map /= np.sqrt(get_telecope_years(config, site, channel))
atmosphere_map[0] *= config["experiment"].get("atmosphere_scaling_T", 1)
atmosphere_map[1:] *= config["experiment"].get("atmosphere_scaling_P", 1)
atmosphere_map[1:] /= np.sqrt(get_thinfp(channel))
return atmosphere_map | /s4_design_sim_tool-1.1.2-py3-none-any.whl/s4_design_sim_tool/atmosphere.py | 0.840423 | 0.286419 | atmosphere.py | pypi |
__all__ = [
"thinfp_string",
"thinfp_table",
"get_thinfp",
"get_tube_years",
"load_noise",
]
# Cell
import healpy as hp
import numpy as np
from pathlib import Path
import logging as log
from .core import (
get_telescope,
get_observing_efficiency,
base_folder,
simulations_observing_efficiency,
read_instrument_model,
mapmaking_naming,
simulated_tubes,
)
# Cell
thinfp_string = """telescope tube thinfp
LAT ULFPL 1
LAT LFL 4
LAT LFPL 4
LAT MFL 16
LAT HFL 16
LAT MFPL 16
LAT HFPL 16
SAT LFS 1
SAT MFLS 4
SAT MFHS 4
SAT HFS 8
"""
from astropy.io import ascii
thinfp_table = ascii.read(thinfp_string)
# Cell
def get_thinfp(channel):
"""Get the focal plane thinning factor for noise simulations
Parameters
----------
channel : str
CMB-S4 channel tag e.g. HFL2
Returns
-------
thinfp : int
thinning factor
"""
return (thinfp_table[thinfp_table["tube"] == channel[:-1]])["thinfp"][0]
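# Hedged example: get_thinfp('HFL2') drops the trailing channel index, looks
# up tube 'HFL' in the table above and returns 16.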
# Cell
def get_tube_years(config, site, channel):
"""Compute the number of tube/years in the CMB-S4 configuration
config : dict
CMB-S4 telescopes configuration,
generally loaded from a TOML file
site : str
'Pole' or 'Chile', case doesn't matter
channel : str
Channel tag, e.g. 'MFHS1'
"""
tube_years = 0
for telescope_name, telescope_config in config["telescopes"][
get_telescope(channel)
].items():
if telescope_config["site"].lower() == site.lower():
num_tubes = telescope_config.get(channel[:-1], 0)
tube_years += num_tubes * telescope_config.get(
"years", config["experiment"]["total_experiment_length_years"]
)
return tube_years
# Cell
def load_noise(config, site, channel, realization=0):
"""Load noise maps for a channel
Parameters
----------
config : dict
CMB-S4 configuration,
generally loaded from a TOML file
site : str
'Pole' or 'Chile', case doesn't matter
channel : str
Channel tag, e.g. 'MFHS1'
realization : int
Choose one of the available 8 realizations
Returns
-------
output_map : numpy array
Output map with all emissions combined, uses nan for missing pixels
"""
telescope = get_telescope(channel)
channel_noP = channel.replace("P", "")
map_filename = (
Path(f"{realization:08d}")
/ f"{site.lower()}_noise_{telescope}_{channel_noP}_{mapmaking_naming[telescope]}"
)
log.info(f"Base folder: {base_folder}")
log.info(f"Reading {map_filename}")
output_map = hp.read_map(
Path(base_folder) / map_filename, (0, 1, 2), dtype=None, verbose=False
)
output_map[output_map == hp.UNSEEN] = np.nan
output_map[output_map == 0] = np.nan
# input map is 10 days
output_map *= np.sqrt(
10
* simulations_observing_efficiency[site.lower()][channel]
/ (
365.25
* get_observing_efficiency(
config["experiment"]["observing_efficiency"], site, telescope, channel
)
* get_observing_efficiency(
config["experiment"]["sensitivity_factor"], site, telescope, channel
)
)
)
output_map /= np.sqrt(
get_tube_years(config, site, channel) / simulated_tubes[channel[:-1]]
)
# focal plane thinning factor of TOD simulations
output_map /= np.sqrt(get_thinfp(channel))
return output_map | /s4_design_sim_tool-1.1.2-py3-none-any.whl/s4_design_sim_tool/noise.py | 0.807347 | 0.303903 | noise.py | pypi |
__all__ = [
"simulations_sampling_frequency_scaling",
"load_hitmap_wcov",
"hitmaps_naming",
"wcov_naming",
]
# Cell
import healpy as hp
import numpy as np
from pathlib import Path
import logging as log
from .core import (
get_telescope,
get_observing_efficiency,
base_folder,
simulations_observing_efficiency,
read_instrument_model,
mapmaking_naming,
simulated_tubes,
)
from .noise import get_thinfp, get_tube_years
# Cell
simulations_sampling_frequency_scaling = dict(SAT=5, LAT=2)
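# Hedged note: these factors appear to rescale simulated hit counts to the
# nominal sampling rates quoted in load_hitmap_wcov's docstring (e.g. SAT
# hitmaps are multiplied by 5 below).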
# Cell
hitmaps_naming = {
"SAT": "telescope_all_time_all_hits.fits.gz",
"LAT": "filtered_telescope_all_time_all_hmap.fits",
}
wcov_naming = {
"SAT": "telescope_all_time_all_wcov.fits.gz",
"LAT": "filtered_telescope_all_time_all_wcov.fits",
}
def load_hitmap_wcov(config, site, channel, realization=0, raw_hitmap=False):
"""Load hitmaps and white noise covariance matrices for a channel
This loads the simulated hitmaps and white noise covariance matrices
and scales them properly to the experiment configuration and duration
as defined in the input config file.
Hitmaps assume a sampling frequency of 100 Hz for SAT and 400 Hz for
LAT.
Parameters
----------
config : dict
CMB-S4 configuration,
generally loaded from a TOML file
site : str
'Pole' or 'Chile', case doesn't matter
channel : str
Channel tag, e.g. 'MFHS1'
realization : int
Choose one of the available 8 realizations
raw_hitmap : bool
If True, return only the unscaled hitmap
Returns
-------
hitmap : numpy array
Hitmap for all channels all tubes combined
wcov : numpy array
White noise covariance matrix, rows are:
"II", "IQ", "IU", "QQ", "QU", "UU", units are K^2
"""
# it is the same scaling for hitmap and white noise covariance matrix,
# which is the same as noise except squared
telescope = get_telescope(channel)
tube = channel[:-1]
channel_noP = channel.replace("P", "")
map_filename = (
Path(f"{realization:08d}")
/ f"{site.lower()}_noise_{telescope}_{channel_noP}_{hitmaps_naming[telescope]}"
)
wcov_filename = (
Path(f"{realization:08d}")
/ f"{site.lower()}_noise_{telescope}_{channel_noP}_{wcov_naming[telescope]}"
)
log.info(f"Base folder: {base_folder}")
log.info(f"Reading {map_filename}")
hitmap = hp.read_map(Path(base_folder) / map_filename, 0, dtype=None, verbose=False)
hitmap[hitmap == hp.UNSEEN] = 0
if raw_hitmap:
return hitmap
wcov = hp.read_map(
Path(base_folder) / wcov_filename, (0, 1, 2, 3, 4, 5), dtype=None, verbose=False
)
scaling = (
365.25
* get_observing_efficiency(
config["experiment"]["observing_efficiency"], site, telescope, channel
)
* get_observing_efficiency(
config["experiment"]["sensitivity_factor"], site, telescope, channel
)
) / (10 * simulations_observing_efficiency[site.lower()][channel])
# focal plane thinning factor of TOD simulations
scaling *= get_thinfp(channel)
scaling *= get_tube_years(config, site, channel) / simulated_tubes[tube]
hitmap = np.round(hitmap * scaling).astype(np.int64)
hitmap *= simulations_sampling_frequency_scaling[telescope]
wcov /= scaling
wcov[:, hitmap == 0] = hp.UNSEEN
return hitmap, wcov | /s4_design_sim_tool-1.1.2-py3-none-any.whl/s4_design_sim_tool/hitmap_wcov.py | 0.892492 | 0.492798 | hitmap_wcov.py | pypi |
__all__ = [
"base_folder",
"simulations_observing_efficiency",
"mapmaking_naming",
"simulated_tubes",
"read_instrument_model",
"get_telescope",
"get_observing_efficiency",
]
# Cell
import os
base_folder = os.environ.get(
"S4REFSIMTOOL_INPUT",
"/global/cscratch1/sd/keskital/s4sim/reference_tool_round_2/out",
)
# Cell
# computed from the hitmap, see 04_hitmap_wcov
simulations_observing_efficiency = {
"pole": {
"ULFPL1": 0.7094065625,
"LFPL1": 0.7094065625,
"LFPL2": 0.7094065625,
"MFPL1": 0.7094065625,
"MFPL2": 0.7094065625,
"HFPL1": 0.7094065625,
"HFPL2": 0.7094065625,
"LFS1": 0.5410630787037037,
"LFS2": 0.5410630787037037,
"MFLS1": 0.5410630787037037,
"MFHS1": 0.5410630787037037,
"MFLS2": 0.5410630787037037,
"MFHS2": 0.5410630787037037,
"HFS1": 0.5410630787037037,
"HFS2": 0.5410630787037037,
},
"chile": {
"LFL1": 1.0367161458333334,
"LFL2": 1.0367161458333334,
"MFL1": 1.0367161458333334,
"MFL2": 1.0367161458333334,
"HFL1": 1.0350467156468064,
"HFL2": 1.0350467156468064,
},
}
mapmaking_naming = {
"SAT": "telescope_all_time_all_filtered.fits.gz",
"LAT": "filtered_telescope_all_time_all_bmap.fits",
}
simulated_tubes = dict(
LFS=1,
MFLS=1,
MFHS=1,
HFS=1, # SAT
LFL=8,
MFL=54,
HFL=23, # LAT Chile
ULFPL=4,
LFPL=9,
MFPL=54,
HFPL=18, # LAT Pole
)
# Cell
from astropy.table import QTable
def read_instrument_model(filename="instrument_model/cmbs4_instrument_model.tbl"):
"""Read the CMB-S4 instrument model
Includes all instrument parameters with units except bandpasses
Parameters
----------
filename : str path
path to instrument model in ASCII IPAC format
Returns
-------
s4 : QTable
astropy table with units, 1 row per channel
"""
return QTable.read(filename, format="ascii.ipac")
def get_telescope(channel):
"""Identify if a channel is in SAT or LAT
Parameters
----------
channel : str
channel tag, e.g. MFHS1
Returns
-------
telescope : str
'SAT' or 'LAT'
"""
return channel[-2] + "AT"
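# Hedged examples of the tag convention (the second-to-last character encodes
# the telescope): get_telescope('MFHS1') returns 'SAT' and get_telescope('HFL2')
# returns 'LAT'.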
def get_observing_efficiency(observing_efficiency_config, site, telescope, channel):
"""Extract the observing frequency for a specific channel from
the configuration dictionary
Parameters
----------
observing_efficiency_config : dict
the order of keys should be site, telescope, channel
e.g. {"Pole":{"LAT":{"HFL1":0.1}}, "default":0.2}
site : str
site, Pole or Chile
telescope : str
LAT or SAT
channel: str
e.g. HFL1
Returns
-------
observing_efficiency : float
observing efficiency in decimal form, for example .2 for 20%
"""
if site in observing_efficiency_config:
if telescope in observing_efficiency_config[site]:
try:
return observing_efficiency_config[site][telescope][channel]
except KeyError:
try:
return observing_efficiency_config[site][telescope]["default"]
except KeyError:
pass
try:
return observing_efficiency_config[site]["default"]
except KeyError:
pass
return observing_efficiency_config["default"] | /s4_design_sim_tool-1.1.2-py3-none-any.whl/s4_design_sim_tool/core.py | 0.708818 | 0.332121 | core.py | pypi |
__all__ = ["emission_naming", "load_sky_emission"]
# Cell
import healpy as hp
import numpy as np
from pathlib import Path
import logging as log
from .core import get_telescope, base_folder, mapmaking_naming, read_instrument_model
# Cell
emission_naming = dict(
foreground_emission="combined_foregrounds",
CMB_unlensed="cmb_unlensed_solardipole",
CMB_lensing_signal="cmb_lensing_signal",
CMB_tensor_to_scalar_ratio="cmb_tensor",
)
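# Hedged example of the expected `config_sky_emission` mapping; the keys come
# from emission_naming above and the weights are illustrative (input tensor
# maps are simulated with r=3e-3, see the weighting logic below):
#
#     {"foreground_emission": 1.0,
#      "CMB_unlensed": 1.0,
#      "CMB_lensing_signal": 1.0,
#      "CMB_tensor_to_scalar_ratio": 3e-3}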
# Cell
def load_sky_emission(config_sky_emission, site, channel):
"""Load foreground maps for a channel
Parameters
----------
config_sky_emission : dict
CMB-S4 sky emission configuration,
generally config['sky_emission'] for a configuration
loaded from a TOML file
dictionary with standard emission names and their weights
site : str
'Pole' or 'Chile', case doesn't matter
channel : str
Channel tag, e.g. 'MFHS1'
Returns
-------
output_map : numpy array
Output map with all emissions combined, uses nan for missing pixels
"""
log.info("Configuration %s", str(config_sky_emission))
telescope = get_telescope(channel)
nside = 512 if telescope == "SAT" else 4096
npix = hp.nside2npix(nside)
output_map = np.zeros((3, npix), dtype=np.float32)
realization = 0 # foregrounds are deterministic
channel_noP = channel.replace("P", "")
for emission, weight in config_sky_emission.items():
if weight == 0:
log.info("Skip %s", emission)
continue
log.info("Processing %s", emission)
emission_map = hp.read_map(
Path(base_folder)
/ f"{realization:08d}"
/ f"{site.lower()}_{emission_naming[emission]}_{telescope}_{channel_noP}_{mapmaking_naming[telescope]}",
(0, 1, 2),
dtype=None,
verbose=False,
)
emission_map[emission_map == 0] = np.nan
emission_map[emission_map == hp.UNSEEN] = np.nan
if emission == "CMB_tensor_to_scalar_ratio":
# input maps are simulated with r=3e-3
# tensor-to-scalar ratio is defined in the power spectrum, we need to weight the maps with the square root
weight = np.sqrt(weight / 3e-3)
output_map += emission_map * weight
output_map /= 1e6 # uK -> K
return output_map | /s4_design_sim_tool-1.1.2-py3-none-any.whl/s4_design_sim_tool/foregrounds.py | 0.85744 | 0.355831 | foregrounds.py | pypi |
__all__ = [
"md5sum_string",
"md5sum_file",
"parse_channels",
"s4_channels",
"merge_dict",
"parse_config",
"S4RefSimTool",
"command_line_script",
]
# Cell
import os
import toml
import healpy as hp
import numpy as np
import h5py
from pathlib import Path
import logging as log
from datetime import date
from .core import get_telescope
from s4_design_sim_tool import __version__
from .foregrounds import load_sky_emission
from .atmosphere import load_atmosphere, get_telecope_years
from .noise import load_noise
from .hitmap_wcov import load_hitmap_wcov
# Cell
import hashlib
def md5sum_string(string):
return hashlib.md5(string.encode("utf-8")).hexdigest()
def md5sum_file(filename):
"""Compute md5 checksum of the contents of a file"""
return md5sum_string(open(filename, "r").read())
# Cell
s4_channels = {
"LAT": [
"ULFPL1",
"LFL1",
"LFPL1",
"LFL2",
"LFPL2",
"MFPL1",
"MFL1",
"MFL2",
"MFPL2",
"HFL1",
"HFPL1",
"HFL2",
"HFPL2",
],
"SAT": ["LFS1", "LFS2", "MFLS1", "MFHS1", "MFLS2", "MFHS2", "HFS1", "HFS2"],
}
def parse_channels(channels):
"""Parse a comma separated list of channels or all or SAT/LAT into channel tag list"""
if channels in ["SAT", "LAT"]:
channels = s4_channels[channels]
elif channels in ["all", None]:
channels = s4_channels["SAT"] + s4_channels["LAT"]
elif isinstance(channels, str):
channels = channels.split(",")
return channels
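# Hedged examples: parse_channels('SAT') expands to the 8 SAT tags listed in
# s4_channels, parse_channels('LFS1,LFS2') returns ['LFS1', 'LFS2'], and
# parse_channels(None) returns every channel.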
# Cell
import collections.abc
def merge_dict(d1, d2):
"""
Modifies d1 in-place to contain values from d2. If any value
in d1 is a dictionary (or dict-like), *and* the corresponding
value in d2 is also a dictionary, then merge them in-place.
"""
for k, v2 in d2.items():
v1 = d1.get(k) # returns None if v1 has no value for this key
if isinstance(v1, collections.abc.Mapping) and isinstance(v2, collections.abc.Mapping):
merge_dict(v1, v2)
else:
d1[k] = v2
def parse_config(*config_files):
"""Parse TOML configuration files
Later TOML configuration files override the previous ones,
dictionaries at the same level are merged.
Parameters
----------
config_files : one or more str
paths to TOML configuration files
Returns
-------
config : dict
parsed dictionary"""
config = toml.load(config_files[0])
for conf in config_files[1:]:
merge_dict(config, toml.load(conf))
return config
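# Hedged usage sketch: later files override earlier ones, so
#
#     config = parse_config('s4_design.toml', 'overrides.toml')
#
# starts from the defaults and merges the overrides in place (the second file
# name is illustrative).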
# Cell
class S4RefSimTool:
def __init__(self, config_filename, output_folder="output"):
"""Simulate CMB-S4 maps based on the experiment configuration
Parameters
----------
config_filename : str or Path or List
CMB-S4 configuration stored in a TOML file
see for example s4_design.toml in the repository
It also supports multiple TOML files as a List; in this case
later files override settings from the earlier files.
check the `config` attribute to verify that the parsing behaved
as expected.
output_folder : str or Path
Output path
"""
self.config_filename = (
[config_filename]
if isinstance(config_filename, (str, Path))
else config_filename
)
self.config = parse_config(*self.config_filename)
self.output_filename_template = "cmbs4_KCMB_{telescope}-{band}_{site}_nside{nside}_{split}_of_{nsplits}.fits"
self.output_folder = Path(output_folder)
self.output_folder.mkdir(parents=True, exist_ok=True)
def run(self, channels="all", sites=["Pole", "Chile"]):
"""Run the simulation
Parameters
----------
channels : str or list[str]
list of channel tags, e.g.
* ["LFS1", "LFS2"] or
* "SAT" or "LAT"
* "all" (default)
site : list[str]
['Pole'] or ['Chile'], by default ["Pole", "Chile"]
"""
nsplits = self.config["experiment"].get("number_of_splits", 0)
if nsplits == 1:
nsplits = 0
assert (
nsplits < 8
), "We currently only have 7 independent realizations of atmosphere and noise"
conf_md5 = ",".join(map(md5sum_file, self.config_filename))
for site in sites:
for channel in parse_channels(channels):
if get_telecope_years(self.config, site, channel) == 0:
continue
telescope = get_telescope(channel)
subfolder = self.output_folder / f"{telescope}-{channel}_{site.lower()}"
subfolder.mkdir(parents=True, exist_ok=True)
log.info("Created output folder %s", str(subfolder))
for split in range(nsplits + 1):
nside = 512 if telescope == "SAT" else 4096
output_filename = self.output_filename_template.format(
nside=nside,
telescope=telescope,
band=channel,
site=site.lower(),
split=max(1, split), # split=0 is full mission and we want 1
nsplits=1 if split == 0 else nsplits,
)
if os.path.exists(subfolder / output_filename):
log.info("File %s already exists, SKIP", output_filename)
continue
if split == 0:
log.info(f"Simulate channel {channel} at {site}")
sky_emission = load_sky_emission(
self.config["sky_emission"], site, channel
)
output_map = np.zeros_like(sky_emission)
if self.config["experiment"].get("include_atmosphere", True):
output_map += load_atmosphere(
self.config, site, channel, realization=split
)
else:
log.info("Skip the atmosphere noise")
if self.config["experiment"].get("include_noise", True):
output_map += load_noise(
self.config, site, channel, realization=split
)
else:
log.info("Skip the instrument noise")
if split > 0:
output_map *= np.sqrt(nsplits)
output_map += sky_emission
# Use UNSEEN instead of nan for missing pixels
output_map[np.isnan(output_map)] = hp.UNSEEN
log.info(f"Writing {output_filename}")
noise_version = "1.0"
hp.write_map(
subfolder / output_filename,
output_map,
column_units="K_CMB",
extra_header=[
("SOFTWARE", "s4_design_sim_tool"),
("SW_VERS", __version__),
("SKY_VERS", "1.0"),
("ATM_VERS", "1.0"),
("NOI_VERS", noise_version),
("SITE", site),
("SPLIT", split),
("NSPLITS", nsplits),
("CHANNEL", channel),
("DATE", str(date.today())),
("CONFMD5", conf_md5),
],
coord="Q",
overwrite=True,
)
# only run of full mission and the first split
if split in [0, 1] and self.config["experiment"].get(
"include_noise", True
):
log.info(f"Loading hitmap and white noise covariance matrix")
if split == 0:
hitmap, wcov = load_hitmap_wcov(
self.config, site, channel, realization=0
)
else:
hitmap = np.round(hitmap / nsplits).astype(np.int64)
wcov = hp.ma(wcov) * nsplits
hitmap_filename = output_filename.replace("KCMB", "hitmap")
log.info(f"Writing {hitmap_filename}")
hp.write_map(
subfolder / hitmap_filename,
hitmap,
column_units="hits",
extra_header=[
("SOFTWARE", "s4_design_sim_tool"),
("SW_VERS", __version__),
("NOI_VERS", noise_version),
("SITE", site),
("SPLIT", split),
("NSPLITS", nsplits),
("CHANNEL", channel),
("DATE", str(date.today())),
("CONFMD5", conf_md5),
],
coord="Q",
overwrite=True,
)
wcov_filename = output_filename.replace("KCMB", "wcov")
log.info(f"Writing {wcov_filename}")
hp.write_map(
subfolder / wcov_filename,
wcov,
column_units="K_CMB**2",
extra_header=[
("SOFTWARE", "s4_design_sim_tool"),
("SW_VERS", __version__),
("NOI_VERS", noise_version),
("SITE", site),
("SPLIT", split),
("NSPLITS", nsplits),
("CHANNEL", channel),
("DATE", str(date.today())),
("CONFMD5", conf_md5),
],
coord="Q",
overwrite=True,
)
if split == 1:
del hitmap, wcov
# Cell
def command_line_script(args=None):
import logging as log
log.basicConfig(level=log.INFO)
import argparse
parser = argparse.ArgumentParser(description="Run s4_design_sim_tool")
parser.add_argument("config", type=str, nargs="*", help="TOML Configuration files")
parser.add_argument(
"--channels",
type=str,
help="Channels e.g. all, SAT, LAT, LFL1 or comma separated list of channels",
required=False,
default="all",
)
parser.add_argument(
"--site",
type=str,
help="Pole, Chile or all, default all",
required=False,
default="all",
)
parser.add_argument(
"--output_folder",
type=str,
help="Output folder, optional",
required=False,
default="output",
)
res = parser.parse_args(args)
if res.site == "all":
sites = ["Chile", "Pole"]
else:
sites = [res.site]
sim = S4RefSimTool(res.config, output_folder=res.output_folder)
sim.run(channels=res.channels, sites=sites) | /s4_design_sim_tool-1.1.2-py3-none-any.whl/s4_design_sim_tool/cli.py | 0.792103 | 0.326822 | cli.py | pypi |
try:
from cStringIO import StringIO as BytesIO
except ImportError: # pragma: no cover
# Python 3
from io import BytesIO
import PIL.Image
import PIL.ImageChops
import PIL.ImageFile
# Set a larger buffer size. This fixes problems with jpeg decoding.
# See http://mail.python.org/pipermail/image-sig/1999-August/000816.html for
# details.
PIL.ImageFile.MAXBLOCK = 1000000
def scale_image(image, width=None, height=None, crop=False,
strip_whitespace=False):
"""Scale the given image data to another size and return the result
as a byte string.
:param file image: open file for image to scale
:param int width: desired maximum image width
:param int height: desired maximum image height
:param bool crop: allow cropping image to fill full width and height
:param bool strip_whitespace: crop surrounding whitespace before
processing the image
The return value is a tuple with the new image, the image format and
a size-tuple.
The `width`, `height` and `crop` parameters will be passed to
:meth:`scale_pil_image`, which performs the actual scaling.
"""
image = PIL.Image.open(image)
# When we create a new image during scaling we loose the format
# information, so remember it here.
format = image.format
if format == 'GIF':
format = 'PNG'
elif format != 'PNG':
format = 'JPEG'
if strip_whitespace:
image = crop_surrounding_whitespace(image)
image = scale_pil_image(image, width, height, crop)
output = BytesIO()
image.save(output, format, optimize=True)
return (output.getvalue(), format, image.size)
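# Hedged usage sketch (the file name is illustrative):
#
#     with open('photo.jpg', 'rb') as fh:
#         data, format, (width, height) = scale_image(fh, width=200, height=200, crop=True)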
def correct_colour_mode(image):
"""Make sure an image uses a colour handling scheme which allows for
high quality scaling and can be rendered by web browsers.
"""
if image.mode == '1':
# Convert black&white to grayscale
image = image.convert("L")
elif image.mode == 'P':
# Convert palette based images to 3x8bit+alpha
image = image.convert('RGBA')
elif image.mode == 'CMYK':
# Convert CMYK to RGB since web browser can not handle CMYK
image = image.convert('RGB')
return image
def crop_surrounding_whitespace(image):
"""Remove surrounding empty space around an image.
This implemenation assumes that the surrounding space has the same colour
as the top leftmost pixel.
:param image: PIL image
:rtype: PIL image
"""
bg = PIL.Image.new(image.mode, image.size, image.getpixel((0, 0)))
diff = PIL.ImageChops.difference(image, bg)
bbox = diff.getbbox()
if not bbox:
return image
# XXX: if we want to crop the final image we should try to match the
# desired aspect ratio here to make sure the last step will not
# crop out real image data.
return image.crop(bbox)
def center_crop(image, width, height):
"""Crop an image to the desired width and height. The crop is
made from the middle of the image.
:param image: PIL image
:param int width: maximum width, or None if width is unconstrained
:param int height: maximum height, or None if height is unconstrained
:rtype: PIL image
"""
(current_width, current_height) = image.size
if width is not None and current_width > width:
left = int((current_width - width) / 2.0)
right = left + width
else:
left = 0
right = current_width
if height is not None and current_height > height:
top = int((current_height - height) / 2.0)
bottom = top + height
else:
top = 0
bottom = current_height
return image.crop((left, top, right, bottom))
def scale_pil_image(image, width=None, height=None, crop=False):
"""Scale a PIL image to another size.
:param image: PIL Image instance
:param int width: desired maximum image width
:param int height: desired maximum image height
:param bool crop: allow cropping image to fill full width and height
:rtype: PIL Image
The generated image is a JPEG image, unless the original is a GIF or PNG
image. This is needed to make sure alpha channel information is not lost,
which JPEG does not support.
"""
if width is None and height is None:
raise ValueError('Either width or height need to be given')
image = correct_colour_mode(image)
(current_width, current_height) = image.size
scale_height = (float(height) / float(current_height)) \
if height is not None else None
scale_width = (float(width) / float(current_width)) \
if width is not None else None
if scale_height == scale_width:
# The original already has the right aspect ratio, so use a fast
# thumbnail to scale.
image.thumbnail((width, height), PIL.Image.ANTIALIAS)
return image
if crop:
scale = max(filter(None, [scale_width, scale_height]))
else:
scale = min(filter(None, [scale_width, scale_height]))
# Skip scale multiplication if possible to prevent off-by-one errors
new_width = width if scale == scale_width \
else int(current_width * scale)
new_height = height if scale == scale_height \
else int(current_height * scale)
image.draft(image.mode, (new_width, new_height))
image = image.resize((new_width, new_height), PIL.Image.ANTIALIAS)
if not crop:
return image
return center_crop(image, width, height) | /s4u.image-2.4.0.tar.gz/s4u.image-2.4.0/src/s4u/image/scale.py | 0.918966 | 0.565719 | scale.py | pypi |
try:
from cStringIO import StringIO as BytesIO
except ImportError: # pragma: no cover
# Python 3
from io import BytesIO
import errno
import io
import os
import uuid
import requests
from sqlalchemy import schema
from sqlalchemy import types
from sqlalchemy.orm.session import object_session
from repoze.filesafe import create_file
from repoze.filesafe import open_file
from repoze.filesafe import delete_file
from pyramid_sqlalchemy import BaseObject
from s4u.image.util import extension_for_image_data
from s4u.image.scale import scale_image
def generate_path(extension):
"""Generate a filename within the image storage. The is based on a
random string and the given extension. A three-level directory
structure is used.
"""
filename = '%s%s' % (uuid.uuid4(), extension)
filename = os.path.join(filename[0], filename[1:3], filename)
return filename
class Image(BaseObject):
"""A source image.
"""
root_path = None
__tablename__ = 'image'
id = schema.Column(types.Integer(),
schema.Sequence('image_id_seq', optional=True),
primary_key=True, autoincrement=True)
#: Relative filesystem path for the image
path = schema.Column(types.String(128), unique=True)
#: URL for (canonical) image.
url = schema.Column(types.Text())
def __init__(self, data=None, filename=None, url=None):
if data is None and url is None:
raise ValueError('You must provide at least one of data or url')
self.url = url
if data is not None:
self._set_content(data, filename)
@property
def filesystem_path(self):
"""Return the (absolute) filesystem path for the image data."""
if self.root_path is None:
raise AttributeError('root_path not set')
if not self.path:
return None
return os.path.join(self.root_path, self.path)
def download(self, timeout=0.5): # pragma: no cover
"""Download a remote image so we have a local copy.
"""
if self.path is not None:
raise TypeError('Image already has local data.')
r = requests.get(self.url, timeout=timeout)
extension = extension_for_image_data(BytesIO(r.content))
self.path = generate_path(extension)
file = create_file(self.filesystem_path, 'wb')
if hasattr(file, 'fileno'): # pragma: no cover (for testing only)
try:
os.fchmod(file.fileno(), 0o644)
except io.UnsupportedOperation: # BytesIO for testing
pass
file.write(r.content)
file.close()
def scale(self, width=None, height=None, crop=False,
strip_whitespace=False):
"""Return a scaled version of this image. If a matching
:py:class:`ImageScale` is found it is returned directly. Otherwise the
image will be scaled and a new ImageScale instance is created.
See :py:func:`scale_image <s4u.image.scale.scale_image>` for more
information on the scaling parameters.
:rtype: :py:class:`ImageScale`
"""
if not self.path:
raise TypeError('Can not scale an image that is not stored locally.')
if not (width or height):
raise ValueError('You must specify either width or height')
session = object_session(self)
scale = session.query(ImageScale)\
.filter(ImageScale.image_id == self.id)\
.filter(ImageScale.param_width == (width or 0))\
.filter(ImageScale.param_height == (height or 0))\
.filter(ImageScale.param_crop == crop)\
.filter(ImageScale.param_strip_whitespace == strip_whitespace)\
.first()
if scale is None:
scale = ImageScale(self, width, height, crop, strip_whitespace)
session.add(scale)
return scale
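# Hedged usage sketch (assumes an active SQLAlchemy session and that the
# root paths have been configured; the data and filename are illustrative):
#
#     image = Image(data=raw_bytes, filename='logo.png')
#     session.add(image)
#     session.flush()  # assigns image.id, which ImageScale needs
#     thumb = image.scale(width=120, height=120, crop=True)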
def delete(self):
"""Delete this image including all scales from the database and disk.
"""
self._delete_scales()
try:
delete_file(self.filesystem_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
object_session(self).delete(self)
def replace(self, data, filename=None):
"""Replace the image contents.
This method should be used to modify images. This method will
delete old old image scales and switch to a new filename and
URL.
:param str data: Raw image data.
:param str filename: Image filename (optional)
"""
self._delete_scales()
try:
delete_file(self.filesystem_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
self._set_content(data, filename)
def _set_content(self, data, filename=None):
extension = None
if filename:
extension = os.path.splitext(filename)[1]
if not extension:
extension = extension_for_image_data(BytesIO(data))
self.path = generate_path(extension)
file = create_file(self.filesystem_path, 'wb')
if hasattr(file, 'fileno'): # pragma: no cover (for testing only)
try:
os.fchmod(file.fileno(), 0o644)
except io.UnsupportedOperation: # BytesIO for testing
pass
file.write(data)
file.close()
def _delete_scales(self):
"""Remove all existing image scales.
"""
session = object_session(self)
scales = session.query(ImageScale).filter(ImageScale.image_id == self.id)
for scale in scales:
try:
delete_file(scale.filesystem_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
scales.delete()
class ImageScale(BaseObject):
"""A scaled version of image. Each :obj:`Image` can have many different
scaled versions. The final dimensions of the image are stored to allow
efficient creation of HTML ``img`` tags.
"""
root_path = None
__tablename__ = 'image_scale'
__table_args__ = (
schema.UniqueConstraint('image_id',
'param_width', 'param_height',
'param_crop', 'param_strip_whitespace'),
schema.CheckConstraint('param_width>0 or param_height>0'),
{})
id = schema.Column(types.Integer(),
schema.Sequence('image_scale_id_seq', optional=True),
primary_key=True, autoincrement=True)
path = schema.Column(types.String(128), nullable=False, unique=True)
image_id = schema.Column(types.Integer(),
schema.ForeignKey(Image.id,
ondelete='CASCADE', onupdate='CASCADE'),
nullable=False)
param_width = schema.Column(types.Integer(), nullable=False)
param_height = schema.Column(types.Integer(), nullable=False)
param_crop = schema.Column(types.Boolean(), nullable=False)
param_strip_whitespace = schema.Column(types.Boolean(), nullable=False)
#: The width in pixels of the scaled image.
width = schema.Column(types.Integer(), nullable=False)
#: The heighy in pixels of the scaled image.
height = schema.Column(types.Integer(), nullable=False)
def __init__(self, image, width=None, height=None, crop=False,
strip_whitespace=False):
self.image_id = image.id
self.param_width = width or 0
self.param_height = height or 0
self.param_crop = crop
self.param_strip_whitespace = strip_whitespace
file = open_file(image.filesystem_path, 'rb')
(data, format, size) = scale_image(file, width, height, crop,
strip_whitespace)
self.path = generate_path('.' + format.lower())
self.width = size[0]
self.height = size[1]
file = create_file(self.filesystem_path, 'wb')
if hasattr(file, 'fileno'): # pragma: no cover (for testing only)
try:
os.fchmod(file.fileno(), 0o644)
except io.UnsupportedOperation: # BytesIO for testing
pass
file.write(data)
file.close()
@property
def filesystem_path(self):
"""Return the (absolute) filesystem path for the image data."""
if self.root_path is None:
raise AttributeError('root_path not set')
return os.path.join(self.root_path, self.path)
def _delete(self):
"""Remove image scale from database and filesystem.
"""
try:
delete_file(self.filesystem_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
object_session(self).delete(self)
schema.Index('image_scale_parameters',
ImageScale.image_id,
ImageScale.param_width,
ImageScale.param_height,
ImageScale.param_crop,
ImageScale.param_strip_whitespace)
__all__ = ['Image', 'ImageScale'] | /s4u.image-2.4.0.tar.gz/s4u.image-2.4.0/src/s4u/image/model.py | 0.712432 | 0.240953 | model.py | pypi |
import os
from s4u.image.model import Image
from s4u.image.model import ImageScale
def configure(original_path, scale_path):
"""Configure the filesystem paths used to store image files.
:param original_path: filesystem path used for full size images.
:param scale_path: filesystem path used for scaled images.
"""
Image.root_path = original_path
ImageScale.root_path = scale_path
def includeme(config):
"""Configure s4u.image using a Pyramid :py:class:`Configurator
<pyramid.config.Configurator>` object. This will take the filesystem
paths from the application settings using the keys
``fs.images.original`` and ``fs.images.scaled``.
"""
settings = config.registry.settings
configure(settings['fs.images.original'],
settings['fs.images.scaled'])
for key in ['original', 'scaled']:
base_url = settings.get('fs.images.%s.url' % key)
if base_url:
config.add_static_view(base_url,
settings['fs.images.%s' % key])
else:
config.add_static_view('image-%s' % key,
settings['fs.images.%s' % key],
cache_max_age=86400 * 31)
def initialise_filesystem():
"""Create required filesystem.
This function requires that s4u.image is already configured.
It is safe to call this function multiple times: it will notice if a
directory already exists.
"""
_create_directories(Image.root_path)
_create_directories(ImageScale.root_path)
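# Hedged usage sketch (the paths are illustrative):
#
#     configure('/var/lib/images/original', '/var/lib/images/scaled')
#     initialise_filesystem()  # creates the 16 x 256 shard directories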
def _create_directories(root_path):
if not os.path.isdir(root_path):
raise RuntimeError('%s is not a valid image root path' % root_path)
for top in range(16):
top_path = os.path.join(root_path, '%x' % top)
if not os.path.exists(top_path):
os.mkdir(top_path)
for sub in range(256):
sub_path = os.path.join(top_path, '%02x' % sub)
if not os.path.exists(sub_path):
os.mkdir(sub_path) | /s4u.image-2.4.0.tar.gz/s4u.image-2.4.0/src/s4u/image/__init__.py | 0.549641 | 0.237609 | __init__.py | pypi |
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
"""SQL function to return the largest number of a series.
.. code-block:: python
>>> session = meta.Session()
>>> session.query(greatest(1, 3)).scalar()
3
>>> session.query(greatest(9, 5, 2, 4)).scalar()
9
This function is only supported on databases which support GREATEST
(such as PostgreSQL) or MAX (such as SQLite).
"""
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'sqlite')
def max_greatest(element, compiler, **kw):
values = [compiler.process(a) for a in element.clauses]
return 'MAX(%s)' % ', '.join(values)
class least(expression.FunctionElement):
"""SQL function to return the smallest number of a series.
.. code-block:: python
>>> session = meta.Session()
>>> session.query(least(1, 3)).scalar()
1
>>> session.query(least(9, 5, 2, 4)).scalar()
2
This function is only supported on databases which support LEAST
(such as PostgreSQL) or MIN (such as SQLite).
"""
type = Numeric()
name = 'least'
@compiles(least)
def default_least(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(least, 'sqlite')
def min_least(element, compiler, **kw):
values = [compiler.process(a) for a in element.clauses]
return 'MIN(%s)' % ', '.join(values)
class day_difference(expression.FunctionElement):
"""Determine the difference in (whole) days between two dates.
.. code-block:: python
>>> session = meta.Session()
>>> today = datetime.date.today()
>>> session.query(day_difference(current_timestamp(), today)).scalar()
15
"""
type = Numeric()
name = 'day_difference'
@compiles(day_difference)
def default_day_difference(element, compiler, **kw):
if len(element.clauses) != 2:
raise ValueError('day_difference takes two parameters')
values = [compiler.process(a) for a in element.clauses]
return 'EXTRACT(day FROM %s - %s)' % tuple(values)
@compiles(day_difference, 'sqlite')
def sqlite_day_difference(element, compiler, **kw):
"""SQLite always returns 0 if you try to try to do math with dates.
The standard trick is to convert to julian dates first.
"""
if len(element.clauses) != 2:
raise ValueError('day_difference takes two parameters')
values = [compiler.process(a) for a in element.clauses]
return 'CAST(julianday(%s) - julianday(%s) AS INT)' % tuple(values)
__all__ = ['greatest', 'least', 'day_difference'] | /s4u.sqlalchemy-4.0.tar.gz/s4u.sqlalchemy-4.0/src/s4u/sqlalchemy/func.py | 0.814385 | 0.695771 | func.py | pypi |
from .gui.helpers import send_all
from .gui.helpers import close
from .gui.neighborhood_list import get_earfcn_values
from .gui.scripts import get_ip_script_output
import pathlib
import subprocess
import logging
import dearpygui.dearpygui as dpg
def main():
""" Creation of main function that is ran when program is called as well as required global variables. """
corrected_output = get_ip_script_output()
ROOT = pathlib.Path(__file__).resolve().parent.parent
file = ROOT / "shell_scripts" / "localbandchange.sh"
ADJUSTMENT = 110
re_pos = 40
picture_file = ROOT / "images" / "ca_logo.png"
# Initialize the logger for the main file
logger = logging.getLogger(name=__name__)
# Create the dpg context allowing the program to work
dpg.create_context()
# Create the button theme colors for the GUI buttons as well as the correct parameters for the Cell Antenna Logo.
with dpg.theme() as grn_btn_theme:
with dpg.theme_component(dpg.mvAll):
dpg.add_theme_color(dpg.mvThemeCol_Button,
(0, 55, 0, 255)) # GREEN
with dpg.theme() as red_btn_theme:
with dpg.theme_component(dpg.mvAll):
dpg.add_theme_color(dpg.mvThemeCol_Button, (255, 0, 0, 255)) # RED
# Addition of the Cell Antenna Logo to the program
width, height, channels, data = dpg.load_image(f"{picture_file}")
with dpg.texture_registry(show=True):
dpg.add_static_texture(
width=width,
height=height,
default_value=data,
tag="texture_tag"
)
    def lookup_table(band_number: int) -> tuple[str, str, str, str]:
""" Create A Lookup table that will use structured pattern matching in order to match the tag of a button
number and assign corresponding variable values that will be passed into a function. """
logger.info(f"SENDER: {band_number}")
match band_number:
case 1:
return (str(18300), str(300), str(1), str(20))
case 2:
return (str(18900), str(900), str(2), str(20))
case 3:
return (str(19575), str(1575), str(3), str(20))
case 4:
return (str(20175), str(2175), str(4), str(20))
case 5:
return (str(20525), str(2525), str(5), str(10))
case 7:
return (str(21100), str(3100), str(7), str(20))
case 8:
return (str(21625), str(3625), str(8), str(10))
case 10:
return (str(22450), str(4450), str(10), str(20))
case 11:
return (str(22850), str(4850), str(11), str(10))
case 12:
return (str(23095), str(5095), str(12), str(10))
case 13:
return (str(23230), str(5230), str(13), str(10))
case 14:
return (str(23300), str(5330), str(14), str(10))
case 17:
return (str(23790), str(5790), str(17), str(10))
case 20:
return (str(24300), str(6300), str(20), str(20))
case 21:
return (str(24525), str(6525), str(21), str(15))
case 25:
return (str(26365), str(8365), str(25), str(20))
case 26:
return (str(26865), str(8865), str(26), str(15))
case 28:
return (str(27435), str(9435), str(28), str(20))
case 30:
return (str(27710), str(9820), str(30), str(10))
case 66:
return (str(132322), str(66886), str(66), str(20))
case 71:
return (str(133297), str(68761), str(71), str(20))
case _:
logger.info(f"\nSENDER: {type(band_number)}\n")
raise ValueError("Incorrect Band Number Passed")
def my_function(sender):
"""Creation of a function that takes in the button Tag as an input and based on that tag input, it matches it to the corresponding values required for that band number to properly display.
That information is passed along to the subprocess command that runs commands in the terminal from python."""
ULEARFCN, DLEARFCN, BAND, BAND_WIDTH = lookup_table(sender)
command = f"{file} {ULEARFCN} {DLEARFCN} {BAND} {BAND_WIDTH} {corrected_output}"
        subprocess.run(command, shell=True)
def button_callback(sender, app_data, user_data):
""" Creation of the callback function that gets activated whenever a button is pressed. """
logger.info(f"\nSENDER: {sender}\n")
my_function(int(sender))
# Creation of the Dear Py Gui Window
with dpg.window(label="SDR GUI", no_scrollbar=True, no_collapse=True, height=735, width=550, no_close=True, no_move=True, tag="SDR_GUI", no_resize=True) as window:
# Adds the cell antenna logo image.
dpg.add_image("texture_tag", pos=(230, 25))
        # Uses structured pattern matching to create the list of buttons on the left side.
        for i in range(14):
match i:
case 1:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 20 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 2:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 55 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 3:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 90 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 4:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 125 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 5:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 160 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 7:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 195 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 8:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 230 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 10:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 265 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 11:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 300 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 12:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 335 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 13:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
0, 370 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case _:
continue
        # Uses structured pattern matching to create the list of buttons on the right side.
        for i in range(14, 72):
match i:
case 14:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 20 + ADJUSTMENT), width=250,)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 17:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 55 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 20:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 90 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 21:
dpg.add_button(label=f"Band {i} (15 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 125 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 25:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 160 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 26:
dpg.add_button(label=f"Band {i} (15 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 195 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 28:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 230 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 30:
dpg.add_button(label=f"Band {i} (10 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 265 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 66:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 300 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case 71:
dpg.add_button(label=f"Band {i} (20 MHZ Bandwidth)", callback=button_callback, tag=f"{i}", pos=(
300, 335 + ADJUSTMENT), width=250)
dpg.bind_item_theme(theme=grn_btn_theme, item=f"{i}")
case _:
continue
# Addition of Custom Input Field
with dpg.child_window(pos=(5, 575 - re_pos), height=25, width=450, tag="Custom_Input_Fields", no_scrollbar=True, border=False):
dpg.add_text(" ULEARFCN DLEARFCN BAND B.W")
with dpg.child_window(pos=(5, 550 - re_pos), height=25, width=300, tag="Custom_Input_Title", no_scrollbar=True, border=False):
dpg.add_text("Enter in Your Custom Values Below:")
with dpg.child_window(pos=(5, 625 - re_pos), height=25, width=550, tag="Custom_Freq_Input_Title", no_scrollbar=True, border=False):
dpg.add_text(
"Enter a Frequency Value Below: Must be close to the Middle Freq. of the Band")
with dpg.child_window(pos=(5, 675 - re_pos), height=40, width=550, no_scrollbar=True, border=False, tag="Display_EARFCN_Values"):
dpg.add_text("The corresponding EARFCN values can be found below along with the Band Number.\nYou may input them into the Custom Values Field, along with your desired \nbandwidth.")
dpg.add_input_text(tag="Single_Band_Input", pos=(
0, 650 - re_pos), width=75, no_spaces=True)
dpg.add_input_text(tag="ULEARFCN_VALUE", pos=(
0, 600 - re_pos), width=75, no_spaces=True)
dpg.add_input_text(tag="DLEARFCN_VALUE", pos=(
100, 600 - re_pos), width=75, no_spaces=True)
dpg.add_input_text(tag="BAND_VALUE", pos=(
200, 600 - re_pos), width=75, no_spaces=True)
dpg.add_input_text(tag="BW_VALUE", pos=(
300, 600 - re_pos), width=75, no_spaces=True)
# Creation of Buttons used to send all the custom input parameters as well as the frequency converter
dpg.add_button(label="Send All", callback=send_all,
pos=(400, 600 - re_pos), tag="Send_All")
dpg.bind_item_theme(theme=grn_btn_theme, item="Send_All")
dpg.add_button(label="Send", callback=get_earfcn_values,
pos=(100, 650 - re_pos), tag="send_band")
dpg.bind_item_theme(theme=grn_btn_theme, item="send_band")
# Creation of the Popup Window that is initially hidden in the program
with dpg.child_window(pos=(50, 367), show=False, tag="popup_window", height=150, width=450, no_scrollbar=True):
dpg.add_text(" Error: You have entered an improper value! \n This Value may not be in the range of supported frequencies. \n The band numbers present on the Green Buttons display \n the full range and capability of this device. \n Your chosen frequency must fit within those bands, and \n should be a value close to the middle frequency of that band. \n Or you have an invalid character in your input!\n Or the value you entered is not a Whole Number!")
dpg.add_button(show=True, tag="popup_window_button", height=25,
width=45, label="Close", callback=close, pos=(195, 120))
dpg.bind_item_theme(theme=red_btn_theme,
item="popup_window_button")
with dpg.child_window(pos=(440, 700), height=35, width=110, no_scrollbar=True, border=False):
dpg.add_text("Version: 0.2.3")
# The required DPG (dearpygui) commands to run and close the program
dpg.create_viewport(title='SDR GUI', width=550, height=735)
dpg.setup_dearpygui()
dpg.show_viewport()
dpg.start_dearpygui()
dpg.destroy_context()
if __name__ == "__main__":
""" Runs the main function. """
    main()
| /s60z-0.2.5.tar.gz/s60z-0.2.5/src/main.py | pypi |
## sAsync
*SQLAlchemy made asynchronous.*
* [API Docs](http://edsuom.com/sAsync/sasync.html)
* [PyPI Page](https://pypi.org/project/sAsync/)
* [Project Page](http://edsuom.com/sAsync.html) at **edsuom.com**
* *See also* [AsynQueue](http://edsuom.com/AsynQueue.html)
**sAsync** lets you use the database transaction core of the
[SQLAlchemy](http://www.sqlalchemy.org/) Python SQL toolkit in an
asynchronous fashion. In addition to an access broker for conveniently
managing database access, table setup, and transactions under the
[Twisted](http://twistedmatrix.com) framework, it provides persistent
item-value stores and arrays.
The **sAsync** package uses a threaded task queue from the
[AsynQueue](http://edsuom.com/AsynQueue.html) package to wrap your
SQLAlchemy database access code inside asynchronous transactions. At
the lowest level, it provides a
[@transact](http://edsuom.com/sAsync/sasync.database.html#transaction)
decorator for your database-access methods that makes them immediately
return a Twisted *Deferred* object.
For example, suppose you want to run a method that selects a list of
row objects from a table. Instead of waiting around for your method to
return the list, blocking everything else your program is trying to
do, you decorate it with *@transact* and run it. It immediately hands
you a *Deferred* object that promises an eventual result. You add one
or more callback functions to the *Deferred* and *Twisted* will call
them when the database transaction is finally run from the queue.
Once you've attached your callback function to the deferred result,
you can go on with your business, knowing that SQLAlchemy will be
cranking away behind the scenes (in a transaction-specific thread) to
obtain a result for you. When the result is finally ready, your
transact-decorated method will look at the *Deferred*, see the note
you scribbled on it (*Pls call this function with the result. Thx!*),
and give your function a call with the list of rows. It will supply
the callback with the list as the function's argument.
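
Here is a minimal sketch of that pattern (the `PeopleBroker` class, its
`people` table, and `doSomethingWith` are illustrative assumptions; the
table would be created in a `startup` method as described in the next
section):

```python
from sasync.database import transact, AccessBroker

class PeopleBroker(AccessBroker):
    @transact
    def everybody(self):
        # Runs in a worker thread; the caller immediately gets a
        # Deferred instead of blocking on this select.
        return self.people.select().execute().fetchall()

broker = PeopleBroker("sqlite://")
d = broker.everybody()
d.addCallback(lambda rows: [doSomethingWith(row) for row in rows])
```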
Actually, it's better than that. Unless you specify otherwise, the
*Deferred* object will iterate the rows of your database query in an
asynchronous, Twisted fashion. The result will be an instance of
AsynQueue's
[Deferator](http://edsuom.com/AsynQueue/asynqueue.iteration.Deferator.html),
which spits out baby *Deferred* objects as it iterates over your
result. If you use the *consumer* keyword to supply an object
implementing Twisted's *IConsumer* interface, the result rows will be
"produced" to it instead.
You can also do some asynchronous database operations on a higher
level. For example, you can maintain a store of Python objects, with
each object accessible (with deferred results) via a unique key. The
[items.py](http://edsuom.com/sAsync/items.py.html) source file
provides a nice simple example of how to use sAsync.
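
For instance, a minimal sketch of such a store (the group ID, key and
value here are arbitrary examples, and `handleColor` is assumed):

```python
from sasync.items import Items

store = Items(1234, "sqlite://")
d = store.insert("color", "blue")
d.addCallback(lambda _: store.load("color"))
d.addCallback(lambda value: handleColor(value))
```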
### Table Setup
Relational databases operate on tables, and setting up the tables in
your database can be a huge pain in the ass. But the job is easier
with sAsync's
[AccessBroker.table](http://edsuom.com/sAsync/sasync.database.AccessBroker.html#table)
method. It creates the table you specify if none exists yet and sets
up indices for it, as you direct it. Once the table is in your
database, you can reference it as an attribute of your *AccessBroker*
object.
The sensible place to do table setup is at the very beginning, before
any database transactions get underway. You can define a *startup*
method of your *AccessBroker*, and it will get run when the object is
constructed.
Here's the *startup* method for the database access broker in my
logfile-to-database parsing application *statalysis*:
```python
@defer.inlineCallbacks
def startup(self):
# Primary key is an auto-incrementing index, which can be used
# to find out the order in which requests were made within a
# single second.
yield self.table(
'entries',
SA.Column('id', SA.Integer, primary_key=True),
SA.Column('dt', SA.DateTime),
SA.Column('ip', SA.String(15)),
SA.Column('http', SA.SmallInteger),
SA.Column('was_rd', SA.Boolean),
SA.Column('id_vhost', SA.Integer),
SA.Column('id_url', SA.Integer),
SA.Column('id_ref', SA.Integer),
SA.Column('id_ua', SA.Integer),
index_dt=['dt'], index_ip=['ip']
)
for name in self.indexedValues:
yield self.table(
name,
SA.Column('id', SA.Integer, primary_key=True),
SA.Column('value', SA.String(self.valueLength)),
unique_value=['value']
)
yield self.table(
'bad_ip',
SA.Column('ip', SA.String(15), primary_key=True),
SA.Column('purged', SA.Boolean, nullable=False),
)
yield self.table(
'files',
SA.Column(
'name', SA.String(self.valueLength), primary_key=True),
SA.Column('dt', SA.DateTime),
SA.Column('records', SA.Integer),
)
self.pendingID = {}
self.dtk = DTK()
self.ipm = IPMatcher()
self.ipList = []
self.idTable = {}
for name in self.indexedValues:
self.idTable[name] = {}
```
### Deferred Results
Notice all the *yield* statements? Twisted's *inlineCallbacks*
capability makes it easy to use *Deferred* objects. You don't need to
add all those callbacks and a pile of callback functions one after the
other. Just decorate your method with *@defer.inlineCallbacks* (after
doing "*from twisted.internet import defer*," of course) and yield the
*Deferred* objects. Processing will resume in the method when the
*Deferred* fires.
In the *startup* method, there is nothing obtaining any deferred
results. The *Deferred* objects from each call to *self.table* are
just yielded for asynchronous program flow and that's that. Each one
fires when a table has been made (or checked), with no result except
the fact that setup work is done for that particular table.
With inline callbacks, you can get the value that a *Deferred* has
been fired with. Often there's important information there. Here's an
example of how that is done, in a method of the *statalysis* access
broker that actually accesses database content:
```python
@defer.inlineCallbacks
def setRecord(self, dt, record):
"""
Adds all needed database entries for the supplied record at the
specified datetime.
@return: A C{Deferred} that fires with a bool indicating if a
new entry was added.
"""
ip = record['ip']
if ip in self.ipList:
# Ignore this, it's from an already purged IP address
result = False
else:
self.ipm.addIP(ip)
# Build list of values and indexed-value IDs
values = [record[x] for x in self.directValues]
for name in self.indexedValues:
value = record[name][:self.valueLength]
if value in self.idTable[name]:
# We've set this value already
ID = self.idTable[name][value]
else:
ID = yield self.setNameValue(name, value, niceness=-15)
# Add to idTable for future reference, avoiding DB checks
self.idTable[name][value] = ID
values.append(ID)
# With this next line commented out and result = False
# instead, the memory leak still persists. CPU time for the
# main process was 66% of normal.
result = yield self.setEntry(dt, values)
defer.returnValue(result)
```
The ID value for an indexed value, when not found in the in-memory
*idTable*, is obtained from the transaction method *setNameValue*. It
returns a deferred result, and that is put into the local *ID*
variable via the inline callback.
### Transaction Methods
So, what does *setNameValue* look like? I thought you'd never ask.
```python
@transact
def setNameValue(self, name, value):
"""
Get the unique ID for this value in the named table, adding a new
entry for it there if necessary.
"""
table = getattr(self, name)
if not self.s("s_{}".format(name)):
self.s([table.c.id], table.c.value == SA.bindparam('value'))
ID = self.s().execute(value=value).scalar()
if ID is None:
rp = table.insert().execute(value=value)
ID = rp.lastrowid
rp.close()
return ID
```
Now we get to see a little sAsync magic. Look at that
[@transact](http://edsuom.com/sAsync/sasync.database.html#transact)
decorator. It makes the method into a transaction, running it via the
thread queue and with its own SQLAlchemy begin/commit setup for the
database connection. There's a lot going on with that little
decorator; take a look at the source for
[database.py](http://edsuom.com/sAsync/database.py.html) to get some
idea of what's under the hood.
Another bit of coolness in *setNameValue* is the polymorphic method
*s* (of
[sasync.database.AccessBroker](http://edsuom.com/sAsync/sasync.database.AccessBroker.html#s)). It
lets you compile and save an SQLAlchemy *select* object and then run
it with new parameters whenever you want.
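
In sketch form, the compile-once/run-many pattern looks like this (the
`things` table and `by_name` label are illustrative assumptions):

```python
@transact
def lookupByName(self, name):
    t = self.things
    if not self.s('by_name'):
        # Compiled only on the first call...
        self.s([t.c.id], t.c.name == SA.bindparam('name'))
    # ...then re-executed with fresh parameters every time.
    return self.s().execute(name=name).scalar()
```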
### License
Copyright (C) 2006-2007, 2015 by Edwin A. Suominen
See [edsuom.com](http://edsuom.com) for API documentation as well as
information about Ed's background and other projects, software and
otherwise.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the
License. You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language
governing permissions and limitations under the License.
| /sAsync-0.9.3.tar.gz/sAsync-0.9.3/README.md | pypi |
# Imports
from twisted.internet import defer
import sqlalchemy as SA
from sasync.database import transact, AccessBroker
NICENESS_WRITE = 6
class Missing:
"""
An instance of me is returned as the value of a missing item.
"""
def __init__(self, group, name):
self.group, self.name = group, name
class Transactor(AccessBroker):
"""
I do the hands-on work of non-blocking database access for the
persistence of C{name:value} items within a uniquely-identified
group, e.g., for a persistent dictionary using
L{pdict.PersistentDict}.
My methods return Twisted C{Deferred} instances to the results of
their database accesses rather than forcing the client code to
block while the database access is being completed.
"""
def __init__(self, ID, *url, **kw):
"""
Instantiates me for the items of a particular group uniquely identified
by the supplied integer I{ID}, optionally using a particular database
connection to I{url} with any supplied keywords.
"""
if not isinstance(ID, int):
raise TypeError("Item IDs must be integers")
self.groupID = ID
if url:
AccessBroker.__init__(self, url[0], **kw)
else:
AccessBroker.__init__(self)
def startup(self):
"""
Startup method, automatically called before the first transaction.
"""
return self.table(
'sasync_items',
SA.Column('group_id', SA.Integer, primary_key=True),
SA.Column('name', SA.String(40), primary_key=True),
SA.Column('value', SA.PickleType, nullable=False)
)
@transact
def load(self, name):
"""
Item load transaction
"""
items = self.sasync_items
if not self.s('load'):
self.s(
[items.c.value],
SA.and_(items.c.group_id == self.groupID,
items.c.name == SA.bindparam('name')))
row = self.s().execute(name=name).fetchone()
if row:
return row['value']
return Missing(self.groupID, name)
@transact
def loadAll(self):
"""
        Load all my items, returning a C{name:value} dict
"""
items = self.sasync_items
if not self.s('load_all'):
self.s(
[items.c.name, items.c.value],
items.c.group_id == self.groupID)
rows = self.s().execute().fetchall()
result = {}
for row in rows:
result[row['name']] = row['value']
return result
@transact
def update(self, name, value):
"""
Item overwrite (entry update) transaction
"""
items = self.sasync_items
u = items.update(
SA.and_(items.c.group_id == self.groupID,
items.c.name == name))
u.execute(value=value)
@transact
def insert(self, name, value):
"""
Item add (entry insert) transaction
"""
self.sasync_items.insert().execute(
group_id=self.groupID, name=name, value=value)
@transact
def delete(self, *names):
"""
Item(s) delete transaction
"""
items = self.sasync_items
self.sasync_items.delete(
SA.and_(items.c.group_id == self.groupID,
items.c.name.in_(names))).execute()
@transact
def names(self):
"""
All item names loading transaction
"""
items = self.sasync_items
if not self.s('names'):
self.s(
[items.c.name],
items.c.group_id == self.groupID)
return [str(x[0]) for x in self.s().execute().fetchall()]
class Items(object):
"""
I provide a public interface for non-blocking database access to
persistently stored name:value items within a uniquely identified
group, e.g., for a persistent dictionary using
L{pdict.PersistentDict}.
Before you use any instance of me, you must specify the parameters
for creating an C{SQLAlchemy} database engine. A single argument
is used, which specifies a connection to a database via an
    RFC-1738 url. In addition, the keyword options listed in the API
    docs for L{sasync} can be employed.
You can set an engine globally, for all instances of me, via the
L{sasync.engine} package-level function, or via the
L{queue.Factory.setGlobal} class method. Alternatively, you can
specify an engine for one particular instance by supplying the
parameters to my constructor.
"""
def __init__(self, ID, *url, **kw):
"""
Instantiates me for the items of a particular group uniquely
identified by the supplied hashable I{ID}.
In addition to any engine-specifying keywords supplied, the following
are particular to this constructor:
@param ID: A hashable object that is used as my unique identifier.
@keyword nameType: A C{type} object defining the type that each name
will be coerced to after being loaded as a string from the
database.
"""
try:
self.groupID = hash(ID)
except:
raise TypeError("Item IDs must be hashable")
self.nameType = kw.pop('nameType', str)
if url:
self.t = Transactor(self.groupID, url[0], **kw)
else:
self.t = Transactor(self.groupID)
for name in ('waitUntilRunning', 'callWhenRunning', 'shutdown'):
setattr(self, name, getattr(self.t, name))
def write(self, funcName, name, value, niceness=None):
"""
Performs a database write transaction, returning a deferred to its
completion.
"""
func = getattr(self.t, funcName)
if niceness is None:
niceness = NICENESS_WRITE
return self.callWhenRunning(func, name, value, niceness=niceness)
def load(self, name):
"""
Loads item I{name} from the database, returning a deferred to the
loaded value. A L{Missing} object represents the value of a missing
item.
"""
return self.callWhenRunning(self.t.load, name)
@defer.inlineCallbacks
def loadAll(self):
"""
Loads all items in my group from the database, returning a
deferred to a dict of the loaded values. The keys of the dict
are coerced to the type of my I{nameType} attribute.
"""
newDict = {}
yield self.waitUntilRunning()
valueDict = yield self.t.loadAll()
for name, value in valueDict.iteritems():
key = self.nameType(name)
newDict[key] = value
defer.returnValue(newDict)
def update(self, name, value):
"""
Updates the database entry for item I{name} = I{value}, returning a
deferred that fires when the transaction is done.
"""
return self.write('update', name, value)
def insert(self, name, value):
"""
Inserts a database entry for item I{name} = I{value}, returning a
deferred that fires when the transaction is done.
"""
return self.write('insert', name, value)
def delete(self, *names):
"""
Deletes the database entries for the items having the supplied
I{*names}, returning a deferred that fires when the transaction is
done.
"""
return self.t.delete(*names)
def names(self):
"""
Returns a deferred that fires with a list of the names of all items
currently defined in my group.
"""
def gotNames(names):
return [self.nameType(x) for x in names]
d = self.t.names()
d.addCallback(gotNames)
return d
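
# Usage sketch (illustrative; the group ID, key and value are arbitrary):
#
#   items = Items(1234, 'sqlite://')
#   d = items.insert('answer', 42)
#   d.addCallback(lambda _: items.load('answer'))
#   d.addCallback(lambda value: ...)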
__all__ = ['Missing', 'Items']
| /sAsync-0.9.3.tar.gz/sAsync-0.9.3/sasync/items.py | pypi |
# Imports
from twisted.internet import defer, reactor
import sqlalchemy as SA
from asynqueue import DeferredTracker
from sasync.database import transact, AccessBroker
NICENESS_WRITE = 6
class Transactor(AccessBroker):
"""
I do the hands-on work of (potentially) non-blocking database access for
the persistence of array elements within a uniquely-identified group.
My methods return Twisted deferred instances to the results of their
database accesses rather than forcing the client code to block while the
database access is being completed.
"""
def __init__(self, ID, *url, **kw):
"""
Instantiates me for a three-dimensional array of elements within a
particular group uniquely identified by the supplied integer I{ID},
using a database connection to I{url}.
"""
if not isinstance(ID, int):
raise TypeError("Item IDs must be integers")
self.groupID = ID
self.dt = DeferredTracker()
super(Transactor, self).__init__(*url[:1], **kw)
def startup(self):
"""
You can run my transaction methods when the deferred returned from
this method fires, and not before.
"""
return self.table(
'sasync_array',
SA.Column('group_id', SA.Integer),
SA.Column('x', SA.Integer),
SA.Column('y', SA.Integer),
SA.Column('z', SA.Integer),
SA.Column('value', SA.PickleType, nullable=False),
unique_elements=['group_id', 'x', 'y', 'z']
)
@transact
def load(self, x, y, z):
"""
Element load transaction
"""
array = self.sasync_array
if not self.s('load'):
self.s(
[array.c.value],
SA.and_(array.c.group_id == self.groupID,
array.c.x == SA.bindparam('x'),
array.c.y == SA.bindparam('y'),
array.c.z == SA.bindparam('z'))
)
        row = self.s().execute(x=hash(x), y=hash(y), z=hash(z)).first()
        return row['value'] if row else None
@transact
def update(self, x, y, z, value):
"""
Element overwrite (entry update) transaction
"""
elements = self.sasync_array
u = elements.update(
SA.and_(elements.c.group_id == self.groupID,
elements.c.x == hash(x),
elements.c.y == hash(y),
elements.c.z == hash(z))
)
u.execute(value=value)
@transact
def insert(self, x, y, z, value):
"""
Element add (entry insert) transaction
"""
self.sasync_array.insert().execute(
group_id=self.groupID,
x=hash(x), y=hash(y), z=hash(z), value=value)
@transact
def delete(self, x, y, z):
"""
Element delete transaction
"""
elements = self.sasync_array
self.sasync_array.delete(
SA.and_(elements.c.group_id == self.groupID,
elements.c.x == hash(x),
elements.c.y == hash(y),
elements.c.z == hash(z))
).execute()
@transact
def clear(self):
"""
Transaction to clear all elements (B{Use with care!})
"""
elements = self.sasync_array
self.sasync_array.delete(
elements.c.group_id == self.groupID).execute()
class PersistentArray(object):
"""
I am a three-dimensional array of Python objects, addressable by any
three-way combination of hashable Python objects. You can use me as a
two-dimensional array by simply using some constant, e.g., C{None} when
supplying an address for my third dimension.
"""
search = None
def __init__(self, ID, *url, **kw):
"""
        Constructor, with a URL and any engine-specifying keywords
        supplied if a particular engine is to be used for this
        instance.
"""
try:
self.ID = hash(ID)
except:
raise TypeError("Item IDs must be hashable")
self.dt = DeferredTracker()
self.t = Transactor(self.ID, *url[:1], **kw)
for name in ('waitUntilRunning', 'callWhenRunning', 'shutdown'):
setattr(self, name, getattr(self.t, name))
self.dt.put(self.waitUntilRunning())
def write(self, funcName, *args, **kw):
"""
Performs a database write transaction, returning a deferred to its
completion.
"""
func = getattr(self.t, funcName)
kw = {'niceness':kw.get('niceness', NICENESS_WRITE)}
return self.callWhenRunning(func, *args, **kw)
def get(self, x, y, z):
"""
Retrieves an element (x,y,z) from the database.
"""
return self.dt.deferToAll().addCallback(
lambda _: self.t.load(x, y, z))
def set(self, x, y, z, value):
"""
Persists the supplied I{value} of element (x,y,z) to the database,
inserting or updating a row as appropriate.
"""
def loaded(loadedValue):
if loadedValue is None:
return self.write("insert", x, y, z, value)
return self.write("update", x, y, z, value)
d = self.callWhenRunning(self.t.load, x, y, z).addCallback(loaded)
self.dt.put(d)
return d
def delete(self, x, y, z):
"""
Deletes the database row for element (x,y,z).
"""
d = self.write("delete", x, y, z)
self.dt.put(d)
return d
def clear(self):
"""
Deletes the entire group of database rows for B{all} of my
elements. B{Use with care!}
"""
d =self.write("clear", niceness=0)
self.dt.put(d)
return d
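
# Usage sketch (illustrative): a 3-D persistent array addressed by any
# hashable keys; pass a constant such as None for an unused dimension.
#
#   pa = PersistentArray(42, 'sqlite://')
#   d = pa.set('x', 'y', None, 3.14)
#   d.addCallback(lambda _: pa.get('x', 'y', None))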
__all__ = ['PersistentArray']
| /sAsync-0.9.3.tar.gz/sAsync-0.9.3/sasync/parray.py | pypi |
import logging
from twisted.internet import defer
import sqlalchemy as SA
from sqlalchemy import pool
import asynqueue
class Factory(object):
"""
I generate C{asynqueue.ThreadQueue} objects, a unique one for each
call to me with a unique url-kw combination.
"""
globalQueue = None
def __init__(self):
self.queues = {}
@staticmethod
def newQueue(url, **kw):
"""
Returns a C{Deferred} that fires with a new C{asynqueue.ThreadQueue}
that has a new SQLAlchemy engine attached as its I{engine}
attribute.
"""
def getEngine():
# Add a NullHandler to avoid "No handlers could be
# found for logger sqlalchemy.pool." messages
logging.getLogger(
"sqlalchemy.pool").addHandler(logging.NullHandler())
# Now create the engine
return SA.create_engine(url, **kw)
def gotEngine(engine):
q.engine = engine
return q
# Iterators are always returned as raw because ResultProxy
# objects are iterators but sAsync is smarter at handling them
# than AsynQueue.
q = asynqueue.ThreadQueue(
raw=True,
verbose=kw.pop('verbose', False),
spew=kw.pop('spew', False),
returnFailure=kw.pop('returnFailure', False))
return q.call(getEngine).addCallback(gotEngine)
@classmethod
def getGlobal(cls):
"""
Returns a deferred reference to the global queue, assuming one has
been defined with L{setGlobal}.
Calling this method, or calling an instance of me without a
url argument, is the only approved way to get a reference to
the global queue.
"""
if cls.globalQueue:
return defer.succeed(cls.globalQueue)
d = defer.Deferred()
if hasattr(cls, 'd') and not cls.d.called:
cls.d.chainDeferred(d)
else:
cls.d = d
return d
@classmethod
def setGlobal(cls, url, **kw):
"""
Sets up a global queue and engine, storing as the default and
returning a deferred reference to it.
Calling this method is the only approved way to set the global
queue.
"""
def gotQueue(q):
del cls.d
cls.globalQueue = q
return q
cls.d = cls.newQueue(url, **kw).addCallback(gotQueue)
return cls.d
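
    # Usage sketch (illustrative): set the global queue once at startup,
    # then retrieve it anywhere without repeating the URL.
    #
    #   d = Factory.setGlobal('sqlite://')
    #   d.addCallback(lambda q: q.call(someBlockingFunction))  # assumed func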
def kill(self, q):
"""
Removes the supplied queue object from my local queue cache and
shuts down the queue. Returns a C{Deferred} that fires when
the removal and shutdown are done.
Has no effect on the global queue.
"""
for key, value in self.queues.iteritems():
if value == q:
# Found it. Delete and quit looking.
del self.queues[key]
break
if q == self.globalQueue:
# We can't kill the global queue
return defer.succeed(None)
# Shut 'er down
return q.shutdown()
def __call__(self, *url, **kw):
"""
Returns a C{Deferred} that fires with an C{asynqueue.ThreadQueue}
that has an C{SQLAlchemy} engine attached to it, constructed
with the supplied url and any keywords. The engine can be
referenced via the queue's I{engine} attribute.
If a queue has already been constructed with the same url-kw
parameters, that same one is returned. Otherwise, a new one is
constructed and saved for a repeat call.
If there is no I{url} argument, the global default queue will
be returned. There must be one for that to work, of course.
Separate instances of me can have separate queues for the
exact same url-kw parameters. But all instances share the same
global queue.
"""
def gotQueue(q):
self.queues[key] = q
return q
if url:
url = url[0]
key = hash((url, tuple(kw.items())))
if key in self.queues:
return defer.succeed(self.queues[key])
return self.newQueue(url, **kw).addCallback(gotQueue)
        return self.getGlobal()
| /sAsync-0.9.3.tar.gz/sAsync-0.9.3/sasync/queue.py | pypi |
def engine(url, **kw):
"""
Specifies the parameters for creating an SQLAlchemy database
engine that will be used as a default for all instances of
L{AccessBroker} and all persistent objects based thereon.
@see: U{http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html}.
@param url: An RFC-1738 url to a database connection.
@keyword strategy: The Strategy describes the general
configuration used to create this Engine. The two available
values are plain, which is the default, and threadlocal, which
applies a 'thread-local context' to implicit executions
performed by the Engine. This context is further described in
Implicit Connection Contexts.
@type strategy: 'plain'.
@keyword pool: An instance of sqlalchemy.pool.Pool to be used as
the underlying source for connections, overriding the engine's
connect arguments (pooling is described in Connection
Pooling). If C{None}, a default Pool (usually QueuePool, or
SingletonThreadPool in the case of SQLite) will be created using
the engine's connect arguments.
@type pool: C{None}
@keyword pool_size: The number of connections to keep open inside
the connection pool. This is only used with QueuePool.
@type pool_size: 5
@keyword max_overflow: The number of connections to allow in
'overflow,' that is connections that can be opened above and
beyond the initial five. This is only used with QueuePool.
@type max_overflow: 10
@keyword pool_timeout: number of seconds to wait before giving up
on getting a connection from the pool. This is only used with
QueuePool.
@type pool_timeout: 30
@keyword echo: if C{True}, the Engine will log all statements as
well as a repr() of their parameter lists to the engines logger,
which defaults to sys.stdout. The echo attribute of
ComposedSQLEngine can be modified at any time to turn logging on
and off. If set to the string 'debug', result rows will be
printed to the standard output as well.
@type echo: C{False}
@keyword module: used by database implementations which support
multiple DBAPI modules, this is a reference to a DBAPI2 module
to be used instead of the engine's default module. For Postgres,
the default is psycopg2, or psycopg1 if 2 cannot be found. For
Oracle, its cx_Oracle.
@type module: C{None}
@keyword use_ansi: used only by Oracle; when C{False}, the Oracle
driver attempts to support a particular 'quirk' of Oracle
versions 8 and previous, that the LEFT OUTER JOIN SQL syntax is
not supported, and the 'Oracle join' syntax of using
<column1>(+)=<column2> must be used in order to achieve a LEFT
OUTER JOIN.
@type use_ansi: C{True}
@keyword threaded: used by cx_Oracle; sets the threaded parameter
of the connection indicating thread-safe usage. cx_Oracle docs
indicate setting this flag to C{False} will speed performance by
10-15%. While this defaults to C{False} in cx_Oracle, SQLAlchemy
defaults it to C{True}, preferring stability over early
optimization.
@type threaded: C{True}
@keyword use_oids: used only by Postgres, will enable the column
name 'oid' as the object ID column, which is also used for the
default sort order of tables. Postgres as of 8.1 has object IDs
disabled by default.
@type use_oids: C{False}
@keyword convert_unicode: if set to C{True}, all String/character
based types will convert Unicode values to raw byte values going
into the database, and all raw byte values to Python Unicode
coming out in result sets. This is an engine-wide method to
provide unicode across the board. For unicode conversion on a
column-by-column level, use the Unicode column type instead.
@type convert_unicode: C{False}
@keyword encoding: the encoding to use for all Unicode
translations, both by engine-wide unicode conversion as well as
the Unicode type object.
@type encoding: 'utf-8'
"""
from queue import Factory
    Factory.setGlobal(url, **kw)
| /sAsync-0.9.3.tar.gz/sAsync-0.9.3/sasync/__init__.py | pypi |
from twisted.internet import defer
import sqlalchemy as SA
class SelectAndResultHolder(object):
"""
I am yielded by L{database.AccessBroker.selectorator} to let you
work on (1) a C{select} of the provided columns or (2) an object
produced by a callable and any args for it, then call me for its
result.
Provide my constructor with a reference to the
L{database.AccessBroker} and the args, plus any keywords you want
added to the call.
Everything is cleaned up via my L{close} method after the "loop"
ends.
"""
def __init__(self, broker, *args, **kw):
self.broker = broker
if callable(args[0]):
self._sObject = args[0](*args[1:])
else:
self._sObject = SA.select(args)
self.kw = kw
def _wrapper(self, *args, **kw):
"""
Replaces the C{select} object with the result of a method of it that
you obtained as an attribute of me. Henceforth my attributes
shall be those of the replacement object.
"""
self._sObject = getattr(self._sObject, self._methodName)(*args, **kw)
def __getattr__(self, name):
"""
Access an attribute of my C{select} object (or a replacement obtained
via a method call) as if it were my own. If the attribute is
callable, wrap it in my magic object-replacement wrapper
method.
"""
obj = getattr(self._sObject, name)
if callable(obj):
self._methodName = name
return self._wrapper
return obj
def __call__(self, *args, **kw):
"""
Executes the C{select} object, with any supplied args and keywords.
If you call this from within a transaction already, the
nesting will be dealt with appropriately and you will get an
immediate C{ResultProxy}. Otherwise, you'll get a deferred that
fires with the result, with row iteration coolness.
As with any transaction, you can disable such behavior and get
either the raw C{ResultProxy} (with I{raw}) or a list of rows
(with I{asList}). Those transaction keywords can get supplied
to my constructor or to this call, if it doesn't itself occur
from inside a transaction.
"""
kw.update(self.kw)
self.result = self.broker.execute(self._sObject, *args, **kw)
return self.result
def close(self):
"""
Closes the C{ResultProxy} if possible.
"""
def closer(rp):
rp.close()
return rp
result = getattr(self, 'result', None)
if isinstance(result, defer.Deferred):
result.addCallback(closer)
elif callable(getattr(result, 'close', None)):
            result.close()
| /sAsync-0.9.3.tar.gz/sAsync-0.9.3/sasync/selex.py | pypi |
import math
import json
import os
from sPyBlocks.constant_spike_source import ConstantSpikeSource
from sPyBlocks.neural_decoder import NeuralDecoder
from sPyBlocks.neural_encoder import NeuralEncoder
"""
Memory with forgetting (DG-CA3-CA1 one-hot memory)
+ Population:
+ Input: memory input
+ DG: one-hot codification of cue of the memory
+ CA3cue: store direction/cue of memories
+ CA3cont: store content of memories
+ CA1: recode the cue of the memory to make it binary again in the output
+ Output: output of the network
+ Synapses:
+ Input-DG: 1 to 1 excitatory and static (first n bits: corresponding to the cue of memories)
    + Input-CA3cont: 1 to 1 excitatory and static (the rest of the bits)
    + DG-CA3cue: 1 to 1 excitatory and static
    + CA3cue-CA3cont: all to all excitatory and dynamic (STDP)
    + CA3cue-CA1: 1 to 1 excitatory and static
    + CA1-Output: 1 to 1 excitatory and static
    + CA3cont-Output: 1 to 1 excitatory and static
More information in paper:
https://arxiv.org/abs/2205.04782
"""
class Memory:
"""Spike-based bio-inspired hippocampal memory model with forgetting
:param cueSize: number of cues of the memory
:type cueSize: int
:param contSize: size of the content of the memory in bits/neuron
:type contSize: int
:param sim: object in charge of handling the simulation
:type sim: simulation object (spynnaker8 for spynnaker)
:param ILayer: input population to the memory model
:type ILayer: population
:param OLayer: output population of the memory model
:type OLayer: population
:param configFilePath: path + filename to the config file of internal model parameters
:type configFilePath: int, optional
:param initCA3W: list of initial weight to use in CA3 synapse (initial memory content); format of each element of the list: (source_neuron_id, destination_neuron_id, initial_weight, delay)
:type initCA3W: list, optional
:ivar cueSize: number of cues of the memory, initial value: cueSize
:vartype cueSize: int
:ivar contSize: size of the content of the memory in bits/neuron, initial value: contSize
:vartype contSize: int
:ivar sim: object in charge of handling the simulation, initial value: sim
:vartype sim: simulation object (spynnaker8 for spynnaker)
:ivar ILayer: input population to the memory model, initial value: ILayer
:vartype ILayer: population
:ivar CA3cueLayer: CA3cue population
:vartype CA3cueLayer: population
:ivar CA3contLayer: CA3cont population
:vartype CA3contLayer: population
:ivar DGLayer: DG population
:vartype DGLayer: population
:ivar CA1Layer: CA1 population
:vartype CA1Layer: population
:ivar OLayer: output population of the memory model, initial value: OLayer
:vartype OLayer: population
:ivar configFilePath: path + filename to the config file of internal model parameters, initial value: configFilePath or internal path to default config file
:vartype configFilePath: str
:ivar initCA3W: list of initial weight to use in CA3 synapse (initial memory content); format of each element of the list: (source_neuron_id, destination_neuron_id, initial_weight, delay), initial value: None or input class parameter
:vartype initCA3W: list
:ivar popNeurons: dict that contains the number of neuron of each population, at the input interface level - {"ILayer": ilInputSize, "DGLayer": dgInputSize, "CA3cueLayer": self.cueSize, "CA3contLayer": self.contSize, "CA1Layer": self.cueSize, "OLayer": ilInputSize}
:vartype popNeurons: dict
:ivar neuronParameters: all neuron parameters of each population (for more information see `Custom config files`_)
:vartype neuronParameters: dict
:ivar initNeuronParameters: init membrane potential of each population (for more information see `Custom config files`_)
:vartype initNeuronParameters: dict
:ivar synParameters: all synapses parameters of each synapse group (for more information see `Custom config files`_)
:vartype synParameters: dict
:ivar IL_CA3contL_conn: IL-CA3cont synapses
:vartype IL_CA3contL_conn: synapse
:ivar CA3cueL_CA3contL_conn: CA3cue-CA3cont synapses (STDP)
:vartype CA3cueL_CA3contL_conn: synapse
:ivar CA3contL_OL_conn: CA3cont-OL synapses
:vartype CA3contL_OL_conn: synapse
"""
def __init__(self, cueSize, contSize, sim, ILayer, OLayer, initCA3W=None, configFilePath=None):
"""Constructor method
"""
# Storing parameters
self.cueSize = cueSize
self.contSize = contSize
self.sim = sim
self.ILayer = ILayer
self.OLayer = OLayer
        if configFilePath is None:
self.configFilePath = os.path.dirname(__file__) + "/config/hippocampus_with_forgetting_network_config.json"
else:
self.configFilePath = os.getcwd() + "/" + configFilePath
self.initCA3W = initCA3W
# Open configurations files to get the parameters
self.open_config_files()
# Create the network
self.create_population()
self.create_synapses()
def read_json(self):
"""Open json file
:raises: :class:`NameError`: path to config file not found
:returns: the json data as a dict
:rtype: dict
"""
try:
file = open(self.configFilePath)
return json.load(file)
except FileNotFoundError:
raise NameError(str(self.configFilePath) + " - path to config file not found")
def open_config_files(self):
"""Open configuration json file with all the internal parameters needed by the network and assign parameters to variables
:returns:
"""
# + Calculated memory parameters
# Input size of DG population (decoder)
dgInputSize = math.ceil(math.log2(self.cueSize+1))
# Size of IN population
ilInputSize = dgInputSize + self.contSize
# Number of neurons for each population
self.popNeurons = {"ILayer": ilInputSize, "DGLayer": dgInputSize, "CA3cueLayer": self.cueSize, "CA3contLayer": self.contSize,
"CA1Layer": self.cueSize, "OLayer": ilInputSize}
# + Network components parameters
network_config = self.read_json()
        # Neuron parameters
self.neuronParameters = network_config["neuronParameters"]
# Initial neuron parameters
self.initNeuronParameters = network_config["initNeuronParameters"]
# Synapses parameters
self.synParameters = network_config["synParameters"]
def create_population(self):
"""Create all populations of the memory model
:returns:
"""
# CA3cue
self.CA3cueLayer = self.sim.Population(self.popNeurons["CA3cueLayer"], self.sim.IF_curr_exp(**self.neuronParameters["CA3cueL"]),
label="CA3cueLayer")
self.CA3cueLayer.set(v=self.initNeuronParameters["CA3cueL"]["vInit"])
# CA3cont
self.CA3contLayer = self.sim.Population(self.popNeurons["CA3contLayer"], self.sim.IF_curr_exp(**self.neuronParameters["CA3contL"]),
label="CA3contLayer")
self.CA3contLayer.set(v=self.initNeuronParameters["CA3contL"]["vInit"])
# DG (decoder)
self.DGLayer = NeuralDecoder(self.popNeurons["DGLayer"], self.sim, {"min_delay": self.synParameters["IL-DGL"]["delay"]},
self.neuronParameters["DGL"], self.sim.StaticSynapse(weight=self.synParameters["IL-DGL"]["initWeight"],
delay=self.synParameters["IL-DGL"]["delay"]))
# Necessary for the Decoder
self.constant_spike_source = ConstantSpikeSource(self.sim, {"min_delay": self.synParameters["IL-DGL"]["delay"]},
self.neuronParameters["DGL"],
self.sim.StaticSynapse(weight=self.synParameters["IL-DGL"]["initWeight"],
delay=self.synParameters["IL-DGL"]["delay"]))
# CA1 (encoder)
self.CA1Layer = NeuralEncoder(2 ** self.popNeurons["DGLayer"], self.sim, {"min_delay": self.synParameters["CA3cueL-CA1L"]["delay"]},
self.neuronParameters["CA1L"],
self.sim.StaticSynapse(weight=self.synParameters["CA3cueL-CA1L"]["initWeight"],
delay=self.synParameters["CA3cueL-CA1L"]["delay"]))
def create_synapses(self):
"""Create all synapses of the memory model
:returns:
"""
# IL-DG -> 1 to 1, excitatory and static (first dgInputSize bits/neurons)
self.DGLayer.connect_inputs(self.sim.PopulationView(self.ILayer, range(self.popNeurons["DGLayer"])),
ini_pop_indexes=[[i] for i in range(self.popNeurons["DGLayer"])])
# DG-CA3cueL -> 1 to 1, excitatory and static
self.DGLayer.connect_outputs(self.CA3cueLayer, end_pop_indexes=[[i] for i in range(self.cueSize)],
and_indexes=range(1, self.cueSize + 1),
conn=self.sim.StaticSynapse(weight=self.synParameters["DGL-CA3cueL"]["initWeight"],
delay=self.synParameters["DGL-CA3cueL"]["delay"]))
self.DGLayer.connect_constant_spikes([self.constant_spike_source.set_source, self.constant_spike_source.latch.output_neuron])
# IL-CA3cont -> 1 to 1, excitatory and static (last m neurons of DG: only the number of cues to use)
self.IL_CA3contL_conn = self.sim.Projection(self.sim.PopulationView(self.ILayer, range(self.popNeurons["DGLayer"], self.popNeurons["ILayer"], 1)),
self.CA3contLayer,
self.sim.OneToOneConnector(),
synapse_type=self.sim.StaticSynapse(
weight=self.synParameters["IL-CA3contL"]["initWeight"],
delay=self.synParameters["IL-CA3contL"]["delay"]),
receptor_type=self.synParameters["IL-CA3contL"]["receptor_type"])
# CA3cue-CA3cont -> all to all STDP
# + Time rule
timing_rule = self.sim.SpikePairRule(tau_plus=self.synParameters["CA3cueL-CA3contL"]["tau_plus"],
tau_minus=self.synParameters["CA3cueL-CA3contL"]["tau_minus"],
A_plus=self.synParameters["CA3cueL-CA3contL"]["A_plus"],
A_minus=self.synParameters["CA3cueL-CA3contL"]["A_minus"])
# + Weight rule
weight_rule = self.sim.AdditiveWeightDependence(w_max=self.synParameters["CA3cueL-CA3contL"]["w_max"],
w_min=self.synParameters["CA3cueL-CA3contL"]["w_min"])
# + STDP model
stdp_model = self.sim.STDPMechanism(timing_dependence=timing_rule, weight_dependence=weight_rule,
weight=self.synParameters["CA3cueL-CA3contL"]["initWeight"],
delay=self.synParameters["CA3cueL-CA3contL"]["delay"])
# + Create the STDP synapses
        if self.initCA3W is None:
self.CA3cueL_CA3contL_conn = self.sim.Projection(self.CA3cueLayer, self.CA3contLayer,
self.sim.AllToAllConnector(allow_self_connections=True),
synapse_type=stdp_model)
else:
self.CA3cueL_CA3contL_conn = self.sim.Projection(self.CA3cueLayer, self.CA3contLayer,
self.sim.FromListConnector(self.initCA3W),
synapse_type=stdp_model)
# CA3cue-CA1 -> 1 to 1 excitatory and static
pop_len = len(self.CA3cueLayer)
input_indexes = range(pop_len)
channel_indexes = range(1, self.CA3cueLayer.size + 1)
if len(input_indexes) != len(channel_indexes):
raise ValueError("There is not the same number of elements in input_indexes and channel_indexes")
for i in range(pop_len):
i_bin = format(channel_indexes[i], "0" + str(self.CA1Layer.n_outputs) + 'b')
i_bin_splitted = [j for j in reversed(i_bin)]
connections = [k for k in range(0, len(i_bin_splitted)) if i_bin_splitted[k] == '1']
self.CA1Layer.connect_inputs(self.CA3cueLayer, ini_pop_indexes=[input_indexes[i]], or_indexes=connections)
# CA1-Output -> 1 to 1 excitatory and static
self.CA1Layer.connect_outputs(self.sim.PopulationView(self.OLayer, range(self.popNeurons["DGLayer"])),
end_pop_indexes=[[i] for i in range(self.popNeurons["DGLayer"])],
conn=self.sim.StaticSynapse(weight=self.synParameters["CA1L-OL"]["initWeight"],
delay=self.synParameters["CA1L-OL"]["delay"]))
# CA3cont-Output -> 1 to 1 excitatory and static
self.CA3contL_OL_conn = self.sim.Projection(self.CA3contLayer, self.sim.PopulationView(self.OLayer, range(self.popNeurons["DGLayer"], self.popNeurons["OLayer"], 1)),
self.sim.OneToOneConnector(),
synapse_type=self.sim.StaticSynapse(
weight=self.synParameters["CA3contL-OL"]["initWeight"],
delay=self.synParameters["CA3contL-OL"]["delay"]),
receptor_type=self.synParameters["CA3contL-OL"]["receptor_type"]) | /hippocampus_with_forgetting/hippocampus_with_forgetting.py | 0.614047 | 0.582135 | hippocampus_with_forgetting.py | pypi |
from contextlib import contextmanager
try:
# this fails in <=2020 versions of Python on OS X 11.x
import OpenGL.GL # noqa: F401 # pylint: disable=unused-import
except ImportError:
# Hack for macOS Big Sur
from ._bigsurhack import patch_ctypes
patch_ctypes()
import OpenGL.GL as GL
# pylint: disable=invalid-name
blend = GL.GL_BLEND
color_buffer_bit = GL.GL_COLOR_BUFFER_BIT
depth_buffer_bit = GL.GL_DEPTH_BUFFER_BIT
line_smooth = GL.GL_LINE_SMOOTH
lines = GL.GL_LINES
model_view = GL.GL_MODELVIEW
one_minus_src_alpha = GL.GL_ONE_MINUS_SRC_ALPHA
points = GL.GL_POINTS
projection = GL.GL_PROJECTION
smooth = GL.GL_SMOOTH
src_alpha = GL.GL_SRC_ALPHA
depth_test = GL.GL_DEPTH_TEST
rgb = GL.GL_RGB
unsigned_byte = GL.GL_UNSIGNED_BYTE
# pylint: enable=invalid-name
def blend_function(sfactor, dfactor):
""" Set the blending function. """
GL.glBlendFunc(sfactor, dfactor)
def clear(mask):
""" Clear the drawing surface. """
GL.glClear(mask)
def clear_color(red, green, blue, alpha=1.0):
""" Clear the surface to the given colour. """
GL.glClearColor(float(red), float(green), float(blue), float(alpha))
def color(*args):
""" Set the drawing colour. """
GL.glColor(*args)
def disable(*args):
""" Disable the listed features. """
for feature in args:
GL.glDisable(feature)
def enable(*args):
""" Enable the listed features. """
for feature in args:
GL.glEnable(feature)
def line_width(width):
""" Set the line width. """
GL.glLineWidth(float(width))
def load_identity():
""" Load the identity matrix. """
GL.glLoadIdentity()
def matrix_mode(mode):
""" Set the matrix mode. """
GL.glMatrixMode(mode)
def orthographic_projction(*args):
""" Set an orthographic (non-perspective) projection. """
GL.glOrtho(*args)
def point_size(size):
""" Set the size of points. """
GL.glPointSize(float(size))
def raster_position(*args):
""" Set the raster position. """
GL.glRasterPos(*args)
def rotate(angle, x, y, z):
""" Rotate the projection about a point. """
GL.glRotatef(angle, x, y, z)
def scale(x, y, z):
""" Scale the projection about the origin. """
GL.glScale(x, y, z)
def shade_model(mode):
""" Set the shading model. """
GL.glShadeModel(mode)
def translate(x, y, z):
""" Translate the projection. """
GL.glTranslate(x, y, z)
def vertex(*args):
""" Mark a vertex of a drawing path. """
GL.glVertex(*args)
def viewport(x, y, width, height):
""" Set up the view port. """
GL.glViewport(int(x), int(y), int(width), int(height))
def draw_pixels(*args):
    """ Draw a block of pixels to the frame buffer at the raster position. """
    GL.glDrawPixels(*args)
@contextmanager
def draw(drawing_style):
""" Draw a line, set of points or closed curve (depending on\
drawing_style). Use as a context manager and specify the vertices of\
the path in the body of the context.
"""
GL.glBegin(drawing_style)
yield
GL.glEnd()
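
# Usage sketch (illustrative; assumes an active GL context):
#
#   color(1.0, 0.0, 0.0)
#   with draw(lines):
#       vertex(0, 0)
#       vertex(100, 100)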
@contextmanager
def save_matrix():
""" Manipulate the view matrix in a temporary context; the view matrix is\
restored once this context is left.
"""
GL.glPushMatrix()
yield
    GL.glPopMatrix()
| /sPyNNaker_visualisers-1!7.0.0a4-py3-none-any.whl/spynnaker_visualisers/opengl_support.py | pypi |
from datetime import datetime
import os
import traceback
import OpenGL.error
from spinn_utilities.abstract_base import AbstractBase, abstractmethod
from spynnaker_visualisers.opengl_support import (
viewport, save_matrix, enable, blend, line_smooth, disable, line_width,
blend_function, src_alpha, one_minus_src_alpha, rotate, scale, translate,
raster_position)
try:
# this fails in <=2020 versions of Python on OS X 11.x
import OpenGL.GLUT # noqa: F401
except ImportError:
# Hack for macOS Big Sur
from ._bigsurhack import patch_ctypes
patch_ctypes()
import OpenGL.GLUT as GLUT
keyUp = GLUT.GLUT_KEY_UP
keyDown = GLUT.GLUT_KEY_DOWN
keyLeft = GLUT.GLUT_KEY_LEFT
keyRight = GLUT.GLUT_KEY_RIGHT
displayModeDouble = GLUT.GLUT_DOUBLE
class _PerformanceTimer(object):
__slots__ = [
"_stamp_1", "_stamp_2", "_stopped"]
@staticmethod
def _now():
return datetime.now()
def __init__(self):
self._stopped = True
self._stamp_1 = 0
self._stamp_2 = 0
def start(self):
""" Start the timer. """
self._stopped = False
self._stamp_1 = _PerformanceTimer._now()
def stop(self):
""" Stop the timer. """
self._stamp_2 = _PerformanceTimer._now()
self._stopped = True
@property
def stopped(self):
""" Is the timer stopped? """
return self._stopped
@property
def elapsed_milliseconds(self):
""" How long elapsed in the last timing run? In milliseconds.
        .. note::
Only valid when the timer has previously been run and is currently\
stopped.
"""
delta = self._stamp_2 - self._stamp_1
return float(delta.seconds) * 1000 + float(delta.microseconds) / 1000
@property
def elapsed_seconds(self):
""" How long elapsed in the last timing run? In seconds.
        .. note::
Only valid when the timer has previously been run and is currently\
stopped.
"""
delta = self._stamp_2 - self._stamp_1
return float(delta.seconds) + float(delta.microseconds) / 1000000
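
# Usage sketch (illustrative): timing a block of work with the helper above.
#
#   timer = _PerformanceTimer()
#   timer.start()
#   render_one_frame()   # hypothetical workload
#   timer.stop()
#   print(timer.elapsed_milliseconds, "ms")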
class GlutFramework(object, metaclass=AbstractBase):
''' Base for code that wants to visualise using an OpenGL surface.
'''
# pylint: disable=broad-except
__slots__ = [
"display_timer",
"elapsed_time_in_seconds",
"frame_rate_timer",
"frame_time",
"frame_time_elapsed",
"_logged_errors",
"window"]
def __init__(self):
self.window = None
self.frame_time_elapsed = 0.0
self.frame_time = 0.0
self.frame_rate_timer = _PerformanceTimer()
self.display_timer = _PerformanceTimer()
self.elapsed_time_in_seconds = 0.0
self._logged_errors = set()
# pylint: disable=unsupported-binary-operation
def start_framework(self, args, title, width, height, posx, posy, fps, *,
display_mode=GLUT.GLUT_RGB | GLUT.GLUT_DOUBLE):
""" start_framework will initialize framework and start the GLUT run\
loop. It must be called after the GlutFramework class is created\
to start the application.
Not expected to return.
"""
# Compute the frame period in milliseconds from the requested FPS
self.frame_time = 1.0 / fps * 1000.0
# Initialize GLUT
GLUT.glutInit(args)
GLUT.glutInitDisplayMode(display_mode)
GLUT.glutInitWindowSize(width, height)
GLUT.glutInitWindowPosition(posx, posy)
self.window = GLUT.glutCreateWindow(title)
try:
GLUT.glutSetOption(GLUT.GLUT_ACTION_ON_WINDOW_CLOSE,
GLUT.GLUT_ACTION_CONTINUE_EXECUTION)
except OpenGL.error.NullFunctionError:
pass
self.init() # Initialize
# Function callbacks with wrapper functions
GLUT.glutDisplayFunc(self.__display_framework)
GLUT.glutReshapeFunc(self.__reshape_framework)
GLUT.glutIdleFunc(self.__run)
GLUT.glutMouseFunc(self.__mouse_button_press)
GLUT.glutMotionFunc(self.__mouse_move)
GLUT.glutKeyboardFunc(self.__keyboard_down)
GLUT.glutKeyboardUpFunc(self.__keyboard_up)
GLUT.glutSpecialFunc(self.__special_keyboard_down)
GLUT.glutSpecialUpFunc(self.__special_keyboard_up)
try:
GLUT.glutCloseFunc(self._terminate)
except OpenGL.error.NullFunctionError:
GLUT.glutWMCloseFunc(self._terminate)
GLUT.glutMainLoop()
def init(self):
""" Initialises GLUT and registers any extra callback functions.
"""
@abstractmethod
def display(self, dTime):
""" The display function is called at a specified frames-per-second\
(FPS). Any animation drawing code can be run in the display method.
:param dTime: the change in time (seconds)
"""
def reshape(self, width, height):
""" Called when the window dimensions change.
:param width: the width of the window in pixels
:param height: the height of the window in pixels
"""
viewport(0, 0, width, height)
def mouse_button_press(self, button, state, x, y):
""" Called when the mouse buttons are pressed.
:param button: the mouse buttons
:param state: the state of the buttons
:param x: the x coordinate
:param y: the y coordinate
"""
def mouse_move(self, x, y):
""" Called when the mouse moves on the screen.
:param x: the x coordinate
:param y: the y coordinate
"""
def keyboard_down(self, key, x, y):
""" The keyboard function is called when a standard key is pressed\
down.
:param key: the key press
:param x: the x coordinate of the mouse
:param y: the y coordinate of the mouse
"""
def keyboard_up(self, key, x, y):
""" The keyboard function is called when a standard key is "unpressed".
:param key: the key press
:param x: the x coordinate of the mouse
:param y: the y coordinate of the mouse
"""
def special_keyboard_down(self, key, x, y):
""" The keyboard function is called when a special key is pressed down\
(function keys, Home, Insert, Delete, Page Up/Down, End, arrow keys).\
https://www.opengl.org/resources/libraries/glut/spec3/node54.html
:param key: the key press
:param x: the x coordinate of the mouse
:param y: the y coordinate of the mouse
"""
def special_keyboard_up(self, key, x, y):
""" The keyboard function is called when a special key is "unpressed"\
(function keys, Home, Insert, Delete, Page Up/Down, End, arrow keys).
:param key: the key press
:param x: the x coordinate of the mouse
:param y: the y coordinate of the mouse
"""
def run(self):
""" The run method is called by GLUT and contains the logic to set the\
frame rate of the application.
"""
if self.frame_rate_timer.stopped:
self.frame_rate_timer.start()
# stop the timer and calculate time since last frame
self.frame_rate_timer.stop()
milliseconds = self.frame_rate_timer.elapsed_milliseconds
self.frame_time_elapsed += milliseconds
if self.frame_time_elapsed >= self.frame_time:
# If the time exceeds a certain "frame rate" then show the next
# frame
GLUT.glutPostRedisplay()
# remove a "frame" and start counting up again
self.frame_time_elapsed -= self.frame_time
self.frame_rate_timer.start()
def display_framework(self):
""" The display_framework() function sets up initial GLUT state and\
calculates the change in time between each frame. It calls the\
display(float) function, which can be subclassed.
"""
if self.display_timer.stopped:
self.display_timer.start()
self.display_timer.stop()
elapsedTimeInSeconds = self.display_timer.elapsed_seconds
if GLUT.glutGetWindow() == self.window:
self.display(elapsedTimeInSeconds)
GLUT.glutSwapBuffers()
self.display_timer.start()
def reshape_framework(self, width, height):
""" Handle resizing of the window.
"""
if GLUT.glutGetWindow() == self.window:
self.reshape(width, height)
@staticmethod
def write_large(x, y, string, *args):
""" Utility function: write a string to a given location as a bitmap.
"""
# pylint: disable=no-member
if args:
string = string % args
raster_position(x, y)
for ch in string:
GLUT.glutBitmapCharacter(GLUT.GLUT_BITMAP_TIMES_ROMAN_24, ord(ch))
@staticmethod
def write_small(x, y, size, rotation, string, *args):
""" Utility function: write a string to a given location as a strokes.
"""
# pylint: disable=no-member
if args:
string = string % args
with save_matrix():
# antialias the font
enable(blend, line_smooth)
blend_function(src_alpha, one_minus_src_alpha)
line_width(1.5)
translate(x, y, 0.0)
scale(size, size, size)
rotate(rotation, 0.0, 0.0, 1.0)
for ch in string:
GLUT.glutStrokeCharacter(GLUT.GLUT_STROKE_ROMAN, ord(ch))
disable(blend, line_smooth)
@staticmethod
def _terminate(exit_code=0):
"""
Because sys.exit() doesn't always work in the ctype-handled callbacks.
"""
os._exit(exit_code) # pylint: disable=protected-access
def __display_framework(self):
if not GLUT.glutGetWindow():
return
try:
return self.display_framework()
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __reshape_framework(self, width, height):
if not GLUT.glutGetWindow():
return
try:
return self.reshape_framework(width, height)
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __run(self):
if not GLUT.glutGetWindow():
return
try:
return self.run()
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __mouse_button_press(self, button, state, x, y):
if not GLUT.glutGetWindow():
return
try:
return self.mouse_button_press(button, state, x, y)
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __mouse_move(self, x, y):
if not GLUT.glutGetWindow():
return
try:
return self.mouse_move(x, y)
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __keyboard_down(self, key, x, y):
if not GLUT.glutGetWindow():
return
try:
return self.keyboard_down(key.decode(), x, y)
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __keyboard_up(self, key, x, y):
if not GLUT.glutGetWindow():
return
try:
return self.keyboard_up(key.decode(), x, y)
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __special_keyboard_down(self, key, x, y):
if not GLUT.glutGetWindow():
return
try:
return self.special_keyboard_down(key, x, y)
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __special_keyboard_up(self, key, x, y):
if not GLUT.glutGetWindow():
return
try:
return self.special_keyboard_up(key, x, y)
except Exception:
self.__log_error()
except SystemExit:
self._terminate()
def __log_error(self):
tb = traceback.format_exc()
if tb not in self._logged_errors:
self._logged_errors.add(tb)
traceback.print_exc() | /sPyNNaker_visualisers-1!7.0.0a4-py3-none-any.whl/spynnaker_visualisers/glut_framework.py | 0.812607 | 0.273791 | glut_framework.py | pypi |
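# Hedged sketch (added; not in the original sources): the smallest usable
# GlutFramework subclass. Only display() is abstract; every other callback
# has a default. The window geometry and FPS below are arbitrary examples.
#
#     from spynnaker_visualisers.glut_framework import GlutFramework
#
#     class ExampleCanvas(GlutFramework):
#         def display(self, dTime):
#             pass  # per-frame drawing calls go here
#
#     ExampleCanvas().start_framework([], "Example", 640, 480, 0, 0, 30)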
# encoding: utf-8
""" A live plotter for the sPyNNaker Sudoku network.
"""
from argparse import ArgumentParser, REMAINDER
import sys
from threading import Condition, RLock
from spinn_utilities.overrides import overrides
from spinn_front_end_common.utilities.connections import LiveEventConnection
from spynnaker_visualisers.glut_framework import GlutFramework
from spynnaker_visualisers.opengl_support import (
vertex, draw, lines, color, point_size, points, line_width, clear_color,
clear, color_buffer_bit, load_identity, viewport, matrix_mode, projection,
model_view, orthographic_projction, shade_model, smooth)
__all__ = []
__version__ = 1
__date__ = '2017-07-25'
WINDOW_BORDER = 110
INIT_WINDOW_WIDTH = 800
INIT_WINDOW_HEIGHT = 600
INIT_WINDOW_X = 100
INIT_WINDOW_Y = 100
FRAMES_PER_SECOND = 10
class SudokuPlot(GlutFramework):
""" A live plotter for the sPyNNaker Sudoku network.
"""
__slots__ = [
"args",
"cell_id",
"cell_labels",
"cell_size_map",
"database_read",
"label_to_cell_map",
"latest_time",
"ms_per_bin",
"n_neurons",
"n_populations_to_read",
"neurons_per_number",
"plot_time_ms",
"point_mutex",
"points_to_draw",
"simulation_started",
"start_condition",
"timestep_ms",
"user_pressed_start",
"window_height",
"window_width"]
def __init__(self, args, neurons_per_number, ms_per_bin, wait_for_start):
"""
:param args:
Arguments (relating to the display) to pass through to GLUT
:param neurons_per_number:
How many neurons are used per number in the Sudoku cells
:param ms_per_bin:
How long does a sampling period last
:param wait_for_start:
Whether the system should wait for the SpiNNaker simulation to\
boot (probably yes!)
"""
super(SudokuPlot, self).__init__()
self.window_width = INIT_WINDOW_WIDTH
self.window_height = INIT_WINDOW_HEIGHT
self.cell_id = 0
self.user_pressed_start = not wait_for_start
self.simulation_started = False
self.database_read = False
self.n_neurons = 0
self.timestep_ms = 0
self.plot_time_ms = 0
self.ms_per_bin = float(ms_per_bin)
self.latest_time = 0.0
self.neurons_per_number = neurons_per_number
self.n_populations_to_read = 1
self.args = args
self.points_to_draw = [[] for _ in range(81)]
self.point_mutex = RLock()
self.label_to_cell_map = dict()
self.cell_size_map = dict()
self.cell_labels = dict()
self.start_condition = Condition()
@overrides(GlutFramework.init)
def init(self):
clear_color(0.0, 0.0, 0.0, 1.0)
color(1.0, 1.0, 1.0)
shade_model(smooth)
def connect_callbacks(self, connection, label):
""" Arrange so that labels on the given connection report their\
goings-on to this class.
:type connection: LiveEventConnection
:type label: str
"""
connection.add_init_callback(label, self._init_cb)
connection.add_receive_callback(label, self._receive_cb)
connection.add_start_resume_callback(label, self._start_cb)
def _init_cb(self, label, n_neurons, run_time_ms, machine_time_step_ms):
self.plot_time_ms = float(run_time_ms)
self.timestep_ms = float(machine_time_step_ms)
self.cell_labels[self.cell_id] = label
self.cell_size_map[self.cell_id] = n_neurons
self.label_to_cell_map[label] = self.cell_id
self.n_neurons += n_neurons
self.cell_id += 1
with self.start_condition:
self.n_populations_to_read -= 1
if self.n_populations_to_read <= 0:
self.database_read = True
while not self.user_pressed_start:
self.start_condition.wait()
def _start_cb(self, *args): # @UnusedVariable
with self.start_condition:
self.simulation_started = True
def _receive_cb(self, label, time, spikes=None): # @UnusedVariable
if spikes is None:
spikes = []
with self.point_mutex:
for spike in spikes:
cell_id, neuron_id = divmod(
spike, self.neurons_per_number * 9)
self.points_to_draw[cell_id].append((time, neuron_id))
time_ms = time * self.timestep_ms
if time_ms > self.latest_time:
self.latest_time = time_ms
def main_loop(self):
""" Run the GUI.
"""
self.start_framework(
self.args, "Sudoku", self.window_width, self.window_height,
INIT_WINDOW_X, INIT_WINDOW_Y, FRAMES_PER_SECOND)
@overrides(GlutFramework.display)
def display(self, dTime): # @UnusedVariable
self._start_display()
cell_width = (self.window_width - 2 * WINDOW_BORDER) / 9.0
cell_height = (self.window_height - 2 * WINDOW_BORDER) / 9.0
end = self.latest_time
start = end - self.ms_per_bin
if start < 0.0:
start = 0.0
end = start + self.ms_per_bin
with self.start_condition:
if not self.database_read:
prompt = "Waiting for simulation to load..."
elif not self.user_pressed_start:
prompt = "Press space bar to start..."
elif not self.simulation_started:
prompt = "Waiting for simulation to start..."
else:
prompt = "Sudoku"
self._print_text(prompt)
self._draw_cells(cell_width, cell_height)
if self.timestep_ms != 0:
x_spacing = cell_width / ((end - start) / self.timestep_ms)
start_tick = int(start / self.timestep_ms)
with self.point_mutex:
values, probs = self._find_cell_values(start_tick)
valid = self._find_cell_correctness(values)
self._draw_cell_contents(values, valid, probs, start_tick,
x_spacing, cell_width, cell_height)
@overrides(GlutFramework.reshape)
def reshape(self, width, height):
self.window_width = width
self.window_height = height
# Viewport dimensions
viewport(0, 0, width, height)
matrix_mode(projection)
load_identity()
# An orthographic projection. Should probably look into OpenGL
# perspective projections for 3D if that's your thing
orthographic_projction(0.0, width, 0.0, height, -50.0, 50.0)
matrix_mode(model_view)
load_identity()
@overrides(GlutFramework.keyboard_down)
def keyboard_down(self, key, x, y): # @UnusedVariable
if key == ' ':  # key arrives as a decoded one-character string
with self.start_condition:
if not self.user_pressed_start:
print("Starting the simulation")
self.user_pressed_start = True
self.start_condition.notify_all()
def _find_cell_values(self, start_tick):
cell_value = [0] * 81
cell_prob = [0.0] * 81
for cell in range(81):
# Strip off items that are no longer needed
queue = self.points_to_draw[cell]
while queue and queue[0][0] < start_tick:
queue.pop(0)
# Count the spikes per number
count, total = self._count_spikes_per_number(queue)
# Work out the probability of a given number in a given cell
max_prob_number = 0
max_prob = 0.0
for i in range(9):
if count[i] > 0:
prob = count[i] / total
if prob > max_prob:
max_prob = prob
max_prob_number = i + 1
cell_value[cell] = max_prob_number
cell_prob[cell] = max_prob
return cell_value, cell_prob
def _count_spikes_per_number(self, queue):
count = [0] * 9
total = 0
for (_, n_id) in queue:
number = n_id // self.neurons_per_number
if number < 9:
count[number] += 1
total += 1
else:
sys.stderr.write(f"Neuron id {n_id} out of range\n")
return count, float(total)
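# Worked example (comments added for clarity): with neurons_per_number = 5,
# spikes from neuron ids 0..4 all map to number index 0 (id // 5 == 0), so
# count == [5, 0, ..., 0] and total == 5.0; _find_cell_values then reports
# value 1 for that cell with probability 5 / 5.0 == 1.0.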
def _find_cell_correctness(self, values):
# Work out the correctness of each cell
cell_valid = [True] * 81
for cell in range(81):
y, x = divmod(cell, 9)
for row in range(9):
if row != y:
self._check_cell(values, cell_valid, x, y, row, x)
for col in range(9):
if col != x:
self._check_cell(values, cell_valid, x, y, y, col)
for row in range(3 * (y // 3), 3 * (y // 3 + 1)):
for col in range(3 * (x // 3), 3 * (x // 3 + 1)):
if x != col and y != row:
self._check_cell(values, cell_valid, x, y, row, col)
return cell_valid
@staticmethod
def _start_display():
point_size(1.0)
clear(color_buffer_bit)
clear_color(1.0, 1.0, 1.0, 1.0)
color(0.0, 0.0, 0.0, 1.0)
# TODO positioning
# https://github.com/SpiNNakerManchester/sPyNNakerVisualisers/issues/23
def _print_text(self, prompt):
# Guesstimate of length of prompt in pixels
plen = len(prompt) * 4
self.write_large(
self.window_width / 2 - plen, self.window_height - 50, prompt)
def _draw_cells(self, width, height):
color(0.0, 0.0, 0.0, 1.0)
for i in range(10):
line_width(3.0 if i % 3 == 0 else 1.0)
pos = WINDOW_BORDER + i * height
self._line(self.window_width - WINDOW_BORDER, pos,
WINDOW_BORDER, pos)
pos = WINDOW_BORDER + i * width
self._line(pos, self.window_height - WINDOW_BORDER,
pos, WINDOW_BORDER)
def _draw_cell_contents(self, value, valid, prob, start, x_spacing,
cell_width, cell_height):
# Print the spikes
for cell in range(81):
cell_y, cell_x = divmod(cell, 9)
x_start = WINDOW_BORDER + (cell_x * cell_width) + 1
y_start = WINDOW_BORDER + (cell_y * cell_height) + 1
y_spacing = cell_height / (self.neurons_per_number * 9.0)
# Work out how probable the number is and use this for colouring
cell_sat = 1 - prob[cell]
point_size(2.0)
with draw(points):
if valid[cell]:
color(cell_sat, 1.0, cell_sat, 1.0)
else:
color(1.0, cell_sat, cell_sat, 1.0)
for (time, n_id) in self.points_to_draw[cell]:
x_value = (time - start) * x_spacing + x_start
y_value = n_id * y_spacing + y_start
vertex(x_value, y_value)
# Print the number
if value[cell] != 0:
color(0, 0, 0, 1 - cell_sat)
size = 0.005 * cell_height
self.write_small(
x_start + (cell_width / 2.0) - (size * 50.0),
y_start + (cell_height / 2.0) - (size * 50.0),
size, 0, "%d", value[cell])
@staticmethod
def _line(x1, y1, x2, y2):
with draw(lines):
vertex(x1, y1)
vertex(x2, y2)
@staticmethod
def _check_cell(values, correct, x, y, row, col):
value = values[y * 9 + x]
if value == values[row * 9 + col]:
correct[y * 9 + x] = False
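# Worked example (added for clarity; not in the original file): the cell and
# 3x3-box indexing used by _find_cell_correctness above. Cell 40 is the
# centre of the grid.
def _example_cell_indexing():
    y, x = divmod(40, 9)                              # row 4, column 4
    box_rows = range(3 * (y // 3), 3 * (y // 3 + 1))  # rows 3..5
    box_cols = range(3 * (x // 3), 3 * (x // 3 + 1))  # columns 3..5
    return y, x, list(box_rows), list(box_cols)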
# https://github.com/SpiNNakerManchester/sPyNNakerVisualisers/issues/24
def sudoku_visualiser(args, port=19999, neurons=5, ms=100, database=None):
""" Make a visualiser, connecting a LiveEventConnection that listens to a\
population labelled "Cells" to a GLUT GUI.
"""
# Set up the application
cells = ["Cells"]
connection = LiveEventConnection(
"LiveSpikeReceiver", receive_labels=cells, local_port=port)
plotter = SudokuPlot(args, neurons, ms, database is None)
for label in cells:
plotter.connect_callbacks(connection, label)
if database is not None:
# TODO: This concept not present on Python side!
# connection.set_database(database)
sys.stderr.write("Database setting not currently supported")
plotter.main_loop()
def main(argv=None):
""" The main script.\
Parses command line arguments and launches the visualiser.
"""
program_name = "sudoku_visualiser"
program_version = "v%d" % (__version__)
program_description = "Visualise the SpiNNaker sudoku solver."
program_version_string = '%%prog %s (%s)' % (program_version, __date__)
# setup option parser
parser = ArgumentParser(prog=program_name,
description=program_description)
parser.add_argument(
"-d", "--database", dest="database", metavar="FILE",
help="optional file path to where the database is located, if "
"needed for manual configuration", default=None)
parser.add_argument(
"-m", "--ms_per_bin", dest="ms", metavar="MILLISECONDS",
help="optional number of milliseconds to show at once",
type=float, default=100)
parser.add_argument(
"-n", "--neurons_per_number", dest="neurons",
help="the number of neurons that represent each number in a cell",
metavar="COUNT", type=int, default=5)
parser.add_argument(
"-p", "--hand_shake_port", dest="port", default="19999",
help="optional port which the visualiser will listen to for"
" database hand shaking", metavar="PORT", type=int)
parser.add_argument('--version', action='version',
version=program_version_string)
parser.add_argument("args", nargs=REMAINDER)
# Set up and run the application
try:
if argv is None:
argv = sys.argv[1:]
sudoku_visualiser(**parser.parse_args(argv).__dict__)
return 0
except Exception as e: # pylint: disable=broad-except
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
return 2
if __name__ == "__main__":
sys.exit(main()) | /sPyNNaker_visualisers-1!7.0.0a4-py3-none-any.whl/spynnaker_visualisers/sudoku/sudoku_visualiser.py | 0.516108 | 0.233717 | sudoku_visualiser.py | pypi |
import socket
import struct
import sys
import threading
from numpy import dot, cross, array, zeros, cos, sin, uint8, uint32
from numpy.linalg import norm
import spynnaker_visualisers.opengl_support as gl
import spynnaker_visualisers.glut_framework as glut
class RaytraceDrawer(glut.GlutFramework):
__slots__ = (
"_moving", "_strafing", "_turn_down", "_turn_right", "_rolling",
"_height", "_width", "_win_height", "_win_width",
"_viewing_frame", "_received_frame", "_sockfd_input",
"_look", "_up", "_position")
moveAmount = 0.00003
turnAmount = 0.0000003
# Fields of view
VERT_FOV = 50.0
HORIZ_FOV = 60.0
INPUT_PORT_SPINNAKER = 17894
SDP_HEADER = struct.Struct("<HBBBBHHHHIII")
PIXEL_FORMAT = struct.Struct(">HHBBB")
RECV_BUFFER_SIZE = 1500  # Ethernet MTU; SpiNNaker doesn't use jumbo frames
def __init__(self, size=256):
super().__init__()
self._moving = 0
self._strafing = 0
# Turn left is negative
self._turn_right = 0
# Turn up is negative
self._turn_down = 0
self._rolling = 0
self._position = array([-220.0, 50.0, 0.0])
self._look = array([1.0, 0.0, 0.0])
self._up = array([0.0, 1.0, 0.0])
self._height = size
self._width = int(self.HORIZ_FOV * self._height / self.VERT_FOV)
self._win_height = self._height
self._win_width = self._width
self._viewing_frame = zeros(
self._width * self._height * 3, dtype=uint8)
self._received_frame = zeros(
self._width * self._height, dtype=uint32)
self._sockfd_input = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sockfd_input.bind(('0.0.0.0', self.INPUT_PORT_SPINNAKER))
def start(self, args):
threading.Thread(target=self._input_thread, daemon=True).start()
self.start_framework(
args, "Path Tracer", self._width, self._height, 0, 0, 10,
display_mode=glut.displayModeDouble)
def init(self):
gl.enable(gl.blend, gl.depth_test)
gl.blend_function(gl.src_alpha, gl.one_minus_src_alpha)
def display(self, dTime):
gl.clear_color(1.0, 1.0, 1.0, 0.001)
# pylint: disable=unsupported-binary-operation
gl.clear(gl.color_buffer_bit | gl.depth_buffer_bit)
gl.draw_pixels(
self._win_width, self._win_height, gl.rgb, gl.unsigned_byte,
self._viewing_frame.data)
def reshape(self, width, height):
self._win_width = min(width, self._width)
self._win_height = min(height, self._height)
gl.viewport(0, 0, width, height)
gl.load_identity()
def special_keyboard_down(self, key, x, y): # @UnusedVariable
if key == glut.keyUp:
self._turn_down = -1
elif key == glut.keyDown:
self._turn_down = 1
elif key == glut.keyRight:
self._rolling = -1
elif key == glut.keyLeft:
self._rolling = 1
def special_keyboard_up(self, key, x, y): # @UnusedVariable
if key == glut.keyUp or key == glut.keyDown:
self._turn_down = 0
elif key == glut.keyLeft or key == glut.keyRight:
self._rolling = 0
def keyboard_down(self, key, x, y): # @UnusedVariable
if key == 'w':
self._moving = 1
elif key == 's':
self._moving = -1
elif key == 'a':
self._turn_right = -1
elif key == 'd':
self._turn_right = 1
elif key == 'q':
self._strafing = 1
elif key == 'e':
self._strafing = -1
elif key == '\x1b': # Escape
sys.exit()
def keyboard_up(self, key, x, y): # @UnusedVariable
if key == 'w' or key == 's':
self._moving = 0
elif key == 'a' or key == 'd':
self._turn_right = 0
elif key == 'q' or key == 'e':
self._strafing = 0
@staticmethod
def vector_rotate(rotated, axis, theta):
"""Rotate the first vector around the second"""
# https://gist.github.com/fasiha/6c331b158d4c40509bd180c5e64f7924
par = (dot(rotated, axis) / dot(axis, axis) * axis)
perp = rotated - par
w = cross(axis, perp)
w = w / norm(w)
result = par + perp * cos(theta) + norm(perp) * w * sin(theta)
return result / norm(result)
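# Worked check (comments added for clarity): rotating the x unit vector
# about the z axis by pi / 2 gives the y unit vector:
#     vector_rotate(array([1., 0., 0.]), array([0., 0., 1.]), pi / 2)
# returns approximately [0., 1., 0.]; the result is re-normalised.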
def calculate_movement(self, dt):
# Forward movement
if self._moving:
self._position += self._look * dt * self.moveAmount * self._moving
right = cross(self._up, self._look)
# Strafing movement
if self._strafing:
self._position += right * dt * self.moveAmount * self._strafing
# To turn left/right, rotate the look vector around the up vector
if self._turn_right:
self._look = self.vector_rotate(
self._look, self._up, dt * self.turnAmount * self._turn_right)
# To turn up/down, rotate the look vector and up vector about the right
# vector
if self._turn_down:
self._look = self.vector_rotate(
self._look, right, dt * self.turnAmount * self._turn_down)
self._up = self.vector_rotate(
self._up, right, dt * self.turnAmount * self._turn_down)
# To roll, rotate the up vector around the look vector
if self._rolling:
self._up = self.vector_rotate(
self._up, self._look, dt * self.turnAmount * self._rolling)
def run(self):
"""Calculate movement ten times a second"""
super().run()
self.calculate_movement(self.frame_time_elapsed * 1000)
def _input_thread(self):
print(
f"Drawer running (listening port: {self.INPUT_PORT_SPINNAKER})...")
while True:
msg = self._sockfd_input.recv(self.RECV_BUFFER_SIZE)
sdp_msg = self.SDP_HEADER.unpack_from(msg)
data = msg[self.SDP_HEADER.size:] # sdp_msg.data
if sdp_msg[7] == 3: # sdp_msg.command
for pixel_datum in self._pixelinfo(
data, sdp_msg[9]): # sdp_msg.arg1
self.process_one_pixel(*pixel_datum)
@classmethod
def _pixelinfo(cls, data, number_of_pixels):
for i in range(number_of_pixels):
yield cls.PIXEL_FORMAT.unpack_from(
data, i * cls.PIXEL_FORMAT.size)
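# Format sketch (comments added for clarity): SDP_HEADER unpacks the 26-byte
# little-endian SpiNNaker SDP header; field 7 is the command word tested in
# _input_thread and field 9 is arg1, the pixel count. PIXEL_FORMAT packs one
# pixel as big-endian x, y (uint16) then r, g, b (uint8), e.g.
#     PIXEL_FORMAT.pack(1, 2, 255, 0, 0) == b'\x00\x01\x00\x02\xff\x00\x00'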
def process_one_pixel(self, x, y, r, g, b):
index = (self._height - y - 1) * self._width + x
if index < self._width * self._height:
ix3 = index * 3
count = self._received_frame[index]
cp1 = count + 1
self._viewing_frame[ix3] = (
(r + count * self._viewing_frame[ix3]) // cp1)
self._viewing_frame[ix3 + 1] = (
(g + count * self._viewing_frame[ix3 + 1]) // cp1)
self._viewing_frame[ix3 + 2] = (
(b + count * self._viewing_frame[ix3 + 2]) // cp1)
self._received_frame[index] += 1
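# Worked example (added for clarity; not in the original file): each channel
# in process_one_pixel keeps an integer running mean. With count == 3 and a
# stored mean of 10, a new sample of 50 yields (50 + 3 * 10) // 4 == 20, the
# mean of four samples.
def _example_running_mean():
    count, stored, sample = 3, 10, 50
    return (sample + count * stored) // (count + 1)   # == 20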
def main(args):
drawer = RaytraceDrawer()
drawer.start(args)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv)) | /sPyNNaker_visualisers-1!7.0.0a4-py3-none-any.whl/spynnaker_visualisers/raytrace/drawer.py | 0.502686 | 0.170404 | drawer.py | pypi |
from pacman.model.constraints.partitioner_constraints.\
partitioner_same_size_as_vertex_constraint \
import PartitionerSameSizeAsVertexConstraint
from pacman.model.partitionable_graph.multi_cast_partitionable_edge \
import MultiCastPartitionableEdge
from spynnaker.pyNN.models.neural_projections.synapse_information \
import SynapseInformation
from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_static \
import SynapseDynamicsStatic
from spynnaker.pyNN.models.neuron.abstract_population_vertex \
import AbstractPopulationVertex
from spynnaker.pyNN.models.utility_models.delay_extension_vertex \
import DelayExtensionVertex
from spynnaker.pyNN.utilities import constants
from spynnaker.pyNN.models.neural_projections.projection_partitionable_edge \
import ProjectionPartitionableEdge
from spynnaker.pyNN.models.neural_projections\
.delay_afferent_partitionable_edge \
import DelayAfferentPartitionableEdge
from spynnaker.pyNN.models.neuron.connection_holder import ConnectionHolder
from spinn_front_end_common.abstract_models.abstract_changable_after_run \
import AbstractChangableAfterRun
from spinn_front_end_common.utilities import exceptions
from spinn_machine.utilities.progress_bar import ProgressBar
import logging
import math
logger = logging.getLogger(__name__)
EDGE_PARTITION_ID = "SPIKE"
# noinspection PyProtectedMember
class Projection(object):
""" A container for all the connections of a given type (same synapse type\
and plasticity mechanisms) between two populations, together with\
methods to set parameters of those connections, including of\
plasticity mechanisms.
"""
# noinspection PyUnusedLocal
def __init__(
self, presynaptic_population, postsynaptic_population, label,
connector, spinnaker_control, machine_time_step, user_max_delay,
timescale_factor, source=None, target='excitatory',
synapse_dynamics=None, rng=None):
self._spinnaker = spinnaker_control
self._projection_edge = None
self._host_based_synapse_list = None
self._has_retrieved_synaptic_list_from_machine = False
if not isinstance(postsynaptic_population._get_vertex,
AbstractPopulationVertex):
raise exceptions.ConfigurationException(
"postsynaptic population is not designed to receive"
" synaptic projections")
synapse_type = postsynaptic_population._get_vertex\
.synapse_type.get_synapse_id_by_target(target)
if synapse_type is None:
raise exceptions.ConfigurationException(
"Synapse target {} not found in {}".format(
target, postsynaptic_population.label))
synapse_dynamics_stdp = None
if synapse_dynamics is None:
synapse_dynamics_stdp = SynapseDynamicsStatic()
else:
synapse_dynamics_stdp = synapse_dynamics.slow
postsynaptic_population._get_vertex.synapse_dynamics = \
synapse_dynamics_stdp
# Set and store information for future processing
self._synapse_information = SynapseInformation(
connector, synapse_dynamics_stdp, synapse_type)
connector.set_projection_information(
presynaptic_population, postsynaptic_population, rng,
machine_time_step)
max_delay = synapse_dynamics_stdp.get_delay_maximum(connector)
if max_delay is None:
max_delay = user_max_delay
# check if all delays requested can fit into the natively supported
# delays in the models
delay_extension_max_supported_delay = (
constants.MAX_DELAY_BLOCKS *
constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK)
post_vertex_max_supported_delay_ms = \
postsynaptic_population._get_vertex.maximum_delay_supported_in_ms
if max_delay > (post_vertex_max_supported_delay_ms +
delay_extension_max_supported_delay):
raise exceptions.ConfigurationException(
"The maximum delay {} for projection is not supported".format(
max_delay))
if max_delay > (user_max_delay / (machine_time_step / 1000.0)):
logger.warn("The end user entered a max delay"
" for which the projection breaks")
# check that the projection edge's label is not None, and give an
# auto-generated label if it is
if label is None:
label = "projection edge {}".format(
spinnaker_control.none_labelled_edge_count)
spinnaker_control.increment_none_labelled_edge_count()
# Find out if there is an existing edge between the populations
edge_to_merge = self._find_existing_edge(
presynaptic_population._get_vertex,
postsynaptic_population._get_vertex)
if edge_to_merge is not None:
# If there is an existing edge, add the connector
edge_to_merge.add_synapse_information(self._synapse_information)
self._projection_edge = edge_to_merge
else:
# If there isn't an existing edge, create a new one
self._projection_edge = ProjectionPartitionableEdge(
presynaptic_population._get_vertex,
postsynaptic_population._get_vertex,
self._synapse_information, label=label)
# add edge to the graph
spinnaker_control.add_partitionable_edge(
self._projection_edge, EDGE_PARTITION_ID)
# If the delay exceeds the post vertex delay, add a delay extension
if max_delay > post_vertex_max_supported_delay_ms:
delay_edge = self._add_delay_extension(
presynaptic_population, postsynaptic_population, max_delay,
post_vertex_max_supported_delay_ms, machine_time_step,
timescale_factor)
self._projection_edge.delay_edge = delay_edge
spinnaker_control._add_projection(self)
# If there is a virtual board, we need to hold the data in case the
# user asks for it
self._virtual_connection_list = None
if spinnaker_control.use_virtual_board:
self._virtual_connection_list = list()
pre_vertex = presynaptic_population._get_vertex
post_vertex = postsynaptic_population._get_vertex
connection_holder = ConnectionHolder(
None, False, pre_vertex.n_atoms, post_vertex.n_atoms,
self._virtual_connection_list)
post_vertex.add_pre_run_connection_holder(
connection_holder, self._projection_edge,
self._synapse_information)
@property
def requires_mapping(self):
if (isinstance(self._projection_edge, AbstractChangableAfterRun) and
self._projection_edge.requires_mapping):
return True
return False
def mark_no_changes(self):
if isinstance(self._projection_edge, AbstractChangableAfterRun):
self._projection_edge.mark_no_changes()
def _find_existing_edge(self, presynaptic_vertex, postsynaptic_vertex):
""" Searches though the partitionable graph's edges to locate any\
edge which has the same post and pre vertex
:param presynaptic_vertex: the source partitionable vertex of the\
multapse
:type presynaptic_vertex: instance of\
pacman.model.partitionable_graph.abstract_partitionable_vertex
:param postsynaptic_vertex: The destination partitionable vertex of\
the multapse
:type postsynaptic_vertex: instance of\
pacman.model.partitionable_graph.abstract_partitionable_vertex
:return: None or the edge going to these vertices.
"""
graph_edges = self._spinnaker.partitionable_graph.edges
for edge in graph_edges:
if ((edge.pre_vertex == presynaptic_vertex) and
(edge.post_vertex == postsynaptic_vertex)):
return edge
return None
def _add_delay_extension(
self, presynaptic_population, postsynaptic_population,
max_delay_for_projection, max_delay_per_neuron, machine_time_step,
timescale_factor):
""" Instantiate delay extension component
"""
# Create a delay extension vertex to do the extra delays
delay_vertex = presynaptic_population._internal_delay_vertex
pre_vertex = presynaptic_population._get_vertex
if delay_vertex is None:
delay_name = "{}_delayed".format(pre_vertex.label)
delay_vertex = DelayExtensionVertex(
pre_vertex.n_atoms, max_delay_per_neuron, pre_vertex,
machine_time_step, timescale_factor, label=delay_name)
presynaptic_population._internal_delay_vertex = delay_vertex
pre_vertex.add_constraint(
PartitionerSameSizeAsVertexConstraint(delay_vertex))
self._spinnaker.add_partitionable_vertex(delay_vertex)
# Add the edge
delay_afferent_edge = DelayAfferentPartitionableEdge(
pre_vertex, delay_vertex, label="{}_to_DelayExtension".format(
pre_vertex.label))
self._spinnaker.add_partitionable_edge(
delay_afferent_edge, EDGE_PARTITION_ID)
# Ensure that the delay extension knows how many stages it will support
n_stages = int(math.ceil(
float(max_delay_for_projection - max_delay_per_neuron) /
float(max_delay_per_neuron)))
if n_stages > delay_vertex.n_delay_stages:
delay_vertex.n_delay_stages = n_stages
# Create the delay edge if there isn't one already
post_vertex = postsynaptic_population._get_vertex
delay_edge = self._find_existing_edge(delay_vertex, post_vertex)
if delay_edge is None:
delay_edge = MultiCastPartitionableEdge(
delay_vertex, post_vertex, label="{}_delayed_to_{}".format(
pre_vertex.label, post_vertex.label))
self._spinnaker.add_partitionable_edge(
delay_edge, EDGE_PARTITION_ID)
return delay_edge
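# Worked example (comments added for clarity): with a per-neuron maximum of
# 16 timesteps and a projection maximum of 100, the extension needs
# ceil((100 - 16) / 16) == 6 delay stages on top of the native delay.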
def describe(self, template='projection_default.txt', engine='default'):
""" Return a human-readable description of the projection.
The output may be customised by specifying a different template
together with an associated template engine (see ``pyNN.descriptions``)
If template is None, then a dictionary containing the template context
will be returned.
"""
# TODO
raise NotImplementedError
def __getitem__(self, i):
"""Return the `i`th connection within the Projection."""
# TODO: Need to work out what is being returned
raise NotImplementedError
# noinspection PyPep8Naming
def getSynapseDynamics(self, parameter_name, list_format='list',
gather=True):
""" Get parameters of the dynamic synapses for all connections in this\
Projection.
:param parameter_name:
:param list_format:
:param gather:
"""
# TODO: Need to work out what is to be returned
raise NotImplementedError
def _get_synaptic_data(self, as_list, data_to_get):
post_vertex = self._projection_edge.post_vertex
pre_vertex = self._projection_edge.pre_vertex
# If in virtual board mode, the connection data should be set
if self._virtual_connection_list is not None:
return ConnectionHolder(
data_to_get, as_list, pre_vertex.n_atoms, post_vertex.n_atoms,
self._virtual_connection_list)
connection_holder = ConnectionHolder(
data_to_get, as_list, pre_vertex.n_atoms, post_vertex.n_atoms)
# If we haven't run, add the holder to get connections, and return it
if not self._spinnaker.has_ran:
post_vertex.add_pre_run_connection_holder(
connection_holder, self._projection_edge,
self._synapse_information)
return connection_holder
# Otherwise, get the connections now
graph_mapper = self._spinnaker.graph_mapper
placements = self._spinnaker.placements
transceiver = self._spinnaker.transceiver
routing_infos = self._spinnaker.routing_infos
partitioned_graph = self._spinnaker.partitioned_graph
subedges = graph_mapper.get_partitioned_edges_from_partitionable_edge(
self._projection_edge)
progress = ProgressBar(
len(subedges),
"Getting {}s for projection between {} and {}".format(
data_to_get, pre_vertex.label, post_vertex.label))
for subedge in subedges:
placement = placements.get_placement_of_subvertex(
subedge.post_subvertex)
connections = post_vertex.get_connections_from_machine(
transceiver, placement, subedge, graph_mapper, routing_infos,
self._synapse_information, partitioned_graph)
if connections is not None:
connection_holder.add_connections(connections)
progress.update()
progress.end()
connection_holder.finish()
return connection_holder
# noinspection PyPep8Naming
def getWeights(self, format='list', gather=True): # @ReservedAssignment
"""
Get synaptic weights for all connections in this Projection.
Possible formats are: a list of length equal to the number of
connections in the projection, a 2D weight array (with NaN for
non-existent connections). Note that for the array format, if there is
more than one connection between two cells, the summed weight will be
given.
:param format: the type of format to be returned (only "list" is supported)
:param gather: gather the weights from all nodes; has no meaning on\
SpiNNaker when set to False, so it is effectively always True
"""
if not gather:
raise exceptions.ConfigurationException(
"the gather parameter has no meaning for SpiNNaker when set to "
"False")
return self._get_synaptic_data(format == 'list', "weight")
# noinspection PyPep8Naming
def getDelays(self, format='list', gather=True): # @ReservedAssignment
"""
Get synaptic delays for all connections in this Projection.
Possible formats are: a list of length equal to the number of
connections in the projection, a 2D delay array (with NaN for
non-existent connections).
"""
if not gather:
raise exceptions.ConfigurationException(
"the gather parameter has no meaning for SpiNNaker when set to "
"False")
return self._get_synaptic_data(format == 'list', "delay")
def __len__(self):
""" Return the total number of local connections.
"""
# TODO: Need to work out what this means
raise NotImplementedError
# noinspection PyPep8Naming
def printDelays(self, file_name, list_format='list', gather=True):
""" Print synaptic weights to file. In the array format, zeros are\
printed for non-existent connections.
"""
# TODO:
raise NotImplementedError
# noinspection PyPep8Naming
def printWeights(self, file_name, list_format='list', gather=True):
""" Print synaptic weights to file. In the array format, zeros are\
printed for non-existent connections.
"""
# TODO:
raise NotImplementedError
# noinspection PyPep8Naming
def randomizeWeights(self, rand_distr):
""" Set weights to random values taken from rand_distr.
"""
# TODO: Requires that the synapse list is not created proactively
raise NotImplementedError
# noinspection PyPep8Naming
def randomizeDelays(self, rand_distr):
""" Set delays to random values taken from rand_distr.
"""
# TODO: Requires that the synapse list is not created proactively
raise NotImplementedError
# noinspection PyPep8Naming
def randomizeSynapseDynamics(self, param, rand_distr):
""" Set parameters of the synapse dynamics to values taken from\
rand_distr
"""
# TODO: Look at what this is randomising
raise NotImplementedError
def __repr__(self):
return "projection {}".format(self._projection_edge.label)
# noinspection PyPep8Naming
def saveConnections(self, file_name, gather=True, compatible_output=True):
""" Save connections to file in a format suitable for reading in with\
a FromFileConnector.
"""
# TODO
raise NotImplementedError
def size(self, gather=True):
""" Return the total number of connections.
- only local connections, if gather is False,
- all connections, if gather is True (default)
"""
# TODO
raise NotImplementedError
# noinspection PyPep8Naming
def setDelays(self, d):
""" Set the delays
d can be a single number, in which case all delays are set to this\
value, or a list/1D array of length equal to the number of connections\
in the projection, or a 2D array with the same dimensions as the\
connectivity matrix (as returned by `getDelays(format='array')`).
"""
# TODO: Requires that the synapse list is not created proactively
raise NotImplementedError
# noinspection PyPep8Naming
def setSynapseDynamics(self, param, value):
""" Set parameters of the dynamic synapses for all connections in this\
projection.
"""
# TODO: Need to set this in the edge
raise NotImplementedError
# noinspection PyPep8Naming
def setWeights(self, w):
""" Set the weights
w can be a single number, in which case all weights are set to this\
value, or a list/1D array of length equal to the number of connections\
in the projection, or a 2D array with the same dimensions as the\
connectivity matrix (as returned by `getWeights(format='array')`).\
Weights should be in nA for current-based and uS for conductance-based\
synapses.
"""
# TODO: Requires that the synapse list is not created proactively
raise NotImplementedError
# noinspection PyPep8Naming
def weightHistogram(self, min_weight=None, max_weight=None, nbins=10):
""" Return a histogram of synaptic weights.
If min and max are not given, the minimum and maximum weights are\
calculated automatically.
"""
# TODO
raise NotImplementedError | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/pynn_projection.py | 0.85984 | 0.268905 | pynn_projection.py | pypi |
from pacman.model.partitionable_graph.multi_cast_partitionable_edge\
import MultiCastPartitionableEdge
from pacman.utilities.utility_objs.timer import Timer
from spinn_machine.utilities.progress_bar import ProgressBar
from spynnaker.pyNN.utilities import conf
from spynnaker.pyNN.models.neural_projections.projection_partitioned_edge \
import ProjectionPartitionedEdge
import logging
import copy
logger = logging.getLogger(__name__)
class ProjectionPartitionableEdge(MultiCastPartitionableEdge):
""" An edge which terminates on an AbstractPopulationVertex
"""
def __init__(
self, pre_vertex, post_vertex, synapse_information, label=None):
MultiCastPartitionableEdge.__init__(
self, pre_vertex, post_vertex, label=label)
# A list of all synapse information for all the projections that are
# represented by this edge
self._synapse_information = [synapse_information]
# The edge from the delay extension of the pre_vertex to the
# post_vertex - this might be None if no long delays are present
self._delay_edge = None
self._stored_synaptic_data_from_machine = None
def add_synapse_information(self, synapse_information):
synapse_information.index = len(self._synapse_information)
self._synapse_information.append(synapse_information)
@property
def synapse_information(self):
return self._synapse_information
@property
def delay_edge(self):
return self._delay_edge
@delay_edge.setter
def delay_edge(self, delay_edge):
self._delay_edge = delay_edge
@property
def n_delay_stages(self):
if self._delay_edge is None:
return 0
return self._delay_edge.pre_vertex.n_delay_stages
def create_subedge(
self, pre_subvertex, post_subvertex, label=None):
return ProjectionPartitionedEdge(
self._synapse_information, pre_subvertex, post_subvertex, label)
def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
placements, transceiver, routing_infos):
""" Get synaptic data for all connections in this Projection from the\
machine.
"""
if self._stored_synaptic_data_from_machine is None:
timer = None
if conf.config.getboolean("Reports", "display_algorithm_timings"):
timer = Timer()
timer.start_timing()
subedges = \
graph_mapper.get_partitioned_edges_from_partitionable_edge(
self)
if subedges is None:
subedges = list()
synaptic_list = copy.copy(self._synapse_list)
synaptic_list_rows = synaptic_list.get_rows()
progress_bar = ProgressBar(
len(subedges),
"Reading back synaptic matrix for edge between"
" {} and {}".format(self._pre_vertex.label,
self._post_vertex.label))
for subedge in subedges:
n_rows = subedge.get_n_rows(graph_mapper)
pre_vertex_slice = \
graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
post_vertex_slice = \
graph_mapper.get_subvertex_slice(subedge.post_subvertex)
sub_edge_post_vertex = \
graph_mapper.get_vertex_from_subvertex(
subedge.post_subvertex)
rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
placements, transceiver, subedge.pre_subvertex, n_rows,
subedge.post_subvertex,
self._synapse_row_io, partitioned_graph,
routing_infos, subedge.weight_scales).get_rows()
for i in range(len(rows)):
synaptic_list_rows[
i + pre_vertex_slice.lo_atom].set_slice_values(
rows[i], vertex_slice=post_vertex_slice)
progress_bar.update()
progress_bar.end()
self._stored_synaptic_data_from_machine = synaptic_list
if conf.config.getboolean("Reports", "display_algorithm_timings"):
logger.info("Time to read matrix: {}".format(
timer.take_sample()))
return self._stored_synaptic_data_from_machine
def is_multi_cast_partitionable_edge(self):
return True | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/projection_partitionable_edge.py | 0.777131 | 0.212252 | projection_partitionable_edge.py | pypi |
from pyNN.random import RandomDistribution
from spynnaker.pyNN.utilities import utility_calls
from spinn_front_end_common.interface.provenance\
.abstract_provides_local_provenance_data \
import AbstractProvidesLocalProvenanceData
from spynnaker.pyNN.models.abstract_models.abstract_weight_updatable \
import AbstractWeightUpdatable
from pacman.model.partitioned_graph.multi_cast_partitioned_edge \
import MultiCastPartitionedEdge
from spynnaker.pyNN.models.abstract_models.abstract_filterable_edge \
import AbstractFilterableEdge
class ProjectionPartitionedEdge(
MultiCastPartitionedEdge, AbstractFilterableEdge,
AbstractWeightUpdatable, AbstractProvidesLocalProvenanceData):
def __init__(
self, synapse_information, pre_subvertex, post_subvertex,
label=None):
MultiCastPartitionedEdge.__init__(
self, pre_subvertex, post_subvertex, label=label)
AbstractFilterableEdge.__init__(self)
AbstractWeightUpdatable.__init__(self)
self._synapse_information = synapse_information
def filter_sub_edge(self, graph_mapper):
pre_vertex = graph_mapper.get_vertex_from_subvertex(
self._pre_subvertex)
pre_slice_index = graph_mapper.get_subvertex_index(self._pre_subvertex)
pre_vertex_slice = graph_mapper.get_subvertex_slice(
self._pre_subvertex)
pre_slices = graph_mapper.get_subvertex_slices(pre_vertex)
post_vertex = graph_mapper.get_vertex_from_subvertex(
self._post_subvertex)
post_slice_index = graph_mapper.get_subvertex_index(
self._post_subvertex)
post_vertex_slice = graph_mapper.get_subvertex_slice(
self._post_subvertex)
post_slices = graph_mapper.get_subvertex_slices(post_vertex)
n_connections = 0
for synapse_info in self._synapse_information:
n_connections += synapse_info.connector.\
get_n_connections_to_post_vertex_maximum(
pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice)
return n_connections == 0
def update_weight(self, graph_mapper):
pre_vertex = graph_mapper.get_vertex_from_subvertex(
self._pre_subvertex)
pre_slice_index = graph_mapper.get_subvertex_index(self._pre_subvertex)
pre_vertex_slice = graph_mapper.get_subvertex_slice(
self._pre_subvertex)
pre_slices = graph_mapper.get_subvertex_slices(pre_vertex)
post_vertex = graph_mapper.get_vertex_from_subvertex(
self._post_subvertex)
post_slice_index = graph_mapper.get_subvertex_index(
self._post_subvertex)
post_vertex_slice = graph_mapper.get_subvertex_slice(
self._post_subvertex)
post_slices = graph_mapper.get_subvertex_slices(post_vertex)
weight = 0
for synapse_info in self._synapse_information:
new_weight = synapse_info.connector.\
get_n_connections_to_post_vertex_maximum(
pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice)
new_weight *= pre_vertex_slice.n_atoms
if hasattr(pre_vertex, "rate"):
rate = pre_vertex.rate
if hasattr(rate, "__getitem__"):
rate = max(rate)
elif isinstance(rate, RandomDistribution):
rate = utility_calls.get_maximum_probable_value(
rate, pre_vertex_slice.n_atoms)
new_weight *= rate
elif hasattr(pre_vertex, "spikes_per_second"):
new_weight *= pre_vertex.spikes_per_second
weight += new_weight
self._weight = weight
def get_local_provenance_data(self):
prov_items = list()
for synapse_info in self._synapse_information:
prov_items.extend(
synapse_info.connector.get_provenance_data())
prov_items.extend(
synapse_info.synapse_dynamics.get_provenance_data(
self._pre_subvertex.label, self._post_subvertex.label))
return prov_items
def __repr__(self):
return "{}:{}".format(self._pre_subvertex, self._post_subvertex) | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/projection_partitioned_edge.py | 0.77675 | 0.258935 | projection_partitioned_edge.py | pypi |
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
import numpy.random
class MultapseConnector(AbstractConnector):
"""
Create a multapse connector. The sizes of the source and destination
populations are obtained when the projection is connected. The number of
synapses is specified. When instantiated, the required number of synapses
is created by selecting at random from the source and target populations
with replacement. Uniform selection probability is assumed.
:param num_synapses:
Integer. This is the total number of synapses in the connection.
:param weights:
may either be a float, a RandomDistribution object, a list/
1D array with at least as many items as connections to be
created. Units nA.
:param delays:
as `weights`. If `None`, all synaptic delays will be set
to the global minimum delay.
"""
def __init__(
self, num_synapses, weights=0.0, delays=1,
safe=True, verbose=False):
"""
Creates a new connector.
"""
AbstractConnector.__init__(self, safe, None, verbose)
self._num_synapses = num_synapses
self._weights = weights
self._delays = delays
self._pre_slices = None
self._post_slices = None
self._synapses_per_subedge = None
self._check_parameters(weights, delays)
def get_delay_maximum(self):
return self._get_delay_maximum(self._delays, self._num_synapses)
def _update_synapses_per_post_vertex(self, pre_slices, post_slices):
if (self._synapses_per_subedge is None or
len(self._pre_slices) != len(pre_slices) or
len(self._post_slices) != len(post_slices)):
n_pre_atoms = sum([pre.n_atoms for pre in pre_slices])
n_post_atoms = sum([post.n_atoms for post in post_slices])
n_connections = n_pre_atoms * n_post_atoms
prob_connect = [
float(pre.n_atoms * post.n_atoms) / float(n_connections)
for pre in pre_slices for post in post_slices]
self._synapses_per_subedge = self._rng.next(
1, distribution="multinomial", parameters=[
self._num_synapses, prob_connect])
self._pre_slices = pre_slices
self._post_slices = post_slices
def _get_n_connections(self, pre_slice_index, post_slice_index):
index = (len(self._post_slices) * pre_slice_index) + post_slice_index
return self._synapses_per_subedge[index]
def _get_connection_slice(self, pre_slice_index, post_slice_index):
index = (len(self._post_slices) * pre_slice_index) + post_slice_index
n_connections = self._synapses_per_subedge[index]
start_connection = 0
if index > 0:
start_connection = numpy.sum(self._synapses_per_subedge[:index])
return slice(start_connection, start_connection + n_connections, 1)
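# Worked example (comments added for clarity): with two pre-vertex slices of
# 10 atoms each and one post-vertex slice of 10 atoms, prob_connect is
# [0.5, 0.5], so the multinomial draw splits num_synapses between the two
# sub-edges; _get_connection_slice then carves the matching run out of the
# flat connection list using the cumulative sum of _synapses_per_subedge.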
def get_n_connections_from_pre_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
min_delay=None, max_delay=None):
self._update_synapses_per_post_vertex(pre_slices, post_slices)
n_total_connections = self._get_n_connections(
pre_slice_index, post_slice_index)
if n_total_connections == 0:
return 0
prob_per_atom = (
float(n_total_connections) /
float(pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms))
full_connections = 0
while prob_per_atom > 1.0:
full_connections += 1
prob_per_atom -= 1.0
n_connections_per_pre_atom = \
utility_calls.get_probable_maximum_selected(
self._n_pre_neurons * self._n_post_neurons,
post_vertex_slice.n_atoms, prob_per_atom)
n_connections_per_pre_atom += (
full_connections * post_vertex_slice.n_atoms)
if min_delay is None or max_delay is None:
return n_connections_per_pre_atom
return self._get_n_connections_from_pre_vertex_with_delay_maximum(
self._delays, self._n_pre_neurons * self._n_post_neurons,
n_connections_per_pre_atom,
[self._get_connection_slice(pre_slice_index, post_slice_index)],
min_delay, max_delay)
def get_n_connections_to_post_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
self._update_synapses_per_post_vertex(pre_slices, post_slices)
n_total_connections = self._get_n_connections(
pre_slice_index, post_slice_index)
if n_total_connections == 0:
return 0
prob_per_atom = (
float(n_total_connections) /
float(pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms))
full_connections = 0
while prob_per_atom > 1.0:
full_connections += 1
prob_per_atom -= 1.0
return (utility_calls.get_probable_maximum_selected(
self._n_pre_neurons * self._n_post_neurons,
pre_vertex_slice.n_atoms, prob_per_atom) +
(full_connections * pre_vertex_slice.n_atoms))
def get_weight_mean(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
self._update_synapses_per_post_vertex(pre_slices, post_slices)
n_connections = self._get_n_connections(
pre_slice_index, post_slice_index)
if n_connections == 0:
return 0
connection_slice = self._get_connection_slice(
pre_slice_index, post_slice_index)
return self._get_weight_mean(self._weights, [connection_slice])
def get_weight_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
self._update_synapses_per_post_vertex(pre_slices, post_slices)
n_connections = self._get_n_connections(
pre_slice_index, post_slice_index)
if n_connections == 0:
return 0
connection_slice = self._get_connection_slice(
pre_slice_index, post_slice_index)
return self._get_weight_maximum(
self._weights, n_connections, [connection_slice])
def get_weight_variance(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
self._update_synapses_per_post_vertex(pre_slices, post_slices)
connection_slice = self._get_connection_slice(
pre_slice_index, post_slice_index)
return self._get_weight_variance(self._weights, [connection_slice])
def generate_on_machine(self):
return (
not self._generate_lists_on_host(self._weights) and
not self._generate_lists_on_host(self._delays))
def create_synaptic_block(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
synapse_type):
self._update_synapses_per_post_vertex(pre_slices, post_slices)
n_connections = self._get_n_connections(
pre_slice_index, post_slice_index)
if n_connections == 0:
return numpy.zeros(
0, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
connection_slice = self._get_connection_slice(
pre_slice_index, post_slice_index)
block = numpy.zeros(
n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
block["source"] = numpy.random.choice(
numpy.arange(
pre_vertex_slice.lo_atom, pre_vertex_slice.hi_atom + 1),
size=n_connections, replace=True)
block["source"].sort()
block["target"] = numpy.random.choice(
numpy.arange(
post_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1),
size=n_connections, replace=True)
block["weight"] = self._generate_weights(
self._weights, n_connections, [connection_slice])
block["delay"] = self._generate_delays(
self._delays, n_connections, [connection_slice])
block["synapse_type"] = synapse_type
return block | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py | 0.894362 | 0.485844 | multapse_connector.py | pypi |
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
from spinn_front_end_common.utilities import exceptions
import math
import numpy
class FixedProbabilityConnector(AbstractConnector):
"""
For each pair of pre-post cells, the connection probability is constant.
:param `float` p_connect:
a float between zero and one. Each potential connection
is created with this probability.
:param `bool` allow_self_connections:
if the connector is used to connect a
Population to itself, this flag determines whether a neuron is
allowed to connect to itself, or only to other neurons in the
Population.
:param weights:
may either be a float or a RandomDistribution object. Units nA.
:param delays:
If `None`, all synaptic delays will be set
to the global minimum delay.
:param `pyNN.Space` space:
a Space object, needed if you wish to specify distance-
dependent weights or delays - not implemented
"""
def __init__(
self, p_connect, weights=0.0, delays=1,
allow_self_connections=True, safe=True, space=None, verbose=False):
AbstractConnector.__init__(self, safe, space, verbose)
self._p_connect = p_connect
self._weights = weights
self._delays = delays
self._allow_self_connections = allow_self_connections
self._check_parameters(weights, delays, allow_lists=False)
if not 0 <= self._p_connect <= 1:
raise exceptions.ConfigurationException(
"The probability must be between 0 and 1 (inclusive)")
def get_delay_maximum(self):
return self._get_delay_maximum(
self._delays, utility_calls.get_probable_maximum_selected(
self._n_pre_neurons * self._n_post_neurons,
self._n_pre_neurons * self._n_post_neurons, self._p_connect))
def _get_n_connections(self, out_of):
return utility_calls.get_probable_maximum_selected(
self._n_pre_neurons * self._n_post_neurons, out_of,
self._p_connect)
def get_n_connections_from_pre_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
min_delay=None, max_delay=None):
n_connections = self._get_n_connections(post_vertex_slice.n_atoms)
if min_delay is None or max_delay is None:
return int(math.ceil(n_connections))
return self._get_n_connections_from_pre_vertex_with_delay_maximum(
self._delays, self._n_pre_neurons * self._n_post_neurons,
n_connections, None, min_delay, max_delay)
def get_n_connections_to_post_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
return self._get_n_connections(pre_vertex_slice.n_atoms)
def get_weight_mean(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = self._get_n_connections(
pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms)
return self._get_weight_mean(self._weights, None)
def get_weight_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = self._get_n_connections(
pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms)
return self._get_weight_maximum(
self._weights, n_connections, None)
def get_weight_variance(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
return self._get_weight_variance(self._weights, None)
def generate_on_machine(self):
return (
not self._generate_lists_on_host(self._weights) and
not self._generate_lists_on_host(self._delays))
def create_synaptic_block(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
synapse_type):
n_items = pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms
items = self._rng.next(n_items)
        # If self connections are not allowed, remove the possibility of self
        # connections by setting the diagonal entries to infinity
if not self._allow_self_connections:
items[0:n_items:post_vertex_slice.n_atoms + 1] = numpy.inf
present = items < self._p_connect
ids = numpy.where(present)[0]
n_connections = numpy.sum(present)
block = numpy.zeros(
n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
block["source"] = (
(ids / post_vertex_slice.n_atoms) + pre_vertex_slice.lo_atom)
block["target"] = (
(ids % post_vertex_slice.n_atoms) + post_vertex_slice.lo_atom)
block["weight"] = self._generate_weights(
self._weights, n_connections, None)
block["delay"] = self._generate_delays(
self._delays, n_connections, None)
block["synapse_type"] = synapse_type
return block | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py | 0.917227 | 0.42931 | fixed_probability_connector.py | pypi |
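# Standalone sketch (not part of the package source): the Bernoulli sampling
# in create_synaptic_block above makes one uniform draw per potential
# connection; the stride of n_post + 1 through the flattened matrix marks the
# diagonal, so self connections always fail the test. Sizes are illustrative.
import numpy

n_pre, n_post, p_connect = 4, 4, 0.5
items = numpy.random.uniform(size=n_pre * n_post)
items[0:n_pre * n_post:n_post + 1] = numpy.inf  # exclude the diagonal

present = items < p_connect
ids = numpy.where(present)[0]
sources = ids // n_post  # row of the flattened n_pre x n_post matrix
targets = ids % n_post   # column of the flattened matrix
print(list(zip(sources, targets)))  # no (i, i) pairs appear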
from pyNN.random import RandomDistribution
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
import numpy
import logging
logger = logging.getLogger(__name__)
class FixedNumberPostConnector(AbstractConnector):
def __init__(
self, n, weights=0.0, delays=1, allow_self_connections=True,
space=None, safe=True, verbose=False):
AbstractConnector.__init__(self, safe, space, verbose)
self._post_n = n
self._weights = weights
self._delays = delays
self._allow_self_connections = allow_self_connections
self._post_neurons = None
self._check_parameters(weights, delays, allow_lists=False)
if isinstance(n, RandomDistribution):
raise NotImplementedError(
"RandomDistribution is not supported for n in the"
" implementation of FixedNumberPostConnector on this platform")
def get_delay_maximum(self):
return self._get_delay_maximum(
self._delays, self._n_pre_neurons * self._post_n)
def _get_post_neurons(self):
if self._post_neurons is None:
n = 0
while (n < self._post_n):
permutation = numpy.arange(self._n_post_neurons)
for i in range(0, self._n_post_neurons - 1):
                    # Cast the draw to an int index before swapping (the RNG
                    # returns a float value rather than an integer)
                    j = int(self._rng.next(
                        n=1, distribution="uniform",
                        parameters=[0, self._n_post_neurons]))
                    (permutation[i], permutation[j]) = (
                        permutation[j], permutation[i])
n += self._n_post_neurons
if self._post_neurons is None:
self._post_neurons = permutation
else:
self._post_neurons = numpy.append(
self._post_neurons, permutation)
self._post_neurons = self._post_neurons[:self._post_n]
self._post_neurons.sort()
return self._post_neurons
def _post_neurons_in_slice(self, post_vertex_slice):
post_neurons = self._get_post_neurons()
return post_neurons[
(post_neurons >= post_vertex_slice.lo_atom) &
(post_neurons <= post_vertex_slice.hi_atom)]
def _is_connected(self, post_vertex_slice):
return self._post_neurons_in_slice(post_vertex_slice).size > 0
def get_n_connections_from_pre_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
min_delay=None, max_delay=None):
if not self._is_connected(post_vertex_slice):
return 0
post_neurons = self._post_neurons_in_slice(post_vertex_slice)
if min_delay is None or max_delay is None:
return len(post_neurons)
return self._get_n_connections_from_pre_vertex_with_delay_maximum(
            self._delays, self._n_pre_neurons * self._post_n,
pre_vertex_slice.n_atoms * len(post_neurons), None,
min_delay, max_delay)
def get_n_connections_to_post_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
if not self._is_connected(post_vertex_slice):
return 0
return pre_vertex_slice.n_atoms
def get_weight_mean(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
if not self._is_connected(post_vertex_slice):
return 0.0
post_neurons = self._post_neurons_in_slice(post_vertex_slice)
n_connections = pre_vertex_slice.n_atoms * len(post_neurons)
return self._get_weight_mean(self._weights, None)
def get_weight_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
if not self._is_connected(post_vertex_slice):
return 0.0
post_neurons = self._post_neurons_in_slice(post_vertex_slice)
n_connections = pre_vertex_slice.n_atoms * len(post_neurons)
return self._get_weight_maximum(
self._weights, n_connections, None)
def get_weight_variance(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
if not self._is_connected(post_vertex_slice):
return 0.0
return self._get_weight_variance(self._weights, None)
def generate_on_machine(self):
return (
not self._generate_lists_on_host(self._weights) and
not self._generate_lists_on_host(self._delays))
def create_synaptic_block(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
synapse_type):
if not self._is_connected(post_vertex_slice):
return numpy.zeros(0, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
post_neurons_in_slice = self._post_neurons_in_slice(post_vertex_slice)
n_connections = pre_vertex_slice.n_atoms * len(post_neurons_in_slice)
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
n_connections -= len(post_neurons_in_slice)
block = numpy.zeros(
n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
block["source"] = [
pre_index for pre_index in range(
pre_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1)
for post_index in post_neurons_in_slice
if pre_index != post_index]
block["target"] = [
post_index for pre_index in range(
pre_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1)
for post_index in post_neurons_in_slice
if pre_index != post_index]
else:
block["source"] = numpy.repeat(numpy.arange(
pre_vertex_slice.lo_atom, pre_vertex_slice.hi_atom + 1),
len(post_neurons_in_slice))
block["target"] = numpy.tile(
post_neurons_in_slice, pre_vertex_slice.n_atoms)
block["weight"] = self._generate_weights(
self._weights, n_connections, None)
block["delay"] = self._generate_delays(
self._delays, n_connections, None)
block["synapse_type"] = synapse_type
return block | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py | 0.808143 | 0.174059 | fixed_number_post_connector.py | pypi |
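# Standalone sketch (not part of the package source): _get_post_neurons above
# builds a sorted sample of post_n post-neuron ids by concatenating shuffled
# permutations, so an id repeats only after the whole population has been
# used. numpy.random.permutation stands in for the connector's RNG here.
import numpy

n_post_neurons, post_n = 6, 9
post_neurons = numpy.concatenate([
    numpy.random.permutation(n_post_neurons)
    for _ in range((post_n // n_post_neurons) + 1)])[:post_n]
post_neurons.sort()
print(post_neurons)  # 9 sorted ids drawn from 0..5, each at most twice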
from pyNN.space import Space
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
import logging
import numpy
import math
# support for arbitrary expression for the distance dependence
# NOTE: Do NOT delete these to fix PEP8 issues
# noinspection PyUnresolvedReferences
from numpy import arccos, arcsin, arctan, arctan2, ceil, cos # @UnusedImport
# noinspection PyUnresolvedReferences
from numpy import cosh, exp, fabs, floor, fmod, hypot, ldexp # @UnusedImport
# noinspection PyUnresolvedReferences
from numpy import log, log10, modf, power, sin, sinh, sqrt # @UnusedImport
# noinspection PyUnresolvedReferences
from numpy import tan, tanh, maximum, minimum, e, pi # @UnusedImport
logger = logging.getLogger(__name__)
class DistanceDependentProbabilityConnector(AbstractConnector):
""" Make connections using a distribution which varies with distance.
"""
def __init__(self, d_expression, allow_self_connections=True,
weights=0.0, delays=1, space=Space(), safe=True,
verbose=False, n_connections=None):
"""
:param `string` d_expression:
the right-hand side of a valid python expression for
probability, involving 'd', e.g. "exp(-abs(d))", or "d<3",
that can be parsed by eval(), that computes the distance
dependent distribution
:param `bool` allow_self_connections:
if the connector is used to connect a
Population to itself, this flag determines whether a neuron is
allowed to connect to itself, or only to other neurons in the
Population.
:param `float` weights:
may either be a float, a !RandomDistribution object, a list/
1D array with at least as many items as connections to be
created, or a distance dependence as per a d_expression. Units nA.
:param `float` delays: -- as `weights`. If `None`, all synaptic delays
will be set to the global minimum delay.
:param `pyNN.Space` space:
            a Space object, needed if you wish to specify distance-dependent
            weights or delays
:param `int` n_connections:
The number of efferent synaptic connections per neuron.
"""
AbstractConnector.__init__(self, safe, space, verbose)
self._d_expression = d_expression
self._allow_self_connections = allow_self_connections
self._weights = weights
self._delays = delays
self._check_parameters(weights, delays, allow_lists=False)
if n_connections is not None:
raise NotImplementedError(
"n_connections is not implemented for"
" DistanceDependentProbabilityConnector on this platform")
# Get the probabilities up-front for now
# TODO: Work out how this can be done statistically
expand_distances = self._expand_distances(self._d_expression)
pre_positions = self._pre_population.positions
post_positions = self._post_population.positions
# d is apparently unused, but is in fact expected by d_expression
# so is used when eval is called
d = self._space.distances( # @UnusedVariable
pre_positions, post_positions, expand_distances)
self._probs = eval(self._d_expression)
def get_delay_maximum(self):
return self._get_delay_maximum(
self._delays, utility_calls.get_probable_maximum_selected(
self._n_pre_neurons * self._n_post_neurons,
self._n_pre_neurons * self._n_post_neurons,
numpy.amax(self._probs)))
def _get_n_connections(self, out_of, pre_vertex_slice, post_vertex_slice):
max_prob = numpy.amax(
self._probs[pre_vertex_slice.as_slice, post_vertex_slice.as_slice])
return utility_calls.get_probable_maximum_selected(
self._n_pre_neurons * self._n_post_neurons, out_of,
max_prob)
def get_n_connections_from_pre_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
min_delay=None, max_delay=None):
n_connections = self._get_n_connections(
post_vertex_slice.n_atoms, pre_vertex_slice, post_vertex_slice)
if min_delay is None or max_delay is None:
return int(math.ceil(n_connections))
return self._get_n_connections_from_pre_vertex_with_delay_maximum(
self._delays, self._n_pre_neurons * self._n_post_neurons,
n_connections, None, min_delay, max_delay)
def get_n_connections_to_post_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
return self._get_n_connections(
pre_vertex_slice.n_atoms, pre_vertex_slice, post_vertex_slice)
def get_weight_mean(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = self._get_n_connections(
pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms,
pre_vertex_slice, post_vertex_slice)
return self._get_weight_mean(self._weights, None)
def get_weight_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = self._get_n_connections(
pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms,
pre_vertex_slice, post_vertex_slice)
return self._get_weight_maximum(
self._weights, n_connections, None)
def get_weight_variance(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
return self._get_weight_variance(self._weights, None)
def generate_on_machine(self):
return False
def create_synaptic_block(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
synapse_type):
        probs = self._probs[
            pre_vertex_slice.as_slice,
            post_vertex_slice.as_slice].flatten()
n_items = pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms
items = self._rng.next(n_items)
        # If self connections are not allowed, remove the possibility of self
        # connections by setting the diagonal entries to infinity
if not self._allow_self_connections:
items[0:n_items:post_vertex_slice.n_atoms + 1] = numpy.inf
present = items < probs
ids = numpy.where(present)[0]
n_connections = numpy.sum(present)
block = numpy.zeros(
n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
block["source"] = (
(ids / post_vertex_slice.n_atoms) + pre_vertex_slice.lo_atom)
block["target"] = (
(ids % post_vertex_slice.n_atoms) + post_vertex_slice.lo_atom)
block["weight"] = self._generate_weights(
self._weights, n_connections, None)
block["delay"] = self._generate_delays(
self._delays, n_connections, None)
block["synapse_type"] = synapse_type
return block | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py | 0.862525 | 0.369031 | distance_dependent_probability_connector.py | pypi |
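# Standalone sketch (not part of the package source): the probability table
# is built by eval-ing d_expression with an array named d in scope, exactly
# as in __init__ above. Positions and the expression are illustrative, and a
# plain Euclidean distance stands in for Space.distances.
import numpy
from numpy import exp  # names used inside d_expression must be in scope

d_expression = "exp(-d)"
pre_positions = numpy.array([[0.0], [1.0]])
post_positions = numpy.array([[0.0], [2.0]])

d = numpy.abs(pre_positions - post_positions.T)  # all-pairs distances
probs = eval(d_expression)
print(probs)  # 2 x 2 matrix of per-pair connection probabilities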
import numpy
from pyNN.random import RandomDistribution
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
class OneToOneConnector(AbstractConnector):
"""
    Where the pre- and postsynaptic populations have the same size, connect
    cell i in the presynaptic population to cell i in the postsynaptic
    population for all i.
"""
def __init__(
self, weights=0.0, delays=1, space=None, safe=True, verbose=False):
"""
:param weights:
may either be a float, a !RandomDistribution object, a list/
1D array with at least as many items as connections to be
created. Units nA.
:param delays:
as `weights`. If `None`, all synaptic delays will be set
to the global minimum delay.
"""
AbstractConnector.__init__(self, safe, space, verbose)
self._weights = weights
self._delays = delays
self._check_parameters(weights, delays)
def get_delay_maximum(self):
return self._get_delay_maximum(
self._delays, max((self._n_pre_neurons, self._n_post_neurons)))
def get_n_connections_from_pre_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
min_delay=None, max_delay=None):
max_lo_atom = max(
(pre_vertex_slice.lo_atom, post_vertex_slice.lo_atom))
min_hi_atom = min(
(pre_vertex_slice.hi_atom, post_vertex_slice.hi_atom))
if min_hi_atom < max_lo_atom:
return 0
if min_delay is None or max_delay is None:
return 1
if isinstance(self._delays, RandomDistribution):
return 1
elif numpy.isscalar(self._delays):
if self._delays >= min_delay and self._delays <= max_delay:
return 1
return 0
else:
connection_slice = slice(max_lo_atom, min_hi_atom + 1)
slice_min_delay = min(self._delays[connection_slice])
slice_max_delay = max(self._delays[connection_slice])
if slice_min_delay >= min_delay and slice_max_delay <= max_delay:
return 1
return 0
def get_n_connections_to_post_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
max_lo_atom = max(
(pre_vertex_slice.lo_atom, post_vertex_slice.lo_atom))
min_hi_atom = min(
(pre_vertex_slice.hi_atom, post_vertex_slice.hi_atom))
if min_hi_atom < max_lo_atom:
return 0
return 1
def get_weight_mean(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
max_lo_atom = max(
(pre_vertex_slice.lo_atom, post_vertex_slice.lo_atom))
min_hi_atom = min(
(pre_vertex_slice.hi_atom, post_vertex_slice.hi_atom))
n_connections = (min_hi_atom - max_lo_atom) + 1
if n_connections <= 0:
return 0
connection_slice = slice(max_lo_atom, min_hi_atom + 1)
return self._get_weight_mean(self._weights, [connection_slice])
def get_weight_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
max_lo_atom = max(
(pre_vertex_slice.lo_atom, post_vertex_slice.lo_atom))
min_hi_atom = min(
(pre_vertex_slice.hi_atom, post_vertex_slice.hi_atom))
n_connections = (min_hi_atom - max_lo_atom) + 1
if n_connections <= 0:
return 0
connection_slice = slice(max_lo_atom, min_hi_atom + 1)
return self._get_weight_maximum(
self._weights, n_connections, [connection_slice])
def get_weight_variance(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
max_lo_atom = max(
(pre_vertex_slice.lo_atom, post_vertex_slice.lo_atom))
min_hi_atom = min(
(pre_vertex_slice.hi_atom, post_vertex_slice.hi_atom))
if max_lo_atom > min_hi_atom:
return 0
connection_slice = slice(max_lo_atom, min_hi_atom + 1)
return self._get_weight_variance(self._weights, [connection_slice])
def generate_on_machine(self):
return (
not self._generate_lists_on_host(self._weights) and
not self._generate_lists_on_host(self._delays))
def create_synaptic_block(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
synapse_type):
max_lo_atom = max(
(pre_vertex_slice.lo_atom, post_vertex_slice.lo_atom))
min_hi_atom = min(
(pre_vertex_slice.hi_atom, post_vertex_slice.hi_atom))
n_connections = max((0, (min_hi_atom - max_lo_atom) + 1))
if n_connections <= 0:
return numpy.zeros(0, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
connection_slice = slice(max_lo_atom, min_hi_atom + 1)
block = numpy.zeros(
n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
block["source"] = numpy.arange(max_lo_atom, min_hi_atom + 1)
block["target"] = numpy.arange(max_lo_atom, min_hi_atom + 1)
block["weight"] = self._generate_weights(
self._weights, n_connections, [connection_slice])
block["delay"] = self._generate_delays(
self._delays, n_connections, [connection_slice])
block["synapse_type"] = synapse_type
return block | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py | 0.793786 | 0.500427 | one_to_one_connector.py | pypi |
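# Standalone sketch (not part of the package source): the slice-overlap
# arithmetic repeated throughout OneToOneConnector above. A connection
# i -> i exists in a machine-level block only where the pre and post atom
# ranges overlap; the bounds are illustrative assumptions.
pre_lo, pre_hi = 0, 99
post_lo, post_hi = 50, 149

max_lo_atom = max(pre_lo, post_lo)
min_hi_atom = min(pre_hi, post_hi)
n_connections = max(0, (min_hi_atom - max_lo_atom) + 1)
print(n_connections)  # 50: atoms 50..99 connect one-to-one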
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
import numpy
import logging
logger = logging.getLogger(__name__)
class AllToAllConnector(AbstractConnector):
""" Connects all cells in the presynaptic population to all cells in \
the postsynaptic population
"""
def __init__(
self, weights=0.0, delays=1, allow_self_connections=True,
            space=None, safe=True, verbose=False):
"""
:param `bool` allow_self_connections:
if the connector is used to connect a
Population to itself, this flag determines whether a neuron is
allowed to connect to itself, or only to other neurons in the
Population.
:param `float` weights:
may either be a float, a !RandomDistribution object, a list/
1D array with at least as many items as connections to be
created. Units nA.
:param `float` delays: -- as `weights`. If `None`, all synaptic delays
will be set to the global minimum delay.
"""
AbstractConnector.__init__(self, safe, space, verbose)
self._weights = weights
self._delays = delays
self._allow_self_connections = allow_self_connections
self._check_parameters(weights, delays)
def _connection_slices(self, pre_vertex_slice, post_vertex_slice):
""" Get a slice of the overall set of connections
"""
n_post_neurons = self._n_post_neurons
stop_atom = post_vertex_slice.hi_atom + 1
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
n_post_neurons -= 1
stop_atom -= 1
return [
slice(n + post_vertex_slice.lo_atom, n + stop_atom)
for n in range(
pre_vertex_slice.lo_atom * n_post_neurons,
                (pre_vertex_slice.hi_atom + 1) * n_post_neurons,
n_post_neurons)]
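    # Worked example (added for illustration): with pre atoms 0-1 and post
    # atoms 0-2, the flat connection matrix has 6 entries and the slices
    # above are [slice(0, 3), slice(3, 6)] -- one contiguous run of post
    # targets per pre neuron.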
def get_delay_maximum(self):
return self._get_delay_maximum(
self._delays, self._n_pre_neurons * self._n_post_neurons)
def get_n_connections_from_pre_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
min_delay=None, max_delay=None):
total_n_connections_per_pre_neuron = self._n_post_neurons
n_connections_per_pre_neuron = post_vertex_slice.n_atoms
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
n_connections_per_pre_neuron -= 1
total_n_connections_per_pre_neuron -= 1
if min_delay is None or max_delay is None:
return n_connections_per_pre_neuron
return self._get_n_connections_from_pre_vertex_with_delay_maximum(
self._delays,
self._n_pre_neurons * total_n_connections_per_pre_neuron,
n_connections_per_pre_neuron,
self._connection_slices(pre_vertex_slice, post_vertex_slice),
min_delay, max_delay)
def get_n_connections_to_post_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
return pre_vertex_slice.n_atoms - 1
return pre_vertex_slice.n_atoms
def get_weight_mean(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
n_connections -= post_vertex_slice.n_atoms
connection_slices = self._connection_slices(
pre_vertex_slice, post_vertex_slice)
return self._get_weight_mean(self._weights, connection_slices)
def get_weight_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
n_connections -= post_vertex_slice.n_atoms
connection_slices = self._connection_slices(
pre_vertex_slice, post_vertex_slice)
return self._get_weight_maximum(
self._weights, n_connections, connection_slices)
def get_weight_variance(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
connection_slices = self._connection_slices(
pre_vertex_slice, post_vertex_slice)
return self._get_weight_variance(self._weights, connection_slices)
def generate_on_machine(self):
return (
not self._generate_lists_on_host(self._weights) and
not self._generate_lists_on_host(self._delays))
def create_synaptic_block(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
synapse_type):
n_connections = pre_vertex_slice.n_atoms * post_vertex_slice.n_atoms
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
n_connections -= post_vertex_slice.n_atoms
connection_slices = self._connection_slices(
pre_vertex_slice, post_vertex_slice)
block = numpy.zeros(
n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
if (not self._allow_self_connections and
pre_vertex_slice is post_vertex_slice):
n_atoms = pre_vertex_slice.n_atoms
block["source"] = numpy.where(numpy.diag(
numpy.repeat(1, n_atoms)) == 0)[0]
block["target"] = [block["source"][
((n_atoms * i) + (n_atoms - 1)) - j]
for j in range(n_atoms) for i in range(n_atoms - 1)]
block["source"] += pre_vertex_slice.lo_atom
block["target"] += post_vertex_slice.lo_atom
else:
block["source"] = numpy.repeat(numpy.arange(
pre_vertex_slice.lo_atom, pre_vertex_slice.hi_atom + 1),
post_vertex_slice.n_atoms)
block["target"] = numpy.tile(numpy.arange(
post_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1),
pre_vertex_slice.n_atoms)
block["weight"] = self._generate_weights(
self._weights, n_connections, connection_slices)
block["delay"] = self._generate_delays(
self._delays, n_connections, connection_slices)
block["synapse_type"] = synapse_type
return block | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py | 0.872538 | 0.360461 | all_to_all_connector.py | pypi |
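# Standalone sketch (not part of the package source): the repeat/tile index
# construction from create_synaptic_block above enumerates the full cross
# product of pre and post atoms. Slice bounds are illustrative assumptions.
import numpy

pre = numpy.arange(0, 2)   # pre atoms 0..1
post = numpy.arange(5, 8)  # post atoms 5..7
sources = numpy.repeat(pre, post.size)  # 0 0 0 1 1 1
targets = numpy.tile(post, pre.size)    # 5 6 7 5 6 7
print(list(zip(sources, targets)))      # all 6 (pre, post) pairs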
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
import numpy
class SmallWorldConnector(AbstractConnector):
def __init__(
self, degree, rewiring, allow_self_connections=True, weights=0.0,
delays=1, space=None, safe=True, verbose=False,
n_connections=None):
AbstractConnector.__init__(self, safe, space, verbose)
        self._rewiring = rewiring
        self._weights = weights
        self._delays = delays
        self._allow_self_connections = allow_self_connections
self._check_parameters(weights, delays, allow_lists=False)
if n_connections is not None:
raise NotImplementedError(
"n_connections is not implemented for"
" SmallWorldConnector on this platform")
# Get the probabilities up-front for now
# TODO: Work out how this can be done statistically
pre_positions = self._pre_population.positions
post_positions = self._post_population.positions
distances = self._space.distances(
pre_positions, post_positions, False)
        self._mask = (distances < degree).astype(float)
self._n_connections = numpy.sum(self._mask)
def get_delay_maximum(self):
return self._get_delay_maximum(self._delays, self._n_connections)
def _get_n_connections(self, pre_vertex_slice, post_vertex_slice):
return numpy.sum(
self._mask[pre_vertex_slice.as_slice, post_vertex_slice.as_slice])
def get_n_connections_from_pre_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
min_delay=None, max_delay=None):
n_connections = numpy.amax([
numpy.sum(self._mask[i, post_vertex_slice.as_slice])
for i in range(
pre_vertex_slice.lo_atom, pre_vertex_slice.hi_atom + 1)])
if min_delay is None or max_delay is None:
return n_connections
return self._get_n_connections_from_pre_vertex_with_delay_maximum(
self._delays, self._n_connections,
n_connections, None, min_delay, max_delay)
def get_n_connections_to_post_vertex_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = numpy.amax([
numpy.sum(self._mask[pre_vertex_slice.as_slice, i])
for i in range(
post_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1)])
return n_connections
def get_weight_mean(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = self._get_n_connections(
pre_vertex_slice, post_vertex_slice)
return self._get_weight_mean(self._weights, None)
def get_weight_maximum(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
n_connections = self._get_n_connections(
pre_vertex_slice, post_vertex_slice)
return self._get_weight_maximum(
self._weights, n_connections, None)
def get_weight_variance(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
return self._get_weight_variance(self._weights, None)
def generate_on_machine(self):
return False
def create_synaptic_block(
self, pre_slices, pre_slice_index, post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice,
synapse_type):
        ids = numpy.where(self._mask[
            pre_vertex_slice.as_slice,
            post_vertex_slice.as_slice].flatten())[0]
        n_connections = ids.size
block = numpy.zeros(
n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
block["source"] = (
(ids / post_vertex_slice.n_atoms) + pre_vertex_slice.lo_atom)
block["target"] = (
(ids % post_vertex_slice.n_atoms) + post_vertex_slice.lo_atom)
block["weight"] = self._generate_weights(
self._weights, n_connections, None)
block["delay"] = self._generate_delays(
self._delays, n_connections, None)
block["synapse_type"] = synapse_type
# Re-wire some connections
rewired = numpy.where(
self._rng.next(n_connections) < self._rewiring)[0]
block["target"][rewired] = (
(self._rng.next(rewired.size) * (post_vertex_slice.n_atoms - 1)) +
post_vertex_slice.lo_atom)
return block | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py | 0.627723 | 0.255779 | small_world_connector.py | pypi |
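# Standalone sketch (not part of the package source): the small-world recipe
# above connects every pair closer than `degree`, then rewires a fraction
# `rewiring` of those connections to uniformly random targets. Positions,
# parameters and the RNG are illustrative assumptions.
import numpy

n = 10
positions = numpy.arange(n, dtype=float).reshape(-1, 1)
distances = numpy.abs(positions - positions.T)
degree, rewiring = 2.0, 0.1

mask = (distances < degree).astype(float)
ids = numpy.where(mask.flatten())[0]
sources, targets = ids // n, ids % n

rewired = numpy.where(numpy.random.uniform(size=ids.size) < rewiring)[0]
targets[rewired] = numpy.random.randint(0, n, size=rewired.size)
print(ids.size, "connections,", rewired.size, "rewired")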