text stringlengths 0 1.05M | meta dict |
|---|---|
"Amazon S3 cache backend for Django"
# Copyright (c) 2012,2017 Alexander Todorov <atodorov@MrSenko.com>
#
# Taken directly from django.core.cache.backends.filebased.FileBasedCache
# and adapted for S3.
import time
import hashlib
try:
import cPickle as pickle
except ImportError:
import pickle
from storages.backends import s3boto
from django.core.files.base import ContentFile
from django.core.cache.backends.base import BaseCache
def _key_to_file(key):
"""
All files go into a single flat directory because it's not easier
to search/delete empty directories in _delete().
Plus Amazon S3 doesn't seem to have a problem with many files into one directory.
NB: measuring sha1() with timeit shows it is a bit faster compared to md5()
http://stackoverflow.com/questions/2241013/is-there-a-significant-overhead-by-using-different-versions-of-sha-hashing-hash
UPDATE: this is wrong, md5() is still faster, see:
http://atodorov.org/blog/2013/02/05/performance-test-md5-sha1-sha256-sha512/
"""
return hashlib.sha1(key.encode('utf-8')).hexdigest()
class AmazonS3Cache(BaseCache):
    """
    Amazon S3 cache backend for Django

    Each cache entry is one S3 object named after the SHA-1 of the cache
    key (see _key_to_file()).  The object body is two concatenated
    pickles: first the expiry timestamp (float), then the cached value.
    """
    def __init__(self, _location, params):
        """
        location is not used but otherwise Django crashes.

        :param _location: ignored; Django's cache framework passes it
                          positionally, hence the placeholder argument.
        :param params: cache settings dict; its OPTIONS sub-dict is
                       normalised and forwarded to S3BotoStorage.
        """
        BaseCache.__init__(self, params)
        # Amazon and boto have a maximum limit of 1000 for get_all_keys(). See:
        # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
        # This implementation of the GET operation returns some or all (up to 1000)
        # of the objects in a bucket....
        if self._max_entries > 1000:
            self._max_entries = 1000
        self._options = params.get('OPTIONS', {})
        # backward compatible syntax for s3cache users before v1.2 for easy upgrades
        # in v1.2 we update to latest django-storages 1.1.8 which changes variable names
        # in non-backward compatible fashion
        if 'ACCESS_KEY' not in self._options.keys():
            self._options['ACCESS_KEY'] = self._options.get('ACCESS_KEY_ID', None)
        if 'SECRET_KEY' not in self._options.keys():
            self._options['SECRET_KEY'] = self._options.get('SECRET_ACCESS_KEY', None)
        if 'BUCKET_NAME' not in self._options.keys():
            self._options['BUCKET_NAME'] = self._options.get('STORAGE_BUCKET_NAME', None)
        # we use S3 compatible varibale names while django-storages doesn't
        _bucket_name = self._options.get('BUCKET_NAME', None)
        _default_acl = self._options.get('DEFAULT_ACL', 'private')
        _bucket_acl = self._options.get('BUCKET_ACL', _default_acl)
        # in case it was not specified in OPTIONS default to 'private'
        self._options['BUCKET_ACL'] = _bucket_acl
        self._location = self._options.get('LOCATION', self._options.get('location', ''))
        # sanitize location by removing leading and traling slashes
        self._options['LOCATION'] = self._location.strip('/')
        # S3BotoStorage wants lower case names
        lowercase_options = []
        for name, value in self._options.items():
            if value: # skip None values
                lowercase_options.append((name.lower(), value))
        # this avoids RuntimeError: dictionary changed size during iteration
        # with Python 3 if we assign to the dictionary directly
        for _n, _v in lowercase_options:
            self._options[_n] = _v
        self._storage = s3boto.S3BotoStorage(
            acl=_default_acl,
            bucket=_bucket_name,
            **self._options
        )
    def add(self, key, value, timeout=None, version=None):
        """Store value only if the key is not already cached (and fresh)."""
        if self.has_key(key, version=version):
            return False
        self.set(key, value, timeout, version=version)
        return True
    def get(self, key, default=None, version=None):
        """
        Return the cached value, or `default` on a miss, an expired
        entry, or any storage/unpickling error (all failures are
        deliberately swallowed — cache reads are best-effort).
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        fname = _key_to_file(key)
        try:
            fobj = self._storage.open(fname, 'rb')
            try:
                # first pickle in the object is the expiry timestamp
                if not self._is_expired(fobj, fname):
                    return pickle.load(fobj)
            finally:
                fobj.close()
        except (IOError, OSError, EOFError, pickle.PickleError):
            pass
        return default
    def set(self, key, value, timeout=None, version=None):
        """Store value under key; errors are silently ignored (best-effort)."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        fname = _key_to_file(key)
        # enforce max_entries before adding a new object
        self._cull()
        try:
            content = self._dump_object(value, timeout)
            self._storage.save(fname, ContentFile(content))
        except (IOError, OSError, EOFError, pickle.PickleError):
            pass
    def _dump_object(self, value, timeout=None):
        """Serialise (expiry_timestamp, value) as two concatenated pickles."""
        if timeout is None:
            timeout = self.default_timeout
        content = pickle.dumps(time.time() + timeout, pickle.HIGHEST_PROTOCOL)
        content += pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        return content
    def delete(self, key, version=None):
        """Remove the entry for key, ignoring storage errors."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        try:
            self._delete(_key_to_file(key))
        except (IOError, OSError):
            pass
    def _delete(self, fname):
        # delegate straight to the storage backend
        self._storage.delete(fname)
    def has_key(self, key, version=None):
        """Return True if key exists and has not expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        fname = _key_to_file(key)
        try:
            fobj = self._storage.open(fname, 'rb')
            try:
                return not self._is_expired(fobj, fname)
            finally:
                fobj.close()
        except (IOError, OSError, EOFError, pickle.PickleError):
            return False
    def _is_expired(self, fobj, fname):
        """
        Takes an open cache file and determines if it has expired,
        deletes the file if it is has passed its expiry time.

        NOTE: reads (consumes) the leading expiry pickle, leaving the
        file positioned at the value pickle for the caller.
        """
        exp = pickle.load(fobj)
        if exp < time.time():
            self._delete(fname)
            return True
        return False
    def _cull(self, frequency=None):
        """
        Delete every `frequency`-th object when over max_entries;
        frequency == 0 deletes everything (used by clear()).
        """
        if frequency is None:
            frequency = self._cull_frequency
        if not self._max_entries:
            # culling disabled
            return
        if int(self._num_entries) < self._max_entries:
            return
        try:
            keylist = self._storage.bucket.get_all_keys(prefix=self._location)
        except (IOError, OSError):
            return
        if not frequency:
            doomed = keylist
        else:
            doomed = [k for (i, k) in enumerate(keylist) if i % frequency == 0]
        try:
            self._storage.bucket.delete_keys(doomed, quiet=True)
        except (IOError, OSError):
            pass
    def _get_num_entries(self):
        """
        There seems to be an artificial limit of 1000

        (get_all_keys() returns at most 1000 keys, so the count
        saturates there — consistent with the cap in __init__.)
        """
        return len(self._storage.bucket.get_all_keys(prefix=self._location))
    _num_entries = property(_get_num_entries)
    def clear(self):
        # delete all keys
        self._cull(0)
# For backwards compatibility
class CacheClass(AmazonS3Cache):
    """
    Backward compatibility class definition

    Kept so settings that still reference ``CacheClass`` (the historical
    name) continue to work; it adds nothing over AmazonS3Cache.
    """
| {
"repo_name": "atodorov/django-s3-cache",
"path": "s3cache/__init__.py",
"copies": "1",
"size": "7246",
"license": "mit",
"hash": -5435891792446109000,
"line_mean": 32.3917050691,
"line_max": 130,
"alpha_frac": 0.6018492962,
"autogenerated": false,
"ratio": 4.005527915975677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014455961207338105,
"num_lines": 217
} |
from Utils import getTreeFromXml, unicodise, deunicodise
from logging import debug, info, warning, error
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
class S3Exception(Exception):
    """
    Base class for all S3-related exceptions in this module.

    Stores a unicode message; string conversion round-trips through
    unicodise()/deunicodise() so messages survive non-ASCII content.
    (Python 2 code: relies on `unicode` and __unicode__.)
    """
    def __init__(self, message = ""):
        # normalise to unicode on the way in; note this assignment goes
        # through the `message` property defined below (stores _message)
        self.message = unicodise(message)
    def __str__(self):
        ## Call unicode(self) instead of self.message because
        ## __unicode__() method could be overriden in subclasses!
        return deunicodise(unicode(self))
    def __unicode__(self):
        return self.message
    ## (Base)Exception.message has been deprecated in Python 2.6
    def _get_message(self):
        return self._message
    def _set_message(self, message):
        self._message = message
    message = property(_get_message, _set_message)
class S3Error (S3Exception):
    """
    Error reported by the S3 service itself.

    Built from a `response` dict with "status", "reason" and optionally
    "headers" and "data" (the XML error body).  Code/Message/Resource
    from the XML end up in self.info and as attributes.
    (Python 2 code: uses dict.has_key().)
    """
    def __init__(self, response):
        self.status = response["status"]
        self.reason = response["reason"]
        # defaults so attribute access below never fails even when the
        # response carried no XML body
        self.info = {
            "Code" : "",
            "Message" : "",
            "Resource" : ""
        }
        debug("S3Error: %s (%s)" % (self.status, self.reason))
        if response.has_key("headers"):
            for header in response["headers"]:
                debug("HttpHeader: %s: %s" % (header, response["headers"][header]))
        if response.has_key("data"):
            tree = getTreeFromXml(response["data"])
            error_node = tree
            # the <Error> element may be the root or nested deeper
            if not error_node.tag == "Error":
                error_node = tree.find(".//Error")
            for child in error_node.getchildren():
                if child.text != "":
                    debug("ErrorXML: " + child.tag + ": " + repr(child.text))
                    self.info[child.tag] = child.text
        self.code = self.info["Code"]
        self.message = self.info["Message"]
        self.resource = self.info["Resource"]
    def __unicode__(self):
        # e.g. u"404 (NoSuchKey): The specified key does not exist."
        retval = u"%d " % (self.status)
        retval += (u"(%s)" % (self.info.has_key("Code") and self.info["Code"] or self.reason))
        if self.info.has_key("Message"):
            retval += (u": %s" % self.info["Message"])
        return retval
class CloudFrontError(S3Error):
    """S3Error variant raised for CloudFront responses; same structure."""
    pass
class S3UploadError(S3Exception):
    """Distinct S3Exception subtype so callers can catch upload failures."""
    pass
class S3DownloadError(S3Exception):
    """Distinct S3Exception subtype so callers can catch download failures."""
    pass
class S3RequestError(S3Exception):
    """Distinct S3Exception subtype for errors while issuing a request."""
    pass
class S3ResponseError(S3Exception):
    """Distinct S3Exception subtype for errors in a received response."""
    pass
class InvalidFileError(S3Exception):
    """Distinct S3Exception subtype for unusable local files."""
    pass
class ParameterError(S3Exception):
    """Distinct S3Exception subtype for invalid caller-supplied parameters."""
    pass
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "zhm/s3_cmd_bin",
"path": "resources/S3/Exceptions.py",
"copies": "2",
"size": "2643",
"license": "mit",
"hash": 540107724287981900,
"line_mean": 29.0340909091,
"line_max": 94,
"alpha_frac": 0.5936435868,
"autogenerated": false,
"ratio": 3.717299578059072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5310943164859072,
"avg_score": null,
"num_lines": null
} |
class BidirMap(object):
    """
    Bidirectional one-to-one mapping.

    Maintains two parallel dicts (k2v and v2k) so lookups work in both
    directions.  Each value may be bound to at most one key; binding an
    already-used value to a different key raises KeyError.

    Fix: replaced Python-2-only ``dict.has_key(v)`` with ``v in dict``
    and ``del(...)`` with a plain ``del`` statement — identical
    behaviour, but also valid on Python 3.
    """
    def __init__(self, **mapping):
        self.k2v = {}
        self.v2k = {}
        for key in mapping:
            self.__setitem__(key, mapping[key])

    def __setitem__(self, key, value):
        # Reject a value already owned by a *different* key; re-binding
        # the identical (key, value) pair is a harmless no-op.
        if value in self.v2k:
            if self.v2k[value] != key:
                raise KeyError("Value '"+str(value)+"' already in use with key '"+str(self.v2k[value])+"'")
        # Drop the reverse entry for key's previous value, if any.
        try:
            del self.v2k[self.k2v[key]]
        except KeyError:
            pass
        self.k2v[key] = value
        self.v2k[value] = key

    def __getitem__(self, key):
        return self.k2v[key]

    def __str__(self):
        return self.v2k.__str__()

    def getkey(self, value):
        """Reverse lookup: return the key bound to `value`."""
        return self.v2k[value]

    def getvalue(self, key):
        """Forward lookup: return the value bound to `key`."""
        return self.k2v[key]

    def keys(self):
        return [key for key in self.k2v]

    def values(self):
        return [value for value in self.v2k]
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "zhm/s3_cmd_bin",
"path": "resources/S3/BidirMap.py",
"copies": "15",
"size": "1069",
"license": "mit",
"hash": -3241551106960371000,
"line_mean": 24.4523809524,
"line_max": 107,
"alpha_frac": 0.5341440599,
"autogenerated": false,
"ratio": 3.0895953757225434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from BidirMap import BidirMap
class SortedDictIterator(object):
    """
    Iterator over a pre-computed (snapshot) list of SortedDict keys.

    Consumes `keys` destructively from the front; StopIteration is
    raised when the list is exhausted.

    Fix: added ``__next__`` as an alias of ``next`` so the iterator
    protocol also works under Python 3 (backward compatible — the old
    ``next()`` method is unchanged).
    """
    def __init__(self, sorted_dict, keys):
        # sorted_dict is kept only for reference; iteration uses `keys`
        self.sorted_dict = sorted_dict
        self.keys = keys

    def next(self):
        """Return the next key, or raise StopIteration when done."""
        try:
            return self.keys.pop(0)
        except IndexError:
            raise StopIteration

    # Python 3 spells the iterator-protocol method __next__()
    __next__ = next
class SortedDict(dict):
    """
    Dict whose keys() and iteration order is sorted, optionally
    case-insensitively (Python 2 code: dict.keys() returns a list here).
    """
    def __init__(self, mapping = {}, ignore_case = True, **kwargs):
        """
        WARNING: SortedDict() with ignore_case==True will
        drop entries differing only in capitalisation!
        Eg: SortedDict({'auckland':1, 'Auckland':2}).keys() => ['Auckland']
        With ignore_case==False it's all right
        """
        dict.__init__(self, mapping, **kwargs)
        self.ignore_case = ignore_case
    def keys(self):
        # NOTE(review): relies on Python 2 semantics — dict.keys() and
        # BidirMap.keys() return plain lists with an in-place .sort();
        # on Python 3 dict.keys() is a view and this would fail.
        keys = dict.keys(self)
        if self.ignore_case:
            # Translation map
            xlat_map = BidirMap()
            for key in keys:
                # lowercased key -> original key; colliding lowercase
                # forms overwrite each other (see __init__ WARNING)
                xlat_map[key.lower()] = key
            # Lowercase keys
            lc_keys = xlat_map.keys()
            lc_keys.sort()
            return [xlat_map[k] for k in lc_keys]
        else:
            keys.sort()
            return keys
    def __iter__(self):
        # iterate over a sorted snapshot of the keys
        return SortedDictIterator(self, self.keys())
# Ad-hoc demonstration of SortedDict ordering in both modes
# (Python 2 script: uses print statements).
if __name__ == "__main__":
    d = { 'AWS' : 1, 'Action' : 2, 'america' : 3, 'Auckland' : 4, 'America' : 5 }
    # case-insensitive (default): 'america'/'America' collapse to one entry
    sd = SortedDict(d)
    print "Wanted: Action, america, Auckland, AWS, [ignore case]"
    print "Got: ",
    for key in sd:
        print "%s," % key,
    print " [used: __iter__()]"
    # case-sensitive: uppercase sorts before lowercase (ASCII order)
    d = SortedDict(d, ignore_case = False)
    print "Wanted: AWS, Action, Auckland, america, [case sensitive]"
    print "Got: ",
    for key in d.keys():
        print "%s," % key,
    print " [used: keys()]"
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "zhm/s3_cmd_bin",
"path": "resources/S3/SortedDict.py",
"copies": "2",
"size": "1943",
"license": "mit",
"hash": 770040982106448900,
"line_mean": 29.8412698413,
"line_max": 84,
"alpha_frac": 0.5337107566,
"autogenerated": false,
"ratio": 3.488330341113106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5022041097713106,
"avg_score": null,
"num_lines": null
} |
import logging
from logging import debug, info, warning, error
import re
import os
import sys
import Progress
from SortedDict import SortedDict
import httplib
try:
import json
except ImportError, e:
pass
class Config(object):
    """
    Singleton holding s3cmd's runtime configuration.

    All options are *class-level* attributes; update_option() writes to
    the class, so every Config() instance observes the same values.
    Credentials can come from a config file, the AWS_CREDENTIAL_FILE
    environment file, or EC2 instance-metadata IAM roles (in that
    order — see __init__).  (Python 2 code throughout.)
    """
    _instance = None
    _parsed_files = []
    _doc = {}
    access_key = ""
    secret_key = ""
    security_token = ""
    host_base = "s3.amazonaws.com"
    host_bucket = "%(bucket)s.s3.amazonaws.com"
    simpledb_host = "sdb.amazonaws.com"
    cloudfront_host = "cloudfront.amazonaws.com"
    verbosity = logging.WARNING
    progress_meter = True
    progress_class = Progress.ProgressCR
    send_chunk = 4096
    recv_chunk = 4096
    list_md5 = False
    human_readable_sizes = False
    extra_headers = SortedDict(ignore_case = True)
    force = False
    server_side_encryption = False
    enable = None
    get_continue = False
    put_continue = False
    upload_id = None
    skip_existing = False
    recursive = False
    acl_public = None
    acl_grants = []
    acl_revokes = []
    proxy_host = ""
    proxy_port = 3128
    encrypt = False
    dry_run = False
    add_encoding_exts = ""
    preserve_attrs = True
    preserve_attrs_list = [
        'uname', # Verbose owner Name (e.g. 'root')
        'uid', # Numeric user ID (e.g. 0)
        'gname', # Group name (e.g. 'users')
        'gid', # Numeric group ID (e.g. 100)
        'atime', # Last access timestamp
        'mtime', # Modification timestamp
        'ctime', # Creation timestamp
        'mode', # File mode (e.g. rwxr-xr-x = 755)
        'md5', # File MD5 (if known)
        #'acl', # Full ACL (not yet supported)
    ]
    delete_removed = False
    delete_after = False
    delete_after_fetch = False
    max_delete = -1
    _doc['delete_removed'] = "[sync] Remove remote S3 objects when local file has been deleted"
    delay_updates = False
    gpg_passphrase = ""
    gpg_command = ""
    gpg_encrypt = "%(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
    gpg_decrypt = "%(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
    use_https = False
    bucket_location = "US"
    default_mime_type = "binary/octet-stream"
    guess_mime_type = True
    use_mime_magic = True
    mime_type = ""
    enable_multipart = True
    multipart_chunk_size_mb = 15 # MB
    # List of checks to be performed for 'sync'
    sync_checks = ['size', 'md5'] # 'weak-timestamp'
    # List of compiled REGEXPs
    exclude = []
    include = []
    # Dict mapping compiled REGEXPs back to their textual form
    debug_exclude = {}
    debug_include = {}
    encoding = "utf-8"
    add_content_encoding = True
    urlencoding_mode = "normal"
    log_target_prefix = ""
    reduced_redundancy = False
    follow_symlinks = False
    socket_timeout = 300
    invalidate_on_cf = False
    # joseprio: new flags for default index invalidation
    invalidate_default_index_on_cf = False
    invalidate_default_index_root_on_cf = True
    website_index = "index.html"
    website_error = ""
    website_endpoint = "http://%(bucket)s.s3-website-%(location)s.amazonaws.com/"
    additional_destinations = []
    files_from = []
    cache_file = ""
    add_headers = ""
    ignore_failed_copy = False
    ## Creating a singleton
    def __new__(self, configfile = None):
        if self._instance is None:
            self._instance = object.__new__(self)
        return self._instance
    def __init__(self, configfile = None):
        """Load `configfile` if given; fall back to env / IAM credentials."""
        if configfile:
            try:
                self.read_config_file(configfile)
            except IOError, e:
                # config file unreadable: try the AWS credential file
                if 'AWS_CREDENTIAL_FILE' in os.environ:
                    self.env_config()
            # still no access key: last resort is the IAM role metadata
            if len(self.access_key)==0:
                self.role_config()
    def role_config(self):
        """
        Fetch temporary credentials from the EC2 instance-metadata
        service (169.254.169.254) for the instance's IAM role.
        """
        if sys.version_info[0] * 10 + sys.version_info[1] < 26:
            error("IAM authentication requires Python 2.6 or newer")
            # NOTE(review): bare `raise` with no active exception is
            # itself an error (TypeError on py2) — likely intended to
            # raise something specific; confirm and fix upstream.
            raise
        if not 'json' in sys.modules:
            error("IAM authentication not available -- missing module json")
            # NOTE(review): same bare-raise issue as above
            raise
        try:
            conn = httplib.HTTPConnection(host='169.254.169.254', timeout = 2)
            conn.request('GET', "/latest/meta-data/iam/security-credentials/")
            resp = conn.getresponse()
            files = resp.read()
            if resp.status == 200 and len(files)>1:
                # `files` is the role name; fetch its credential document
                conn.request('GET', "/latest/meta-data/iam/security-credentials/%s"%files)
                resp=conn.getresponse()
                if resp.status == 200:
                    creds=json.load(resp)
                    Config().update_option('access_key', creds['AccessKeyId'].encode('ascii'))
                    Config().update_option('secret_key', creds['SecretAccessKey'].encode('ascii'))
                    Config().update_option('security_token', creds['Token'].encode('ascii'))
                else:
                    raise IOError
            else:
                raise IOError
        except:
            raise
    def role_refresh(self):
        """Best-effort re-fetch of IAM role credentials; warns on failure."""
        try:
            self.role_config()
        except:
            warning("Could not refresh role")
    def env_config(self):
        """Read credentials from the file named by $AWS_CREDENTIAL_FILE."""
        cred_content = ""
        try:
            cred_file = open(os.environ['AWS_CREDENTIAL_FILE'],'r')
            cred_content = cred_file.read()
        except IOError, e:
            debug("Error %d accessing credentials file %s" % (e.errno,os.environ['AWS_CREDENTIAL_FILE']))
        r_data = re.compile("^\s*(?P<orig_key>\w+)\s*=\s*(?P<value>.*)")
        r_quotes = re.compile("^\"(.*)\"\s*$")
        if len(cred_content)>0:
            for line in cred_content.splitlines():
                is_data = r_data.match(line)
                # NOTE(review): duplicated match below — harmless but
                # almost certainly a copy-paste slip
                is_data = r_data.match(line)
                if is_data:
                    data = is_data.groupdict()
                    if r_quotes.match(data["value"]):
                        # strip surrounding double quotes
                        data["value"] = data["value"][1:-1]
                    if data["orig_key"]=="AWSAccessKeyId":
                        data["key"] = "access_key"
                    elif data["orig_key"]=="AWSSecretKey":
                        data["key"] = "secret_key"
                    elif data["orig_key"]=="Token":
                        data["key"] = "security_token"
                    else:
                        # NOTE(review): BUG — data never has a "key"
                        # entry here (groupdict only yields orig_key /
                        # value), so this `del` raises KeyError for any
                        # unrecognised option; should be a no-op/continue
                        del data["key"]
                    if "key" in data:
                        Config().update_option(data["key"], data["value"])
                        # mask secrets in debug output
                        if data["key"] in ("access_key", "secret_key", "gpg_passphrase", "security_token"):
                            print_value = ("%s...%d_chars...%s") % (data["value"][:2], len(data["value"]) - 3, data["value"][-1:])
                        else:
                            print_value = data["value"]
                        debug("env_Config: %s->%s" % (data["key"], print_value))
    def option_list(self):
        """Return names of all public str/int/bool options on the class."""
        retval = []
        for option in dir(self):
            ## Skip attributes that start with underscore or are not string, int or bool
            option_type = type(getattr(Config, option))
            if option.startswith("_") or \
               not (option_type in (
                    type("string"), # str
                    type(42),   # int
                    type(True))): # bool
                continue
            retval.append(option)
        return retval
    def read_config_file(self, configfile):
        """Parse `configfile` and apply every known option found in it."""
        cp = ConfigParser(configfile)
        for option in self.option_list():
            self.update_option(option, cp.get(option))
        # add_headers is a comma-separated "Header: value" list that is
        # expanded into extra_headers (underscores become dashes)
        if cp.get('add_headers'):
            for option in cp.get('add_headers').split(","):
                (key, value) = option.split(':')
                self.extra_headers[key.replace('_', '-').strip()] = value.strip()
        self._parsed_files.append(configfile)
    def dump_config(self, stream):
        """Write the current configuration to `stream` in INI format."""
        ConfigDumper(stream).dump("default", self)
    def update_option(self, option, value):
        """
        Set a class-level option, coercing `value` to the option's
        existing type (bool accepts yes/no, true/false, on/off, 1/0).
        A value starting with "$" is resolved from the environment.
        """
        if value is None:
            return
        #### Handle environment reference
        if str(value).startswith("$"):
            return self.update_option(option, os.getenv(str(value)[1:]))
        #### Special treatment of some options
        ## verbosity must be known to "logging" module
        if option == "verbosity":
            try:
                setattr(Config, "verbosity", logging._levelNames[value])
            except KeyError:
                error("Config: verbosity level '%s' is not valid" % value)
        ## allow yes/no, true/false, on/off and 1/0 for boolean options
        elif type(getattr(Config, option)) is type(True): # bool
            if str(value).lower() in ("true", "yes", "on", "1"):
                setattr(Config, option, True)
            elif str(value).lower() in ("false", "no", "off", "0"):
                setattr(Config, option, False)
            else:
                error("Config: value of option '%s' must be Yes or No, not '%s'" % (option, value))
        elif type(getattr(Config, option)) is type(42): # int
            try:
                setattr(Config, option, int(value))
            except ValueError, e:
                error("Config: value of option '%s' must be an integer, not '%s'" % (option, value))
        else: # string
            setattr(Config, option, value)
class ConfigParser(object):
    """
    Minimal INI-style parser: "key = value" lines, optional [sections],
    "#" comments, optional surrounding double quotes on values.
    Parsed pairs land in self.cfg.  (Python 2 code: uses has_key().)
    """
    def __init__(self, file, sections = []):
        self.cfg = {}
        self.parse_file(file, sections)
    def parse_file(self, file, sections = []):
        """
        Parse `file`, keeping only options inside the given `sections`
        (an empty list means: accept options from every section).
        """
        debug("ConfigParser: Reading file '%s'" % file)
        if type(sections) != type([]):
            sections = [sections]
        in_our_section = True
        f = open(file, "r")
        r_comment = re.compile("^\s*#.*")
        r_empty = re.compile("^\s*$")
        r_section = re.compile("^\[([^\]]+)\]")
        r_data = re.compile("^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
        r_quotes = re.compile("^\"(.*)\"\s*$")
        for line in f:
            if r_comment.match(line) or r_empty.match(line):
                continue
            is_section = r_section.match(line)
            if is_section:
                section = is_section.groups()[0]
                in_our_section = (section in sections) or (len(sections) == 0)
                continue
            is_data = r_data.match(line)
            if is_data and in_our_section:
                data = is_data.groupdict()
                if r_quotes.match(data["value"]):
                    # strip surrounding double quotes
                    data["value"] = data["value"][1:-1]
                self.__setitem__(data["key"], data["value"])
                # mask secrets in debug output
                if data["key"] in ("access_key", "secret_key", "gpg_passphrase", "security_token"):
                    print_value = ("%s...%d_chars...%s") % (data["value"][:2], len(data["value"]) - 3, data["value"][-1:])
                else:
                    print_value = data["value"]
                debug("ConfigParser: %s->%s" % (data["key"], print_value))
                continue
            warning("Ignoring invalid line in '%s': %s" % (file, line))
    def __getitem__(self, name):
        return self.cfg[name]
    def __setitem__(self, name, value):
        self.cfg[name] = value
    def get(self, name, default = None):
        """Like dict.get(): return the parsed value or `default`."""
        if self.cfg.has_key(name):
            return self.cfg[name]
        return default
class ConfigDumper(object):
    """Serialise a Config object to a writable stream in INI format."""

    def __init__(self, stream):
        # any object with a write() method works here
        self.stream = stream

    def dump(self, section, config):
        """Write a "[section]" header, then one "name = value" line per option."""
        write = self.stream.write
        write("[%s]\n" % section)
        for name in config.option_list():
            write("%s = %s\n" % (name, getattr(config, name)))
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "sharethis-github/OpenSource",
"path": "s3cmd/S3/Config.py",
"copies": "1",
"size": "11894",
"license": "apache-2.0",
"hash": -1110948396675206700,
"line_mean": 37.1217948718,
"line_max": 145,
"alpha_frac": 0.5311081217,
"autogenerated": false,
"ratio": 3.9176548089591567,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4948762930659157,
"avg_score": null,
"num_lines": null
} |
import logging
from logging import debug, info, warning, error
import re
import Progress
from SortedDict import SortedDict
class Config(object):
    """
    Singleton holding s3cmd's runtime configuration (older variant).

    All options are *class-level* attributes; update_option() writes to
    the class, so every Config() instance observes the same values.
    (Python 2 code.)
    """
    _instance = None
    _parsed_files = []
    _doc = {}
    access_key = ""
    secret_key = ""
    host_base = "s3.amazonaws.com"
    host_bucket = "%(bucket)s.s3.amazonaws.com"
    simpledb_host = "sdb.amazonaws.com"
    cloudfront_host = "cloudfront.amazonaws.com"
    verbosity = logging.WARNING
    progress_meter = True
    progress_class = Progress.ProgressCR
    send_chunk = 4096
    recv_chunk = 4096
    list_md5 = False
    human_readable_sizes = False
    extra_headers = SortedDict(ignore_case = True)
    force = False
    enable = None
    get_continue = False
    skip_existing = False
    recursive = False
    acl_public = None
    acl_grants = []
    acl_revokes = []
    proxy_host = ""
    proxy_port = 3128
    encrypt = False
    dry_run = False
    preserve_attrs = True
    preserve_attrs_list = [
        'uname', # Verbose owner Name (e.g. 'root')
        'uid', # Numeric user ID (e.g. 0)
        'gname', # Group name (e.g. 'users')
        'gid', # Numeric group ID (e.g. 100)
        'atime', # Last access timestamp
        'mtime', # Modification timestamp
        'ctime', # Creation timestamp
        'mode', # File mode (e.g. rwxr-xr-x = 755)
        #'acl', # Full ACL (not yet supported)
    ]
    delete_removed = False
    _doc['delete_removed'] = "[sync] Remove remote S3 objects when local file has been deleted"
    gpg_passphrase = ""
    gpg_command = ""
    gpg_encrypt = "%(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
    gpg_decrypt = "%(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
    use_https = False
    bucket_location = "US"
    default_mime_type = "binary/octet-stream"
    guess_mime_type = True
    # List of checks to be performed for 'sync'
    sync_checks = ['size', 'md5'] # 'weak-timestamp'
    # List of compiled REGEXPs
    exclude = []
    include = []
    # Dict mapping compiled REGEXPs back to their textual form
    debug_exclude = {}
    debug_include = {}
    encoding = "utf-8"
    urlencoding_mode = "normal"
    log_target_prefix = ""
    reduced_redundancy = False
    follow_symlinks = False
    socket_timeout = 300
    invalidate_on_cf = False
    website_index = "index.html"
    website_error = ""
    website_endpoint = "http://%(bucket)s.s3-website-%(location)s.amazonaws.com/"
    ## Creating a singleton
    def __new__(self, configfile = None):
        if self._instance is None:
            self._instance = object.__new__(self)
        return self._instance
    def __init__(self, configfile = None):
        """Optionally load options from `configfile`."""
        if configfile:
            self.read_config_file(configfile)
    def option_list(self):
        """Return names of all public str/int/bool options on the class."""
        retval = []
        for option in dir(self):
            ## Skip attributes that start with underscore or are not string, int or bool
            option_type = type(getattr(Config, option))
            if option.startswith("_") or \
               not (option_type in (
                    type("string"), # str
                    type(42),   # int
                    type(True))): # bool
                continue
            retval.append(option)
        return retval
    def read_config_file(self, configfile):
        """Parse `configfile` and apply every known option found in it."""
        cp = ConfigParser(configfile)
        for option in self.option_list():
            self.update_option(option, cp.get(option))
        self._parsed_files.append(configfile)
    def dump_config(self, stream):
        """Write the current configuration to `stream` in INI format."""
        ConfigDumper(stream).dump("default", self)
    def update_option(self, option, value):
        """
        Set a class-level option, coercing `value` to the option's
        existing type (bool accepts yes/no, true/false, on/off, 1/0).
        """
        if value is None:
            return
        #### Special treatment of some options
        ## verbosity must be known to "logging" module
        if option == "verbosity":
            try:
                setattr(Config, "verbosity", logging._levelNames[value])
            except KeyError:
                error("Config: verbosity level '%s' is not valid" % value)
        ## allow yes/no, true/false, on/off and 1/0 for boolean options
        elif type(getattr(Config, option)) is type(True): # bool
            if str(value).lower() in ("true", "yes", "on", "1"):
                setattr(Config, option, True)
            elif str(value).lower() in ("false", "no", "off", "0"):
                setattr(Config, option, False)
            else:
                error("Config: value of option '%s' must be Yes or No, not '%s'" % (option, value))
        elif type(getattr(Config, option)) is type(42): # int
            try:
                setattr(Config, option, int(value))
            except ValueError, e:
                error("Config: value of option '%s' must be an integer, not '%s'" % (option, value))
        else: # string
            setattr(Config, option, value)
class ConfigParser(object):
    """
    Minimal INI-style parser: "key = value" lines, optional [sections],
    "#" comments, optional surrounding double quotes on values.
    Parsed pairs land in self.cfg.  (Python 2 code: uses has_key().)
    """
    def __init__(self, file, sections = []):
        self.cfg = {}
        self.parse_file(file, sections)
    def parse_file(self, file, sections = []):
        """
        Parse `file`, keeping only options inside the given `sections`
        (an empty list means: accept options from every section).
        """
        debug("ConfigParser: Reading file '%s'" % file)
        if type(sections) != type([]):
            sections = [sections]
        in_our_section = True
        f = open(file, "r")
        r_comment = re.compile("^\s*#.*")
        r_empty = re.compile("^\s*$")
        r_section = re.compile("^\[([^\]]+)\]")
        r_data = re.compile("^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
        r_quotes = re.compile("^\"(.*)\"\s*$")
        for line in f:
            if r_comment.match(line) or r_empty.match(line):
                continue
            is_section = r_section.match(line)
            if is_section:
                section = is_section.groups()[0]
                in_our_section = (section in sections) or (len(sections) == 0)
                continue
            is_data = r_data.match(line)
            if is_data and in_our_section:
                data = is_data.groupdict()
                if r_quotes.match(data["value"]):
                    # strip surrounding double quotes
                    data["value"] = data["value"][1:-1]
                self.__setitem__(data["key"], data["value"])
                # mask secrets in debug output
                if data["key"] in ("access_key", "secret_key", "gpg_passphrase"):
                    print_value = (data["value"][:2]+"...%d_chars..."+data["value"][-1:]) % (len(data["value"]) - 3)
                else:
                    print_value = data["value"]
                debug("ConfigParser: %s->%s" % (data["key"], print_value))
                continue
            warning("Ignoring invalid line in '%s': %s" % (file, line))
    def __getitem__(self, name):
        return self.cfg[name]
    def __setitem__(self, name, value):
        self.cfg[name] = value
    def get(self, name, default = None):
        """Like dict.get(): return the parsed value or `default`."""
        if self.cfg.has_key(name):
            return self.cfg[name]
        return default
class ConfigDumper(object):
    """Serialise a Config object to a writable stream in INI format."""
    def __init__(self, stream):
        self.stream = stream
    def dump(self, section, config):
        # emit "[section]" then one "option = value" line per option
        self.stream.write("[%s]\n" % section)
        for option in config.option_list():
            self.stream.write("%s = %s\n" % (option, getattr(config, option)))
| {
"repo_name": "sauloal/linuxscripts",
"path": "bin/S3/Config.py",
"copies": "1",
"size": "7379",
"license": "mit",
"hash": -4074002161360082400,
"line_mean": 36.0804020101,
"line_max": 145,
"alpha_frac": 0.5491258978,
"autogenerated": false,
"ratio": 3.8094992256066083,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9807007321858405,
"avg_score": 0.01032356030964066,
"num_lines": 199
} |
import os
import re
import sys
from BidirMap import BidirMap
from logging import debug
import S3
from Utils import unicodise, check_bucket_name_dns_conformity
class S3Uri(object):
    """
    Polymorphic URI factory / base class.

    S3Uri("...") instantiates whichever subclass (s3://, s3fs://, cf://,
    file path) accepts the string — each subclass __init__ raises
    ValueError on strings it does not recognise.  (Python 2 code.)
    """
    type = None
    _subclasses = None
    def __new__(self, string):
        if not self._subclasses:
            ## Generate a list of all subclasses of S3Uri
            # (scan the module namespace once and cache the result)
            self._subclasses = []
            dict = sys.modules[__name__].__dict__
            for something in dict:
                if type(dict[something]) is not type(self):
                    continue
                if issubclass(dict[something], self) and dict[something] != self:
                    self._subclasses.append(dict[something])
        # first subclass whose __init__ accepts the string wins
        for subclass in self._subclasses:
            try:
                instance = object.__new__(subclass)
                instance.__init__(string)
                return instance
            except ValueError, e:
                continue
        raise ValueError("%s: not a recognized URI" % string)
    def __str__(self):
        return self.uri()
    def __unicode__(self):
        return self.uri()
    def public_url(self):
        # subclasses with an anonymous HTTP form override this
        raise ValueError("This S3 URI does not have Anonymous URL representation")
    def basename(self):
        """Return the last path component of the URI."""
        return self.__unicode__().split("/")[-1]
class S3UriS3(S3Uri):
    """s3://bucket/object URIs."""
    type = "s3"
    _re = re.compile("^s3://([^/]+)/?(.*)", re.IGNORECASE)
    def __init__(self, string):
        match = self._re.match(string)
        if not match:
            # ValueError tells the S3Uri factory to try the next subclass
            raise ValueError("%s: not a S3 URI" % string)
        groups = match.groups()
        self._bucket = groups[0]
        self._object = unicodise(groups[1])
    def bucket(self):
        return self._bucket
    def object(self):
        return self._object
    def has_bucket(self):
        return bool(self._bucket)
    def has_object(self):
        return bool(self._object)
    def uri(self):
        return "/".join(["s3:/", self._bucket, self._object])
    def is_dns_compatible(self):
        return check_bucket_name_dns_conformity(self._bucket)
    def public_url(self):
        """Anonymous HTTP URL; virtual-hosted style when the bucket name allows it."""
        if self.is_dns_compatible():
            return "http://%s.s3.amazonaws.com/%s" % (self._bucket, self._object)
        else:
            return "http://s3.amazonaws.com/%s/%s" % (self._bucket, self._object)
    def host_name(self):
        if self.is_dns_compatible():
            return "%s.s3.amazonaws.com" % (self._bucket)
        else:
            return "s3.amazonaws.com"
    @staticmethod
    def compose_uri(bucket, object = ""):
        return "s3://%s/%s" % (bucket, object)
    @staticmethod
    def httpurl_to_s3uri(http_url):
        """Convert an S3 HTTP(S) URL (path-style or virtual-hosted) to an s3:// URI."""
        m=re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE)
        hostname, object = m.groups()[1:]
        hostname = hostname.lower()
        if hostname == "s3.amazonaws.com":
            ## old-style url: http://s3.amazonaws.com/bucket/object
            if object.count("/") == 0:
                ## no object given
                bucket = object
                object = ""
            else:
                ## bucket/object
                bucket, object = object.split("/", 1)
        elif hostname.endswith(".s3.amazonaws.com"):
            ## new-style url: http://bucket.s3.amazonaws.com/object
            bucket = hostname[:-(len(".s3.amazonaws.com"))]
        else:
            raise ValueError("Unable to parse URL: %s" % http_url)
        return S3Uri("s3://%(bucket)s/%(object)s" % {
            'bucket' : bucket,
            'object' : object })
class S3UriS3FS(S3Uri):
    """s3fs://fsname/path URIs."""
    type = "s3fs"
    _re = re.compile("^s3fs://([^/]*)/?(.*)", re.IGNORECASE)
    def __init__(self, string):
        match = self._re.match(string)
        if not match:
            # ValueError tells the S3Uri factory to try the next subclass
            raise ValueError("%s: not a S3fs URI" % string)
        groups = match.groups()
        self._fsname = groups[0]
        # path is stored pre-split into components
        self._path = unicodise(groups[1]).split("/")
    def fsname(self):
        return self._fsname
    def path(self):
        return "/".join(self._path)
    def uri(self):
        return "/".join(["s3fs:/", self._fsname, self.path()])
class S3UriFile(S3Uri):
    """Local paths, bare or as file:// URIs (the catch-all subclass)."""
    type = "file"
    _re = re.compile("^(\w+://)?(.*)")
    def __init__(self, string):
        match = self._re.match(string)
        groups = match.groups()
        # reject anything with a scheme other than file://
        if groups[0] not in (None, "file://"):
            raise ValueError("%s: not a file:// URI" % string)
        self._path = unicodise(groups[1]).split("/")
    def path(self):
        return "/".join(self._path)
    def uri(self):
        return "/".join(["file:/", self.path()])
    def isdir(self):
        return os.path.isdir(self.path())
    def dirname(self):
        return os.path.dirname(self.path())
class S3UriCloudFront(S3Uri):
    """cf://distribution-id[/request-id] URIs."""
    type = "cf"
    _re = re.compile("^cf://([^/]*)/*(.*)", re.IGNORECASE)
    def __init__(self, string):
        match = self._re.match(string)
        if not match:
            # ValueError tells the S3Uri factory to try the next subclass
            raise ValueError("%s: not a CloudFront URI" % string)
        groups = match.groups()
        self._dist_id = groups[0]
        # empty / "/" trailing part means no request id
        self._request_id = groups[1] != "/" and groups[1] or None
    def dist_id(self):
        return self._dist_id
    def request_id(self):
        return self._request_id
    def uri(self):
        uri = "cf://" + self.dist_id()
        if self.request_id():
            uri += "/" + self.request_id()
        return uri
if __name__ == "__main__":
    ## Manual smoke test (Python 2 print statements): parse one URI of
    ## each supported flavour and dump the components S3Uri detected.
    uri = S3Uri("s3://bucket/object")
    print "type() =", type(uri)
    print "uri =", uri
    print "uri.type=", uri.type
    print "bucket =", uri.bucket()
    print "object =", uri.object()
    print
    ## bucket-only s3:// URI (no object part)
    uri = S3Uri("s3://bucket")
    print "type() =", type(uri)
    print "uri =", uri
    print "uri.type=", uri.type
    print "bucket =", uri.bucket()
    print
    ## s3fs:// URI
    uri = S3Uri("s3fs://filesystem1/path/to/remote/file.txt")
    print "type() =", type(uri)
    print "uri =", uri
    print "uri.type=", uri.type
    print "path =", uri.path()
    print
    ## plain local path should fall through to S3UriFile
    uri = S3Uri("/path/to/local/file.txt")
    print "type() =", type(uri)
    print "uri =", uri
    print "uri.type=", uri.type
    print "path =", uri.path()
    print
    ## CloudFront cf:// URI
    uri = S3Uri("cf://1234567890ABCD/")
    print "type() =", type(uri)
    print "uri =", uri
    print "uri.type=", uri.type
    print "dist_id =", uri.dist_id()
    print
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "sauloal/linuxscripts",
"path": "bin/S3/S3Uri.py",
"copies": "1",
"size": "6461",
"license": "mit",
"hash": -8172718895785608000,
"line_mean": 28.502283105,
"line_max": 82,
"alpha_frac": 0.5367590156,
"autogenerated": false,
"ratio": 3.6074818537130096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9625030515972377,
"avg_score": 0.003842070668126637,
"num_lines": 219
} |
import sys
import datetime
import time
import Utils
class Progress(object):
    """
    Milestone-based progress reporter: prints "5% 10% ..." as a transfer
    advances.  Subclasses override display() for nicer output.
    """
    _stdout = sys.stdout
    _last_display = 0

    def __init__(self, labels, total_size):
        ## labels: dict with 'source', 'destination' and 'extra' keys
        self._stdout = sys.stdout
        self.new_file(labels, total_size)

    def new_file(self, labels, total_size):
        self.labels = labels
        self.total_size = total_size
        # Set initial_position to something in the
        # case we're not counting from 0. For instance
        # when appending to a partially downloaded file.
        # Setting initial_position will let the speed
        # be computed right.
        self.initial_position = 0
        self.current_position = self.initial_position
        self.time_start = datetime.datetime.now()
        self.time_last = self.time_start
        self.time_current = self.time_start
        self.display(new_file = True)

    def update(self, current_position = -1, delta_position = -1):
        self.time_last = self.time_current
        self.time_current = datetime.datetime.now()
        if current_position > -1:
            self.current_position = current_position
        elif delta_position > -1:
            self.current_position += delta_position
        #else:
        #   no update, just call display()
        self.display()

    def done(self, message):
        self.display(done_message = message)

    def output_labels(self):
        self._stdout.write(u"%(source)s -> %(destination)s %(extra)s\n" % self.labels)
        self._stdout.flush()

    def _display_needed(self):
        # We only need to update the display every so often.
        if time.time() - self._last_display > 1:
            self._last_display = time.time()
            return True
        return False

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done = False[/True])
        Override this method to provide a nicer output.
        """
        if new_file:
            self.output_labels()
            self.last_milestone = 0
            return
        if self.current_position == self.total_size:
            print_size = Utils.formatSize(self.current_position, True)
            if print_size[1] != "": print_size[1] += "B"
            timedelta = self.time_current - self.time_start
            sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
            # Guard zero elapsed time (instantaneous transfer) the same way
            # the ANSI/CR subclasses do, instead of ZeroDivisionError.
            if sec_elapsed > 0:
                print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
            else:
                print_speed = (0, "")
            self._stdout.write("100%% %s%s in %.2fs (%.2f %sB/s)\n" %
                (print_size[0], print_size[1], sec_elapsed, print_speed[0], print_speed[1]))
            self._stdout.flush()
            return
        rel_position = self.current_position * 100 / self.total_size
        if rel_position >= self.last_milestone:
            self.last_milestone = (int(rel_position) / 5) * 5
            # BUG FIX: was write("%d%% ", self.last_milestone) -- file.write()
            # takes a single string argument, so the original raised TypeError.
            self._stdout.write("%d%% " % self.last_milestone)
            self._stdout.flush()
        return
class ProgressANSI(Progress):
    ## http://en.wikipedia.org/wiki/ANSI_escape_code
    SCI = '\x1b['
    ANSI_hide_cursor = SCI + "?25l"
    ANSI_show_cursor = SCI + "?25h"
    ANSI_save_cursor_pos = SCI + "s"
    ANSI_restore_cursor_pos = SCI + "u"
    ANSI_move_cursor_to_column = SCI + "%uG"
    ANSI_erase_to_eol = SCI + "0K"
    ANSI_erase_current_line = SCI + "2K"

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)

        Repaints the progress counters in place using ANSI cursor
        save/restore sequences.
        """
        if new_file:
            self.output_labels()
            # Remember where the progress line starts so we can repaint it.
            self._stdout.write(self.ANSI_save_cursor_pos)
            self._stdout.flush()
            return
        # Throttle repaints unless this is a forced (done) update.
        if not (new_file or done_message) and not self._display_needed():
            return
        delta = self.time_current - self.time_start
        elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds)/1000000.0
        if elapsed > 0:
            speed = Utils.formatSize((self.current_position - self.initial_position) / elapsed, True, True)
        else:
            speed = (0, "")
        values = {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
            "elapsed" : elapsed,
            "speed" : speed[0],
            "speed_coeff" : speed[1]
        }
        self._stdout.write(self.ANSI_restore_cursor_pos)
        self._stdout.write(self.ANSI_erase_to_eol)
        self._stdout.write("%(current)s of %(total)s %(percent)3d%% in %(elapsed)ds %(speed).2f %(speed_coeff)sB/s" % values)
        if done_message:
            self._stdout.write(" %s\n" % done_message)
        self._stdout.flush()
class ProgressCR(Progress):
    ## Uses CR char (Carriage Return) just like other progress bars do.
    CR_char = chr(13)

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)

        Rewrites the current terminal line: emits a carriage return and
        then the freshly formatted progress counters.
        """
        if new_file:
            self.output_labels()
            return
        # Only display progress every so often
        if not (new_file or done_message) and not self._display_needed():
            return
        delta = self.time_current - self.time_start
        elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds)/1000000.0
        if elapsed > 0:
            speed = Utils.formatSize((self.current_position - self.initial_position) / elapsed, True, True)
        else:
            speed = (0, "")
        self._stdout.write(self.CR_char)
        line = " %(current)s of %(total)s %(percent)3d%% in %(elapsed)4ds %(speed)7.2f %(speed_coeff)sB/s" % {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
            "elapsed" : elapsed,
            "speed" : speed[0],
            "speed_coeff" : speed[1]
        }
        self._stdout.write(line)
        if done_message:
            self._stdout.write(" %s\n" % done_message)
        self._stdout.flush()
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "sharethis-github/OpenSource",
"path": "s3cmd/S3/Progress.py",
"copies": "2",
"size": "6561",
"license": "apache-2.0",
"hash": 2064344200967371300,
"line_mean": 36.9248554913,
"line_max": 122,
"alpha_frac": 0.582837982,
"autogenerated": false,
"ratio": 3.649054505005562,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006414236176713697,
"num_lines": 173
} |
import sys
import datetime
import Utils
class Progress(object):
    """
    Milestone-based progress reporter: prints "5% 10% ..." as a transfer
    advances.  Subclasses override display() for nicer output.
    """
    _stdout = sys.stdout

    def __init__(self, labels, total_size):
        ## labels: dict with 'source', 'destination' and 'extra' keys
        self._stdout = sys.stdout
        self.new_file(labels, total_size)

    def new_file(self, labels, total_size):
        self.labels = labels
        self.total_size = total_size
        # Set initial_position to something in the
        # case we're not counting from 0. For instance
        # when appending to a partially downloaded file.
        # Setting initial_position will let the speed
        # be computed right.
        self.initial_position = 0
        self.current_position = self.initial_position
        self.time_start = datetime.datetime.now()
        self.time_last = self.time_start
        self.time_current = self.time_start
        self.display(new_file = True)

    def update(self, current_position = -1, delta_position = -1):
        self.time_last = self.time_current
        self.time_current = datetime.datetime.now()
        if current_position > -1:
            self.current_position = current_position
        elif delta_position > -1:
            self.current_position += delta_position
        #else:
        #   no update, just call display()
        self.display()

    def done(self, message):
        self.display(done_message = message)

    def output_labels(self):
        self._stdout.write(u"%(source)s -> %(destination)s %(extra)s\n" % self.labels)
        self._stdout.flush()

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done = False[/True])
        Override this method to provide a nicer output.
        """
        if new_file:
            self.output_labels()
            self.last_milestone = 0
            return
        if self.current_position == self.total_size:
            print_size = Utils.formatSize(self.current_position, True)
            if print_size[1] != "": print_size[1] += "B"
            timedelta = self.time_current - self.time_start
            sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
            # Guard zero elapsed time (instantaneous transfer) the same way
            # the ANSI/CR subclasses do, instead of ZeroDivisionError.
            if sec_elapsed > 0:
                print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
            else:
                print_speed = (0, "")
            self._stdout.write("100%% %s%s in %.2fs (%.2f %sB/s)\n" %
                (print_size[0], print_size[1], sec_elapsed, print_speed[0], print_speed[1]))
            self._stdout.flush()
            return
        # BUG FIX: was "selfself.current_position" -- a typo that raised
        # NameError as soon as this branch was reached.
        rel_position = self.current_position * 100 / self.total_size
        if rel_position >= self.last_milestone:
            self.last_milestone = (int(rel_position) / 5) * 5
            # BUG FIX: was write("%d%% ", self.last_milestone) -- file.write()
            # takes a single string argument, so the original raised TypeError.
            self._stdout.write("%d%% " % self.last_milestone)
            self._stdout.flush()
        return
class ProgressANSI(Progress):
    ## http://en.wikipedia.org/wiki/ANSI_escape_code
    SCI = '\x1b['
    ANSI_hide_cursor = SCI + "?25l"
    ANSI_show_cursor = SCI + "?25h"
    ANSI_save_cursor_pos = SCI + "s"
    ANSI_restore_cursor_pos = SCI + "u"
    ANSI_move_cursor_to_column = SCI + "%uG"
    ANSI_erase_to_eol = SCI + "0K"
    ANSI_erase_current_line = SCI + "2K"

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)

        Repaints the progress counters in place using ANSI cursor
        save/restore sequences.
        """
        if new_file:
            self.output_labels()
            # Remember where the progress line starts so we can repaint it.
            self._stdout.write(self.ANSI_save_cursor_pos)
            self._stdout.flush()
            return
        delta = self.time_current - self.time_start
        elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds)/1000000.0
        if elapsed > 0:
            speed = Utils.formatSize((self.current_position - self.initial_position) / elapsed, True, True)
        else:
            speed = (0, "")
        values = {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
            "elapsed" : elapsed,
            "speed" : speed[0],
            "speed_coeff" : speed[1]
        }
        self._stdout.write(self.ANSI_restore_cursor_pos)
        self._stdout.write(self.ANSI_erase_to_eol)
        self._stdout.write("%(current)s of %(total)s %(percent)3d%% in %(elapsed)ds %(speed).2f %(speed_coeff)sB/s" % values)
        if done_message:
            self._stdout.write(" %s\n" % done_message)
        self._stdout.flush()
class ProgressCR(Progress):
    ## Uses CR char (Carriage Return) just like other progress bars do.
    CR_char = chr(13)

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)

        Rewrites the current terminal line: emits a carriage return and
        then the freshly formatted progress counters.
        """
        if new_file:
            self.output_labels()
            return
        delta = self.time_current - self.time_start
        elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds)/1000000.0
        if elapsed > 0:
            speed = Utils.formatSize((self.current_position - self.initial_position) / elapsed, True, True)
        else:
            speed = (0, "")
        self._stdout.write(self.CR_char)
        line = " %(current)s of %(total)s %(percent)3d%% in %(elapsed)4ds %(speed)7.2f %(speed_coeff)sB/s" % {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
            "elapsed" : elapsed,
            "speed" : speed[0],
            "speed_coeff" : speed[1]
        }
        self._stdout.write(line)
        if done_message:
            self._stdout.write(" %s\n" % done_message)
        self._stdout.flush()
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "sauloal/linuxscripts",
"path": "bin/S3/Progress.py",
"copies": "2",
"size": "6017",
"license": "mit",
"hash": -5955952245465880000,
"line_mean": 37.5705128205,
"line_max": 122,
"alpha_frac": 0.5826823999,
"autogenerated": false,
"ratio": 3.5922388059701493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007113223452381216,
"num_lines": 156
} |
import sys
import os, os.path
import time
import httplib
import logging
import mimetypes
import re
from logging import debug, info, warning, error
from stat import ST_SIZE
try:
from hashlib import md5
except ImportError:
from md5 import md5
from Utils import *
from SortedDict import SortedDict
from AccessLog import AccessLog
from ACL import ACL, GranteeLogDelivery
from BidirMap import BidirMap
from Config import Config
from Exceptions import *
from MultiPart import MultiPartUpload
from S3Uri import S3Uri
## MIME detection: prefer the optional python-magic module (new or old
## API), falling back to extension-based guessing via 'mimetypes' with a
## one-time warning.  Python 2 except-syntax throughout.
try:
    import magic
    try:
        ## https://github.com/ahupp/python-magic
        magic_ = magic.Magic(mime=True)
        def mime_magic(file):
            return magic_.from_file(file)
    except (TypeError, AttributeError):
        ## Older python-magic versions
        magic_ = magic.open(magic.MAGIC_MIME)
        magic_.load()
        def mime_magic(file):
            return magic_.file(file)
except ImportError, e:
    ## NOTE: 'e.message' is Python 2 only.
    if str(e).find("magic") >= 0:
        magic_message = "Module python-magic is not available."
    else:
        magic_message = "Module python-magic can't be used (%s)." % e.message
    magic_message += " Guessing MIME types based on file extensions."
    magic_warned = False
    def mime_magic(file):
        ## Warn exactly once, then guess from the file extension.
        global magic_warned
        if (not magic_warned):
            warning(magic_message)
            magic_warned = True
        return mimetypes.guess_type(file)[0]
## Explicitly export nothing via 'from ... import *'.
__all__ = []
class S3Request(object):
    """
    One request to the S3 REST API: carries HTTP method, resource
    (bucket + uri), headers and query parameters, and signs itself
    (AWS signature v2 style) on creation and again via get_triplet().
    """
    def __init__(self, s3, method_string, resource, headers, params = None):
        # BUG FIX: 'params' used to default to a shared mutable {} --
        # a per-call fresh dict avoids cross-request parameter leakage.
        if params is None:
            params = {}
        self.s3 = s3
        self.headers = SortedDict(headers or {}, ignore_case = True)
        self.resource = resource
        self.method_string = method_string
        self.params = params
        self.update_timestamp()
        self.sign()

    def update_timestamp(self):
        # Drop any plain Date header so only x-amz-date takes part in
        # the signature.
        if self.headers.has_key("date"):
            del(self.headers["date"])
        self.headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())

    def format_param_str(self):
        """
        Format URL parameters from self.params and returns
        ?parm1=val1&parm2=val2 or an empty string if there
        are no parameters. Output of this function should
        be appended directly to self.resource['uri']
        """
        param_str = ""
        for param in self.params:
            if self.params[param] not in (None, ""):
                param_str += "&%s=%s" % (param, self.params[param])
            else:
                param_str += "&%s" % param
        return param_str and "?" + param_str[1:]

    def sign(self):
        # Canonical string-to-sign: method, content-md5, content-type,
        # date, all x-amz-* headers, then the resource path.
        h = self.method_string + "\n"
        h += self.headers.get("content-md5", "")+"\n"
        h += self.headers.get("content-type", "")+"\n"
        h += self.headers.get("date", "")+"\n"
        for header in self.headers.keys():
            if header.startswith("x-amz-"):
                h += header+":"+str(self.headers[header])+"\n"
        if self.resource['bucket']:
            h += "/" + self.resource['bucket']
        h += self.resource['uri']
        debug("SignHeaders: " + repr(h))
        signature = sign_string(h)
        self.headers["Authorization"] = "AWS "+self.s3.config.access_key+":"+signature

    def get_triplet(self):
        # Refresh timestamp and signature right before hitting the wire.
        self.update_timestamp()
        self.sign()
        resource = dict(self.resource)  ## take a copy
        resource['uri'] += self.format_param_str()
        return (self.method_string, resource, self.headers)
class S3(object):
    ## HTTP verbs encoded as bit flags; combined with 'targets' below so a
    ## single integer fully describes an operation (e.g. BUCKET | GET).
    http_methods = BidirMap(
        GET = 0x01,
        PUT = 0x02,
        HEAD = 0x04,
        DELETE = 0x08,
        POST = 0x10,
        MASK = 0x1F,
        )
    ## What the operation addresses: the whole service, a bucket or an object.
    targets = BidirMap(
        SERVICE = 0x0100,
        BUCKET = 0x0200,
        OBJECT = 0x0400,
        MASK = 0x0700,
        )
    ## NOTE: "UNDFINED" is a long-standing key typo; kept for compatibility.
    operations = BidirMap(
        UNDFINED = 0x0000,
        LIST_ALL_BUCKETS = targets["SERVICE"] | http_methods["GET"],
        BUCKET_CREATE = targets["BUCKET"] | http_methods["PUT"],
        BUCKET_LIST = targets["BUCKET"] | http_methods["GET"],
        BUCKET_DELETE = targets["BUCKET"] | http_methods["DELETE"],
        OBJECT_PUT = targets["OBJECT"] | http_methods["PUT"],
        OBJECT_GET = targets["OBJECT"] | http_methods["GET"],
        OBJECT_HEAD = targets["OBJECT"] | http_methods["HEAD"],
        OBJECT_DELETE = targets["OBJECT"] | http_methods["DELETE"],
        OBJECT_POST = targets["OBJECT"] | http_methods["POST"],
        )
    ## Human-readable messages for common S3 error codes ('%s' = bucket name).
    codes = {
        "NoSuchBucket" : "Bucket '%s' does not exist",
        "AccessDenied" : "Access to bucket '%s' was denied",
        "BucketAlreadyExists" : "Bucket '%s' already exists",
        }
    ## S3 sometimes sends HTTP-307 response
    ## NOTE(review): class-level dict, shared by all S3 instances.
    redir_map = {}
    ## Maximum attempts of re-issuing failed requests
    _max_retries = 5
    def __init__(self, config):
        ## 'config' carries credentials, endpoints and option flags used
        ## throughout this class (a Config instance).
        self.config = config
def get_connection(self, bucket):
if self.config.proxy_host != "":
return httplib.HTTPConnection(self.config.proxy_host, self.config.proxy_port)
else:
if self.config.use_https:
return httplib.HTTPSConnection(self.get_hostname(bucket))
else:
return httplib.HTTPConnection(self.get_hostname(bucket))
    def get_hostname(self, bucket):
        """Return the host to contact for 'bucket' (virtual-host style when possible)."""
        if bucket and check_bucket_name_dns_conformity(bucket):
            # A previously received 307 redirect for this bucket wins.
            if self.redir_map.has_key(bucket):
                host = self.redir_map[bucket]
            else:
                host = getHostnameFromBucket(bucket)
        else:
            # No bucket, or name not DNS-conformant: use the base endpoint.
            host = self.config.host_base
        debug('get_hostname(%s): %s' % (bucket, host))
        return host
    def set_hostname(self, bucket, redir_hostname):
        ## Record an endpoint redirect for 'bucket' (set after an HTTP 307).
        ## NOTE(review): redir_map is a class attribute, so the mapping is
        ## shared across all S3 instances -- confirm that is intended.
        self.redir_map[bucket] = redir_hostname
def format_uri(self, resource):
if resource['bucket'] and not check_bucket_name_dns_conformity(resource['bucket']):
uri = "/%s%s" % (resource['bucket'], resource['uri'])
else:
uri = resource['uri']
if self.config.proxy_host != "":
uri = "http://%s%s" % (self.get_hostname(resource['bucket']), uri)
debug('format_uri(): ' + uri)
return uri
## Commands / Actions
def list_all_buckets(self):
request = self.create_request("LIST_ALL_BUCKETS")
response = self.send_request(request)
response["list"] = getListFromXml(response["data"], "Bucket")
return response
def bucket_list(self, bucket, prefix = None, recursive = None):
def _list_truncated(data):
## <IsTruncated> can either be "true" or "false" or be missing completely
is_truncated = getTextFromXml(data, ".//IsTruncated") or "false"
return is_truncated.lower() != "false"
def _get_contents(data):
return getListFromXml(data, "Contents")
def _get_common_prefixes(data):
return getListFromXml(data, "CommonPrefixes")
uri_params = {}
truncated = True
list = []
prefixes = []
while truncated:
response = self.bucket_list_noparse(bucket, prefix, recursive, uri_params)
current_list = _get_contents(response["data"])
current_prefixes = _get_common_prefixes(response["data"])
truncated = _list_truncated(response["data"])
if truncated:
if current_list:
uri_params['marker'] = self.urlencode_string(current_list[-1]["Key"])
else:
uri_params['marker'] = self.urlencode_string(current_prefixes[-1]["Prefix"])
debug("Listing continues after '%s'" % uri_params['marker'])
list += current_list
prefixes += current_prefixes
response['list'] = list
response['common_prefixes'] = prefixes
return response
def bucket_list_noparse(self, bucket, prefix = None, recursive = None, uri_params = {}):
if prefix:
uri_params['prefix'] = self.urlencode_string(prefix)
if not self.config.recursive and not recursive:
uri_params['delimiter'] = "/"
request = self.create_request("BUCKET_LIST", bucket = bucket, **uri_params)
response = self.send_request(request)
#debug(response)
return response
    def bucket_create(self, bucket, bucket_location = None):
        """
        PUT a new bucket, adding a LocationConstraint body for any region
        other than classic "US" ("EU" is upper-cased, others lower-cased).
        """
        headers = SortedDict(ignore_case = True)
        body = ""
        if bucket_location and bucket_location.strip().upper() != "US":
            bucket_location = bucket_location.strip()
            if bucket_location.upper() == "EU":
                bucket_location = bucket_location.upper()
            else:
                bucket_location = bucket_location.lower()
            body = "<CreateBucketConfiguration><LocationConstraint>"
            body += bucket_location
            body += "</LocationConstraint></CreateBucketConfiguration>"
            debug("bucket_location: " + body)
            # Location-constrained buckets must have DNS-conformant names.
            check_bucket_name(bucket, dns_strict = True)
        else:
            check_bucket_name(bucket, dns_strict = False)
        if self.config.acl_public:
            headers["x-amz-acl"] = "public-read"
        request = self.create_request("BUCKET_CREATE", bucket = bucket, headers = headers)
        response = self.send_request(request, body)
        return response
def bucket_delete(self, bucket):
request = self.create_request("BUCKET_DELETE", bucket = bucket)
response = self.send_request(request)
return response
def get_bucket_location(self, uri):
request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?location")
response = self.send_request(request)
location = getTextFromXml(response['data'], "LocationConstraint")
if not location or location in [ "", "US" ]:
location = "us-east-1"
elif location == "EU":
location = "eu-west-1"
return location
def bucket_info(self, uri):
# For now reports only "Location". One day perhaps more.
response = {}
response['bucket-location'] = self.get_bucket_location(uri)
return response
    def website_info(self, uri, bucket_location = None):
        """
        Fetch the bucket's website configuration.  Returns the response
        augmented with 'index_document', 'error_document' and
        'website_endpoint', or None when none is configured (HTTP 404).
        """
        headers = SortedDict(ignore_case = True)
        bucket = uri.bucket()
        body = ""
        request = self.create_request("BUCKET_LIST", bucket = bucket, extra="?website")
        try:
            response = self.send_request(request, body)
            response['index_document'] = getTextFromXml(response['data'], ".//IndexDocument//Suffix")
            response['error_document'] = getTextFromXml(response['data'], ".//ErrorDocument//Key")
            response['website_endpoint'] = self.config.website_endpoint % {
                "bucket" : uri.bucket(),
                "location" : self.get_bucket_location(uri)}
            return response
        except S3Error, e:
            if e.status == 404:
                debug("Could not get /?website - website probably not configured for this bucket")
                return None
            raise
    def website_create(self, uri, bucket_location = None):
        """
        Enable static-website hosting on the bucket: PUT a
        WebsiteConfiguration with the configured index (and optional
        error) document.
        """
        headers = SortedDict(ignore_case = True)
        bucket = uri.bucket()
        body = '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
        body += '  <IndexDocument>'
        body += ('    <Suffix>%s</Suffix>' % self.config.website_index)
        body += '  </IndexDocument>'
        if self.config.website_error:
            body += '  <ErrorDocument>'
            body += ('    <Key>%s</Key>' % self.config.website_error)
            body += '  </ErrorDocument>'
        body += '</WebsiteConfiguration>'
        request = self.create_request("BUCKET_CREATE", bucket = bucket, extra="?website")
        debug("About to send request '%s' with body '%s'" % (request, body))
        response = self.send_request(request, body)
        debug("Received response '%s'" % (response))
        return response
def website_delete(self, uri, bucket_location = None):
headers = SortedDict(ignore_case = True)
bucket = uri.bucket()
body = ""
request = self.create_request("BUCKET_DELETE", bucket = bucket, extra="?website")
debug("About to send request '%s' with body '%s'" % (request, body))
response = self.send_request(request, body)
debug("Received response '%s'" % (response))
if response['status'] != 204:
raise S3ResponseError("Expected status 204: %s" % response)
return response
    def object_put(self, filename, uri, extra_headers = None, extra_label = ""):
        """
        Upload local file 'filename' to the S3 object 'uri'.  Chooses a
        Content-Type (config, python-magic guess, or default), applies
        ACL/storage-class headers, and switches to multipart upload for
        files above the configured chunk size.
        """
        # TODO TODO
        # Make it consistent with stream-oriented object_get()
        if uri.type != "s3":
            raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
        if not os.path.isfile(filename):
            raise InvalidFileError(u"%s is not a regular file" % unicodise(filename))
        try:
            # NOTE: 'file' shadows the Python 2 builtin; the handle is
            # handed off to send_file()/send_file_multipart() below.
            file = open(filename, "rb")
            size = os.stat(filename)[ST_SIZE]
        except (IOError, OSError), e:
            raise InvalidFileError(u"%s: %s" % (unicodise(filename), e.strerror))
        headers = SortedDict(ignore_case = True)
        if extra_headers:
            headers.update(extra_headers)
        ## MIME-type handling
        content_type = self.config.mime_type
        if not content_type and self.config.guess_mime_type:
            content_type = mime_magic(filename)
        if not content_type:
            content_type = self.config.default_mime_type
        debug("Content-Type set to '%s'" % content_type)
        headers["content-type"] = content_type
        ## Other Amazon S3 attributes
        if self.config.acl_public:
            headers["x-amz-acl"] = "public-read"
        if self.config.reduced_redundancy:
            headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY"
        ## Multipart decision
        multipart = False
        if self.config.enable_multipart:
            if size > self.config.multipart_chunk_size_mb * 1024 * 1024:
                multipart = True
        if multipart:
            # Multipart requests are quite different... drop here
            return self.send_file_multipart(file, headers, uri, size)
        ## Not multipart...
        headers["content-length"] = size
        request = self.create_request("OBJECT_PUT", uri = uri, headers = headers)
        labels = { 'source' : unicodise(filename), 'destination' : unicodise(uri.uri()), 'extra' : extra_label }
        response = self.send_file(request, file, labels)
        return response
def object_get(self, uri, stream, start_position = 0, extra_label = ""):
if uri.type != "s3":
raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
request = self.create_request("OBJECT_GET", uri = uri)
labels = { 'source' : unicodise(uri.uri()), 'destination' : unicodise(stream.name), 'extra' : extra_label }
response = self.recv_file(request, stream, labels, start_position)
return response
def object_delete(self, uri):
if uri.type != "s3":
raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
request = self.create_request("OBJECT_DELETE", uri = uri)
response = self.send_request(request)
return response
def object_copy(self, src_uri, dst_uri, extra_headers = None):
if src_uri.type != "s3":
raise ValueError("Expected URI type 's3', got '%s'" % src_uri.type)
if dst_uri.type != "s3":
raise ValueError("Expected URI type 's3', got '%s'" % dst_uri.type)
headers = SortedDict(ignore_case = True)
headers['x-amz-copy-source'] = "/%s/%s" % (src_uri.bucket(), self.urlencode_string(src_uri.object()))
## TODO: For now COPY, later maybe add a switch?
headers['x-amz-metadata-directive'] = "COPY"
if self.config.acl_public:
headers["x-amz-acl"] = "public-read"
if self.config.reduced_redundancy:
headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY"
# if extra_headers:
# headers.update(extra_headers)
request = self.create_request("OBJECT_PUT", uri = dst_uri, headers = headers)
response = self.send_request(request)
return response
def object_move(self, src_uri, dst_uri, extra_headers = None):
response_copy = self.object_copy(src_uri, dst_uri, extra_headers)
debug("Object %s copied to %s" % (src_uri, dst_uri))
if getRootTagName(response_copy["data"]) == "CopyObjectResult":
response_delete = self.object_delete(src_uri)
debug("Object %s deleted" % src_uri)
return response_copy
def object_info(self, uri):
request = self.create_request("OBJECT_HEAD", uri = uri)
response = self.send_request(request)
return response
def get_acl(self, uri):
if uri.has_object():
request = self.create_request("OBJECT_GET", uri = uri, extra = "?acl")
else:
request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?acl")
response = self.send_request(request)
acl = ACL(response['data'])
return acl
def set_acl(self, uri, acl):
if uri.has_object():
request = self.create_request("OBJECT_PUT", uri = uri, extra = "?acl")
else:
request = self.create_request("BUCKET_CREATE", bucket = uri.bucket(), extra = "?acl")
body = str(acl)
debug(u"set_acl(%s): acl-xml: %s" % (uri, body))
response = self.send_request(request, body)
return response
def get_accesslog(self, uri):
request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?logging")
response = self.send_request(request)
accesslog = AccessLog(response['data'])
return accesslog
    def set_accesslog_acl(self, uri):
        """
        Grant the S3 log-delivery group the READ_ACP and WRITE permissions
        it needs on the log target bucket.
        """
        acl = self.get_acl(uri)
        debug("Current ACL(%s): %s" % (uri.uri(), str(acl)))
        acl.appendGrantee(GranteeLogDelivery("READ_ACP"))
        acl.appendGrantee(GranteeLogDelivery("WRITE"))
        debug("Updated ACL(%s): %s" % (uri.uri(), str(acl)))
        self.set_acl(uri, acl)
    def set_accesslog(self, uri, enable, log_target_prefix_uri = None, acl_public = False):
        """
        Enable or disable access logging on the bucket.  If S3 rejects the
        target bucket, its log-delivery ACL is set up once and the request
        retried.  Returns (accesslog, response).
        """
        request = self.create_request("BUCKET_CREATE", bucket = uri.bucket(), extra = "?logging")
        accesslog = AccessLog()
        if enable:
            accesslog.enableLogging(log_target_prefix_uri)
            accesslog.setAclPublic(acl_public)
        else:
            accesslog.disableLogging()
        body = str(accesslog)
        debug(u"set_accesslog(%s): accesslog-xml: %s" % (uri, body))
        try:
            response = self.send_request(request, body)
        except S3Error, e:
            # The target bucket lacks log-delivery permissions: fix its ACL
            # and retry the same request once.
            if e.info['Code'] == "InvalidTargetBucketForLogging":
                info("Setting up log-delivery ACL for target bucket.")
                self.set_accesslog_acl(S3Uri("s3://%s" % log_target_prefix_uri.bucket()))
                response = self.send_request(request, body)
            else:
                raise
        return accesslog, response
## Low level methods
    def urlencode_string(self, string, urlencoding_mode = None):
        """
        Percent-encode 'string' for use in a request URI (unicode input is
        UTF-8 encoded first).  Mode "verbatim" returns it untouched;
        "fixbucket" %-encodes non-printable characters instead of
        replacing them.
        """
        if type(string) == unicode:
            string = string.encode("utf-8")
        if urlencoding_mode is None:
            urlencoding_mode = self.config.urlencoding_mode
        if urlencoding_mode == "verbatim":
            ## Don't do any pre-processing
            return string
        encoded = ""
        ## List of characters that must be escaped for S3
        ## Haven't found this in any official docs
        ## but my tests show it's more less correct.
        ## If you start getting InvalidSignature errors
        ## from S3 check the error headers returned
        ## from S3 to see whether the list hasn't
        ## changed.
        for c in string:    # I'm not sure how to know in what encoding
                    # 'object' is. Apparently "type(object)==str"
                    # but the contents is a string of unicode
                    # bytes, e.g. '\xc4\x8d\xc5\xafr\xc3\xa1k'
                    # Don't know what it will do on non-utf8
                    # systems.
                    #           [hope that sounds reassuring ;-)]
            o = ord(c)
            if (o < 0x20 or o == 0x7f):
                if urlencoding_mode == "fixbucket":
                    encoded += "%%%02X" % o
                else:
                    error(u"Non-printable character 0x%02x in: %s" % (o, string))
                    error(u"Please report it to s3tools-bugs@lists.sourceforge.net")
                    encoded += replace_nonprintables(c)
            elif (o == 0x20 or  # Space and below
                o == 0x22 or    # "
                o == 0x23 or    # #
                o == 0x25 or    # % (escape character)
                o == 0x26 or    # &
                o == 0x2B or    # + (or it would become <space>)
                o == 0x3C or    # <
                o == 0x3E or    # >
                o == 0x3F or    # ?
                o == 0x60 or    # `
                o >= 123):      # { and above, including >= 128 for UTF-8
                encoded += "%%%02X" % o
            else:
                encoded += c
        debug("String '%s' encoded to '%s'" % (string, encoded))
        return encoded
    def create_request(self, operation, uri = None, bucket = None, object = None, headers = None, extra = None, **params):
        """
        Build a signed S3Request for 'operation', addressed either by a
        full S3Uri ('uri') or by explicit 'bucket'/'object' parts (the two
        forms are mutually exclusive).  'extra' is appended verbatim to
        the resource path (e.g. "?acl"); remaining keyword arguments
        become query-string parameters.
        """
        resource = { 'bucket' : None, 'uri' : "/" }
        if uri and (bucket or object):
            raise ValueError("Both 'uri' and either 'bucket' or 'object' parameters supplied")
        ## If URI is given use that instead of bucket/object parameters
        if uri:
            bucket = uri.bucket()
            object = uri.has_object() and uri.object() or None
        if bucket:
            resource['bucket'] = str(bucket)
            if object:
                resource['uri'] = "/" + self.urlencode_string(object)
        if extra:
            resource['uri'] += extra
        # Derive the HTTP verb from the operation's method bit flags.
        method_string = S3.http_methods.getkey(S3.operations[operation] & S3.http_methods["MASK"])
        request = S3Request(self, method_string, resource, headers, params)
        debug("CreateRequest: resource[uri]=" + resource['uri'])
        return request
    def _fail_wait(self, retries):
        # Wait a few seconds. The more it fails the more we wait.
        # ('retries' counts DOWN from _max_retries, so the delay grows
        # 3s, 6s, 9s... as attempts are consumed.)
        return (self._max_retries - retries + 1) * 3
    def send_request(self, request, body = None, retries = _max_retries):
        """
        Send a prepared S3Request; returns a dict with 'status', 'reason',
        'headers' and 'data'.  Retries with back-off on connection errors
        and HTTP 5xx, follows 307 redirects (recording the new endpoint),
        and raises S3Error for any other non-2xx status.
        """
        method_string, resource, headers = request.get_triplet()
        debug("Processing request, please wait...")
        if not headers.has_key('content-length'):
            headers['content-length'] = body and len(body) or 0
        try:
            # "Stringify" all headers
            for header in headers.keys():
                headers[header] = str(headers[header])
            conn = self.get_connection(resource['bucket'])
            uri = self.format_uri(resource)
            debug("Sending request method_string=%r, uri=%r, headers=%r, body=(%i bytes)" % (method_string, uri, headers, len(body or "")))
            conn.request(method_string, uri, body, headers)
            response = {}
            http_response = conn.getresponse()
            response["status"] = http_response.status
            response["reason"] = http_response.reason
            response["headers"] = convertTupleListToDict(http_response.getheaders())
            response["data"] = http_response.read()
            debug("Response: " + str(response))
            conn.close()
        except Exception, e:
            # Connection-level failure: retry with a growing delay until
            # the retry budget runs out.
            if retries:
                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                return self.send_request(request, body, retries - 1)
            else:
                raise S3RequestError("Request failed for: %s" % resource['uri'])
        if response["status"] == 307:
            ## RedirectPermanent
            redir_bucket = getTextFromXml(response['data'], ".//Bucket")
            redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
            self.set_hostname(redir_bucket, redir_hostname)
            warning("Redirected to: %s" % (redir_hostname))
            # Note: the redirect retry does not decrement 'retries'.
            return self.send_request(request, body)
        if response["status"] >= 500:
            e = S3Error(response)
            if retries:
                warning(u"Retrying failed request: %s" % resource['uri'])
                warning(unicode(e))
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                return self.send_request(request, body, retries - 1)
            else:
                raise e
        if response["status"] < 200 or response["status"] > 299:
            raise S3Error(response)
        return response
    def send_file(self, request, file, labels, throttle = 0, retries = _max_retries, offset = 0, chunk_size = -1):
        """
        Upload (part of) an open file object as the body of an S3 request.

        request  -- S3 request; 'content-length' header sets the bytes to send
        file     -- open file object, read from 'offset' onwards
        labels   -- dict used by the progress meter (source/destination/extra)
        throttle -- seconds to sleep between chunks (0 = full speed);
                    multiplied by 5 (min 0.01) on each retry after a failure
        retries  -- remaining retry budget for this transfer
        offset, chunk_size -- sub-range being sent (used by multipart upload)

        Returns the response dict extended with 'size', 'elapsed' and 'speed'.
        Raises S3UploadError when the retry budget is exhausted, S3Error for
        non-recoverable HTTP errors.
        """
        method_string, resource, headers = request.get_triplet()
        size_left = size_total = headers.get("content-length")
        if self.config.progress_meter:
            progress = self.config.progress_class(labels, size_total)
        else:
            info("Sending file '%s', please wait..." % file.name)
        timestamp_start = time.time()
        try:
            conn = self.get_connection(resource['bucket'])
            conn.connect()
            conn.putrequest(method_string, self.format_uri(resource))
            for header in headers.keys():
                conn.putheader(header, str(headers[header]))
            conn.endheaders()
        except Exception, e:
            if self.config.progress_meter:
                progress.done("failed")
            if retries:
                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                # Connection error -> same throttle value
                return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
            else:
                raise S3UploadError("Upload failed for: %s" % resource['uri'])
        file.seek(offset)
        # MD5 is computed on the fly so it can be compared with the ETag below.
        md5_hash = md5()
        try:
            while (size_left > 0):
                #debug("SendFile: Reading up to %d bytes from '%s'" % (self.config.send_chunk, file.name))
                data = file.read(min(self.config.send_chunk, size_left))
                md5_hash.update(data)
                conn.send(data)
                if self.config.progress_meter:
                    progress.update(delta_position = len(data))
                size_left -= len(data)
                if throttle:
                    time.sleep(throttle)
            md5_computed = md5_hash.hexdigest()
            response = {}
            http_response = conn.getresponse()
            response["status"] = http_response.status
            response["reason"] = http_response.reason
            response["headers"] = convertTupleListToDict(http_response.getheaders())
            response["data"] = http_response.read()
            response["size"] = size_total
            conn.close()
            debug(u"Response: %s" % response)
        except Exception, e:
            if self.config.progress_meter:
                progress.done("failed")
            if retries:
                # Failure mid-stream: slow down (start/raise the throttle)
                # before retrying, except on the very first retry.
                if retries < self._max_retries:
                    throttle = throttle and throttle * 5 or 0.01
                warning("Upload failed: %s (%s)" % (resource['uri'], e))
                warning("Retrying on lower speed (throttle=%0.2f)" % throttle)
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                # Connection error -> same throttle value
                return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
            else:
                debug("Giving up on '%s' %s" % (file.name, e))
                raise S3UploadError("Upload failed for: %s" % resource['uri'])
        timestamp_end = time.time()
        response["elapsed"] = timestamp_end - timestamp_start
        response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
        if self.config.progress_meter:
            ## The above conn.close() takes some time -> update() progress meter
            ## to correct the average speed. Otherwise people will complain that
            ## 'progress' and response["speed"] are inconsistent ;-)
            progress.update()
            progress.done("done")
        if response["status"] == 307:
            ## RedirectPermanent
            redir_bucket = getTextFromXml(response['data'], ".//Bucket")
            redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
            self.set_hostname(redir_bucket, redir_hostname)
            warning("Redirected to: %s" % (redir_hostname))
            return self.send_file(request, file, labels, offset = offset, chunk_size = chunk_size)
        # S3 from time to time doesn't send ETag back in a response :-(
        # Force re-upload here.
        if not response['headers'].has_key('etag'):
            response['headers']['etag'] = ''
        if response["status"] < 200 or response["status"] > 299:
            try_retry = False
            if response["status"] >= 500:
                ## AWS internal error - retry
                try_retry = True
            elif response["status"] >= 400:
                err = S3Error(response)
                ## Retriable client error?
                if err.code in [ 'BadDigest', 'OperationAborted', 'TokenRefreshRequired', 'RequestTimeout' ]:
                    try_retry = True
            if try_retry:
                if retries:
                    warning("Upload failed: %s (%s)" % (resource['uri'], S3Error(response)))
                    warning("Waiting %d sec..." % self._fail_wait(retries))
                    time.sleep(self._fail_wait(retries))
                    return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
                else:
                    warning("Too many failures. Giving up on '%s'" % (file.name))
                    raise S3UploadError
            ## Non-recoverable error
            raise S3Error(response)
        debug("MD5 sums: computed=%s, received=%s" % (md5_computed, response["headers"]["etag"]))
        # An empty etag (forced above) never matches -> triggers a re-upload.
        if response["headers"]["etag"].strip('"\'') != md5_hash.hexdigest():
            warning("MD5 Sums don't match!")
            if retries:
                warning("Retrying upload of %s" % (file.name))
                return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
            else:
                warning("Too many failures. Giving up on '%s'" % (file.name))
                raise S3UploadError
        return response
def send_file_multipart(self, file, headers, uri, size):
chunk_size = self.config.multipart_chunk_size_mb * 1024 * 1024
upload = MultiPartUpload(self, file, uri, headers)
upload.upload_all_parts()
response = upload.complete_multipart_upload()
response["speed"] = 0 # XXX
response["size"] = size
return response
    def recv_file(self, request, stream, labels, start_position = 0, retries = _max_retries):
        """
        Download an object into an open writable 'stream'.

        request        -- S3 GET request triplet provider
        stream         -- open file object written from 'start_position'
        labels         -- dict for the progress meter
        start_position -- resume offset; when > 0 a Range header is sent and
                          the on-the-fly MD5 is skipped (it would be nonsense)
        retries        -- remaining retry budget; mid-stream failures resume
                          from the current position

        Returns the response dict extended with 'md5', 'md5match', 'elapsed',
        'size' and 'speed'.  Raises S3DownloadError when retries are
        exhausted and S3Error for non-2xx responses.
        """
        method_string, resource, headers = request.get_triplet()
        if self.config.progress_meter:
            progress = self.config.progress_class(labels, 0)
        else:
            info("Receiving file '%s', please wait..." % stream.name)
        timestamp_start = time.time()
        try:
            conn = self.get_connection(resource['bucket'])
            conn.connect()
            conn.putrequest(method_string, self.format_uri(resource))
            for header in headers.keys():
                conn.putheader(header, str(headers[header]))
            if start_position > 0:
                debug("Requesting Range: %d .. end" % start_position)
                conn.putheader("Range", "bytes=%d-" % start_position)
            conn.endheaders()
            response = {}
            http_response = conn.getresponse()
            response["status"] = http_response.status
            response["reason"] = http_response.reason
            response["headers"] = convertTupleListToDict(http_response.getheaders())
            debug("Response: %s" % response)
        except Exception, e:
            if self.config.progress_meter:
                progress.done("failed")
            if retries:
                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                # Connection error -> same throttle value
                return self.recv_file(request, stream, labels, start_position, retries - 1)
            else:
                raise S3DownloadError("Download failed for: %s" % resource['uri'])
        if response["status"] == 307:
            ## RedirectPermanent
            response['data'] = http_response.read()
            redir_bucket = getTextFromXml(response['data'], ".//Bucket")
            redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
            self.set_hostname(redir_bucket, redir_hostname)
            warning("Redirected to: %s" % (redir_hostname))
            # Replay from scratch against the new endpoint.
            return self.recv_file(request, stream, labels)
        if response["status"] < 200 or response["status"] > 299:
            raise S3Error(response)
        if start_position == 0:
            # Only compute MD5 on the fly if we're downloading from beginning
            # Otherwise we'd get a nonsense.
            md5_hash = md5()
        size_left = int(response["headers"]["content-length"])
        size_total = start_position + size_left
        current_position = start_position
        if self.config.progress_meter:
            progress.total_size = size_total
            progress.initial_position = current_position
            progress.current_position = current_position
        try:
            while (current_position < size_total):
                this_chunk = size_left > self.config.recv_chunk and self.config.recv_chunk or size_left
                data = http_response.read(this_chunk)
                stream.write(data)
                if start_position == 0:
                    md5_hash.update(data)
                current_position += len(data)
                ## Call progress meter from here...
                if self.config.progress_meter:
                    progress.update(delta_position = len(data))
            conn.close()
        except Exception, e:
            if self.config.progress_meter:
                progress.done("failed")
            if retries:
                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
                warning("Waiting %d sec..." % self._fail_wait(retries))
                time.sleep(self._fail_wait(retries))
                # Connection error -> same throttle value
                # Resume from wherever the stream got to.
                return self.recv_file(request, stream, labels, current_position, retries - 1)
            else:
                raise S3DownloadError("Download failed for: %s" % resource['uri'])
        stream.flush()
        timestamp_end = time.time()
        if self.config.progress_meter:
            ## The above stream.flush() may take some time -> update() progress meter
            ## to correct the average speed. Otherwise people will complain that
            ## 'progress' and response["speed"] are inconsistent ;-)
            progress.update()
            progress.done("done")
        if start_position == 0:
            # Only compute MD5 on the fly if we were downloading from the beginning
            response["md5"] = md5_hash.hexdigest()
        else:
            # Otherwise try to compute MD5 of the output file
            try:
                response["md5"] = hash_file_md5(stream.name)
            except IOError, e:
                if e.errno != errno.ENOENT:
                    warning("Unable to open file: %s: %s" % (stream.name, e))
                warning("Unable to verify MD5. Assume it matches.")
                response["md5"] = response["headers"]["etag"]
        # Substring match: multipart ETags are not plain MD5 digests.
        response["md5match"] = response["headers"]["etag"].find(response["md5"]) >= 0
        response["elapsed"] = timestamp_end - timestamp_start
        response["size"] = current_position
        response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
        if response["size"] != start_position + long(response["headers"]["content-length"]):
            warning("Reported size (%s) does not match received size (%s)" % (
                start_position + response["headers"]["content-length"], response["size"]))
        debug("ReceiveFile: Computed MD5 = %s" % response["md5"])
        if not response["md5match"]:
            warning("MD5 signatures do not match: computed=%s, received=%s" % (
                response["md5"], response["headers"]["etag"]))
        return response
# Extend the module's public API list (__all__ is declared earlier in the file).
__all__.append("S3")
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "zhm/s3_cmd_bin",
"path": "resources/S3/S3.py",
"copies": "1",
"size": "37631",
"license": "mit",
"hash": 6132634345604718000,
"line_mean": 41.617214043,
"line_max": 139,
"alpha_frac": 0.5663150062,
"autogenerated": false,
"ratio": 4.078799046173857,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5145114052373856,
"avg_score": null,
"num_lines": null
} |
import os
from stat import ST_SIZE
from logging import debug, info, warning, error
from Utils import getTextFromXml, formatSize, unicodise
from Exceptions import S3UploadError
class MultiPartUpload(object):
    """
    Driver for S3 multipart uploads: initiates the upload, sends the file
    in fixed-size chunks and completes (or aborts) it via the S3 REST API.
    """

    MIN_CHUNK_SIZE_MB = 5 # 5MB
    MAX_CHUNK_SIZE_MB = 5120 # 5GB
    MAX_FILE_SIZE = 42949672960 # 5TB

    def __init__(self, s3, file, uri, headers_baseline = None):
        """
        s3   -- S3 connection driver used for all requests
        file -- open file object to upload
        uri  -- destination S3Uri
        headers_baseline -- optional extra headers for the initiate request
        """
        self.s3 = s3
        self.file = file
        self.uri = uri
        self.parts = {}
        # BUGFIX: the default used to be a mutable '{}' shared by every
        # call site that omitted the argument (classic mutable-default
        # pitfall); build a fresh dict per instance instead.
        self.headers_baseline = headers_baseline if headers_baseline is not None else {}
        self.upload_id = self.initiate_multipart_upload()

    def initiate_multipart_upload(self):
        """
        Begin a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadInitiate.html
        """
        request = self.s3.create_request("OBJECT_POST", uri = self.uri, headers = self.headers_baseline, extra = "?uploads")
        response = self.s3.send_request(request)
        data = response["data"]
        self.upload_id = getTextFromXml(data, "UploadId")
        return self.upload_id

    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")
        size_left = file_size = os.stat(self.file.name)[ST_SIZE]
        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
        # Floor division plus one extra part for any remainder.  '//' is
        # identical to '/' for ints under Python 2 but stays correct if the
        # file is ever run with true division enabled.
        nr_parts = file_size // self.chunk_size + (file_size % self.chunk_size and 1)
        debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
        seq = 1
        while size_left > 0:
            offset = self.chunk_size * (seq - 1)
            current_chunk_size = min(file_size - offset, self.chunk_size)
            size_left -= current_chunk_size
            labels = {
                'source' : unicodise(self.file.name),
                'destination' : unicodise(self.uri.uri()),
                'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
            }
            try:
                self.upload_part(seq, offset, current_chunk_size, labels)
            except:
                # A failed part invalidates the whole upload: abort so S3
                # does not keep storing (and billing for) orphaned parts.
                error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
                self.abort_upload()
                raise
            seq += 1
        debug("MultiPart: Upload finished: %d parts", seq - 1)

    def upload_part(self, seq, offset, chunk_size, labels):
        """
        Upload a file chunk
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadUploadPart.html
        """
        # TODO implement Content-MD5
        debug("Uploading part %i of %r (%s bytes)" % (seq, self.upload_id, chunk_size))
        headers = { "content-length": chunk_size }
        query_string = "?partNumber=%i&uploadId=%s" % (seq, self.upload_id)
        request = self.s3.create_request("OBJECT_PUT", uri = self.uri, headers = headers, extra = query_string)
        response = self.s3.send_file(request, self.file, labels, offset = offset, chunk_size = chunk_size)
        # Each part's ETag must be echoed back when completing the upload.
        self.parts[seq] = response["headers"]["etag"]
        return response

    def complete_multipart_upload(self):
        """
        Finish a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadComplete.html
        """
        debug("MultiPart: Completing upload: %s" % self.upload_id)
        parts_xml = []
        part_xml = "<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>"
        for seq, etag in self.parts.items():
            parts_xml.append(part_xml % (seq, etag))
        body = "<CompleteMultipartUpload>%s</CompleteMultipartUpload>" % ("".join(parts_xml))
        headers = { "content-length": len(body) }
        request = self.s3.create_request("OBJECT_POST", uri = self.uri, headers = headers, extra = "?uploadId=%s" % (self.upload_id))
        response = self.s3.send_request(request, body = body)
        return response

    def abort_upload(self):
        """
        Abort multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadAbort.html
        """
        debug("MultiPart: Aborting upload: %s" % self.upload_id)
        request = self.s3.create_request("OBJECT_DELETE", uri = self.uri, extra = "?uploadId=%s" % (self.upload_id))
        response = self.s3.send_request(request)
        return response
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "zhm/s3_cmd_bin",
"path": "resources/S3/MultiPart.py",
"copies": "1",
"size": "4721",
"license": "mit",
"hash": -2180814987962205400,
"line_mean": 40.7787610619,
"line_max": 133,
"alpha_frac": 0.6077102309,
"autogenerated": false,
"ratio": 3.5928462709284625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9668430275231849,
"avg_score": 0.006425245319322774,
"num_lines": 113
} |
import os
import sys
from stat import ST_SIZE
from logging import debug, info, warning, error
from Utils import getTextFromXml, getTreeFromXml, formatSize, unicodise, calculateChecksum, parseNodes
from Exceptions import S3UploadError
from collections import defaultdict
class MultiPartUpload(object):
    """
    Driver for S3 multipart uploads, with support for resuming interrupted
    uploads (--upload-id / put_continue) and for streaming from stdin.
    """

    MIN_CHUNK_SIZE_MB = 5 # 5MB
    MAX_CHUNK_SIZE_MB = 5120 # 5GB
    MAX_FILE_SIZE = 42949672960 # 5TB

    def __init__(self, s3, file, uri, headers_baseline = None):
        """
        s3   -- S3 connection driver used for all requests
        file -- open file object to upload (may be stdin)
        uri  -- destination S3Uri
        headers_baseline -- optional extra headers for the initiate request
        """
        self.s3 = s3
        self.file = file
        self.uri = uri
        self.parts = {}
        # BUGFIX: use None instead of a mutable '{}' default so instances
        # never share a single module-level dict (mutable-default pitfall).
        self.headers_baseline = headers_baseline if headers_baseline is not None else {}
        self.upload_id = self.initiate_multipart_upload()

    def get_parts_information(self, uri, upload_id):
        """Return {part_number: {'checksum', 'size'}} for parts already on S3.

        Missing part numbers map to None (defaultdict), entries without the
        expected XML fields are silently skipped.
        """
        multipart_response = self.s3.list_multipart(uri, upload_id)
        tree = getTreeFromXml(multipart_response['data'])
        parts = defaultdict(lambda: None)
        for elem in parseNodes(tree):
            try:
                parts[int(elem['PartNumber'])] = {'checksum': elem['ETag'], 'size': elem['Size']}
            except KeyError:
                pass
        return parts

    def get_unique_upload_id(self, uri):
        """Return the single in-progress UploadId for *uri*, or None.

        Raises ValueError when several uploads exist for the same key,
        since we cannot guess which one to continue.
        """
        upload_id = None
        multipart_response = self.s3.get_multipart(uri)
        tree = getTreeFromXml(multipart_response['data'])
        for mpupload in parseNodes(tree):
            try:
                mp_upload_id = mpupload['UploadId']
                mp_path = mpupload['Key']
                info("mp_path: %s, object: %s" % (mp_path, uri.object()))
                if mp_path == uri.object():
                    if upload_id is not None:
                        raise ValueError("More than one UploadId for URI %s. Disable multipart upload, or use\n %s multipart %s\nto list the Ids, then pass a unique --upload-id into the put command." % (uri, sys.argv[0], uri))
                    upload_id = mp_upload_id
            except KeyError:
                pass
        return upload_id

    def initiate_multipart_upload(self):
        """
        Begin a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadInitiate.html
        """
        # Precedence: an explicitly configured upload id, then a resumable
        # upload discovered on the server, then a freshly initiated one.
        if self.s3.config.upload_id is not None:
            self.upload_id = self.s3.config.upload_id
        elif self.s3.config.put_continue:
            self.upload_id = self.get_unique_upload_id(self.uri)
        else:
            self.upload_id = None
        if self.upload_id is None:
            request = self.s3.create_request("OBJECT_POST", uri = self.uri, headers = self.headers_baseline, extra = "?uploads")
            response = self.s3.send_request(request)
            data = response["data"]
            self.upload_id = getTextFromXml(data, "UploadId")
        return self.upload_id

    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")
        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
        if self.file.name != "<stdin>":
            size_left = file_size = os.stat(self.file.name)[ST_SIZE]
            # Floor division ('//' == '/' for Python 2 ints) plus one extra
            # part for any remainder.
            nr_parts = file_size // self.chunk_size + (file_size % self.chunk_size and 1)
            debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
        else:
            debug("MultiPart: Uploading from %s" % (self.file.name))
        remote_statuses = defaultdict(lambda: None)
        if self.s3.config.put_continue:
            remote_statuses = self.get_parts_information(self.uri, self.upload_id)
        seq = 1
        if self.file.name != "<stdin>":
            # Seekable file: size is known, send_file() reads each range itself.
            while size_left > 0:
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = min(file_size - offset, self.chunk_size)
                size_left -= current_chunk_size
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses[seq])
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort the upload, or\n %s --upload-id %s put ...\nto continue the upload."
                          % (self.file.name, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1
        else:
            # stdin: read chunk by chunk until EOF; sizes are discovered as we go.
            while True:
                # Renamed from 'buffer' to avoid shadowing the builtin.
                chunk = self.file.read(self.chunk_size)
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = len(chunk)
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                if len(chunk) == 0: # EOF
                    break
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, chunk, remote_status = remote_statuses[seq])
                except:
                    # BUGFIX: sys.argv[0] and self.uri were passed in the
                    # wrong order here, producing a garbled hint like
                    # "Use s3://... abortmp s3cmd <id>".
                    error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort, or\n %s --upload-id %s put ...\nto continue the upload."
                          % (self.file.name, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1
        debug("MultiPart: Upload finished: %d parts", seq - 1)

    def upload_part(self, seq, offset, chunk_size, labels, buffer = '', remote_status = None):
        """
        Upload a file chunk
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadUploadPart.html
        """
        # TODO implement Content-MD5
        debug("Uploading part %i of %r (%s bytes)" % (seq, self.upload_id, chunk_size))
        if remote_status is not None:
            # Part already exists on the server: skip the re-upload when both
            # size and md5 checksum match the local data.
            if int(remote_status['size']) == chunk_size:
                checksum = calculateChecksum(buffer, self.file, offset, chunk_size, self.s3.config.send_chunk)
                remote_checksum = remote_status['checksum'].strip('"')
                if remote_checksum == checksum:
                    warning("MultiPart: size and md5sum match for %s part %d, skipping." % (self.uri, seq))
                    self.parts[seq] = remote_status['checksum']
                    return
                else:
                    warning("MultiPart: checksum (%s vs %s) does not match for %s part %d, reuploading."
                            % (remote_checksum, checksum, self.uri, seq))
            else:
                warning("MultiPart: size (%d vs %d) does not match for %s part %d, reuploading."
                        % (int(remote_status['size']), chunk_size, self.uri, seq))
        headers = { "content-length": chunk_size }
        query_string = "?partNumber=%i&uploadId=%s" % (seq, self.upload_id)
        request = self.s3.create_request("OBJECT_PUT", uri = self.uri, headers = headers, extra = query_string)
        response = self.s3.send_file(request, self.file, labels, buffer, offset = offset, chunk_size = chunk_size)
        self.parts[seq] = response["headers"]["etag"]
        return response

    def complete_multipart_upload(self):
        """
        Finish a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadComplete.html
        """
        debug("MultiPart: Completing upload: %s" % self.upload_id)
        parts_xml = []
        part_xml = "<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>"
        for seq, etag in self.parts.items():
            parts_xml.append(part_xml % (seq, etag))
        body = "<CompleteMultipartUpload>%s</CompleteMultipartUpload>" % ("".join(parts_xml))
        headers = { "content-length": len(body) }
        request = self.s3.create_request("OBJECT_POST", uri = self.uri, headers = headers, extra = "?uploadId=%s" % (self.upload_id))
        response = self.s3.send_request(request, body = body)
        return response

    def abort_upload(self):
        """
        Abort multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadAbort.html
        """
        debug("MultiPart: Aborting upload: %s" % self.upload_id)
        # NOTE(review): the abort request is intentionally left commented out
        # -- presumably so interrupted uploads stay resumable with
        # --upload-id / put_continue; confirm before re-enabling.
        #request = self.s3.create_request("OBJECT_DELETE", uri = self.uri, extra = "?uploadId=%s" % (self.upload_id))
        #response = self.s3.send_request(request)
        response = None
        return response
# vim:et:ts=4:sts=4:ai
| {
"repo_name": "sharethis-github/OpenSource",
"path": "s3cmd/S3/MultiPart.py",
"copies": "2",
"size": "9057",
"license": "apache-2.0",
"hash": 2138174443335985400,
"line_mean": 44.285,
"line_max": 227,
"alpha_frac": 0.5672960141,
"autogenerated": false,
"ratio": 3.77375,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0052811328805443605,
"num_lines": 200
} |
"""Amazon Simple Storage Service (S3) datastore.
.. note::
**Installation**: Use of this module requires the open source boto_
package.
.. _boto: http://code.google.com/p/boto/
"""
from cloud_browser.cloud import boto_base as base
from cloud_browser.common import requires
###############################################################################
# Constants / Conditional Imports
###############################################################################
try:
import boto # pylint: disable=F0401
except ImportError:
boto = None # pylint: disable=C0103
###############################################################################
# Classes
###############################################################################
class AwsObject(base.BotoObject):
    """Wrapper around a native AWS S3 'key' result object."""

    @classmethod
    @requires(boto, 'boto')
    def is_key(cls, result):
        """Return ``True`` if result is a key object."""
        import boto.s3.key
        return isinstance(result, boto.s3.key.Key)

    @classmethod
    @requires(boto, 'boto')
    def is_prefix(cls, result):
        """Return ``True`` if result is a prefix object."""
        import boto.s3.prefix
        return isinstance(result, boto.s3.prefix.Prefix)
class AwsContainer(base.BotoContainer):
    """AWS container (bucket) wrapper."""
    #: Storage object child class used to wrap listed results.
    obj_cls = AwsObject
class AwsConnection(base.BotoConnection):
    """AWS connection wrapper."""
    #: Container child class.
    cont_cls = AwsContainer
    @base.BotoConnection.wrap_boto_errors
    @requires(boto, 'boto')
    def _get_connection(self):
        """Return native connection object.

        Passes ``self.account`` and ``self.secret_key`` straight to
        ``boto.connect_s3``; boto exceptions are translated by the
        ``wrap_boto_errors`` decorator.
        """
        return boto.connect_s3(self.account, self.secret_key)
| {
"repo_name": "UrbanDaddy/django-cloud-browser",
"path": "cloud_browser/cloud/aws.py",
"copies": "1",
"size": "1739",
"license": "mit",
"hash": -9186907538104080000,
"line_mean": 28.4745762712,
"line_max": 79,
"alpha_frac": 0.5399654974,
"autogenerated": false,
"ratio": 4.662198391420912,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5702163888820913,
"avg_score": null,
"num_lines": null
} |
"""Amazon Simple Storage Service (S3) datastore.
.. note::
**Installation**: Use of this module requires the open source boto_
package.
.. _boto: http://code.google.com/p/boto/
"""
from warnings import warn
from cloud_browser.cloud import boto_base as base
from cloud_browser.common import requires
warn(
"The boto driver is deprecated, please move to the Apache Libcloud driver",
DeprecationWarning,
)
###############################################################################
# Constants / Conditional Imports
###############################################################################
try:
import boto # pylint: disable=F0401
except ImportError:
boto = None # pylint: disable=C0103
###############################################################################
# Classes
###############################################################################
class AwsObject(base.BotoObject):
    """Wrapper around a native AWS S3 'key' result object."""

    @classmethod
    @requires(boto, "boto")
    def is_key(cls, result):
        """Return ``True`` if result is a key object."""
        import boto.s3.key
        return isinstance(result, boto.s3.key.Key)

    @classmethod
    @requires(boto, "boto")
    def is_prefix(cls, result):
        """Return ``True`` if result is a prefix object."""
        import boto.s3.prefix
        return isinstance(result, boto.s3.prefix.Prefix)
class AwsContainer(base.BotoContainer):
    """AWS container (bucket) wrapper."""
    #: Storage object child class used to wrap listed results.
    obj_cls = AwsObject
class AwsConnection(base.BotoConnection):
    """AWS connection wrapper."""
    #: Container child class.
    cont_cls = AwsContainer
    @base.BotoConnection.wrap_boto_errors
    @requires(boto, "boto")
    def _get_connection(self):
        """Return native connection object.

        Passes ``self.account`` and ``self.secret_key`` straight to
        ``boto.connect_s3``; boto exceptions are translated by the
        ``wrap_boto_errors`` decorator.
        """
        return boto.connect_s3(self.account, self.secret_key)
| {
"repo_name": "ryan-roemer/django-cloud-browser",
"path": "cloud_browser/cloud/aws.py",
"copies": "1",
"size": "1881",
"license": "mit",
"hash": -149895951638638300,
"line_mean": 26.6617647059,
"line_max": 79,
"alpha_frac": 0.5544922913,
"autogenerated": false,
"ratio": 4.644444444444445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
"""Amazon Simple Storage Service (S3) datastore.
.. note::
**Installation**: Use of this module requires the open source boto_
package.
.. _boto: http://code.google.com/p/boto/
"""
import base64
import datetime
import hashlib
import hmac
import json
from cloud_browser.cloud import boto_base as base
from cloud_browser.common import requires
###############################################################################
# Constants / Conditional Imports
###############################################################################
try:
import boto # pylint: disable=F0401
except ImportError:
boto = None # pylint: disable=C0103
###############################################################################
# Classes
###############################################################################
class AwsObject(base.BotoObject):
    """Wrapper around a native AWS S3 'key' result object."""

    @classmethod
    @requires(boto, 'boto')
    def is_key(cls, result):
        """Return ``True`` if result is a key object."""
        import boto.s3.key
        return isinstance(result, boto.s3.key.Key)

    @classmethod
    @requires(boto, 'boto')
    def is_prefix(cls, result):
        """Return ``True`` if result is a prefix object."""
        import boto.s3.prefix
        return isinstance(result, boto.s3.prefix.Prefix)
class AwsContainer(base.BotoContainer):
    """AWS container (bucket) wrapper."""
    #: Storage object child class used to wrap listed results.
    obj_cls = AwsObject
class AwsConnection(base.BotoConnection):
    """AWS connection wrapper with browser-based POST upload support."""
    #: Container child class.
    cont_cls = AwsContainer

    @base.BotoConnection.wrap_boto_errors
    @requires(boto, 'boto')
    def _get_connection(self):
        """Return native connection object."""
        return boto.connect_s3(self.account, self.secret_key)

    @staticmethod
    def _get_policy(container_name, key_prefix,
                    success_action_redirect, acl, username):
        """Return the base64-encoded JSON POST policy for a browser upload.

        The policy expires one hour from now and constrains bucket, key
        prefix, ACL, redirect target and uploader metadata.
        """
        # expires.isoformat() has microseconds; S3 wants second-granularity
        # ISO-8601, so format explicitly.
        expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
        expiration = expires.strftime('%Y-%m-%dT%H:%M:%SZ')
        conditions = [
            {'bucket': container_name},
            # '{}'.format() keeps the historic stringification of non-str
            # prefixes (e.g. None -> 'None').
            ['starts-with', '$key', '{}'.format(key_prefix)],
            {'acl': acl},
            {'success_action_redirect': success_action_redirect},
            {'x-amz-meta-modified-by': username},
        ]
        policy = {
            'expiration': expiration,
            'conditions': conditions,
        }
        return base64.b64encode(json.dumps(policy))

    def _get_signature(self, policy):
        """Return the base64 HMAC-SHA1 of *policy* (already base64-encoded)."""
        return base64.b64encode(
            hmac.new(self.secret_key, policy, hashlib.sha1).digest())

    # pylint: disable=arguments-differ, too-many-arguments
    def get_upload_form(self, container_name=None, key_prefix=None,
                        success_action_redirect=None, acl=None, username=None):
        """Return an HTML form for a browser-based POST upload to S3.

        The embedded policy and its signature are generated from the same
        policy document.
        """
        policy = self._get_policy(container_name, key_prefix,
                                  success_action_redirect, acl, username)
        # BUGFIX: sign the exact policy embedded in the form.  The previous
        # code generated a *second* policy for signing; its expiration
        # timestamp could fall in a later second, so the signature did not
        # always match the posted policy and S3 would reject the upload.
        signature = self._get_signature(policy)
        # BUGFIX: the key input previously contained '${(unknown)}', an
        # invalid str.format field that raised at render time; S3's POST
        # variable for the uploaded file's name is '${filename}' (braces
        # escaped for .format).  The acl input is also rendered from the
        # 'acl' argument so it always matches the {'acl': acl} policy
        # condition instead of being hard-coded to 'public-read'.
        return """
            <form action="https://{bucket}.s3.amazonaws.com" \
                    method="post" enctype="multipart/form-data">
                <input type="hidden" name="key" \
                        value="{key_prefix}${{filename}}">
                <input type="hidden" name="AWSAccessKeyId"
                        value="{access_key_id}"> \
                <input type="hidden" name="acl" value="{acl}">
                <input type="hidden" name="success_action_redirect" \
                        value="{success_action_redirect}">
                <input type="hidden" name="x-amz-meta-modified-by" \
                        value="{username}">
                <input type="hidden" name="policy" value="{policy}">
                <input type="hidden" name="signature" value="{signature}">
                <input type="file" name="file">
                <input type="submit" value="UPLOAD">
            </form>
        """.format(
            bucket=container_name,
            key_prefix=key_prefix,
            access_key_id=self.account,
            acl=acl,
            success_action_redirect=success_action_redirect,
            policy=policy,
            signature=signature,
            username=username,
        )
| {
"repo_name": "lantip/aws-filemanager",
"path": "cloud_browser/cloud/aws.py",
"copies": "1",
"size": "4505",
"license": "mit",
"hash": -7474136084076831000,
"line_mean": 32.6194029851,
"line_max": 79,
"alpha_frac": 0.5327413984,
"autogenerated": false,
"ratio": 4.491525423728813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00037313432835820896,
"num_lines": 134
} |
"""Amazon Simple Storage Service (S3) Driver."""
import logging
from typing import Any, Dict, Iterable, List, TYPE_CHECKING # noqa: F401
from urllib.parse import quote, urljoin
import boto3
from botocore.exceptions import ClientError, ParamValidationError, WaiterError
from inflection import camelize, underscore
from cloudstorage import Blob, Container, Driver, messages
from cloudstorage.exceptions import (
CloudStorageError,
CredentialsError,
IsNotEmptyError,
NotFoundError,
)
from cloudstorage.helpers import file_content_type, validate_file_or_path
from cloudstorage.typed import (
ContentLength,
ExtraOptions,
FileLike,
FormPost,
MetaData,
)
if TYPE_CHECKING:
from cloudstorage.structures import CaseInsensitiveDict # noqa
__all__ = ["S3Driver"]
logger = logging.getLogger(__name__)
class S3Driver(Driver):
"""Driver for interacting with Amazon Simple Storage Service (S3).
.. code-block:: python
from cloudstorage.drivers.amazon import S3Driver
storage = S3Driver(key='<my-aws-access-key-id>',
secret='<my-aws-secret-access-key>',
region='us-east-1')
# <Driver: S3 us-east-1>
References:
* `Boto 3 Docs <https://boto3.amazonaws.com/v1/documentation/api/
latest/index.html>`_
* `Amazon S3 REST API Introduction
<https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html>`_
:param key: AWS Access Key ID.
:type key: str
:param secret: AWS Secret Access Key.
:type secret: str
:param region: (optional) Region to connect to. Defaults to `us-east-1`.
:type region: str
:param kwargs: (optional) Extra driver options.
:type kwargs: dict
"""
name = "S3"
hash_type = "md5"
url = "https://aws.amazon.com/s3/"
    def __init__(
        self, key: str, secret: str = None, region: str = "us-east-1", **kwargs: Dict
    ) -> None:
        # Regions are matched case-insensitively: normalize before both the
        # base-class setup and the validation below.
        region = region.lower()
        super().__init__(key=key, secret=secret, region=region, **kwargs)
        self._session = boto3.Session(
            aws_access_key_id=key, aws_secret_access_key=secret, region_name=region
        )
        # session required for loading regions list
        # NOTE(review): self.regions is defined outside this view and
        # presumably enumerates region names via the session above -- confirm.
        if region not in self.regions:
            raise CloudStorageError(messages.REGION_NOT_FOUND % region)
def __iter__(self) -> Iterable[Container]:
for bucket in self.s3.buckets.all():
yield self._make_container(bucket)
def __len__(self) -> int:
buckets = [bucket for bucket in self.s3.buckets.all()]
return len(buckets)
@staticmethod
def _normalize_parameters(
params: Dict[str, str], normalizers: Dict[str, str]
) -> Dict[str, str]:
normalized = params.copy()
for key, value in params.items():
normalized.pop(key)
if not value:
continue
key_inflected = camelize(underscore(key), uppercase_first_letter=True)
# Only include parameters found in normalizers
key_overrider = normalizers.get(key_inflected.lower())
if key_overrider:
normalized[key_overrider] = value
return normalized
def _get_bucket(self, bucket_name: str, validate: bool = True):
"""Get a S3 bucket.
:param bucket_name: The Bucket's name identifier.
:type bucket_name: str
:param validate: If True, verify that the bucket exists.
:type validate: bool
:return: S3 bucket resource object.
:rtype: :class:`boto3.s3.Bucket`
:raises NotFoundError: If the bucket does not exist.
:raises CloudStorageError: Boto 3 client error.
"""
bucket = self.s3.Bucket(bucket_name)
if validate:
try:
response = self.s3.meta.client.head_bucket(Bucket=bucket_name)
logger.debug("response=%s", response)
except ClientError as err:
error_code = int(err.response["Error"]["Code"])
if error_code == 404:
raise NotFoundError(messages.CONTAINER_NOT_FOUND % bucket_name)
raise CloudStorageError(
"%s: %s"
% (err.response["Error"]["Code"], err.response["Error"]["Message"])
)
try:
bucket.wait_until_exists()
except WaiterError as err:
logger.error(err)
return bucket
def _make_blob(self, container: Container, object_summary) -> Blob:
"""Convert S3 Object Summary to Blob instance.
:param container: The container that holds the blob.
:type container: :class:`.Container`
:param object_summary: S3 object summary.
:type object_summary: :class:`boto3.s3.ObjectSummary`
:return: A blob object.
:rtype: :class:`.Blob`
:raise NotFoundError: If the blob object doesn't exist.
"""
try:
name = object_summary.key
#: etag wrapped in quotes
checksum = etag = object_summary.e_tag.replace('"', "")
size = object_summary.size
acl = object_summary.Acl()
meta_data = object_summary.meta.data.get("Metadata", {})
content_disposition = object_summary.meta.data.get(
"ContentDisposition", None
)
content_type = object_summary.meta.data.get("ContentType", None)
cache_control = object_summary.meta.data.get("CacheControl", None)
modified_at = object_summary.last_modified
created_at = None
expires_at = None # TODO: FEATURE: Delete at / expires at
except ClientError as err:
error_code = int(err.response["Error"]["Code"])
if error_code == 404:
raise NotFoundError(
messages.BLOB_NOT_FOUND % (object_summary.key, container.name)
)
raise CloudStorageError(
"%s: %s"
% (err.response["Error"]["Code"], err.response["Error"]["Message"])
)
return Blob(
name=name,
checksum=checksum,
etag=etag,
size=size,
container=container,
driver=self,
acl=acl,
meta_data=meta_data,
content_disposition=content_disposition,
content_type=content_type,
cache_control=cache_control,
created_at=created_at,
modified_at=modified_at,
expires_at=expires_at,
)
def _make_container(self, bucket) -> Container:
"""Convert S3 Bucket to Container.
:param bucket: S3 bucket object.
:type bucket: :class:`boto3.s3.Bucket`
:return: The container if it exists.
:rtype: :class:`.Container`
"""
acl = bucket.Acl()
created_at = bucket.creation_date.astimezone(tz=None)
return Container(
name=bucket.name,
driver=self,
acl=acl,
meta_data=None,
created_at=created_at,
)
def _create_bucket_params(self, params: Dict[Any, Any]) -> Dict[Any, Any]:
"""Process extra create bucket params.
:param params: Default create bucket parameters.
:return: Final create bucket parameters.
"""
# TODO: BUG: Creating S3 bucket in us-east-1
if self.region != "us-east-1":
params["CreateBucketConfiguration"] = {
"LocationConstraint": self.region,
}
return params
@property
def session(self) -> boto3.session.Session:
"""Amazon Web Services session.
:return: AWS session.
:rtype: :class:`boto3.session.Session`
"""
return self._session
# noinspection PyUnresolvedReferences
@property
def s3(self) -> boto3.resources.base.ServiceResource:
"""S3 service resource.
:return: The s3 resource instance.
:rtype: :class:`boto3.resources.base.ServiceResource`
"""
return self.session.resource(service_name="s3", region_name=self.region)
def validate_credentials(self) -> None:
try:
self.session.client("sts").get_caller_identity()
except ClientError as err:
raise CredentialsError(str(err))
@property
def regions(self) -> List[str]:
return self.session.get_available_regions("s3")
def create_container(
self, container_name: str, acl: str = None, meta_data: MetaData = None
) -> Container:
if meta_data:
logger.info(messages.OPTION_NOT_SUPPORTED, "meta_data")
# Required parameters
params = {
"Bucket": container_name,
} # type: Dict[Any, Any]
if acl:
params["ACL"] = acl.lower()
params = self._create_bucket_params(params)
logger.debug("params=%s", params)
try:
bucket = self.s3.create_bucket(**params)
except ParamValidationError as err:
msg = err.kwargs.get("report", messages.CONTAINER_NAME_INVALID)
raise CloudStorageError(msg)
try:
bucket.wait_until_exists()
except WaiterError as err:
logger.error(err)
return self._make_container(bucket)
def get_container(self, container_name: str) -> Container:
bucket = self._get_bucket(container_name)
return self._make_container(bucket)
def patch_container(self, container: Container) -> None:
raise NotImplementedError
def delete_container(self, container: Container) -> None:
bucket = self._get_bucket(container.name, validate=False)
try:
bucket.delete()
except ClientError as err:
error_code = err.response["Error"]["Code"]
if error_code == "BucketNotEmpty":
raise IsNotEmptyError(messages.CONTAINER_NOT_EMPTY % bucket.name)
raise
def container_cdn_url(self, container: Container) -> str:
bucket = self._get_bucket(container.name, validate=False)
endpoint_url = bucket.meta.client.meta.endpoint_url
return "%s/%s" % (endpoint_url, container.name)
def enable_container_cdn(self, container: Container) -> bool:
logger.warning(messages.FEATURE_NOT_SUPPORTED, "enable_container_cdn")
return False
def disable_container_cdn(self, container: Container) -> bool:
logger.warning(messages.FEATURE_NOT_SUPPORTED, "disable_container_cdn")
return False
def upload_blob(
self,
container: Container,
filename: FileLike,
blob_name: str = None,
acl: str = None,
meta_data: MetaData = None,
content_type: str = None,
content_disposition: str = None,
cache_control: str = None,
chunk_size: int = 1024,
extra: ExtraOptions = None,
) -> Blob:
meta_data = {} if meta_data is None else meta_data
extra = {} if extra is None else extra
extra_args = self._normalize_parameters(extra, self._PUT_OBJECT_KEYS)
config = boto3.s3.transfer.TransferConfig(io_chunksize=chunk_size)
# Default arguments
extra_args.setdefault("Metadata", meta_data)
extra_args.setdefault("StorageClass", "STANDARD")
if acl:
extra_args.setdefault("ACL", acl.lower())
if cache_control:
extra_args.setdefault("CacheControl", cache_control)
if content_disposition:
extra_args["ContentDisposition"] = content_disposition
blob_name = blob_name or validate_file_or_path(filename)
# Boto uses application/octet-stream by default
if not content_type:
if isinstance(filename, str):
# TODO: QUESTION: Any advantages between filename vs blob_name?
extra_args["ContentType"] = file_content_type(filename)
else:
extra_args["ContentType"] = file_content_type(blob_name)
else:
extra_args["ContentType"] = content_type
logger.debug("extra_args=%s", extra_args)
if isinstance(filename, str):
self.s3.Bucket(container.name).upload_file(
Filename=filename, Key=blob_name, ExtraArgs=extra_args, Config=config
)
else:
self.s3.Bucket(container.name).upload_fileobj(
Fileobj=filename, Key=blob_name, ExtraArgs=extra_args, Config=config
)
return self.get_blob(container, blob_name)
def get_blob(self, container: Container, blob_name: str) -> Blob:
object_summary = self.s3.ObjectSummary(
bucket_name=container.name, key=blob_name
)
return self._make_blob(container, object_summary)
def get_blobs(self, container: Container) -> Iterable[Blob]:
bucket = self._get_bucket(container.name, validate=False)
for key in bucket.objects.all(): # s3.ObjectSummary
yield self._make_blob(container, key)
def download_blob(self, blob: Blob, destination: FileLike) -> None:
if isinstance(destination, str):
self.s3.Bucket(name=blob.container.name).download_file(
Key=blob.name, Filename=destination, ExtraArgs={}
)
else:
self.s3.Bucket(name=blob.container.name).download_fileobj(
Key=blob.name, Fileobj=destination, ExtraArgs={}
)
def patch_blob(self, blob: Blob) -> None:
raise NotImplementedError
def delete_blob(self, blob: Blob) -> None:
# Required parameters
params = {
"Bucket": blob.container.name,
"Key": blob.name,
}
logger.debug("params=%s", params)
try:
response = self.s3.meta.client.delete_object(**params)
logger.debug("response=%s", response)
except ClientError as err:
error_code = int(err.response["Error"]["Code"])
if error_code != 200 or error_code != 204:
raise NotFoundError(
messages.BLOB_NOT_FOUND % (blob.name, blob.container.name)
)
raise
def blob_cdn_url(self, blob: Blob) -> str:
container_url = self.container_cdn_url(blob.container)
blob_name_cleaned = quote(blob.name)
blob_path = "%s/%s" % (container_url, blob_name_cleaned)
url = urljoin(container_url, blob_path)
return url
def generate_container_upload_url(
self,
container: Container,
blob_name: str,
expires: int = 3600,
acl: str = None,
meta_data: MetaData = None,
content_disposition: str = None,
content_length: ContentLength = None,
content_type: str = None,
cache_control: str = None,
extra: ExtraOptions = None,
) -> FormPost:
meta_data = {} if meta_data is None else meta_data
extra = {} if extra is None else extra
extra_norm = self._normalize_parameters(extra, self._POST_OBJECT_KEYS)
conditions = [] # type: List[Any]
fields = {} # type: Dict[Any, Any]
if acl:
conditions.append({"acl": acl})
fields["acl"] = acl
headers = {
"Content-Disposition": content_disposition,
"Content-Type": content_type,
"Cache-Control": cache_control,
}
for header_name, header_value in headers.items():
if not header_value:
continue
fields[header_name.lower()] = header_value
conditions.append(["eq", "$" + header_name, header_value])
# Add content-length-range which is a tuple
if content_length:
min_range, max_range = content_length
conditions.append(["content-length-range", min_range, max_range])
for meta_name, meta_value in meta_data.items():
meta_name = self._OBJECT_META_PREFIX + meta_name
fields[meta_name] = meta_value
conditions.append({meta_name: meta_value})
# Add extra conditions and fields
for extra_name, extra_value in extra_norm.items():
fields[extra_name] = extra_value
conditions.append({extra_name: extra_value})
return self.s3.meta.client.generate_presigned_post(
Bucket=container.name,
Key=blob_name,
Fields=fields,
Conditions=conditions,
ExpiresIn=int(expires),
)
def generate_blob_download_url(
self,
blob: Blob,
expires: int = 3600,
method: str = "GET",
content_disposition: str = None,
extra: ExtraOptions = None,
) -> str:
extra = extra if extra is not None else {}
params = self._normalize_parameters(extra, self._GET_OBJECT_KEYS)
# Required parameters
params["Bucket"] = blob.container.name
params["Key"] = blob.name
# Optional
if content_disposition:
params["ResponseContentDisposition"] = content_disposition
logger.debug("params=%s", params)
return self.s3.meta.client.generate_presigned_url(
ClientMethod="get_object",
Params=params,
ExpiresIn=int(expires),
HttpMethod=method.lower(),
)
_OBJECT_META_PREFIX = "x-amz-meta-" # type: str
#: `S3.Client.generate_presigned_post
#: <http://boto3.readthedocs.io/en/latest/reference/services/s3.html
#: #S3.Client.generate_presigned_post>`_
_POST_OBJECT_KEYS = {
"acl": "acl",
"cachecontrol": "Cache-Control",
"contenttype": "Content-Type",
"contentdisposition": "Content-Disposition",
"contentencoding": "Content-Encoding",
"expires": "Expires",
"successactionredirect": "success_action_redirect",
"redirect": "redirect",
"successactionstatus": "success_action_status",
"xamzmeta": "x-amz-meta-",
}
#: `#S3.Client.get_object
#: <http://boto3.readthedocs.io/en/latest/reference/services/s3.html
#: #S3.Client.get_object>`_
_GET_OBJECT_KEYS = {
"bucket": "Bucket",
"ifmatch": "IfMatch",
"ifmodifiedsince": "IfModifiedSince",
"ifnonematch": "IfNoneMatch",
"ifunmodifiedsince": "IfUnmodifiedSince",
"key": "Key",
"range": "Range",
"responsecachecontrol": "ResponseCacheControl",
"responsecontentdisposition": "ResponseContentDisposition",
"responsecontentencoding": "ResponseContentEncoding",
"responsecontentlanguage": "ResponseContentLanguage",
"responsecontenttype": "ResponseContentType",
"responseexpires": "ResponseExpires",
"versionid": "VersionId",
"ssecustomeralgorithm": "SSECustomerAlgorithm",
"ssecustomerkey": "SSECustomerKey",
"requestpayer": "RequestPayer",
"partnumber": "PartNumber",
# Extra keys to standarize across all drivers
"cachecontrol": "ResponseCacheControl",
"contentdisposition": "ResponseContentDisposition",
"contentencoding": "ResponseContentEncoding",
"contentlanguage": "ResponseContentLanguage",
"contenttype": "ResponseContentType",
"expires": "ResponseExpires",
}
#: `S3.Client.put_object
#: <http://boto3.readthedocs.io/en/latest/reference/services/s3.html
#: #S3.Client.put_object>`_
_PUT_OBJECT_KEYS = {
"acl": "ACL",
"body": "Body",
"bucket": "Bucket",
"cachecontrol": "CacheControl",
"contentdisposition": "ContentDisposition",
"contentencoding": "ContentEncoding",
"contentlanguage": "ContentLanguage",
"contentlength": "ContentLength",
"contentmd5": "ContentMD5",
"contenttype": "ContentType",
"expires": "Expires",
"grantfullcontrol": "GrantFullControl",
"grantread": "GrantRead",
"grantreadacp": "GrantReadACP",
"grantwriteacp": "GrantWriteACP",
"key": "Key",
"metadata": "Metadata",
"serversideencryption": "ServerSideEncryption",
"storageclass": "StorageClass",
"websiteredirectlocation": "WebsiteRedirectLocation",
"ssecustomeralgorithm": "SSECustomerAlgorithm",
"ssecustomerkey": "SSECustomerKey",
"ssekmskeyid": "SSEKMSKeyId",
"requestpayer": "RequestPayer",
"tagging": "Tagging",
}
#: `S3.Client.delete_object
#: <http://boto3.readthedocs.io/en/latest/reference/services/s3.html
#: #S3.Client.delete_object>`_
_DELETE_OBJECT_KEYS = {
"bucket": "Bucket",
"key": "Key",
"mfa": "MFA",
"versionid": "VersionId",
"requestpayer": "RequestPayer",
}
#: `S3.Bucket.create
#: <http://boto3.readthedocs.io/en/latest/reference/services/s3.html
#: #S3.Bucket.create>`_
_POST_CONTAINER_KEYS = {
"acl": "ACL",
"bucket": "Bucket",
"createbucketconfiguration": "CreateBucketConfiguration",
"locationconstraint": "LocationConstraint",
"grantfullcontrol": "GrantFullControl",
"grantread": "GrantRead",
"grantreadacp": "GrantReadACP",
"grantwrite": "GrantWrite",
"grantwriteacp": "GrantWriteACP",
}
| {
"repo_name": "scottwernervt/cloudstorage",
"path": "src/cloudstorage/drivers/amazon.py",
"copies": "1",
"size": "21466",
"license": "mit",
"hash": -7361190719946671000,
"line_mean": 33.0190174326,
"line_max": 87,
"alpha_frac": 0.5910276717,
"autogenerated": false,
"ratio": 4.0833174814533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00020718800066481341,
"num_lines": 631
} |
"""Amazon SQS Connection."""
from vine import transform
from kombu.asynchronous.aws.connection import AsyncAWSQueryConnection
from .ext import boto3
from .message import AsyncMessage
from .queue import AsyncQueue
__all__ = ('AsyncSQSConnection',)
class AsyncSQSConnection(AsyncAWSQueryConnection):
    """Async SQS Connection.

    Thin asynchronous wrapper over the SQS Query API: each method builds
    the flat ``Name=Value`` parameter dict for one SQS action and delegates
    to the inherited ``get_object`` / ``get_list`` / ``get_status`` helpers,
    which invoke ``callback`` with the parsed result.
    """

    def __init__(self, sqs_connection, debug=0, region=None, **kwargs):
        # boto3 is imported lazily via .ext; it may be None when the
        # optional dependency is not installed.
        if boto3 is None:
            raise ImportError('boto3 is not installed')
        AsyncAWSQueryConnection.__init__(
            self,
            sqs_connection,
            region_name=region, debug=debug,
            **kwargs
        )

    def create_queue(self, queue_name,
                     visibility_timeout=None, callback=None):
        """Create a queue, optionally with a default visibility timeout."""
        params = {'QueueName': queue_name}
        if visibility_timeout:
            # SQS expects the timeout as a decimal-string number of seconds.
            params['DefaultVisibilityTimeout'] = format(
                visibility_timeout, 'd',
            )
        return self.get_object('CreateQueue', params,
                               callback=callback)

    def delete_queue(self, queue, force_deletion=False, callback=None):
        """Delete ``queue``.

        NOTE(review): ``force_deletion`` is accepted but never used —
        confirm whether it should influence the request or be removed.
        """
        return self.get_status('DeleteQueue', None, queue.id,
                               callback=callback)

    def get_queue_url(self, queue):
        """Resolve a queue name to its URL via the underlying (synchronous)
        boto3 client."""
        res = self.sqs_connection.get_queue_url(QueueName=queue)
        return res['QueueUrl']

    def get_queue_attributes(self, queue, attribute='All', callback=None):
        """Fetch one attribute (or 'All') of ``queue``."""
        return self.get_object(
            'GetQueueAttributes', {'AttributeName': attribute},
            queue.id, callback=callback,
        )

    def set_queue_attribute(self, queue, attribute, value, callback=None):
        """Set a single queue attribute."""
        return self.get_status(
            'SetQueueAttribute',
            {'Attribute.Name': attribute, 'Attribute.Value': value},
            queue.id, callback=callback,
        )

    def receive_message(
        self, queue, queue_url, number_messages=1, visibility_timeout=None,
        attributes=('ApproximateReceiveCount',), wait_time_seconds=None,
        callback=None
    ):
        """Receive up to ``number_messages`` messages from ``queue_url``.

        Messages are materialized as :class:`AsyncMessage` with ``queue``
        as their parent.  ``wait_time_seconds`` enables long polling.
        """
        params = {'MaxNumberOfMessages': number_messages}
        if visibility_timeout:
            params['VisibilityTimeout'] = visibility_timeout
        if attributes:
            # Query API numbers repeated values: AttributeName.1, .2, ...
            attrs = {}
            for idx, attr in enumerate(attributes):
                attrs['AttributeName.' + str(idx + 1)] = attr
            params.update(attrs)
        if wait_time_seconds is not None:
            params['WaitTimeSeconds'] = wait_time_seconds
        return self.get_list(
            'ReceiveMessage', params, [('Message', AsyncMessage)],
            queue_url, callback=callback, parent=queue,
        )

    def delete_message(self, queue, receipt_handle, callback=None):
        """Delete a single message by receipt handle."""
        return self.delete_message_from_handle(
            queue, receipt_handle, callback,
        )

    def delete_message_batch(self, queue, messages, callback=None):
        """Delete up to 10 messages in one batched request."""
        params = {}
        for i, m in enumerate(messages):
            # Batch entries are 1-indexed in the Query API.
            prefix = 'DeleteMessageBatchRequestEntry.{}'.format(i + 1)
            params.update({
                f'{prefix}.Id': m.id,
                f'{prefix}.ReceiptHandle': m.receipt_handle,
            })
        return self.get_object(
            'DeleteMessageBatch', params, queue.id,
            verb='POST', callback=callback,
        )

    def delete_message_from_handle(self, queue, receipt_handle,
                                   callback=None):
        """Delete a message given its receipt handle.

        NOTE(review): passes ``queue`` where sibling methods pass
        ``queue.id`` — verify which form get_status expects here.
        """
        return self.get_status(
            'DeleteMessage', {'ReceiptHandle': receipt_handle},
            queue, callback=callback,
        )

    def send_message(self, queue, message_content,
                     delay_seconds=None, callback=None):
        """Send one message, optionally delayed by ``delay_seconds``."""
        params = {'MessageBody': message_content}
        if delay_seconds:
            params['DelaySeconds'] = int(delay_seconds)
        return self.get_object(
            'SendMessage', params, queue.id,
            verb='POST', callback=callback,
        )

    def send_message_batch(self, queue, messages, callback=None):
        """Send a batch of messages; each entry is an (id, body, delay)
        triple."""
        params = {}
        for i, msg in enumerate(messages):
            prefix = 'SendMessageBatchRequestEntry.{}'.format(i + 1)
            params.update({
                f'{prefix}.Id': msg[0],
                f'{prefix}.MessageBody': msg[1],
                f'{prefix}.DelaySeconds': msg[2],
            })
        return self.get_object(
            'SendMessageBatch', params, queue.id,
            verb='POST', callback=callback,
        )

    def change_message_visibility(self, queue, receipt_handle,
                                  visibility_timeout, callback=None):
        """Change the visibility timeout of a single in-flight message."""
        return self.get_status(
            'ChangeMessageVisibility',
            {'ReceiptHandle': receipt_handle,
             'VisibilityTimeout': visibility_timeout},
            queue.id, callback=callback,
        )

    def change_message_visibility_batch(self, queue, messages, callback=None):
        """Batched visibility change; ``messages`` holds (message, timeout)
        pairs."""
        params = {}
        for i, t in enumerate(messages):
            pre = 'ChangeMessageVisibilityBatchRequestEntry.{}'.format(i + 1)
            params.update({
                f'{pre}.Id': t[0].id,
                f'{pre}.ReceiptHandle': t[0].receipt_handle,
                f'{pre}.VisibilityTimeout': t[1],
            })
        return self.get_object(
            'ChangeMessageVisibilityBatch', params, queue.id,
            verb='POST', callback=callback,
        )

    def get_all_queues(self, prefix='', callback=None):
        """List queues (optionally filtered by name prefix) as AsyncQueue
        objects."""
        params = {}
        if prefix:
            params['QueueNamePrefix'] = prefix
        return self.get_list(
            'ListQueues', params, [('QueueUrl', AsyncQueue)],
            callback=callback,
        )

    def get_queue(self, queue_name, callback=None):
        """Find the queue whose URL ends with ``queue_name``.

        Implemented as a prefix listing followed by a local filter.
        """
        # TODO Does not support owner_acct_id argument
        return self.get_all_queues(
            queue_name,
            transform(self._on_queue_ready, callback, queue_name),
        )
    # boto-compatible alias for get_queue.
    lookup = get_queue

    def _on_queue_ready(self, name, queues):
        # First queue whose URL ends with the requested name, or None.
        return next(
            (q for q in queues if q.url.endswith(name)), None,
        )

    def get_dead_letter_source_queues(self, queue, callback=None):
        """List queues configured with ``queue`` as their dead-letter
        target."""
        return self.get_list(
            'ListDeadLetterSourceQueues', {'QueueUrl': queue.url},
            [('QueueUrl', AsyncQueue)],
            callback=callback,
        )

    def add_permission(self, queue, label, aws_account_id, action_name,
                       callback=None):
        """Grant ``aws_account_id`` permission for ``action_name`` on the
        queue."""
        return self.get_status(
            'AddPermission',
            {'Label': label,
             'AWSAccountId': aws_account_id,
             'ActionName': action_name},
            queue.id, callback=callback,
        )

    def remove_permission(self, queue, label, callback=None):
        """Revoke the permission previously granted under ``label``."""
        return self.get_status(
            'RemovePermission', {'Label': label}, queue.id, callback=callback,
        )
| {
"repo_name": "celery/kombu",
"path": "kombu/asynchronous/aws/sqs/connection.py",
"copies": "1",
"size": "6831",
"license": "bsd-3-clause",
"hash": -4421437971180060700,
"line_mean": 34.3937823834,
"line_max": 78,
"alpha_frac": 0.5701947006,
"autogenerated": false,
"ratio": 4.331642358909321,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 193
} |
"""Amazon SQS queue implementation."""
from vine import transform
from .message import AsyncMessage
# BUG fix: was misspelled `_all__`, so the module's public API was never
# actually declared for star-imports.
__all__ = ['AsyncQueue']
def list_first(rs):
    """Return the sole element of *rs* when it contains exactly one item.

    Returns None for an empty list and also for lists with more than one
    element (callers request at most one message at a time).
    """
    if len(rs) != 1:
        return None
    return rs[0]
class AsyncQueue():
    """Async SQS Queue.

    Lightweight handle around a queue URL; every operation is delegated to
    the owning async connection and completes via ``callback``.
    """

    def __init__(self, connection=None, url=None, message_class=AsyncMessage):
        self.connection = connection
        self.url = url
        self.message_class = message_class
        # Cached copy of the queue's VisibilityTimeout attribute; populated
        # lazily by set_timeout()/_on_timeout_set().
        self.visibility_timeout = None

    def _NA(self, *args, **kwargs):
        # Placeholder for boto Queue API methods this async port does not
        # implement.
        raise NotImplementedError()
    count_slow = dump = save_to_file = save_to_filename = save = \
        save_to_s3 = load_from_s3 = load_from_file = load_from_filename = \
        load = clear = _NA

    def get_attributes(self, attributes='All', callback=None):
        """Fetch queue attributes via the connection."""
        return self.connection.get_queue_attributes(
            self, attributes, callback,
        )

    def set_attribute(self, attribute, value, callback=None):
        """Set a single queue attribute via the connection."""
        return self.connection.set_queue_attribute(
            self, attribute, value, callback,
        )

    def get_timeout(self, callback=None, _attr='VisibilityTimeout'):
        """Fetch the queue's visibility timeout, delivered to ``callback``
        as an int."""
        return self.get_attributes(
            _attr, transform(
                self._coerce_field_value, callback, _attr, int,
            ),
        )

    def _coerce_field_value(self, key, type, response):
        # Pull `key` out of the attribute response and cast it (e.g. to int).
        return type(response[key])

    def set_timeout(self, visibility_timeout, callback=None):
        """Set the visibility timeout and cache it locally on success."""
        return self.set_attribute(
            'VisibilityTimeout', visibility_timeout,
            transform(
                self._on_timeout_set, callback,
            )
        )

    def _on_timeout_set(self, visibility_timeout):
        if visibility_timeout:
            self.visibility_timeout = visibility_timeout
        return self.visibility_timeout

    def add_permission(self, label, aws_account_id, action_name,
                       callback=None):
        """Grant a permission on this queue."""
        return self.connection.add_permission(
            self, label, aws_account_id, action_name, callback,
        )

    def remove_permission(self, label, callback=None):
        """Revoke a permission previously granted under ``label``."""
        return self.connection.remove_permission(self, label, callback)

    def read(self, visibility_timeout=None, wait_time_seconds=None,
             callback=None):
        """Receive a single message; ``callback`` gets the message or None."""
        return self.get_messages(
            1, visibility_timeout,
            wait_time_seconds=wait_time_seconds,
            callback=transform(list_first, callback),
        )

    def write(self, message, delay_seconds=None, callback=None):
        """Send one message; on success copy the server-assigned id/md5
        back onto the original message object."""
        return self.connection.send_message(
            self, message.get_body_encoded(), delay_seconds,
            callback=transform(self._on_message_sent, callback, message),
        )

    def write_batch(self, messages, callback=None):
        """Send a batch of messages."""
        return self.connection.send_message_batch(
            self, messages, callback=callback,
        )

    def _on_message_sent(self, orig_message, new_message):
        # Propagate server-assigned identifiers to the caller's message.
        orig_message.id = new_message.id
        orig_message.md5 = new_message.md5
        return new_message

    def get_messages(self, num_messages=1, visibility_timeout=None,
                     attributes=None, wait_time_seconds=None, callback=None):
        """Receive up to ``num_messages`` messages.

        NOTE(review): calls receive_message without a queue_url argument —
        verify against AsyncSQSConnection.receive_message's signature, which
        takes (queue, queue_url, ...) positionally.
        """
        return self.connection.receive_message(
            self, number_messages=num_messages,
            visibility_timeout=visibility_timeout,
            attributes=attributes,
            wait_time_seconds=wait_time_seconds,
            callback=callback,
        )

    def delete_message(self, message, callback=None):
        """Delete one received message."""
        return self.connection.delete_message(self, message, callback)

    def delete_message_batch(self, messages, callback=None):
        """Delete a batch of received messages."""
        return self.connection.delete_message_batch(
            self, messages, callback=callback,
        )

    def change_message_visibility_batch(self, messages, callback=None):
        """Change visibility timeouts for a batch of (message, timeout)
        pairs."""
        return self.connection.change_message_visibility_batch(
            self, messages, callback=callback,
        )

    def delete(self, callback=None):
        """Delete the queue itself."""
        return self.connection.delete_queue(self, callback=callback)

    def count(self, page_size=10, vtimeout=10, callback=None,
              _attr='ApproximateNumberOfMessages'):
        """Deliver the approximate message count (as int) to ``callback``.

        ``page_size`` and ``vtimeout`` are kept for boto API compatibility
        and are not used by this implementation.
        """
        return self.get_attributes(
            _attr, callback=transform(
                self._coerce_field_value, callback, _attr, int,
            ),
        )
| {
"repo_name": "ZoranPavlovic/kombu",
"path": "kombu/asynchronous/aws/sqs/queue.py",
"copies": "2",
"size": "4368",
"license": "bsd-3-clause",
"hash": 3516732837316888000,
"line_mean": 33.125,
"line_max": 78,
"alpha_frac": 0.6220238095,
"autogenerated": false,
"ratio": 4.156041864890581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 128
} |
"""Amazon SQS Transport.
Amazon SQS transport module for Kombu. This package implements an AMQP-like
interface on top of Amazons SQS service, with the goal of being optimized for
high performance and reliability.
The default settings for this module are focused now on high performance in
task queue situations where tasks are small, idempotent and run very fast.
SQS Features supported by this transport:
Long Polling:
https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/
sqs-long-polling.html
Long polling is enabled by setting the `wait_time_seconds` transport
option to a number > 1. Amazon supports up to 20 seconds. This is
enabled with 10 seconds by default.
Batch API Actions:
https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/
sqs-batch-api.html
The default behavior of the SQS Channel.drain_events() method is to
request up to the 'prefetch_count' messages on every request to SQS.
These messages are stored locally in a deque object and passed back
to the Transport until the deque is empty, before triggering a new
API call to Amazon.
This behavior dramatically speeds up the rate that you can pull tasks
from SQS when you have short-running tasks (or a large number of workers).
When a Celery worker has multiple queues to monitor, it will pull down
up to 'prefetch_count' messages from queueA and work on them all before
moving on to queueB. If queueB is empty, it will wait up until
'polling_interval' expires before moving back and checking on queueA.
"""
from __future__ import absolute_import, unicode_literals
import base64
import socket
import string
import uuid
from vine import transform, ensure_promise, promise
from kombu.asynchronous import get_event_loop
from kombu.asynchronous.aws.ext import boto3, exceptions
from kombu.asynchronous.aws.sqs.connection import AsyncSQSConnection
from kombu.asynchronous.aws.sqs.message import AsyncMessage
from kombu.five import Empty, range, string_t, text_t
from kombu.log import get_logger
from kombu.utils import scheduling
from kombu.utils.encoding import bytes_to_str, safe_str
from kombu.utils.json import loads, dumps
from kombu.utils.objects import cached_property
from . import virtual
logger = get_logger(__name__)
# dots are replaced by dash, all other punctuation
# replaced by underscore.
CHARS_REPLACE_TABLE = {
ord(c): 0x5f for c in string.punctuation if c not in '-_.'
}
CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' -> '-'
#: SQS bulk get supports a maximum of 10 messages at a time.
SQS_MAX_MESSAGES = 10
def maybe_int(x):
    """Try to convert `x` to int, or return `x` unchanged if that fails.

    Catches TypeError in addition to ValueError so that non-numeric
    types (None, lists, dicts, ...) are passed through instead of
    raising — `int(None)` raises TypeError, not ValueError.
    """
    try:
        return int(x)
    except (TypeError, ValueError):
        return x
class Channel(virtual.Channel):
"""SQS Channel."""
default_region = 'us-east-1'
default_visibility_timeout = 1800 # 30 minutes.
default_wait_time_seconds = 10 # up to 20 seconds max
domain_format = 'kombu%(vhost)s'
_asynsqs = None
_sqs = None
_queue_cache = {}
_noack_queues = set()
    def __init__(self, *args, **kwargs):
        """Create the channel; requires boto3 to be importable.

        NOTE(review): ``_queue_cache`` / ``_noack_queues`` are class-level
        attributes shared by all Channel instances — confirm this sharing
        is intentional.
        """
        if boto3 is None:
            raise ImportError('boto3 is not installed')
        super(Channel, self).__init__(*args, **kwargs)
        # SQS blows up if you try to create a new queue when one already
        # exists but with a different visibility_timeout.  This prepopulates
        # the queue_cache to protect us from recreating
        # queues that are known to already exist.
        self._update_queue_cache(self.queue_name_prefix)
        # Event loop hub: explicit via kwargs or the current global loop.
        self.hub = kwargs.get('hub') or get_event_loop()
    def _update_queue_cache(self, queue_name_prefix):
        """Populate ``_queue_cache`` (queue name -> queue URL) from a
        ListQueues call filtered by ``queue_name_prefix``."""
        resp = self.sqs.list_queues(QueueNamePrefix=queue_name_prefix)
        for url in resp.get('QueueUrls', []):
            # The queue name is the last path segment of its URL.
            queue_name = url.split('/')[-1]
            self._queue_cache[queue_name] = url
    def basic_consume(self, queue, no_ack, *args, **kwargs):
        """Start consuming ``queue``; remember no-ack queues so received
        messages can be deleted immediately, and start the async receive
        loop when a hub is available."""
        if no_ack:
            self._noack_queues.add(queue)
        if self.hub:
            self._loop1(queue)
        return super(Channel, self).basic_consume(
            queue, no_ack, *args, **kwargs
        )
    def basic_cancel(self, consumer_tag):
        """Cancel a consumer and drop its queue from the no-ack set."""
        if consumer_tag in self._consumers:
            queue = self._tag_to_queue[consumer_tag]
            self._noack_queues.discard(queue)
        return super(Channel, self).basic_cancel(consumer_tag)
    def drain_events(self, timeout=None, callback=None, **kwargs):
        """Return a single payload message from one of our queues.

        Messages are delivered through the poll cycle (see ``_get_bulk``),
        not returned directly from this method.

        Raises:
            Queue.Empty: if no messages available.
        """
        # If we're not allowed to consume or have no consumers, raise Empty
        if not self._consumers or not self.qos.can_consume():
            raise Empty()
        # At this point, go and get more messages from SQS
        self._poll(self.cycle, callback, timeout=timeout)
    def _reset_cycle(self):
        """Reset the consume cycle.

        Returns:
            FairCycle: object that points to our _get_bulk() method
                rather than the standard _get() method.  This allows for
                multiple messages to be returned at once from SQS (
                based on the prefetch limit).
        """
        self._cycle = scheduling.FairCycle(
            self._get_bulk, self._active_queues, Empty,
        )
def entity_name(self, name, table=CHARS_REPLACE_TABLE):
"""Format AMQP queue name into a legal SQS queue name."""
if name.endswith('.fifo'):
partial = name.rstrip('.fifo')
partial = text_t(safe_str(partial)).translate(table)
return partial + '.fifo'
else:
return text_t(safe_str(name)).translate(table)
    def canonical_queue_name(self, queue_name):
        """Return the prefixed, SQS-legal name for ``queue_name``."""
        return self.entity_name(self.queue_name_prefix + queue_name)
    def _new_queue(self, queue, **kwargs):
        """Ensure a queue with given name exists in SQS.

        Returns the queue URL (or ``queue`` unchanged when it is not a
        string, e.g. already a URL/queue object).
        """
        if not isinstance(queue, string_t):
            return queue
        # Translate to SQS name for consistency with initial
        # _queue_cache population.
        queue = self.canonical_queue_name(queue)
        # The SQS ListQueues method only returns 1000 queues. When you have
        # so many queues, it's possible that the queue you are looking for is
        # not cached. In this case, we could update the cache with the exact
        # queue name first.
        if queue not in self._queue_cache:
            self._update_queue_cache(queue)
        try:
            return self._queue_cache[queue]
        except KeyError:
            attributes = {'VisibilityTimeout': str(self.visibility_timeout)}
            if queue.endswith('.fifo'):
                attributes['FifoQueue'] = 'true'
            # NOTE(review): the first assignment briefly stores the full
            # create_queue response dict in the cache before it is replaced
            # by the URL on the next line — the chained assignment looks
            # redundant; verify nothing reads the cache in between.
            resp = self._queue_cache[queue] = self.sqs.create_queue(
                QueueName=queue, Attributes=attributes)
            self._queue_cache[queue] = resp['QueueUrl']
            return resp['QueueUrl']
    def _delete(self, queue, *args, **kwargs):
        """Delete queue by name."""
        super(Channel, self)._delete(queue)
        # Drop the stale URL so a future _new_queue() recreates the queue.
        self._queue_cache.pop(queue, None)
    def _put(self, queue, message, **kwargs):
        """Put message onto queue.

        The payload is JSON-serialized and base64-wrapped via AsyncMessage.
        For FIFO queues, MessageGroupId / MessageDeduplicationId are taken
        from message properties when present, otherwise defaulted
        ('default' group, random UUID for deduplication).
        """
        q_url = self._new_queue(queue)
        kwargs = {'QueueUrl': q_url,
                  'MessageBody': AsyncMessage().encode(dumps(message))}
        if queue.endswith('.fifo'):
            if 'MessageGroupId' in message['properties']:
                kwargs['MessageGroupId'] = \
                    message['properties']['MessageGroupId']
            else:
                kwargs['MessageGroupId'] = 'default'
            if 'MessageDeduplicationId' in message['properties']:
                kwargs['MessageDeduplicationId'] = \
                    message['properties']['MessageDeduplicationId']
            else:
                kwargs['MessageDeduplicationId'] = str(uuid.uuid4())
        self.sqs.send_message(**kwargs)
    def _message_to_python(self, message, queue_name, queue):
        """Decode one raw SQS message dict into a kombu payload dict.

        For no-ack queues the message is deleted from SQS immediately;
        otherwise the SQS receipt handle is recorded as the delivery tag
        so the message can be acked (deleted) later.
        """
        # Body was base64-encoded by AsyncMessage on the producer side.
        body = base64.b64decode(message['Body'].encode())
        payload = loads(bytes_to_str(body))
        if queue_name in self._noack_queues:
            queue = self._new_queue(queue_name)
            self.asynsqs.delete_message(queue, message['ReceiptHandle'])
        else:
            try:
                properties = payload['properties']
                delivery_info = payload['properties']['delivery_info']
            except KeyError:
                # json message not sent by kombu?
                delivery_info = {}
                properties = {'delivery_info': delivery_info}
                payload.update({
                    'body': bytes_to_str(body),
                    'properties': properties,
                })
            # set delivery tag to SQS receipt handle
            delivery_info.update({
                'sqs_message': message, 'sqs_queue': queue,
            })
            properties['delivery_tag'] = message['ReceiptHandle']
        return payload
def _messages_to_python(self, messages, queue):
"""Convert a list of SQS Message objects into Payloads.
This method handles converting SQS Message objects into
Payloads, and appropriately updating the queue depending on
the 'ack' settings for that queue.
Arguments:
messages (SQSMessage): A list of SQS Message objects.
queue (str): Name representing the queue they came from.
Returns:
List: A list of Payload objects
"""
q = self._new_queue(queue)
return [self._message_to_python(m, queue, q) for m in messages]
    def _get_bulk(self, queue,
                  max_if_unlimited=SQS_MAX_MESSAGES, callback=None):
        """Try to retrieve multiple messages off ``queue``.

        Where :meth:`_get` returns a single Payload object, this method
        returns a list of Payload objects.  The number of objects returned
        is determined by the total number of messages available in the queue
        and the number of messages the QoS object allows (based on the
        prefetch_count).

        Note:
            Ignores QoS limits so caller is responsible for checking
            that we are allowed to consume at least one message from the
            queue.  get_bulk will then ask QoS for an estimate of
            the number of extra messages that we can consume.

        Arguments:
            queue (str): The queue name to pull from.

        Returns:
            List[Message]
        """
        # drain_events calls `can_consume` first, consuming
        # a token, so we know that we are allowed to consume at least
        # one message.

        # Note: ignoring max_messages for SQS with boto3
        max_count = self._get_message_estimate()
        if max_count:
            q_url = self._new_queue(queue)
            resp = self.sqs.receive_message(
                QueueUrl=q_url, MaxNumberOfMessages=max_count,
                WaitTimeSeconds=self.wait_time_seconds)
            if resp.get('Messages'):
                # Bodies arrive base64-encoded; decode each in place
                # before handing the batch to the payload converter.
                for m in resp['Messages']:
                    m['Body'] = AsyncMessage(body=m['Body']).decode()
                for msg in self._messages_to_python(resp['Messages'], queue):
                    self.connection._deliver(msg, queue)
                return
        # Nothing was delivered: QoS exhausted or queue empty.
        raise Empty()

    def _get(self, queue):
        """Try to retrieve a single message off ``queue``."""
        q_url = self._new_queue(queue)
        resp = self.sqs.receive_message(
            QueueUrl=q_url, MaxNumberOfMessages=1,
            WaitTimeSeconds=self.wait_time_seconds)
        if resp.get('Messages'):
            # Decode the base64 body in place, then convert to a payload.
            body = AsyncMessage(body=resp['Messages'][0]['Body']).decode()
            resp['Messages'][0]['Body'] = body
            return self._messages_to_python(resp['Messages'], queue)[0]
        raise Empty()
    def _loop1(self, queue, _=None):
        # Schedule the next poll of ``queue`` on the event-loop hub.
        # The unused second parameter absorbs a promise result argument.
        self.hub.call_soon(self._schedule_queue, queue)
def _schedule_queue(self, queue):
if queue in self._active_queues:
if self.qos.can_consume():
self._get_bulk_async(
queue, callback=promise(self._loop1, (queue,)),
)
else:
self._loop1(queue)
def _get_message_estimate(self, max_if_unlimited=SQS_MAX_MESSAGES):
maxcount = self.qos.can_consume_max_estimate()
return min(
max_if_unlimited if maxcount is None else max(maxcount, 1),
max_if_unlimited,
)
    def _get_bulk_async(self, queue,
                        max_if_unlimited=SQS_MAX_MESSAGES, callback=None):
        """Asynchronously fetch a batch of messages, honouring QoS.

        When QoS currently allows no messages the ``callback`` is still
        fulfilled (with an empty list) so the polling loop keeps running.
        """
        maxcount = self._get_message_estimate()
        if maxcount:
            return self._get_async(queue, maxcount, callback=callback)
        # Not allowed to consume, make sure to notify callback..
        callback = ensure_promise(callback)
        callback([])
        return callback

    def _get_async(self, queue, count=1, callback=None):
        """Start an async receive of up to ``count`` messages."""
        q = self._new_queue(queue)
        qname = self.canonical_queue_name(queue)
        return self._get_from_sqs(
            qname, count=count, connection=self.asynsqs,
            # _on_messages_ready converts/delivers, then the caller's
            # callback runs via the transform wrapper.
            callback=transform(self._on_messages_ready, callback, q, queue),
        )
def _on_messages_ready(self, queue, qname, messages):
if 'Messages' in messages and messages['Messages']:
callbacks = self.connection._callbacks
for msg in messages['Messages']:
msg_parsed = self._message_to_python(msg, qname, queue)
callbacks[qname](msg_parsed)
    def _get_from_sqs(self, queue,
                      count=1, connection=None, callback=None):
        """Retrieve and handle messages from SQS.

        Uses long polling and returns :class:`~vine.promises.promise`.
        """
        # Default to the queue's own connection when none is supplied.
        connection = connection if connection is not None else queue.connection
        return connection.receive_message(
            queue, number_messages=count,
            wait_time_seconds=self.wait_time_seconds,
            callback=callback,
        )
    def _restore(self, message,
                 unwanted_delivery_info=('sqs_message', 'sqs_queue')):
        """Requeue ``message``, first stripping raw SQS objects."""
        for unwanted_key in unwanted_delivery_info:
            # Remove objects that aren't JSON serializable (Issue #1108).
            message.delivery_info.pop(unwanted_key, None)
        return super(Channel, self)._restore(message)

    def basic_ack(self, delivery_tag, multiple=False):
        """Acknowledge ``delivery_tag`` and delete the message from SQS."""
        try:
            message = self.qos.get(delivery_tag).delivery_info
            sqs_message = message['sqs_message']
        except KeyError:
            # Unknown tag or no SQS metadata: nothing to delete remotely,
            # but the tag is still acked locally below.
            pass
        else:
            self.asynsqs.delete_message(message['sqs_queue'],
                                        sqs_message['ReceiptHandle'])
        super(Channel, self).basic_ack(delivery_tag)

    def _size(self, queue):
        """Return the number of messages in a queue."""
        url = self._new_queue(queue)
        resp = self.sqs.get_queue_attributes(
            QueueUrl=url,
            AttributeNames=['ApproximateNumberOfMessages'])
        # SQS only reports an *approximate* count.
        return int(resp['Attributes']['ApproximateNumberOfMessages'])

    def _purge(self, queue):
        """Delete all current messages in a queue."""
        q = self._new_queue(queue)
        # SQS is slow at registering messages, so run for a few
        # iterations to ensure messages are detected and deleted.
        size = 0
        for i in range(10):
            # NOTE(review): the count accumulates across iterations, so the
            # same messages may be counted more than once -- confirm intent.
            size += int(self._size(queue))
            if not size:
                break
            self.sqs.purge_queue(QueueUrl=q)
        return size

    def close(self):
        """Close the channel via the base class."""
        super(Channel, self).close()
        # if self._asynsqs:
        #     try:
        #         self.asynsqs.close()
        #     except AttributeError as exc:  # FIXME ???
        #         if "can't set attribute" not in str(exc):
        #             raise

    @property
    def sqs(self):
        # Lazily-created boto3 SQS client; credentials come from the
        # connection info and the region from transport options.
        if self._sqs is None:
            session = boto3.session.Session(
                region_name=self.region,
                aws_access_key_id=self.conninfo.userid,
                aws_secret_access_key=self.conninfo.password,
            )
            # Default to SSL unless transport options explicitly disable it.
            is_secure = self.is_secure if self.is_secure is not None else True
            client_kwargs = {
                'use_ssl': is_secure
            }
            if self.endpoint_url is not None:
                client_kwargs['endpoint_url'] = self.endpoint_url
            self._sqs = session.client('sqs', **client_kwargs)
        return self._sqs

    @property
    def asynsqs(self):
        # Lazily-created async SQS wrapper around the boto3 client.
        if self._asynsqs is None:
            self._asynsqs = AsyncSQSConnection(
                sqs_connection=self.sqs,
                region=self.region
            )
        return self._asynsqs
    @property
    def conninfo(self):
        # Connection parameters (userid/password/hostname/port).
        return self.connection.client

    @property
    def transport_options(self):
        # User-supplied transport options dict.
        return self.connection.client.transport_options

    @cached_property
    def visibility_timeout(self):
        # Seconds a received message stays hidden before redelivery.
        return (self.transport_options.get('visibility_timeout') or
                self.default_visibility_timeout)

    @cached_property
    def queue_name_prefix(self):
        # Optional prefix applied to all queue names.
        return self.transport_options.get('queue_name_prefix', '')

    @cached_property
    def supports_fanout(self):
        # No native fanout support in this transport.
        return False

    @cached_property
    def region(self):
        return self.transport_options.get('region') or self.default_region

    @cached_property
    def regioninfo(self):
        return self.transport_options.get('regioninfo')

    @cached_property
    def is_secure(self):
        # May be None when unset; callers treat None as "use the default".
        return self.transport_options.get('is_secure')

    @cached_property
    def port(self):
        return self.transport_options.get('port')

    @cached_property
    def endpoint_url(self):
        # Build an explicit endpoint only when a hostname was configured;
        # otherwise returns None and boto3 uses the regional default.
        if self.conninfo.hostname is not None:
            scheme = 'https' if self.is_secure else 'http'
            if self.conninfo.port is not None:
                port = ':{}'.format(self.conninfo.port)
            else:
                port = ''
            return '{}://{}{}'.format(
                scheme,
                self.conninfo.hostname,
                port
            )

    @cached_property
    def wait_time_seconds(self):
        # Long-poll duration for receive_message.
        return self.transport_options.get('wait_time_seconds',
                                          self.default_wait_time_seconds)
class Transport(virtual.Transport):
    """SQS Transport.

    Virtual transport backed by Amazon SQS (boto3).
    """

    Channel = Channel

    # Seconds between polls when the event loop falls back to polling.
    polling_interval = 1
    # Default long-poll time; overridden per channel via transport options.
    wait_time_seconds = 0
    default_port = None
    # botocore errors surface as connection or channel errors.
    connection_errors = (
        virtual.Transport.connection_errors +
        (exceptions.BotoCoreError, socket.error)
    )
    channel_errors = (
        virtual.Transport.channel_errors + (exceptions.BotoCoreError,)
    )
    driver_type = 'sqs'
    driver_name = 'sqs'
    implements = virtual.Transport.implements.extend(
        asynchronous=True,
        exchange_type=frozenset(['direct']),
    )

    @property
    def default_connection_params(self):
        return {'port': self.default_port}
| {
"repo_name": "pexip/os-kombu",
"path": "kombu/transport/SQS.py",
"copies": "1",
"size": "19129",
"license": "bsd-3-clause",
"hash": 5350279294612438000,
"line_mean": 35.6455938697,
"line_max": 79,
"alpha_frac": 0.6048930943,
"autogenerated": false,
"ratio": 4.137789314298075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 522
} |
'''Amazon Web Services helpers'''
from django.conf import settings
from M2Crypto import EVP
import base64
import boto.cloudfront
import boto.cloudfront.object
import boto.s3.bucket
import boto.s3.connection
import boto.s3.key
import facade
import logging
import os
import time
import urllib
import urlparse
try:
import json
except ImportError:
import simplejson as json
logger = logging.getLogger('vod.awsutils')
#===============================================================================
# Connection Helpers
#===============================================================================
class CloudFrontConnection(boto.cloudfront.CloudFrontConnection):
    '''Helper class to construct a CloudFront Connection with no args

    Credentials are taken from Django settings.'''

    def __init__(self):
        super(CloudFrontConnection, self).__init__(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)


class S3Connection(boto.s3.connection.S3Connection):
    '''Helper class to construct an S3 Connection with no args'''

    def __init__(self, **kwargs):
        # Extra kwargs are passed straight through to boto.
        super(S3Connection, self).__init__(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, **kwargs)

    def get_my_bucket(self, validate=False):
        '''Get my bucket! Handily retrieves the bucket defined in settings.'''
        # validate=True will trigger a connection to AWS, which we generally
        # don't want
        return self.get_bucket(settings.AWS_STORAGE_BUCKET_NAME, validate=validate)
#===============================================================================
# CloudFront Helpers
#===============================================================================
# S3 Buckets can be related to both a Distribution and a
# StreamingDistribution, so it's useful to have both object types
class CloudFrontObject(boto.s3.key.Key):
    '''A boto CloudFront "Object" replacement

    This doesn't use bucket.distribution, because boto buckets don't have that
    property (boto bug)'''

    def __init__(self, name):
        # The object lives in the bucket named in Django settings.
        s3_cxn = S3Connection()
        super(CloudFrontObject, self).__init__(s3_cxn.get_my_bucket(), name)

    def __repr__(self):
        return '<Object: %s>' % (self.name)

    def url(self, scheme='https'):
        # Plain (unsigned) CloudFront URL for this object.
        url = '%s://' % scheme
        url += settings.AWS_CLOUDFRONT_DIST_HOSTNAME
        url += '/'
        url += self.name
        return url

    def generate_url(self, expires=None, begins=None, ipaddress=None):
        '''draconian URL generation, uses preset object expiry'''
        # Default expiry: now + configured lifetime (epoch seconds).
        if expires is None:
            expires = int(time.time()) + settings.AWS_URL_LIFETIME
        policy = CloudFrontPolicy(self, expires, begins, ipaddress)
        return '%s?%s' % (self.url(), policy.query_string)
class CloudFrontStreamingObject(CloudFrontObject):
    '''A boto CloudFront "StreamingObject" replacement

    This doesn't use bucket.distribution, because boto buckets don't have that
    property (boto bug)'''

    def __repr__(self):
        return '<StreamingObject: %s>' % (self.name)

    def url(self, scheme='rtmpe'):
        '''Overrides scheme to rtmpe by default, adds mp4: prefix if needed'''
        url = '%s://' % scheme
        url += settings.AWS_CLOUDFRONT_STREAMING_DIST_HOSTNAME
        url += '/cfx/st/'
        url += self.name
        try:
            encoded_video = facade.models.EncodedVideo.objects.get(file=self.name)
            # Handle the mp4: prefix for h.264 vids
            if encoded_video.video_codec in ('libx264',):
                # Quick and easy, but possibly error-prone
                url = url.replace('cfx/st/', 'cfx/st/mp4:')
        except facade.models.EncodedVideo.DoesNotExist:
            # Fixed typo in log message ("Unabled" -> "Unable").
            logger.debug('Unable to determine if video %s needs mp4 prefix; guessing based on extension' % self.name)
            _, extension = os.path.splitext(url)
            # All transcoded videos should be .f4v, so this is a safe guess
            if extension in ('.f4v',):
                url = url.replace('cfx/st/', 'cfx/st/mp4:')
        return url

    def generate_url(self, expires=None, begins=None, ipaddress=None):
        '''draconian URL generation, uses preset object expiry'''
        # NOTE(review): identical to CloudFrontObject.generate_url; kept so
        # the subclass's behavior stays byte-for-byte the same as before.
        if expires is None:
            expires = int(time.time()) + settings.AWS_URL_LIFETIME
        policy = CloudFrontPolicy(self, expires, begins, ipaddress)
        return '%s?%s' % (self.url(), policy.query_string)
#=======================================================================
# S3 Helpers
#=======================================================================
# Iterating through the distributions sucks, but there's nothing in the Amazon
# API to make this easier. It would probably be faster/better to define the
# CF distribution ID in the settings file.
class DistBucket(boto.s3.bucket.Bucket):
    '''An S3 Bucket with a CloudFront distribution attached'''

    def __init__(self, *args, **kwargs):
        super(DistBucket, self).__init__(*args, **kwargs)
        # Scan every distribution; one whose origin bucket name matches
        # this bucket's name wins (the last match, if several).
        cf_cxn = CloudFrontConnection()
        for summary in cf_cxn.get_all_distributions():
            origin_bucket = summary.origin.split('.')[0]
            if origin_bucket == self.name:
                self.distribution = summary.get_distribution()
        if not hasattr(self, 'distribution'):
            logger.error('No distribution for this bucket')
class StreamingDistBucket(boto.s3.bucket.Bucket):
    '''An S3 Bucket with a CloudFront distribution attached'''

    def __init__(self, *args, **kwargs):
        super(StreamingDistBucket, self).__init__(*args, **kwargs)
        # Same lookup as DistBucket, but over streaming distributions.
        cf_cxn = CloudFrontConnection()
        for summary in cf_cxn.get_all_streaming_distributions():
            origin_bucket = summary.origin.split('.')[0]
            if origin_bucket == self.name:
                self.distribution = summary.get_distribution()
        if not hasattr(self, 'distribution'):
            logger.error('No streaming distribution for this bucket')
#===============================================================================
# ACL/Signed URL helpers
#===============================================================================
def b64awsencode(s):
    '''base64 encode for Amazon AWS, using -~_ instead of +/='''
    # NOTE(review): Python 2 style -- under Python 3 both the input and
    # altchars must be bytes.  '-~' replaces '+/' and '_' replaces the '='
    # padding to make the value URL-safe for CloudFront.
    return base64.b64encode(s, altchars='-~').replace('=', '_')


def b64awsdecode(s):
    '''base64 decode for Amazon AWS, using -~_ instead of +/='''
    # Inverse of b64awsencode: restore '=' padding, then decode.
    return base64.b64decode(s.replace('_', '='), altchars='-~')
class CloudFrontPolicy(object):
    '''A signed CloudFront custom-access policy for one object.

    Builds the JSON policy (expiry, optional start time, optional source
    IP restriction) and the signed query string that carries it.
    '''

    def __init__(self, cf_object, expires, begins=None, ipaddress=None):
        # cf_object: CloudFrontObject / CloudFrontStreamingObject
        # expires / begins: epoch seconds; ipaddress: optional IP/CIDR
        self.cf_object = cf_object
        self.expires = expires
        self.begins = begins
        self.ipaddress = ipaddress

    def _query_string(self):
        '''Generate a signed query string for an Amazon CloudFront Object

        :param cf_obj: A cloudfront Object or StreamingObject
        :type cf_obj: boto.cloudfront.object.Object
        :param json_policy: JSON Policy to be signed in the query string
        :type json_policy: str

        This is just the query string part of making signed URLs
        '''
        json_policy = self.json_policy
        b64_json_policy = b64awsencode(json_policy)
        key = EVP.load_key_string(settings.AWS_CLOUDFRONT_SIGNING_KEY)
        # Explicitly use sha1 for signing, per AWS requirements
        key.reset_context(md='sha1')
        key.sign_init()
        key.sign_update(json_policy)
        signature = key.sign_final()
        b64_signature = b64awsencode(signature)
        # NOTE(review): "COULDFRONT" looks like a typo for "CLOUDFRONT" --
        # confirm which name the settings module actually defines before
        # renaming it here.
        query_string = "Policy=%s&Signature=%s&Key-Pair-Id=%s" % (
            b64_json_policy, b64_signature, settings.AWS_COULDFRONT_SIGNING_KEY_ID
        )
        return query_string

    def _json_policy(self):
        '''Create a JSON policy for signing'''
        resource = self.cf_object.url()
        # Handle cloudfront stream resource string
        if resource.lower().startswith('rtmp'):
            # Capture everything after /cfx/st/; just need the stream itself
            resource = resource.partition('/cfx/st/')[2]
            # The mp4: prefix should be excluded from the policy resource
            # entry.  BUG FIX: the old resource.lstrip('mp4:') stripped any
            # of the characters {m, p, 4, :} from the front (lstrip takes a
            # character set, not a prefix), mangling names like
            # 'movie.f4v' -> 'ovie.f4v'.  Remove the literal prefix instead.
            if resource.startswith('mp4:'):
                resource = resource[len('mp4:'):]
        conditions = dict()
        conditions["DateLessThan"] = {"AWS:EpochTime": self.expires}
        if self.begins:
            conditions["DateGreaterThan"] = {"AWS:EpochTime": self.begins}
        if self.ipaddress:
            conditions["IpAddress"] = {"AWS:SourceIp": self.ipaddress}
        policy = {
            "Statement": [
                {
                    "Resource": resource,
                    "Condition": conditions,
                }
            ]
        }
        # separators remove whitespace, the strip is probably paranoid
        json_policy = json.dumps(policy, separators=(',', ':'))
        logger.log(logging.getLevelName('TRACE'),
                   'json custom policy: %s' % json_policy)
        return json_policy

    # Handy properties
    query_string = property(_query_string)
    json_policy = property(_json_policy)
| {
"repo_name": "AmericanResearchInstitute/poweru-server",
"path": "vod_aws/awsutils.py",
"copies": "1",
"size": "9217",
"license": "bsd-3-clause",
"hash": -1412536849895908600,
"line_mean": 38.2212765957,
"line_max": 118,
"alpha_frac": 0.5946620375,
"autogenerated": false,
"ratio": 4.2416014726185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023133756661434003,
"num_lines": 235
} |
# AMBIGUITY = 0
def _create_list_from(value, clazz):
result = []
if isinstance(value, list):
for data in value:
new_entry = clazz(data)
result.append(new_entry)
else:
new_entry = clazz(value)
result.append(new_entry)
return result
def _get_qualified_name(module, clazz):
return module + '.' + clazz
def _get_class(module, qualified_name):
path = qualified_name.split('.')
for component in path[1:len(path) - 1]:
module = getattr(module, component)
clazz = getattr(module, path[len(path) - 1])
return clazz
class BugReports(object):
    """Collection of BugReport objects parsed from an XML-derived dict."""

    def __init__(self, xml):
        # xml is a dict (e.g. from xmltodict); expects xml['root']['BugReport'].
        if 'root' in xml and 'BugReport' in xml['root']:
            module = __import__(self.__module__)
            clazz = _get_class(module, _get_qualified_name(self.__module__, BugReport.__name__))
            self.reports = _create_list_from(xml['root']['BugReport'], clazz)

    @property
    def reports(self):
        return self._reports

    @reports.setter
    def reports(self, value):
        self._reports = value

    def merge(self, entity):
        """Merge another BugReports collection into this one, by report id."""
        if not isinstance(entity, self.__class__):
            raise Exception('Cannot merge data with type: {}'.format(type(entity)))
        for bug_report in entity.reports:
            if self.__contains__(bug_report):
                actual_bug_report = next(iter(self.get(bug_report)))
                actual_bug_report.merge(bug_report)

    def get(self, item):
        # Reports whose id matches item.id (a list on Python 2).
        return filter(lambda bug_report: bug_report.id == item.id, self.reports)

    def map_labels_to_sentences(self, filter_ambiguity=True):
        """Map each known label to the sentences carrying it."""
        result = {}
        for label in SentenceLabel.ordered_labels():
            result[label] = self._sentences_with(label, filter_ambiguity)
            print "{}: {}".format(label, len(result[label]))
        # sentences_count = 0
        # bugs_count = 0
        # comments_count = 0
        # for report in self.reports:
        #     for text in report.comments:
        #         for sentence in text.text:
        #             sentence.label_disambiguity()
        #             sentences_count += 1
        #         comments_count += 1
        #     bugs_count += 1
        #
        #
        # print bugs_count
        # print comments_count
        # print sentences_count
        # print AMBIGUITY
        return result

    def _sentences_with(self, label, filter_ambiguity=True):
        # Gather sentence texts for ``label`` across every report/comment.
        result = []
        for report in self.reports:
            for text in report.comments:
                result += text.sentences_with(label, filter_ambiguity)
        return result

    def __contains__(self, item):
        # NOTE(review): relies on Python 2 filter() returning a list; a
        # Python 3 filter object is always truthy.
        result = self.get(item)
        if result:
            return True
        return False

    def __getattr__(self, attr):
        # Missing attributes resolve to None instead of raising.
        return None
class BugReport(object):
    """One bug report: id, title, comment thread and annotator labels."""

    def __init__(self, xml):
        # xml is a single '<BugReport>' element as a dict.
        module = __import__(self.__module__)
        if '@ID' in xml:
            self.id = xml['@ID']
        if 'Title' in xml:
            self.title = xml['Title']
        if 'Turn' in xml:
            # Each 'Turn' is one comment in the discussion thread.
            clazz = _get_class(module, _get_qualified_name(self.__module__, Comment.__name__))
            self.comments = _create_list_from(xml['Turn'], clazz)
        if 'Annotation' in xml:
            clazz = _get_class(module, _get_qualified_name(self.__module__, Annotation.__name__))
            self.annotations = _create_list_from(xml['Annotation'], clazz)

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, value):
        self._title = value

    @property
    def comments(self):
        return self._comments

    @comments.setter
    def comments(self, value):
        self._comments = value

    @property
    def annotations(self):
        return self._annotations

    @annotations.setter
    def annotations(self, value):
        self._annotations = value

    def merge(self, entity):
        """Merge another BugReport with the same id into this one."""
        if not isinstance(entity, self.__class__):
            raise Exception('Cannot merge data with type: {}'.format(type(entity)))
        if self.id != entity.id:
            raise Exception('Cannot merge data with different IDs: self.{} != entity.{}'.format(self.id, entity.id))
        # __getattr__ yields None for missing attributes, so these checks
        # also cover reports parsed without comments/annotations.
        if self.comments is None:
            self.comments = entity.comments
        if self.annotations is None:
            self.annotations = entity.annotations
        if self.comments is not None and self.annotations is not None:
            self._merge_comments_annotations()

    def _merge_comments_annotations(self):
        # Attach each sentence's labels, looked up by sentence id.
        for comment in self.comments:
            for sentence in comment.text:
                id = sentence.id
                labels = self._get_labels_for_sentence(id)
                sentence.labels = labels

    def _get_labels_for_sentence(self, id):
        # Collect labels from every annotator for the given sentence id.
        result = []
        for annotation in self.annotations:
            partial_labels = annotation.labels_for(id)
            result += partial_labels
        return result

    def __getattr__(self, attr):
        # Missing attributes resolve to None instead of raising.
        return None
class Comment(object):
    """One comment ('Turn') in a bug discussion: date, author, sentences."""

    def __init__(self, xml):
        if 'Date' in xml:
            self.date = xml['Date']
        if 'From' in xml:
            self.by = xml['From']
        if 'Text' in xml and 'Sentence' in xml['Text']:
            module = __import__(self.__module__)
            clazz = _get_class(module, _get_qualified_name(self.__module__, Sentence.__name__))
            self.text = _create_list_from(xml['Text']['Sentence'], clazz)

    @property
    def date(self):
        return self._date

    @date.setter
    def date(self, value):
        self._date = value

    @property
    def by(self):
        return self._by

    @by.setter
    def by(self, value):
        self._by = value

    @property
    def text(self):
        # List of Sentence objects (despite the singular name).
        return self._text

    @text.setter
    def text(self, value):
        self._text = value

    def sentences_with(self, label, filter_ambiguity=True):
        """Return the raw text of sentences carrying ``label``.

        With filter_ambiguity, only sentences whose disambiguated label
        equals ``label`` count; otherwise any occurrence of it counts.
        """
        if filter_ambiguity:
            filtered_sentences = list(filter(lambda sentence: sentence.label_disambiguity() is not None and sentence.label_disambiguity() == label, self.text))
        else:
            filtered_sentences = list(filter(lambda sentence: label in sentence.labels, self.text))
        return [sentence.text for sentence in filtered_sentences]

    def __getattr__(self, attr):
        # Missing attributes resolve to None instead of raising.
        return None
class Sentence(object):
    """One sentence of a comment, with its id, text, and labels."""

    def __init__(self, xml):
        if '@ID' in xml:
            self.id = xml['@ID']
        if '#text' in xml:
            self.text = xml['#text']
        # Filled in later by BugReport._merge_comments_annotations().
        self.labels = []

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, value):
        self._text = value

    @property
    def labels(self):
        return self._labels

    @labels.setter
    def labels(self, value):
        self._labels = value

    def label_disambiguity(self):
        """Pick a single label for this sentence, or None if ambiguous.

        The most frequent label wins; ties are broken by the priority
        order of SentenceLabel.ordered_labels() (only strictly greater
        counts replace the current winner).  If every label occurs just
        once and there is more than one, the sentence is ambiguous.
        """
        # global AMBIGUITY
        label_frequency = []
        for label in SentenceLabel.ordered_labels():
            if label in self.labels:
                new_entry = (self.labels.count(label), label)
                label_frequency.append(new_entry)
        min_frequency = 0
        final_label = None
        for freq, key in label_frequency:
            if freq > min_frequency:
                final_label = key
                min_frequency = freq
        if min_frequency == 1:
            if len(self.labels) == 1:
                return final_label
            else:
                # Several labels, each used once: ambiguous.
                # AMBIGUITY += 1
                return None
        elif final_label is None:
            # No known label found at all; fall back to priority order.
            return self._defaut_disambiguity()
        else:
            return final_label

    def _defaut_disambiguity(self):
        # Fallback: first label present, in priority order.
        for label in SentenceLabel.ordered_labels():
            if label in self.labels:
                return label
        return None

    def __getattr__(self, attr):
        # Missing attributes resolve to None instead of raising.
        return None
class Annotation(object):
    """One annotator's set of sentence labels for a bug report."""

    def __init__(self, xml):
        if 'Labels' in xml:
            self.labels = xml['Labels']

    @property
    def labels(self):
        return self._labels

    @labels.setter
    def labels(self, value):
        # value is the raw 'Labels' dict; flatten it into SentenceLabel
        # objects, one per labelled sentence.
        self._labels = []
        for label in SentenceLabel.LABELS:
            if label in value:
                self._add_sentence_labels(label, value[label])

    def _add_sentence_labels(self, key, value):
        # value may be a single sentence entry or a list of entries.
        if isinstance(value, list):
            for label in value:
                sentence_label = SentenceLabel(key, label)
                self._labels.append(sentence_label)
        else:
            sentence_label = SentenceLabel(key, value)
            self._labels.append(sentence_label)

    def sentences_with(self, label):
        # All SentenceLabel entries carrying the given label name.
        result = list(filter(lambda sentence: sentence.label == label, self.labels))
        return result

    def sentences_label_map(self):
        # {label name -> list of SentenceLabel} for every known label.
        result = dict()
        for label in SentenceLabel.ordered_labels():
            result[label] = self.sentences_with(label)
        return result

    def labels_for(self, id):
        # Label names this annotator assigned to sentence ``id``.
        result = [sentence.label for sentence in self.labels if sentence.id == id]
        return result

    def __getattr__(self, attr):
        # Missing attributes resolve to None instead of raising.
        return None
class SentenceLabel(object):
    """A single sentence annotation: a label name plus the sentence id."""

    META = 'Meta'
    PROBLEM = 'Problem'
    FIX = 'Fix'
    SUGGESTION = 'Suggestion'
    AGREEMENT = 'Agreement'
    DISAGREEMENT = 'Disagreement'
    # Priority order used by the disambiguation logic in Sentence.
    LABELS = [SUGGESTION, FIX, PROBLEM, AGREEMENT, DISAGREEMENT, META]

    def __init__(self, label, xml):
        # xml is one sentence entry; '@ID' names the labelled sentence.
        self.label = label
        if '@ID' in xml:
            self.id = xml['@ID']

    @staticmethod
    def ordered_labels():
        """Return the label names in disambiguation priority order."""
        return SentenceLabel.LABELS

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, value):
        self._label = value

    def __getattr__(self, attr):
        # Missing attributes resolve to None instead of raising.
        return None
| {
"repo_name": "marquesarthur/BugAnalysisRecommender",
"path": "patterny/patterny/data/bug.py",
"copies": "1",
"size": "10155",
"license": "mit",
"hash": -2704607627721126400,
"line_mean": 26.2252010724,
"line_max": 159,
"alpha_frac": 0.5634662728,
"autogenerated": false,
"ratio": 4.052274541101356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5115740813901356,
"avg_score": null,
"num_lines": null
} |
"""Ambry command extension for remote libraries
Commands include:
"""
# Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of
# the Revised BSD License, included in this distribution as LICENSE.txt
__all__ = ['command_name', 'make_parser', 'run_command']
command_name = 'remote'
from ambry.cli import prt, fatal, warn, err
def make_parser(cmd):
    """Attach the 'remote' command and its subcommands to ``cmd``.

    ``cmd`` is an argparse subparsers object.  Each subcommand stores its
    handler via set_defaults(subcommand=...), which run_command dispatches.
    """
    config_p = cmd.add_parser(command_name, help='Commands for managing remote libraries')
    config_p.set_defaults(command=command_name)
    config_p.add_argument('remote_name', nargs=1, type=str, help='Name of remote to operate on')
    asp = config_p.add_subparsers(title='Remote commands', help='Remote commands')

    sp = asp.add_parser('checkin', help="Check in a bundle")
    sp.set_defaults(subcommand=checkin)
    sp.add_argument('-n', '--no-partitions', default=False, action='store_true',
                    help="Don't check in partitions")
    sp.add_argument('-r', '--remote', help='Specify remote, rather than using default for bundle')
    sp.add_argument('-s', '--source', default=False, action='store_true',
                    help='Only package source files')
    sp.add_argument('bundle_ref', nargs=1, type=str, help='Reference to a bundle')

    # NOTE(review): help text "Remote a bundle" looks like a typo for
    # "Remove" (runtime string, left unchanged here).
    sp = asp.add_parser('remove', help="Remote a bundle")
    sp.set_defaults(subcommand=remove)
    sp.add_argument('bundle_ref', nargs=1, type=str, help='Remove a bundle')

    sp = asp.add_parser('list', help="List the contents of the remote")
    sp.set_defaults(subcommand=remote_list)
    sp.add_argument('-s', '--summary',default=False, action='store_true', help="Also display summaries and titles")
    sp.add_argument('-c', '--cached', default=False, action='store_true', help="List the contents of the cached Directory listings")

    sp = asp.add_parser('info', help="Info about remote or a bundle on a remote")
    sp.set_defaults(subcommand=info)
    sp.add_argument('bundle_ref', nargs='?', type=str, help='Reference to a bundle')

    sp = asp.add_parser('syncremote', help="Send remote and associated account information")
    sp.set_defaults(subcommand=syncremote)
    sp.add_argument('remotes', nargs='*', type=str, help='Names of remotes to send')

    sp = asp.add_parser('syncacct', help="Send account information, without a remote")
    sp.set_defaults(subcommand=syncacct)
    sp.add_argument('accounts', nargs='*', type=str, help='Names of accounts to send')

    sp = asp.add_parser('sync', help="Instruct the remote to checkin a remote bundle")
    sp.set_defaults(subcommand=sync)
    sp.add_argument('ref', nargs='*', type=str, help='Bundle references')

    sp = asp.add_parser('update-listing', help="Cache the list of bundles so they can be listed via HTTP")
    sp.set_defaults(subcommand=update_listing)

    sp = asp.add_parser('test', help="Call the API's test interface")
    sp.set_defaults(subcommand=test)
def run_command(args, rc):
    """Entry point: construct the library, then dispatch to the subcommand."""
    from ambry.library import new_library
    from ambry.cli import global_logger
    try:
        l = new_library(rc)
        l.logger = global_logger
    except Exception as e:
        # NOTE(review): failures are silently swallowed and the subcommand
        # runs with l=None -- subcommands must tolerate a missing library.
        l = None
    args.subcommand(args, l, rc)  # Note the calls to sp.set_defaults(subcommand=...)


def get_remote(l, name):
    """Return the remote named ``name``; also accepts the parsed args."""
    from argparse import Namespace
    from ambry.orm.exc import NotFoundError
    if isinstance(name, Namespace):
        # Allow passing the argparse Namespace straight through.
        name = name.remote_name[0]
    try:
        return l.remote(name)
    except NotFoundError:
        fatal("Unknown remote name: '{}'".format(name))
def checkin(args, l, rc):
    """Package one or more bundles and check them in to the remote."""
    from ambry.orm.exc import NotFoundError
    remote = get_remote(l, args)
    for ref in args.bundle_ref:
        b = l.bundle(ref)
        # Build (or reuse) the bundle package; optionally source files only.
        package = b.package(rebuild=False, source_only=args.source)
        prt('Check in {}'.format(b.identity.fqname))
        try:
            remote.checkin(package)
        except NotFoundError as e:
            fatal(e.message)
def remove(args, l, rc):
    """Remove one or more bundles from the remote.

    Dropped the unused ``from ambry_client import Client`` import that the
    original carried (never referenced in this function).
    """
    from ambry.orm.exc import NotFoundError
    remote = get_remote(l, args)
    for ref in args.bundle_ref:
        try:
            # Progress messages are routed through the library logger.
            remote.remove(ref, cb=l.logger.info)
        except NotFoundError as e:
            fatal(e.message)
def remote_list(args, l, rc):
    """List the remote's bundles, or its cached directory listing."""
    remote = get_remote(l,args)
    if args.cached:
        # Show the locally cached listing instead of querying the remote.
        if 'list' in remote.data:
            for k, v in remote.data['list'].items():
                print k, v['vname']
    else:
        for name in remote.list():
            if not args.summary:
                print name
            else:
                # Fetch per-bundle details for the summary columns.
                e = remote.find(name)
                print '{:12s} {:40s} {}'.format(e['vid'], e['name'], e.get('title'))


def info(args, l, rc):
    """Show info about the remote itself, or about one bundle on it."""
    from ambry.orm.exc import NotFoundError
    remote = get_remote(l,args)
    if not args.bundle_ref:
        print remote # TODO Print info about the remote
    else:
        try:
            e = remote.find(args.bundle_ref)
            for k, v in e.items():
                print k, v
        except NotFoundError:
            fatal("Failed to find bundle for ref: '{}' ".format(args.bundle_ref))


def test(args, l, rc):
    """Call the remote API's test endpoint (connectivity/auth check)."""
    from ambry.orm.exc import NotFoundError
    remote = get_remote(l, args)
    print remote.api_client.test()
def syncremote(args, l, rc):
    """Send local remote configs (and their accounts) to another remote."""
    from ambry.util import parse_url_to_dict
    from ambry.orm.exc import NotFoundError
    local_remotes = []
    local_accounts = {}
    for remote_name in args.remotes:
        r = l.remote(remote_name)
        local_remotes.append(r.dict)
        # Include the account keyed by the remote's hostname, if one exists.
        d = parse_url_to_dict(r.url)
        try:
            a = l.account(d['hostname'])
            local_accounts[a.account_id] = a.dict
        except NotFoundError:
            pass
    foreign_remote = get_remote(l, args)
    # NOTE(review): assigning these properties appears to transmit the data
    # to the foreign remote via its API client -- confirm in ambry_client.
    foreign_remote.api_client.library.remotes = local_remotes
    foreign_remote.api_client.library.accounts = local_accounts


def syncacct(args, l, rc):
    """Send local account records to another remote, without remote configs."""
    # NOTE(review): parse_url_to_dict is imported but unused here.
    from ambry.util import parse_url_to_dict
    from ambry.orm.exc import NotFoundError
    local_accounts = {}
    for account_name in args.accounts:
        try:
            a = l.account(account_name)
            local_accounts[a.account_id] = a.dict
        except NotFoundError:
            warn("No account for id '{}' ".format(account_name))
    foreign_remote = get_remote(l, args)
    foreign_remote.api_client.library.accounts = local_accounts
def sync(args, l, rc):
    """Instruct the remote to check in a remote bundle (not implemented)."""
    raise NotImplementedError()
def update_listing(args, l, rc):
    """Rebuild the remote's cached file listing (served via HTTP)."""
    remote = get_remote(l, args)
    remote._update_fs_list() | {
"repo_name": "CivicKnowledge/ambry-admin",
"path": "ambry_admin/remote.py",
"copies": "1",
"size": "6524",
"license": "bsd-3-clause",
"hash": -5524076725090972000,
"line_mean": 29.6338028169,
"line_max": 132,
"alpha_frac": 0.6362660944,
"autogenerated": false,
"ratio": 3.5865860362836726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47228521306836724,
"avg_score": null,
"num_lines": null
} |
""" Ambry Web Client
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
from collections import OrderedDict, Mapping
import requests
from six import iterkeys
# http://pypi.python.org/pypi/layered-yaml-attrdict-config/12.07.1
class AttrDict(OrderedDict):
    """An ordered dictionary with a property interface to all keys.

    Keys starting with '_' are treated as private: they are skipped by
    iteration and items(); dunder and OrderedDict-internal attribute
    names bypass the mapping entirely.
    """

    def __init__(self, *argz, **kwz):
        super(AttrDict, self).__init__(*argz, **kwz)

    def __setitem__(self, k, v):
        assert not isinstance(k, list)
        # Recursively wrap plain mappings so nested values also support
        # attribute-style access.
        super(AttrDict, self).__setitem__(k, AttrDict(v)
            if (isinstance(v, Mapping) and not isinstance(v, AttrDict)) else v)

    def __getattr__(self, k):
        assert not isinstance(k, list)
        if k.startswith('__') or k.startswith('_OrderedDict__'):
            # Internal names go through the normal attribute machinery.
            return super(AttrDict, self).__getattr__(k)
        return self[k]

    def __setattr__(self, k, v):
        if k.startswith('__') or k.startswith('_OrderedDict__'):
            return super(AttrDict, self).__setattr__(k, v)
        self[k] = v

    def __iter__(self):
        # NOTE(review): iterates OrderedDict's *parent* (plain dict) via
        # six.iterkeys -- confirm the intended ordering semantics on Py2.
        for k in iterkeys(super(OrderedDict, self)):
            if not k.startswith('_'):
                yield k

    def items(self):
        # Generator of (key, value) pairs, excluding private keys.
        for k in self:
            if not k.startswith('_'):
                yield (k, self[k])

    @staticmethod
    def flatten_dict(data, path=tuple()):
        """Flatten a nested mapping into [(key-path tuple, value), ...]."""
        from six import iteritems
        dst = list()
        for k, v in iteritems(data):
            k = path + (k,)
            if isinstance(v, Mapping):
                # Delegate to the sub-mapping's own flatten().
                for v in v.flatten(k):
                    dst.append(v)
            else:
                dst.append((k, v))
        return dst

    def flatten(self, path=tuple()):
        return self.flatten_dict(self, path=path)

    @property
    def dict(self):
        """Convert back to a plain nested dict."""
        root = {}
        val = self.flatten()
        for k, v in val:
            dst = root
            for slug in k[:-1]:
                if dst.get(slug) is None:
                    dst[slug] = dict()
                dst = dst[slug]
            # Don't let a None leaf clobber an existing sub-dict.
            if v is not None or not isinstance(dst.get(k[-1]), Mapping):
                dst[k[-1]] = v
        return root
class NotFoundError(Exception):
    """Raised when the server responds 404 for a requested resource."""
    pass
class Client(object):
    """Web client object for raw web requests against the library server."""
    # URL templates, expanded by _make_url().
    auth_t = "{base_url}/auth"
    list_t = "{base_url}/json"
    dataset_t = "{base_url}/json/bundle/{ref}"
    partition_t = "{base_url}/json/partition/{ref}"
    file_t = "{base_url}/file/{ref}.{ct}"
    test_t = "{base_url}/auth-test"
    resolve_t = "{base_url}/resolve/{ref}"
    def __init__(self, url, username=None, secret=None):
        """
        :param url: base URL of the library server
        :param username: account name for JWT auth; requests are anonymous when unset
        :param secret: shared secret used to sign the JWT token
        """
        self._url = url
        self.username = username
        self.secret = secret
    @property
    def library(self):
        """The Library is just a subclass of the Client"""
        return Library(self._url, self.username, self.secret)
    def test(self, **kwargs):
        """
        Test the connection and authentication
        :param kwargs: echoed to the auth-test endpoint
        :return: decoded JSON response
        """
        return self._put(self.test_t, data=kwargs)
    def list(self):
        """ Return a list of all of the datasets in the library
        :return: list of Dataset
        """
        o = self._get(self.list_t)
        return [Dataset(self, b) for b in o['bundles']]
    def dataset(self, ref):
        """
        Return a dataset, given a vid, id, name or vname
        :param ref:
        :return: Dataset, with partitions loaded
        """
        o = self._get(self.dataset_t, ref=ref)
        return Dataset(self, o['dataset'], partitions=o['partitions'], detailed=True)
    def bundle(self, ref):
        """
        Return a bundle, given a vid, id, name or vname. A bundle is a dataset with additional interfaces
        :param ref:
        :return: Bundle
        """
        o = self._get(self.dataset_t, ref=ref)
        return Bundle(self, o['dataset'], partitions=o['partitions'], detailed=True)
    def partition(self, ref):
        """
        Return a partition, given a vid, id, name or vname
        :param ref:
        :return: Partition
        """
        o = self._get(self.partition_t, ref=ref)
        return Partition(self, o['partition'])
    def resolve(self, ref):
        """Return information about either a partition or a bundle
        :param ref: Any kind of bundle or partition reference
        :return: (bundle, partition). If the ref is to a bundle, the partition will be False
        """
        o = self._get(self.resolve_t, ref=ref)
        if 'partition' in o:
            return (Bundle(self, o['bundle'], partitions=None, detailed=False),
                    Partition(self, o['partition']))
        else:
            return (Bundle(self, o['bundle'], partitions=None, detailed=False), False)
    def search(self, query):
        """
        Return collections of datasets and partitions that match the given search query
        :param query:
        :return:

        NOTE(review): not implemented -- the body is empty, so this always
        returns None. Kept as-is to preserve the existing interface.
        """
    def streamed_file(self, ref, ct):
        """Stream a file from the server line by line, with auth headers.

        The original class defined streamed_file twice; the first definition
        (which sent no auth headers) was shadowed by this one and has been
        removed.
        """
        url = self._make_url(self.file_t, ref=ref, ct=ct)
        r = requests.get(url, stream=True, headers=self._headers())
        r.raise_for_status()
        for line in r.iter_lines():
            yield line
    def _headers(self, **kwargs):
        """Build request headers: optional JWT auth plus kwargs as extra headers."""
        h = {}
        if self.username and self.secret:
            # Deferred import: python-jose is only needed for authenticated
            # requests (the original imported it on every call).
            from jose import jwt
            t = jwt.encode({'u': self.username}, self.secret, algorithm='HS256')
            h['Authorization'] = "JWT {}:{}".format(self.username, t)
        for k, v in kwargs.items():
            # e.g. content_type -> Content-type
            k = k.replace('_', '-').capitalize()
            h[k] = v
        return h
    def _process_status(self, r):
        """Map 404 to NotFoundError; let raise_for_status flag other failures."""
        if r.status_code == 404:
            raise NotFoundError("Not found: {}".format(r.request.url))
        # The original wrapped this call in try/except that immediately
        # re-raised; the bare call is equivalent.
        r.raise_for_status()
    def _put(self, template, data, **kwargs):
        """PUT *data* as JSON to the expanded template URL; return decoded JSON."""
        import json
        url = self._make_url(template, **kwargs)
        r = requests.put(url, data=json.dumps(data), headers=self._headers(content_type="application/json"))
        self._process_status(r)
        return r.json()
    def _get(self, template, **kwargs):
        """GET the expanded URL; decode JSON responses, return raw bytes otherwise."""
        url = self._make_url(template, **kwargs)
        r = requests.get(url, headers=self._headers())
        self._process_status(r)
        # NOTE(review): exact match -- 'application/json; charset=...' would
        # fall through to raw content. Confirm the server never sends that.
        if r.headers['content-type'] == 'application/json':
            return r.json()
        else:
            return r.content
    def _delete(self, template, **kwargs):
        """DELETE the expanded URL; decode JSON responses, return raw bytes otherwise."""
        url = self._make_url(template, **kwargs)
        r = requests.delete(url, headers=self._headers())
        self._process_status(r)
        if r.headers['content-type'] == 'application/json':
            return r.json()
        else:
            return r.content
    def _post_file(self, path, template, **kwargs):
        """POST the file at *path* as a binary body; return decoded JSON."""
        import os
        headers = self._headers(
            content_type="application/json",
            content_length=os.path.getsize(path),
            content_transfer_encoding='binary'
        )
        url = self._make_url(template, **kwargs)
        with open(path, 'rb') as f:
            r = requests.post(url, data=f, headers=headers)
        self._process_status(r)
        return r.json()
    def _make_url(self, template, **kwargs):
        """Expand a URL template against the client's base URL (trailing '/' stripped)."""
        return template.format(base_url=self._url.strip('/'), **kwargs)
class Dataset(AttrDict):
    # NOTE: attributes assigned here are name-mangled (e.g. _Dataset__client)
    # and therefore stored by AttrDict.__setattr__ as '_'-prefixed dict items,
    # which keeps them out of iteration and items().
    def __init__(self, client, d, partitions = None, detailed = False):
        """
        :param client: Client used for follow-up requests
        :param d: mapping of dataset fields returned by the server
        :param partitions: optional list of raw partition mappings
        :param detailed: If true, the partitions have already been loaded
        :return:
        """
        super(Dataset, self).__init__(d)
        if partitions:
            self._partitions = [ Partition(client, p) for p in partitions ]
        else:
            self._partitions = None
        self.__client = client
        self.__detailed = detailed
    @property
    def partitions(self):
        # Lazily refetch the detailed record when partitions weren't preloaded.
        if self._partitions:
            return self._partitions
        elif not self.__detailed:
            detailed = self.detailed
            return detailed.partitions
        else:
            return []
    @property
    def detailed(self):
        """
        Refetch the dataset, with all of it's details. Use when expanding a dataset entry
        from the list
        :return: Dataset """
        return self.__client.dataset(self.vid)
    @property
    def bundle(self):
        """
        Refetch the dataset, with all of it's details. Use when expanding a dataset entry
        from the list
        :return: Bundle """
        return self.__client.bundle(self.vid)
class Partition(AttrDict):
    def __init__(self, client, d):
        super(Partition, self).__init__(d)
        # Mangled to _Partition__client; stored as a hidden dict item by AttrDict.
        self.__client = client
    def rows(self):
        """Return an iterator over rows of the data file. The first row is the headers.
        Unlike iterating over the CSV file, these rows will have data types that match the schema.
        FIXME: Dates are probably broken, though.
        """
        # NOTE(review): deliberately disabled? The bare raise makes everything
        # below unreachable (the generator fails on first next()). Confirm
        # whether the msgpack path should be restored or the method removed.
        raise Exception()
        import msgpack
        class StreamedBuf(object):
            # Adapts the streamed-file generator to the minimal read()
            # interface expected by msgpack.Unpacker.
            def __init__(self, client, vid):
                self.g = client.streamed_file(vid, 'mpack')
            def read(self,i):
                return next(self.g)
        buf = StreamedBuf(self.__client, self.vid)
        unpacker = msgpack.Unpacker(buf)
        for unpacked in unpacker:
            yield unpacked
    @property
    def csv_lines(self):
        """Return data, as CSV rows"""
        for row in self.__client.streamed_file(self.vid, 'csv'):
            yield row
    def write_csv(self, path, cb=None):
        """Write CSV data to a file or file-like object"""
        import os
        with open(path, 'w') as f:
            for i, row in enumerate(self.__client.streamed_file(self.vid, 'csv')):
                f.write(row)
                f.write(os.linesep)
                # Progress callback every 10k rows (also fires at row 0).
                if i % 10000 == 0 and cb:
                    cb(i)
    def write_json_meta(self, path):
        """Write the partition's metadata as indented JSON to *path*"""
        import os
        import json
        with open(path, 'w') as f:
            json.dump(self.dict, f, indent=4)
from .bundle import Bundle
from .library import Library | {
"repo_name": "CivicKnowledge/ambry-client",
"path": "ambry_client/__init__.py",
"copies": "1",
"size": "10477",
"license": "bsd-2-clause",
"hash": -1852897045241049000,
"line_mean": 26.0051546392,
"line_max": 108,
"alpha_frac": 0.5505392765,
"autogenerated": false,
"ratio": 4.0003818251240935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024350789470604316,
"num_lines": 388
} |
# AMDG
from datetime import datetime
import locale
# Use the user's default locale so locale.currency() in Entry.__str__ can format amounts.
locale.setlocale(locale.LC_ALL, '')
class Operators(object):
    """Registry of the sign operators recognized at the start of an entry."""
    operators = ('+', '-')
    @staticmethod
    def valid(op):
        """Return True when *op* is one of the recognized operators."""
        return any(op == known for known in Operators.operators)
class ColumnPositions(object):
    """Index of each field within a colon-split raw entry record."""
    AMOUNT = 0
    CATEGORY = 1
    DESCRIPTION = 2
    VENDOR = 3
    METHOD = 4
    DATE = 5
class Columns(object):
    """Names of the entry fields, as exposed on Entry properties."""
    AMOUNT = 'amount'
    CATEGORY = 'category'
    DESCRIPTION = 'description'
    VENDOR = 'vendor'
    METHOD = 'method'
    DATE = 'date'
    @staticmethod
    def exists(category):
        """Return True if *category* names one of the known columns."""
        # Direct membership test replaces the if/return True/return False chain.
        return category in (Columns.AMOUNT,
                            Columns.CATEGORY,
                            Columns.DESCRIPTION,
                            Columns.VENDOR,
                            Columns.METHOD,
                            Columns.DATE)
class Dates(object):
    """Date-format registry and parsing helper for entry dates."""
    default_format = '%m%d%y'
    supported_formats = (default_format,)
    @staticmethod
    def get_date(date_str):
        """Parse *date_str* against each supported format.

        Returns the first successful datetime, or None when no format matches.
        """
        for candidate in Dates.supported_formats:
            try:
                return datetime.strptime(date_str, candidate)
            except ValueError:
                pass
        return None
# Number of colon-separated fields in a raw entry record.
NUM_COLUMNS = 6
class Entry(object):
    """A single ledger record parsed from a colon-separated line:
    amount:category:description:vendor:method:date
    """
    def __init__(self, eid, raw_entry):
        """
        :param eid: caller-assigned entry id (e.g. the line number)
        :param raw_entry: colon-separated record text
        :raises ValueError: when the record cannot be parsed
        """
        ok, msg = self._parse(raw_entry)
        if not ok:
            raise ValueError("Not a valid entry. Error: %s" % msg)
        self._eid = eid
    def __str__(self):
        # Re-serialize in the raw format; non-negative amounts get an explicit '+'.
        symbol = '+' if self.amount >= 0 else ''
        amount = symbol + locale.currency(self.amount, symbol=False)
        return ':'.join([amount,
                         self.category,
                         self.description,
                         self.vendor,
                         self.method,
                         self.date.strftime(Dates.default_format)])
    def __repr__(self):
        # Fixed: the original called str(self).__str__() -- a redundant second
        # __str__ on something that is already a str. Output is unchanged.
        return "Entry(" + str(self.eid) + "," + str(self) + ")"
    def _parse(self, raw_entry):
        """Parse *raw_entry* into the private fields.

        :return: (True, '') on success, (False, error_message) on failure
        """
        parts = raw_entry.split(':')
        # ensure correct number of columns
        length = len(parts)
        if length != NUM_COLUMNS:
            return (False,
                    "Expected %s columns, found %s" % (NUM_COLUMNS, length,))
        # check that operator is supported (raw_entry is non-empty here: an
        # empty string would have failed the column count above)
        if not Operators.valid(raw_entry[0]):
            return (False, "Invalid operator: '%s'" % raw_entry[0])
        # check that amount is a number
        try:
            self._amount = float(parts[ColumnPositions.AMOUNT])
        except ValueError:
            return (False,
                    "Amount is not a number: %s" % parts[ColumnPositions.AMOUNT])
        # check if date format is valid
        self._date = Dates.get_date(parts[ColumnPositions.DATE])
        if self._date is None:
            return (False,
                    "Invalid date format: %s" % parts[ColumnPositions.DATE])
        self._category = parts[ColumnPositions.CATEGORY]
        self._description = parts[ColumnPositions.DESCRIPTION]
        self._vendor = parts[ColumnPositions.VENDOR]
        self._method = parts[ColumnPositions.METHOD]
        return (True, '')
    def has_field(self, column):
        """True when *column* is 'eid' or one of the known entry columns."""
        return column == 'eid' or Columns.exists(column)
    @property
    def eid(self):
        return self._eid
    @property
    def amount(self):
        return self._amount
    @amount.setter
    def amount(self, value):
        self._amount = value
    @property
    def category(self):
        return self._category
    @property
    def description(self):
        return self._description
    @property
    def vendor(self):
        return self._vendor
    @property
    def method(self):
        return self._method
    @property
    def date(self):
        return self._date
| {
"repo_name": "pilliq/balance",
"path": "balance/entry.py",
"copies": "1",
"size": "3910",
"license": "mit",
"hash": -7062560903028059000,
"line_mean": 25.4189189189,
"line_max": 77,
"alpha_frac": 0.5557544757,
"autogenerated": false,
"ratio": 4.2685589519650655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005580961087778585,
"num_lines": 148
} |
# AMDG
from entry import Entry
# Lines beginning with this character are treated as comments by the loaders.
COMMENT_STR = '#'
class BasicLoader(object):
    """Loads Entry records from a ledger file, one record per line."""
    def __init__(self, filename):
        # (The original had a stray `pass` after this assignment; removed.)
        self.filename = filename
    def load(self, return_errors=False):
        """Parse the file.

        :param return_errors: when True, also return a list of
            {'error': ValueError, 'entry': raw_line} for unparseable lines
        :return: entries, or (entries, errors) when return_errors is True
        """
        entries = []
        errors = []
        with open(self.filename, 'r') as f:
            for i, line in enumerate(f):
                # Skip comments. File iteration never yields '', so the
                # original `or line == ''` test was dead; blank lines ('\n')
                # fall through and are reported as errors, which callers
                # (see LoaderTests) rely on.
                if line.startswith(COMMENT_STR):
                    continue
                try:
                    entries.append(Entry(i + 1, line.strip()))
                except ValueError as e:
                    errors.append({'error': e, 'entry': line})
        if return_errors:
            return entries, errors
        return entries
class RepayLoader(object):
    """Loads only repayment-related entries from a ledger file.

    Picks up (a) commented-out entries whose category is 'repay' and
    (b) live entries paid via one of the tracked payment methods.
    """
    # Payment methods whose entries participate in repayment tracking.
    methods = ('credit', 'mcash', 'mcheck')
    def __init__(self, filename):
        self.filename = filename
    def load(self, return_errors=False):
        """Parse the file; return entries (and an error list when requested)."""
        entries = []
        errors = []
        with open(self.filename, 'r') as fp:
            for i, line in enumerate(fp):
                if line[0] == COMMENT_STR:
                    # Commented lines may hold 'repay' records; strip the
                    # leading comment character before parsing.
                    try:
                        entry = Entry(i+1, line[1:].strip())
                    except ValueError as e:
                        errors.append({'error': e, 'entry': line})
                        continue
                    if entry.category == 'repay':
                        entries.append(entry)
                else:
                    try:
                        entry = Entry(i+1, line.strip())
                    except ValueError as e:
                        errors.append({'error': e, 'entry': line})
                        continue
                    if entry.method in RepayLoader.methods:
                        entries.append(entry)
        if return_errors:
            return entries, errors
        return entries
| {
"repo_name": "pilliq/balance",
"path": "balance/loaders.py",
"copies": "1",
"size": "1827",
"license": "mit",
"hash": -1789382149879327200,
"line_mean": 30.5,
"line_max": 66,
"alpha_frac": 0.4553913519,
"autogenerated": false,
"ratio": 4.7952755905511815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023547254150702427,
"num_lines": 58
} |
# AMDG
import logging
from balance_book import BalanceBook
from itertools import islice
class RepayBook(object):
    """
    RepayBook keeps track of entries that have been and need to be repaid to
    other people. It keeps these two types of entries in separate BalanceBooks.
    When both types of entries are combined into one BalanceBook or list,
    entries that have been repaid are negative, while entries that need to be
    repaid are positive.
    """
    # Tolerance for float equality when matching repayment totals.
    epsilon = 0.00000001
    def __init__(self, entries):
        self.logger = logging.getLogger(type(self).__name__)
        self._raw_entries = entries
        self._repay, self._repaid = self._split_entries()
        self._expand_repaid()
        self._positize_repay()
    def _float_equals(self, a, b):
        # Approximate comparison; avoids == on accumulated float sums.
        return abs(a - b) < self.epsilon
    def _split_entries(self):
        # Partition the raw entries: category 'repay' -> repaid, rest -> repay.
        repay = []
        repaid = []
        for e in self._raw_entries:
            if e.category == 'repay':
                repaid.append(e)
            else:
                repay.append(e)
        return BalanceBook(repay), BalanceBook(repaid)
    def _parse_eid(self, raw):
        # Parse an entry reference of the form '#<int>'; None when malformed.
        if raw[0] == '#':
            try:
                return int(raw[1:])
            except ValueError:
                return None
        return None
    def _expand_repaid(self):
        # For each aggregate 'repay' entry, move the entries it references
        # (as '#eid' tokens in its description) from the repay book to the
        # repaid book, then drop the aggregate entry itself.
        # NOTE(review): islice over .entries relies on BalanceBook.entries
        # returning a snapshot list (Python 2 dict.values()); under Python 3
        # this would mutate a live view during iteration -- confirm runtime.
        for e in islice(self._repaid.entries, 0, len(self._repaid.entries)):
            repaid_total = 0
            for eid_str in e.description.split(' '):
                eid = self._parse_eid(eid_str)
                if eid is not None:
                    results = self._repay.filter(eid=eid)
                    if len(results) == 0:
                        self.logger.warning(
                            "Could not find repay entry for eid=%d", eid
                        )
                        continue
                    repay_entry = results[0]
                    repaid_total += abs(repay_entry.amount)
                    self._repay.remove(eid=repay_entry.eid)
                    ok = self._repaid.add(repay_entry)
                    if not ok:
                        self.logger.warning(
                            "Duplicate entry. eid=%d", repay_entry.eid
                        )
            # The aggregate's amount should equal the sum of referenced items.
            if not self._float_equals(repaid_total, abs(e.amount)):
                self.logger.warning(
                    "Total repay amount does not match total amount of items in repay description. eid=%d expected_repay_amount=%.2f actual_repay_amount=%.2f",
                    e.eid,
                    abs(e.amount),
                    repaid_total
                )
            # The aggregate marker entry is no longer needed.
            self._repaid.remove(eid=e.eid)
    def _positize_repay(self):
        """
        Make amounts in self.repay positive
        """
        for e in self._repay.entries:
            e.amount = abs(e.amount)
    @property
    def entries(self):
        # Outstanding (positive) plus completed (negative) repayments.
        return self._repay.entries + self._repaid.entries
    @property
    def repay(self):
        """
        BalanceBook of entries that need to be repaid. Amounts are positive.
        """
        return self._repay
    @property
    def repaid(self):
        """
        BalanceBook of entries that have been repaid. Amounts are negative.
        """
        return self._repaid
| {
"repo_name": "pilliq/balance",
"path": "balance/repay_book.py",
"copies": "1",
"size": "3259",
"license": "mit",
"hash": 4639831805036486000,
"line_mean": 32.5979381443,
"line_max": 159,
"alpha_frac": 0.5250076711,
"autogenerated": false,
"ratio": 4.125316455696202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016153562931913448,
"num_lines": 97
} |
# AMDG
import logging
class BalanceBook(object):
    """A collection of entries keyed by eid, with filtering and removal."""
    def __init__(self, entries):
        self.logger = logging.getLogger(type(self).__name__)
        self._entries = self._group_by_eid(entries)
    def _group_by_eid(self, entries):
        # Build the eid -> entry index; a duplicate eid silently keeps the
        # last occurrence (unchanged from the original behavior).
        eids = {}
        for e in entries:
            eids[e.eid] = e
        return eids
    def _remove(self, eid):
        del self._entries[eid]
    def add(self, entry):
        """
        Will return False if entry has same eid as another entry in the
        BalanceBook. Else, will add to book and return True.
        """
        if entry.eid in self._entries:
            return False
        self._entries[entry.eid] = entry
        return True
    @property
    def entries(self):
        # list(...) preserves the Python 2 contract (a snapshot list) under
        # Python 3, where bare .values() is a live view that breaks callers
        # that concatenate books (RepayBook.entries) or remove entries while
        # iterating (_expand_repaid).
        return list(self._entries.values())
    def print_out(self):
        for e in self.entries:
            print(e)
    def remove(self, **kwargs):
        """Remove every entry matching the filter criteria."""
        for e in self.filter(**kwargs):
            self._remove(e.eid)
    def filter(self, **kwargs):
        """Return entries whose named fields all equal the given values.

        An entry lacking one of the named fields never matches.
        """
        result = []
        for e in self.entries:
            passes = True
            # .items() replaces the Python-2-only .iteritems() call; it works
            # identically on both interpreter lines.
            for key, value in kwargs.items():
                if e.has_field(key):
                    if not getattr(e, key) == value:
                        passes = False
                else:
                    passes = False
            if passes:
                result.append(e)
        return result
| {
"repo_name": "pilliq/balance",
"path": "balance/balance_book.py",
"copies": "1",
"size": "1406",
"license": "mit",
"hash": -2696497883610881500,
"line_mean": 25.037037037,
"line_max": 71,
"alpha_frac": 0.512802276,
"autogenerated": false,
"ratio": 4.197014925373134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006858710562414266,
"num_lines": 54
} |
# AMDG
import unittest
from balance import (
BasicAnalyzer,
BasicLoader,
BalanceBook,
RepayLoader,
RepayBook
)
from base_test import BaseTest
class BasicAnalyzerTests(BaseTest, unittest.TestCase):
    """Exercises BasicAnalyzer over both a plain BalanceBook and a RepayBook
    built from the same fixture file (tests/data/basic_analyzer)."""
    def setUp(self):
        self.loader = BasicLoader('tests/data/basic_analyzer')
        self.bbook = BalanceBook(self.loader.load())
        self.analyzer = BasicAnalyzer(self.bbook)
        # Same fixture, repayment view.
        self.rloader = RepayLoader('tests/data/basic_analyzer')
        self.rbook = RepayBook(self.rloader.load())
        self.ranalyzer = BasicAnalyzer(self.rbook)
    def test_balance(self):
        self.assertEquals(786.10, self.analyzer.balance())
        # one +13, and one -13 entry
        self.assertEquals(0, self.ranalyzer.balance())
    def test_spend(self):
        self.assertEquals(213.90, self.analyzer.spent())
        self.assertEquals(13.00, self.ranalyzer.spent())
    def test_gained(self):
        self.assertEquals(1000.00, self.analyzer.gained())
        self.assertEquals(13.00, self.ranalyzer.gained())
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "pilliq/balance",
"path": "tests/test_analyzers.py",
"copies": "1",
"size": "1088",
"license": "mit",
"hash": -3314672420058923500,
"line_mean": 29.2222222222,
"line_max": 63,
"alpha_frac": 0.6626838235,
"autogenerated": false,
"ratio": 3.5324675324675323,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46951513559675323,
"avg_score": null,
"num_lines": null
} |
# AMDG
import unittest
from datetime import datetime
from balance import BasicLoader, RepayLoader
from base_test import BaseTest
class LoaderTests(BaseTest, unittest.TestCase):
    """Fixture-file tests for BasicLoader and RepayLoader.

    NOTE(review): relies on Exception.message, which exists only on Python 2.
    """
    def test_basic_loader(self):
        # Valid lines parse into entries; blank and malformed lines are errors.
        loader = BasicLoader('tests/data/basic_loader')
        entries, errors = loader.load(return_errors=True)
        self.assertEquals(1, len(entries))
        entry = entries[0]
        self.assertEquals(-5.00, entry.amount)
        self.assertEquals(2, len(errors))
        self.assertEquals(errors[0]['entry'], '\n')
        self.assertTrue(errors[0]['error'].message.startswith('Not a valid entry'))
        self.assertEquals(errors[1]['entry'], 'this is a bad line:\n')
        self.assertTrue(errors[1]['error'].message.startswith('Not a valid entry'))
    def test_repay_loader(self):
        # Repay entries come from comments ('repay' category) and from lines
        # whose payment method is tracked.
        loader = RepayLoader('tests/data/repay_loader')
        entries, errors = loader.load(return_errors=True)
        self.assertEquals(4, len(entries))
        entry = entries.pop()
        self.assertEquals(-11.00, entry.amount)
        self.assertEquals('repay', entry.category)
        self.assertEquals('#2', entry.description)
        self.assertEquals('Joe', entry.vendor)
        self.assertEquals('cash', entry.method)
        self.assertEquals(datetime(2014,10,3), entry.date)
        for e in entries:
            self.assertTrue(e.method in RepayLoader.methods)
        self.assertEquals(2, len(errors))
        self.assertEquals(errors[0]['entry'], '#hello\n')
        self.assertTrue(errors[0]['error'].message.startswith('Not a valid entry'))
        self.assertEquals(errors[1]['entry'], 'bad line\n')
        self.assertTrue(errors[1]['error'].message.startswith('Not a valid entry'))
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "pilliq/balance",
"path": "tests/test_loaders.py",
"copies": "1",
"size": "1764",
"license": "mit",
"hash": -1669557820197150700,
"line_mean": 42.0243902439,
"line_max": 83,
"alpha_frac": 0.6541950113,
"autogenerated": false,
"ratio": 3.876923076923077,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031118088223077,
"avg_score": null,
"num_lines": null
} |
# A measure performed and recorded on an individual state
import collections
import yaml
import importlib
import functools
import funtool.analysis
import funtool.state_collection
import funtool.logger
import funtool.lib.config_parse
import funtool.lib.general
import datetime
# Immutable definition of a single per-state measure; fields are documented
# in the comment block below.
StateMeasure = collections.namedtuple('StateMeasure',['name','measure_module','measure_function','analysis_selectors','grouping_selectors','parameters'])
# A StateMeasure is used with an AnalysisCollection and a StateCollection to measure each State in the StateCollection
# Each State will have a new key added to it's measures (or updated if the key already exists)
# After measuring the state, an AnalysisCollection is returned
#
# name a string identifying the measure ( from the key in the YAML measure definition )
# measure_module a string indicating where the measure_function is defined
# measure_function a string with the name of the function which measures the state
# analysis_selectors a list of names of selectors which are run sequentially to update the AnalysisCollection
# grouping_selectors a list of names of grouping_selectors which are used to create groups in the StateCollection before any analysis is run
# parameters a dict of parameters passed to the measure
#
# StateMeasures are run through a loop during the actual analysis ( created in the _wrap_measure function ). The full StateMeasure process returns
# a state_collection
def state_measure_process(state_measure, loaded_processes):
    """Build a process function for *state_measure*.

    Returns a function that accepts a state_collection, to be used as a
    process in an analysis pipeline.
    """
    per_state = individual_state_measure_process(state_measure)
    return _wrap_measure(per_state, state_measure, loaded_processes)
# Partially-applied config importer: parses the YAML config into StateMeasure
# tuples and wires each one to state_measure_process.
import_config= functools.partial(funtool.lib.config_parse.import_config, StateMeasure, state_measure_process)
def individual_state_measure_process(state_measure):
    """Resolve the configured measure function and bind the StateMeasure to it.

    Returns a callable accepting (analysis_collection, state_collection, ...).
    """
    module = importlib.import_module(state_measure.measure_module)
    measure_callable = getattr(module, state_measure.measure_function)
    return functools.partial(measure_callable, state_measure)
def _wrap_measure(individual_state_measure_process, state_measure, loaded_processes):
"""
Creates a function on a state_collection, which creates analysis_collections for each state in the collection.
Optionally sorts the collection if the state_measure has a sort_by parameter (see funtool.lib.general.sort_states for details)
"""
def wrapped_measure(state_collection,overriding_parameters=None,loggers=None):
if loggers == None:
loggers = funtool.logger.set_default_loggers()
if loaded_processes != None :
if state_measure.grouping_selectors != None:
for grouping_selector_name in state_measure.grouping_selectors:
state_collection= funtool.state_collection.add_grouping(state_collection, grouping_selector_name, loaded_processes)
states= state_collection.states
measure_parameters= get_measure_parameters(state_measure, overriding_parameters)
if 'sort_by' in measure_parameters.keys():
states= funtool.lib.general.sort_states(states, measure_parameters['sort_by'])
for state_index,state in enumerate(states):
step_size= len(states)//20
if state_index % step_size == 0:
loggers.status_logger.warn("{}: {} %".format( datetime.datetime.now(), round((state_index/len(states) * 100 ), 1) ) )
analysis_collection = funtool.analysis.AnalysisCollection(state,None,{},{})
if state_measure.analysis_selectors != None:
for analysis_selector in state_measure.analysis_selectors:
analysis_collection = loaded_processes["analysis_selector"][analysis_selector].process_function(analysis_collection,state_collection)
if analysis_collection != None:
individual_state_measure_process(analysis_collection,state_collection,overriding_parameters)
return state_collection
return wrapped_measure
def state_and_parameter_measure(state_measure_function):
    """
    Decorator for State Measures that only need the state plus parameters.
    The wrapped function stores the measure's return value in state.measures
    under the State Measure's name.
    """
    def wrapped_function(state_measure, analysis_collection, state_collection, overriding_parameters=None):
        params = get_measure_parameters(state_measure, overriding_parameters)
        target_state = analysis_collection.state
        target_state.measures[state_measure.name] = state_measure_function(target_state, params)
        return analysis_collection
    return wrapped_function
def state_and_parameter_meta(state_measure_function):
    """
    Decorator for State Measures that only need the state plus parameters.
    The wrapped function records the measure's return value in state.meta
    keyed by the State Measure's name.
    """
    def wrapped_function(state_measure, analysis_collection, state_collection, overriding_parameters=None):
        merged = get_measure_parameters(state_measure, overriding_parameters)
        state = analysis_collection.state
        state.meta[state_measure.name] = state_measure_function(state, merged)
        return analysis_collection
    return wrapped_function
def state_and_parameter_data(state_measure_function):
    """
    Decorator for State Measures that only require the state and parameters
    Saves return value to state.data with the State Measure's name as the key

    :param state_measure_function: callable(state, parameters) -> value
    :returns: function usable as a StateMeasure measure_function
    """
    def wrapped_function(state_measure, analysis_collection, state_collection, overriding_parameters=None):
        # Merge configured parameters with per-call overrides before measuring.
        measure_parameters = get_measure_parameters(state_measure, overriding_parameters)
        measure_value= state_measure_function(analysis_collection.state,measure_parameters)
        analysis_collection.state.data[state_measure.name] = measure_value
        return analysis_collection
    return wrapped_function
def analysis_collection_and_parameter_measure(state_measure_function):
    """
    Decorator for State Measures that need the whole analysis_collection plus
    parameters. The wrapped function stores the result in
    analysis_collection.state.measures under the State Measure's name.
    """
    def wrapped_function(state_measure, analysis_collection, state_collection, overriding_parameters=None):
        params = get_measure_parameters(state_measure, overriding_parameters)
        result = state_measure_function(analysis_collection, params)
        analysis_collection.state.measures[state_measure.name] = result
        return analysis_collection
    return wrapped_function
def analysis_collection_and_parameter_meta(state_measure_function):
    """
    Decorator for State Measures that only require the analysis_collection and parameters
    Saves return value to analysis_collection.state.meta with the State Measure's name as the key

    :param state_measure_function: callable(analysis_collection, parameters) -> value
    :returns: function usable as a StateMeasure measure_function
    """
    def wrapped_function(state_measure, analysis_collection, state_collection, overriding_parameters=None):
        # Merge configured parameters with per-call overrides before measuring.
        measure_parameters = get_measure_parameters(state_measure, overriding_parameters)
        measure_value= state_measure_function(analysis_collection,measure_parameters)
        analysis_collection.state.meta[state_measure.name] = measure_value
        return analysis_collection
    return wrapped_function
def analysis_collection_and_parameter_data(state_measure_function):
    """
    Decorator for State Measures that only require the analysis_collection and parameters
    Saves return value to analysis_collection.state.data with the State Measure's name as the key

    :param state_measure_function: callable(analysis_collection, parameters) -> value
    :returns: function usable as a StateMeasure measure_function
    """
    def wrapped_function(state_measure, analysis_collection, state_collection, overriding_parameters=None):
        # Merge configured parameters with per-call overrides before measuring.
        measure_parameters = get_measure_parameters(state_measure, overriding_parameters)
        measure_value= state_measure_function(analysis_collection,measure_parameters)
        analysis_collection.state.data[state_measure.name] = measure_value
        return analysis_collection
    return wrapped_function
# Alias: parameter merging is delegated to the shared helper in funtool.lib.general.
get_measure_parameters= funtool.lib.general.get_parameters
| {
"repo_name": "pjanis/funtool",
"path": "funtool/state_measure.py",
"copies": "2",
"size": "8334",
"license": "mit",
"hash": -4782947655590415000,
"line_mean": 53.4705882353,
"line_max": 157,
"alpha_frac": 0.7426205904,
"autogenerated": false,
"ratio": 4.478237506716819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016946733824305373,
"num_lines": 153
} |
# A measure performed and recorded on an individual state
import collections
import yaml
import importlib
import functools
import funtool.state_collection
import funtool.analysis
import funtool.logger
import funtool.lib.config_parse
# Immutable definition of a single per-group measure; fields are documented
# in the comment block below.
GroupMeasure = collections.namedtuple('GroupMeasure',['name','measure_module','measure_function','analysis_selectors','grouping_selectors','parameters'])
# A GroupMeasure is used with an AnalysisCollection and a StateCollection to measure a Group in the StateCollection
# Unlike State measures, a grouping selector is required
# Each member of the grouping will have a new key added to it's measures (or updated if the key already exists)
#
# name a string identifying the measure ( from the key in the YAML measure definition )
# measure_module a string indicating where the measure_function is defined
# measure_function a string with the name of the function which measures the group
# analysis_selectors a list of selectors which are run sequentially to update the AnalysisCollection
# grouping_selectors a list of group_selectors which are used to create groups in the StateCollection before any analysis is run
# parameters a dict of parameters passed to the measure
#
# After measuring each member of the grouping, the measure process returns a StateCollection
def group_measure_process(group_measure, loaded_processes):
    """Build a process function for *group_measure*.

    Returns a function that accepts a state_collection, to be used as a
    process in an analysis pipeline.
    """
    per_group = individual_group_measure_process(group_measure)
    return _wrap_measure(per_group, group_measure, loaded_processes)
# Partially-applied config importer: parses the YAML config into GroupMeasure
# tuples and wires each one to group_measure_process.
import_config= functools.partial(funtool.lib.config_parse.import_config, GroupMeasure, group_measure_process)
def individual_group_measure_process(group_measure):
    """Resolve the configured measure function and bind the GroupMeasure to it.

    Returns a callable accepting (analysis_collection, state_collection, ...).
    """
    module = importlib.import_module(group_measure.measure_module)
    measure_callable = getattr(module, group_measure.measure_function)
    return functools.partial(measure_callable, group_measure)
def _wrap_measure(individual_group_measure_process, group_measure, loaded_processes):
    """
    Creates a function on a state_collection, which creates analysis_collections for each group in the collection.

    :param individual_group_measure_process: callable(analysis_collection, state_collection)
    :param group_measure: GroupMeasure driving grouping/selector lookups
    :param loaded_processes: dict of named processes; when None the wrapper is a no-op
    """
    def wrapped_measure(state_collection,overriding_parameters=None,loggers=None):
        if loggers == None:
            loggers = funtool.logger.set_default_loggers()
        if loaded_processes != None :
            if group_measure.grouping_selectors != None:
                for grouping_selector_name in group_measure.grouping_selectors:
                    state_collection= funtool.state_collection.add_grouping(state_collection, grouping_selector_name, loaded_processes)
                    for group in funtool.state_collection.groups_in_grouping(state_collection, grouping_selector_name):
                        analysis_collection = funtool.analysis.AnalysisCollection(None,group,{},{})
                        if group_measure.analysis_selectors != None:
                            for analysis_selector in group_measure.analysis_selectors:
                                analysis_collection = loaded_processes["analysis_selector"][analysis_selector].process_function(analysis_collection,state_collection)
                        # NOTE(review): unlike the state-measure wrapper,
                        # overriding_parameters is accepted but never forwarded
                        # to the measure call below -- confirm this asymmetry
                        # is intentional.
                        if analysis_collection != None:
                            individual_group_measure_process(analysis_collection,state_collection)
        return state_collection
    return wrapped_measure
def get_measure_parameters(group_measure, overriding_parameters):
    """Merge a measure's configured parameters with per-call overrides.

    Returns the measure's own parameters object when no overrides are given
    (possibly None, preserving the historical contract). When overrides are
    present they are merged into a *copy*, so the GroupMeasure's shared
    parameter dict is never mutated across calls (the original implementation
    wrote overrides into group_measure.parameters in place).
    """
    measure_parameters = group_measure.parameters
    if overriding_parameters is not None:
        # Copy before merging: group_measure.parameters is shared state.
        measure_parameters = dict(measure_parameters) if measure_parameters else {}
        for param, val in overriding_parameters.items():
            measure_parameters[param] = val
    return measure_parameters
| {
"repo_name": "ActiveLearningLab/funtool",
"path": "funtool/group_measure.py",
"copies": "2",
"size": "3861",
"license": "mit",
"hash": -7889376942432989000,
"line_mean": 54.9565217391,
"line_max": 165,
"alpha_frac": 0.7275317275,
"autogenerated": false,
"ratio": 4.580071174377224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6307602901877224,
"avg_score": null,
"num_lines": null
} |
# A measure performed and recorded on an individual state
import collections
import yaml
import importlib
import functools
import measure.analysis
StateMeasure = collections.namedtuple('StateMeasure',['measure_module','measure_function','state_selectors','group_selectors','parameters'])
# A StateMeasure is used with an AnalysisCollection and a StateCollection to measure each State in the StateCollection
# Each State will have a new key added to its measures (or updated if the key already exists)
# After measuring the state, an AnalysisCollection is returned
#
# measure_module      a string indicating the importable module where measure_function is defined
# measure_function    a string with the name of the function which measures the state
# state_selectors     a list of selectors which are run sequentially to update the AnalysisCollection
# group_selectors     a list of group_selectors which are used to create groups in the StateCollection before any analysis is run
# parameters          a dict of parameters passed to the measure (may be None)
def import_config(config_file_location):
    """
    Loads StateMeasure definitions from a YAML config file.

    :param config_file_location: path to a YAML file mapping measure names to
        StateMeasure keyword arguments
    :returns: dict mapping each measure name to a tuple of
        (StateMeasure, process function built by state_measure_process)
    """
    new_state_measures = {}
    with open(config_file_location) as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and can
        # construct arbitrary Python objects from untrusted config files. The
        # config only needs plain mappings/lists, so safe_load suffices.
        yaml_config = yaml.safe_load(f)
    for state_measure_name, state_measure_parameters in yaml_config.items():
        # for ** explanation https://docs.python.org/2/tutorial/controlflow.html#tut-unpacking-arguments
        new_state_measure = StateMeasure(**state_measure_parameters)
        new_state_measures[state_measure_name] = (new_state_measure, state_measure_process(new_state_measure))
    return new_state_measures
def state_measure_process(state_measure): #returns a function, that accepts a state_collection, to be used as a process
    # Compose the two steps: resolve and bind the configured measure function
    # once, then wrap it so it is applied to every state in a collection.
    return _wrap_measure(individual_state_measure_process(state_measure))
def individual_state_measure_process(state_measure):
    """Resolve the configured measure function and pre-bind the StateMeasure.

    Imports state_measure.measure_module, looks up measure_function on it, and
    returns a partial with state_measure bound as the first argument.
    """
    measure_module = importlib.import_module(state_measure.measure_module)
    measure_function = getattr(measure_module, state_measure.measure_function)
    return functools.partial(measure_function, state_measure)
def _wrap_measure(individual_measure_process):
    """
    TODO: should move to MeasureSet when it is complete. Wraps a per-state
    measure so it runs over a whole StateCollection instead of a single
    AnalysisCollection.
    """
    def wrapped_measure(state_collection):
        # Apply the measure to each state, each time inside a fresh AnalysisCollection.
        for current_state in state_collection.states:
            fresh_collection = measure.analysis.AnalysisCollection(current_state, None, [])
            individual_measure_process(fresh_collection, state_collection)
        return state_collection
    return wrapped_measure
def _state_and_parameter_measure(state_measure_function):
    """Wraps a (state, parameters) measure function so it can run on an
    AnalysisCollection: the result is stored in the state's measures dict and
    the AnalysisCollection is returned."""
    def wrapped_function(state_measure, analysis_collection, state_collection, overriding_parameters=None):
        measure_parameters = _get_measure_parameters(state_measure, overriding_parameters)
        measure_value= state_measure_function(analysis_collection.state,measure_parameters)
        # Store under the 'measure_name' parameter if given, else under the function's own name.
        # NOTE(review): assumes measure_parameters is not None here; when the
        # StateMeasure has no parameters and no overrides, .get() would raise — confirm callers.
        analysis_collection.state.measures[measure_parameters.get('measure_name',state_measure_function.__name__)] = measure_value
        return analysis_collection
    return wrapped_function
def _get_measure_parameters(state_measure, overriding_parameters):
measure_parameters= state_measure.parameters
if overriding_parameters != None:
for param, val in overriding_parameters.items():
measure_parameters[param] = val
return measure_parameters
| {
"repo_name": "pjanis/funtool-development",
"path": "measure/measure/state_measure.py",
"copies": "1",
"size": "3371",
"license": "mit",
"hash": 2851232545256262000,
"line_mean": 47.1571428571,
"line_max": 140,
"alpha_frac": 0.7517057253,
"autogenerated": false,
"ratio": 4.294267515923567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5545973241223566,
"avg_score": null,
"num_lines": null
} |
"""A mechanism to load header files in separate processes using the `multiprocessing` module """
import logging
import os
from multiprocessing import Pool
from multiprocessing.pool import AsyncResult
from typing import Sequence, List, Optional, Tuple
from PyQt5 import QtCore
from sastool.io.credo_cct import Header
# Module-level logger, forced to DEBUG so job-progress messages are always emitted.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def loadHeader(fsn: int, dirs: Sequence[str], headerfileformat: str) -> Tuple[int, Optional[Header]]:
    """Search `dirs` in order for the header file belonging to `fsn`.

    Returns (fsn, Header) from the first directory that contains the file,
    or (fsn, None) when none of the directories has it.
    """
    filename = headerfileformat.format(fsn)
    for directory in dirs:
        try:
            header = Header.new_from_file(os.path.join(directory, filename))
        except FileNotFoundError:
            continue
        return fsn, header
    return fsn, None
class HeaderLoader(QtCore.QObject):
    """Loads header files in parallel worker processes.

    submit() fans the fsn list out to a multiprocessing.Pool; a Qt timer then
    polls the async results, emitting `progress` as jobs complete and
    `finished` (after tearing down the pool) once all jobs are done.
    """
    TIMERINTERVAL: int = 100  # milliseconds
    pool: Optional[Pool]  # worker pool; exists only while a load is in progress
    outstanding: List[AsyncResult]  # submitted jobs not yet collected
    fsns: Sequence[int]  # file sequence numbers to load
    dirs: Sequence[str]  # directories searched (in order) for each header file
    headerfileformat: str  # file name template, formatted with the fsn
    results: List[Tuple[int, Optional[Header]]]  # collected (fsn, header-or-None) pairs
    finished = QtCore.pyqtSignal()  # loading finished.
    progress = QtCore.pyqtSignal(int, int)  # total count, ready count
    timerid: int = 0  # id of the polling timer started by submit()
    def __init__(self, fsns: Sequence[int], dirs: Sequence[str], headerfileformat: str):
        """Store the load parameters; the pool is created lazily in submit()."""
        super().__init__()
        self.pool = None # do not create a pool yet
        self.outstanding = []
        self.fsns = fsns
        self.dirs = dirs
        self.headerfileformat = headerfileformat
        self.results = []
    @property
    def idle(self) -> bool:
        """True when no submitted jobs remain outstanding."""
        logger.debug('Outstanding jobs in header loader: {}'.format(len(self.outstanding)))
        return not self.outstanding
    def submit(self):
        """Start loading all configured fsns asynchronously and begin polling."""
        logger.debug('Submitting header loading jobs for {} fsns.'.format(len(self.fsns)))
        self.pool = Pool()
        self.results = []
        # One async job per fsn; each returns (fsn, header-or-None).
        self.outstanding = [self.pool.apply_async(loadHeader, [f, self.dirs, self.headerfileformat]) for f in self.fsns]
        self.timerid = self.startTimer(self.TIMERINTERVAL)
        self.progress.emit(len(self.fsns), 0)
    def timerEvent(self, event: QtCore.QTimerEvent) -> None:
        """Periodic poll: harvest finished jobs, report progress, finish up."""
        ready = [o for o in self.outstanding if o.ready()]  # select those which are ready
        if (not ready) and (self.outstanding):
            # do not block the event loop
            event.accept()
            return
        self.results.extend([r.get() for r in ready])
        self.outstanding = [o for o in self.outstanding if o not in ready]
        self.progress.emit(len(self.fsns), len(self.results))
        if not self.outstanding:
            # we have finished: shut the pool down and stop the polling timer
            self.pool.close()
            self.pool.join()
            self.pool = None
            self.killTimer(self.timerid)
            self.finished.emit()
        event.accept()
    def stop(self):
        """Abort the load: terminate the workers and drop all outstanding jobs.

        NOTE(review): assumes a pool exists; calling stop() before submit()
        would raise AttributeError on the None pool — confirm callers.
        """
        self.pool.terminate()
        self.pool.join()
        self.pool = None
        self.outstanding = []
    def isRunning(self) -> bool:
        """True while jobs are outstanding or a pool is still alive."""
        return bool(self.outstanding) or (self.pool is not None)
    def headers(self) -> List[Header]:
        """The successfully loaded headers sorted by fsn; failed loads are omitted."""
        return sorted([header for fsn, header in self.results if header is not None], key=lambda h: h.fsn)
    def setFSNs(self, fsns:Sequence[int]):
        """Replace the fsn list; only allowed while no load is running."""
        if self.isRunning():
            raise ValueError('Cannot set FSNs while running')
        self.fsns = list(fsns)
        self.results = []
        self.outstanding = []
    def setPath(self, path:List[str]):
        """Replace the list of directories searched for header files."""
        self.dirs = list(path)
| {
"repo_name": "awacha/cct",
"path": "cct/processinggui/project/headerloader.py",
"copies": "1",
"size": "3394",
"license": "bsd-3-clause",
"hash": -1299650249425349600,
"line_mean": 34.3541666667,
"line_max": 120,
"alpha_frac": 0.6340601061,
"autogenerated": false,
"ratio": 3.8263810597519727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49604411658519726,
"avg_score": null,
"num_lines": null
} |
"""A medical image analysis pipeline.
The pipeline is used for brain tissue segmentation using a decision forest classifier.
"""
import argparse
import datetime
import os
import sys
import timeit
import SimpleITK as sitk
import numpy as np
from tensorflow.python.platform import app
from sklearn.mixture import GaussianMixture
from scipy import stats as scipy_stats
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
import mialab.classifier.decision_forest as df
import mialab.data.conversion as conversion
import mialab.data.structure as structure
import mialab.data.loading as load
import mialab.utilities.file_access_utilities as futil
import mialab.utilities.pipeline_utilities as putil
import mialab.utilities.statistic_utilities as statistics
FLAGS = None  # the program flags (populated from argparse in the __main__ block)
IMAGE_KEYS = [structure.BrainImageTypes.T1, structure.BrainImageTypes.T2, structure.BrainImageTypes.GroundTruth]  # the list of images we will load
TEST_BATCH_SIZE = 2  # 1..30, the higher the faster but more memory usage
NORMALIZE_FEATURES = False  # Normalize feature vectors to mean 0 and std 1 (z-score) before fit/predict
def main(_):
    """Brain tissue segmentation using a Gaussian mixture model.

    The main routine executes the medical image analysis pipeline:

        - Image loading
        - Registration
        - Pre-processing
        - Feature extraction
        - Gaussian mixture model building
        - Segmentation using the model on unseen images
        - Post-processing of the segmentation
        - Evaluation of the segmentation
    """
    # load atlas images
    putil.load_atlas_images(FLAGS.data_atlas_dir)
    print('-' * 5, 'Training...')
    # generate a model directory (use datetime to ensure that the directory is empty)
    # we need an empty directory because TensorFlow will continue training an existing model if it is not empty
    t = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
    model_dir = os.path.join(FLAGS.model_dir, t)
    os.makedirs(model_dir, exist_ok=True)
    # crawl the training image directories
    crawler = load.FileSystemDataCrawler(FLAGS.data_train_dir,
                                         IMAGE_KEYS,
                                         futil.BrainImageFilePathGenerator(),
                                         futil.DataDirectoryFilter())
    data_items = list(crawler.data.items())
    train_data_size = len(data_items)
    pre_process_params = {'zscore_pre': True,
                          'coordinates_feature': True,
                          'intensity_feature': True,
                          'gradient_intensity_feature': True}
    start_time_total_train = timeit.default_timer()
    # all training images are processed as a single batch
    batch_data = dict(data_items)
    # load images for training and pre-process
    images = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
    print('pre-processing done')
    # generate feature matrix and label vector
    data_train = np.concatenate([img.feature_matrix[0] for img in images])
    labels_train = np.concatenate([img.feature_matrix[1] for img in images])
    if NORMALIZE_FEATURES:
        # normalize data (mean 0, std 1)
        data_train = scipy_stats.zscore(data_train)
    start_time = timeit.default_timer()
    # Gaussian mixture model
    # ##############################################################################################################
    # NOTE(review): GaussianMixture.fit ignores the label argument (unsupervised
    # fit); component indices are not guaranteed to correspond to tissue labels
    # — confirm the label/component mapping is handled downstream.
    thegmm = GaussianMixture(n_components=3, covariance_type='tied')
    thegmm.fit(data_train, labels_train)
    # ##############################################################################################################
    print(' Time elapsed:', timeit.default_timer() - start_time, 's')
    time_total_train = timeit.default_timer() - start_time_total_train
    start_time_total_test = timeit.default_timer()
    print('-' * 5, 'Testing...')
    result_dir = os.path.join(FLAGS.result_dir, t)
    os.makedirs(result_dir, exist_ok=True)
    # initialize evaluator
    evaluator = putil.init_evaluator(result_dir)
    # crawl the test image directories
    crawler = load.FileSystemDataCrawler(FLAGS.data_test_dir,
                                         IMAGE_KEYS,
                                         futil.BrainImageFilePathGenerator(),
                                         futil.DataDirectoryFilter())
    data_items = list(crawler.data.items())
    all_probabilities = None
    for batch_index in range(0, len(data_items), TEST_BATCH_SIZE):
        # slicing manages out of range; no need to worry
        batch_data = dict(data_items[batch_index: batch_index + TEST_BATCH_SIZE])
        # load images for testing and pre-process
        pre_process_params['training'] = False
        images_test = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
        images_prediction = []
        images_probabilities = []
        for img in images_test:
            print('-' * 10, 'Testing', img.id_)
            start_time = timeit.default_timer()
            # ##############################################################################################################
            features = img.feature_matrix[0]
            if NORMALIZE_FEATURES:
                features = scipy_stats.zscore(features)
            predictions = thegmm.predict(features)
            probabilities = thegmm.predict_proba(features)
            # accumulate per-image probability maps (optionally dumped at the end, see commented line below)
            if all_probabilities is None:
                all_probabilities = np.array([probabilities])
            else:
                all_probabilities = np.concatenate((all_probabilities, [probabilities]), axis=0)
            # ##############################################################################################################
            print(' Time elapsed:', timeit.default_timer() - start_time, 's')
            # convert prediction and probabilities back to SimpleITK images
            image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
                                                                            img.image_properties)
            image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)
            # evaluate segmentation without post-processing
            evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)
            images_prediction.append(image_prediction)
            images_probabilities.append(image_probabilities)
        # post-process segmentation and evaluate with post-processing
        post_process_params = {'crf_post': True}
        images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
                                                         post_process_params, multi_process=True)
        for i, img in enumerate(images_test):
            evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
                               img.id_ + '-PP')
            # save results
            sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
            sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)
    time_total_test = timeit.default_timer() - start_time_total_test
    # write summary of parameters to results dir
    with open(os.path.join(result_dir, 'summary.txt'), 'w') as summary_file:
        print('Result dir: {}'.format(result_dir))
        print('Result dir: {}'.format(result_dir), file=summary_file)
        print('Training data size: {}'.format(train_data_size), file=summary_file)
        print('Total training time: {:.1f}s'.format(time_total_train), file=summary_file)
        print('Total testing time: {:.1f}s'.format(time_total_test), file=summary_file)
        print('Voxel Filter Mask: {}'.format(putil.FeatureExtractor.VOXEL_MASK_FLT), file=summary_file)
        print('Normalize Features: {}'.format(NORMALIZE_FEATURES), file=summary_file)
        print('GMM', file=summary_file)
        stats = statistics.gather_statistics(os.path.join(result_dir, 'results.csv'))
        print('Result statistics:', file=summary_file)
        print(stats, file=summary_file)
    # all_probabilities.astype(np.float16).dump(os.path.join(result_dir, 'all_probabilities.npy'))
if __name__ == "__main__":
    """The program's entry point."""
    script_dir = os.path.dirname(sys.argv[0])
    parser = argparse.ArgumentParser(description='Medical image analysis pipeline for brain tissue segmentation')
    # (flag, path relative to this script, help text) for every directory option
    for flag, relative_path, help_text in [
            ('--model_dir', './mia-model', 'Base directory for output models.'),
            ('--result_dir', './mia-result', 'Directory for results.'),
            ('--data_atlas_dir', '../data/atlas', 'Directory with atlas data.'),
            ('--data_train_dir', '../data/train/', 'Directory with training data.'),
            ('--data_test_dir', '../data/test/', 'Directory with testing data.')]:
        parser.add_argument(
            flag,
            type=str,
            default=os.path.normpath(os.path.join(script_dir, relative_path)),
            help=help_text
        )
    FLAGS, unparsed = parser.parse_known_args()
    app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {
"repo_name": "mrunibe/MIALab",
"path": "bin/main_GMM.py",
"copies": "1",
"size": "9906",
"license": "apache-2.0",
"hash": 724993540823667300,
"line_mean": 41.5150214592,
"line_max": 147,
"alpha_frac": 0.6143751262,
"autogenerated": false,
"ratio": 4.149979053204859,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5264354179404859,
"avg_score": null,
"num_lines": null
} |
"""A medical image analysis pipeline.
The pipeline is used for brain tissue segmentation using a decision forest classifier.
"""
import argparse
import datetime
import os
import sys
import timeit
import SimpleITK as sitk
import numpy as np
from tensorflow.python.platform import app
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
import mialab.classifier.decision_forest as df
import mialab.data.conversion as conversion
import mialab.data.structure as structure
import mialab.data.loading as load
import mialab.utilities.file_access_utilities as futil
import mialab.utilities.pipeline_utilities as putil
import mialab.utilities.statistic_utilities as statistics
FLAGS = None  # the program flags (populated from argparse in the __main__ block)
IMAGE_KEYS = [structure.BrainImageTypes.T1, structure.BrainImageTypes.T2, structure.BrainImageTypes.GroundTruth]  # the list of images we will load
TRAIN_BATCH_SIZE = 5  # 1..70, the higher the faster but more memory usage
TEST_BATCH_SIZE = 5  # 1..30, the higher the faster but more memory usage
USE_PREPROCESS_CACHE = False  # cache pre-processed feature matrices on disk under ./mia-cache
def main(_):
    """Brain tissue segmentation using decision forests.

    The main routine executes the medical image analysis pipeline:

        - Image loading
        - Registration
        - Pre-processing
        - Feature extraction
        - Decision forest classifier model building
        - Segmentation using the decision forest classifier model on unseen images
        - Post-processing of the segmentation
        - Evaluation of the segmentation
    """
    # Bug fix: resolve the script directory locally. The module-level
    # `script_dir` only exists when this file is run as a script, so using it
    # here raised NameError when main() was invoked after an import.
    script_dir = os.path.dirname(sys.argv[0])
    # load atlas images
    putil.load_atlas_images(FLAGS.data_atlas_dir)
    print('-' * 5, 'Training...')
    # generate a model directory (use datetime to ensure that the directory is empty)
    # we need an empty directory because TensorFlow will continue training an existing model if it is not empty
    t = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
    model_dir = os.path.join(FLAGS.model_dir, t)
    os.makedirs(model_dir, exist_ok=True)
    # crawl the training image directories
    crawler = load.FileSystemDataCrawler(FLAGS.data_train_dir,
                                         IMAGE_KEYS,
                                         futil.BrainImageFilePathGenerator(),
                                         futil.DataDirectoryFilter())
    data_items = list(crawler.data.items())
    pre_process_params = {'zscore_pre': True,
                          'coordinates_feature': True,
                          'intensity_feature': True,
                          'gradient_intensity_feature': True}
    # initialize decision forest parameters
    df_params = df.DecisionForestParameters()
    df_params.num_classes = 4
    df_params.num_trees = 3
    df_params.max_nodes = 2000
    df_params.model_dir = model_dir
    forest = None
    start_time_total_train = timeit.default_timer()
    for batch_index in range(0, len(data_items), TRAIN_BATCH_SIZE):
        cache_file_prefix = os.path.normpath(os.path.join(script_dir, './mia-cache/batch-' + str(batch_index) + '-' + str(TRAIN_BATCH_SIZE)))
        cache_file_train = cache_file_prefix + '-data_train.npy'
        cache_file_labels = cache_file_prefix + '-data_labels.npy'
        # Bug fix: `and` instead of bitwise `&` — short-circuits, so the path
        # check only runs when caching is enabled.
        if USE_PREPROCESS_CACHE and os.path.exists(cache_file_train):
            print('Using cache from ', cache_file_train)
            data_train = np.load(cache_file_train)
            labels_train = np.load(cache_file_labels)
        else:
            # slicing manages out of range; no need to worry
            batch_data = dict(data_items[batch_index: batch_index+TRAIN_BATCH_SIZE])
            # load images for training and pre-process
            images = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
            print('pre-processing done')
            # generate feature matrix and label vector
            data_train = np.concatenate([img.feature_matrix[0] for img in images])
            labels_train = np.concatenate([img.feature_matrix[1] for img in images])
            if USE_PREPROCESS_CACHE:
                print('Writing cache')
                if not os.path.exists(os.path.dirname(cache_file_prefix)):
                    os.mkdir(os.path.dirname(cache_file_prefix))
                data_train.dump(cache_file_train)
                labels_train.dump(cache_file_labels)
        if forest is None:
            df_params.num_features = data_train.shape[1]
            forest = df.DecisionForest(df_params)
        start_time = timeit.default_timer()
        # ##############################################################################################################
        # Feature evaluation: drop feature column 1 before training.
        # NOTE(review): num_features above was taken from the un-trimmed matrix,
        # and prediction below uses the full feature matrix — confirm this
        # train/test feature mismatch is intentional for the experiment.
        print(data_train)
        print(np.shape(data_train))
        data_train = np.delete(data_train, 1, 1)
        print(data_train)
        print(np.shape(data_train))
        # ##############################################################################################################
        forest.train(data_train, labels_train)
        print(' Time elapsed:', timeit.default_timer() - start_time, 's')
    time_total_train = timeit.default_timer() - start_time_total_train
    print('-' * 5, 'Testing...')
    result_dir = os.path.join(FLAGS.result_dir, t)
    os.makedirs(result_dir, exist_ok=True)
    # initialize evaluator
    evaluator = putil.init_evaluator(result_dir)
    # crawl the test image directories
    crawler = load.FileSystemDataCrawler(FLAGS.data_test_dir,
                                         IMAGE_KEYS,
                                         futil.BrainImageFilePathGenerator(),
                                         futil.DataDirectoryFilter())
    data_items = list(crawler.data.items())
    for batch_index in range(0, len(data_items), TEST_BATCH_SIZE):
        # slicing manages out of range; no need to worry
        batch_data = dict(data_items[batch_index: batch_index + TEST_BATCH_SIZE])
        # load images for testing and pre-process
        pre_process_params['training'] = False
        images_test = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
        images_prediction = []
        images_probabilities = []
        for img in images_test:
            print('-' * 10, 'Testing', img.id_)
            start_time = timeit.default_timer()
            probabilities, predictions = forest.predict(img.feature_matrix[0])
            print(' Time elapsed:', timeit.default_timer() - start_time, 's')
            # convert prediction and probabilities back to SimpleITK images
            image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
                                                                            img.image_properties)
            image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)
            # evaluate segmentation without post-processing
            evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)
            images_prediction.append(image_prediction)
            images_probabilities.append(image_probabilities)
        # post-process segmentation and evaluate with post-processing
        post_process_params = {'crf_post': True}
        images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
                                                         post_process_params, multi_process=True)
        for i, img in enumerate(images_test):
            evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
                               img.id_ + '-PP')
            # save results
            sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
            sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)
    # write summary of parameters to results dir
    with open(os.path.join(result_dir, 'summary.txt'), 'w') as summary_file:
        print('Training data size: {}'.format(len(data_items)), file=summary_file)
        print('Total training time: {:.1f}s'.format(time_total_train), file=summary_file)
        print('Decision forest', file=summary_file)
        print(df_params, file=summary_file)
        stats = statistics.gather_statistics(os.path.join(result_dir, 'results.csv'))
        print('Result statistics:', file=summary_file)
        print(stats, file=summary_file)
if __name__ == "__main__":
    """The program's entry point."""
    script_dir = os.path.dirname(sys.argv[0])
    parser = argparse.ArgumentParser(description='Medical image analysis pipeline for brain tissue segmentation')
    # (flag, path relative to this script, help text) for every directory option
    for flag, relative_path, help_text in [
            ('--model_dir', './mia-model', 'Base directory for output models.'),
            ('--result_dir', './mia-result', 'Directory for results.'),
            ('--data_atlas_dir', '../data/atlas', 'Directory with atlas data.'),
            ('--data_train_dir', '../data/train/', 'Directory with training data.'),
            ('--data_test_dir', '../data/test/', 'Directory with testing data.')]:
        parser.add_argument(
            flag,
            type=str,
            default=os.path.normpath(os.path.join(script_dir, relative_path)),
            help=help_text
        )
    FLAGS, unparsed = parser.parse_known_args()
    app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {
"repo_name": "mrunibe/MIALab",
"path": "bin/feature_evaluation.py",
"copies": "1",
"size": "10024",
"license": "apache-2.0",
"hash": 2291180572795907600,
"line_mean": 41.2953586498,
"line_max": 147,
"alpha_frac": 0.6192138867,
"autogenerated": false,
"ratio": 4.033802816901408,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5153016703601409,
"avg_score": null,
"num_lines": null
} |
"""A medical image analysis pipeline.
The pipeline is used for brain tissue segmentation using a decision forest classifier.
"""
import argparse
import datetime
import os
import sys
import numpy as np
from tensorflow.python.platform import app
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
import mialab.data.structure as structure
import mialab.data.loading as load
import mialab.utilities.file_access_utilities as futil
import mialab.utilities.pipeline_utilities as putil
FLAGS = None  # the program flags (populated from argparse in the __main__ block)
IMAGE_KEYS = [structure.BrainImageTypes.T1, structure.BrainImageTypes.T2, structure.BrainImageTypes.GroundTruth]  # the list of images we will load
TRAIN_BATCH_SIZE = 70  # 1..70, the higher the faster but more memory usage
TEST_BATCH_SIZE = 30  # 1..30, the higher the faster but more memory usage
USE_PREPROCESS_CACHE = False  # cache pre-processed feature matrices on disk under ./mia-cache
def main(_):
    """Visualize pairwise feature relationships of the training data.

    Loads and pre-processes the training images, builds the feature matrix and
    shows a scatter-plot matrix of the seven features, annotating each upper-
    triangle panel with the pairwise correlation coefficient.
    """
    # Bug fix: resolve the script directory locally. The module-level
    # `script_dir` only exists when this file is run as a script, so using it
    # here raised NameError when main() was invoked after an import.
    script_dir = os.path.dirname(sys.argv[0])
    # generate a model directory (use datetime to ensure that the directory is empty)
    # we need an empty directory because TensorFlow will continue training an existing model if it is not empty
    t = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
    model_dir = os.path.join(FLAGS.model_dir, t)
    os.makedirs(model_dir, exist_ok=True)
    # crawl the training image directories
    crawler = load.FileSystemDataCrawler(FLAGS.data_train_dir,
                                         IMAGE_KEYS,
                                         futil.BrainImageFilePathGenerator(),
                                         futil.DataDirectoryFilter())
    data_items = list(crawler.data.items())
    pre_process_params = {'zscore_pre': True,
                          'coordinates_feature': True,
                          'intensity_feature': True,
                          'gradient_intensity_feature': True}
    for batch_index in range(0, len(data_items), TRAIN_BATCH_SIZE):
        cache_file_prefix = os.path.normpath(os.path.join(script_dir, './mia-cache/batch-' + str(batch_index) + '-' + str(TRAIN_BATCH_SIZE)))
        cache_file_train = cache_file_prefix + '-data_train.npy'
        cache_file_labels = cache_file_prefix + '-data_labels.npy'
        # Bug fix: `and` instead of bitwise `&` — short-circuits, so the path
        # check only runs when caching is enabled.
        if USE_PREPROCESS_CACHE and os.path.exists(cache_file_train):
            print('Using cache from ', cache_file_train)
            data_train = np.load(cache_file_train)
            labels_train = np.load(cache_file_labels)
        else:
            # slicing manages out of range; no need to worry
            batch_data = dict(data_items[batch_index: batch_index+TRAIN_BATCH_SIZE])
            # load images for training and pre-process
            images = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
            # generate feature matrix and label vector
            data_train = np.concatenate([img.feature_matrix[0] for img in images])
            labels_train = np.concatenate([img.feature_matrix[1] for img in images])
        # Scatter matrix plot of the train data
        data = pd.DataFrame(data_train, columns=['Feat. 1', 'Feat. 2', 'Feat. 3', 'Feat. 4', 'Feat. 5',
                                                 'Feat. 6', 'Feat. 7'])
        # pd.plotting.scatter_matrix: the top-level pd.scatter_matrix alias was
        # deprecated and later removed from pandas.
        axes = pd.plotting.scatter_matrix(data, alpha=0.2, diagonal='hist')
        # .values replaces DataFrame.as_matrix(), which was removed from pandas.
        corr = data.corr().values
        # use the numpy module imported at file level instead of the plt.np alias
        for i, j in zip(*np.triu_indices_from(axes, k=1)):
            axes[i, j].annotate("%.2f" % corr[i, j], (0.99, 0.98), size=23, xycoords='axes fraction', ha='right', va='top')
        n = len(data.columns)
        for x in range(n):
            for y in range(n):
                # to get the axis of subplots
                ax = axes[x, y]
                # keep the x axis label horizontal and move it below the plot
                ax.xaxis.label.set_rotation(0)
                ax.xaxis.label.set_size(17)
                ax.xaxis.set_label_coords(0.5, -0.3)
                # keep the y axis label horizontal and move it beside the plot
                ax.yaxis.label.set_rotation(0)
                ax.yaxis.label.set_size(17)
                ax.yaxis.set_label_coords(-0.3, 0.5)
                # to make sure y axis names are outside the plot area
                ax.yaxis.labelpad = 50
        # plt.title('Scatter Plot Matrix', fontsize=17, y=7.1, x=-2.5)
        plt.show()
if __name__ == "__main__":
    """The program's entry point."""
    script_dir = os.path.dirname(sys.argv[0])
    parser = argparse.ArgumentParser(description='Medical image analysis pipeline for brain tissue segmentation')
    # (flag, path relative to this script, help text) for every directory option
    for flag, relative_path, help_text in [
            ('--model_dir', './mia-model', 'Base directory for output models.'),
            ('--result_dir', './mia-result', 'Directory for results.'),
            ('--data_atlas_dir', '../data/atlas', 'Directory with atlas data.'),
            ('--data_train_dir', '../data/train/', 'Directory with training data.'),
            ('--data_test_dir', '../data/test/', 'Directory with testing data.')]:
        parser.add_argument(
            flag,
            type=str,
            default=os.path.normpath(os.path.join(script_dir, relative_path)),
            help=help_text
        )
    FLAGS, unparsed = parser.parse_known_args()
    app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {
"repo_name": "mrunibe/MIALab",
"path": "bin/scatter_plot.py",
"copies": "1",
"size": "5751",
"license": "apache-2.0",
"hash": 1799847076156905500,
"line_mean": 39.7872340426,
"line_max": 147,
"alpha_frac": 0.618675013,
"autogenerated": false,
"ratio": 3.6818181818181817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.978954642301116,
"avg_score": 0.0021893543614045397,
"num_lines": 141
} |
# amee.py
#
# A simple Python interface to the AMEE API, designed to work on Google App Engine as
# well as standard Python installs. Does not expose the entire API, though the
# AMEE.request method can be used to make arbitrary API calls, and it should be easy
# to extend if necessary.
#
# Example usage:
#
# a = amee.AMEE(username, password)
#
# profile = a.create_profile()
# print "Created AMEE profile with UID %s" % (profile.uid,)
#
# electricity_kwh_per_year = 1000
#
# electricity = profile.create_item(
# "/business/energy/electricity", {"country": "United Kingdom"},
# {"energyPerTime": electricity_kwh_per_year}
# )
#
# print "Electricity: %d kWh per year, resulting in %d kg of CO2" % (electricity_kwh_per_year, electricity.co2())
#
# profile.delete()
# -- Robin Houston and Tom Dyson, January 2010
import logging
import re
import urllib
MEMCACHE_HOSTS = ['127.0.0.1:11211']  # memcached servers (host:port) used outside Google App Engine
class MemcacheWrapper(object):
    '''Adapter exposing the python-memcache client through the same
    get/set-with-namespace interface as Google App Engine's memcache module.
    Namespacing is emulated by prefixing the key with "<namespace>_".
    '''
    def __init__(self):
        import memcache
        self.mc = memcache.Client(MEMCACHE_HOSTS, debug=0)
    def get(self, key, namespace):
        '''Fetch the value stored under key within namespace, or None.'''
        namespaced_key = '%s_%s' % (namespace, key)
        return self.mc.get(namespaced_key)
    def set(self, key, result, namespace):
        '''Store result under key within namespace; returns the client's status.'''
        namespaced_key = '%s_%s' % (namespace, key)
        return self.mc.set(namespaced_key, result)
def _fetch(uri, method, payload, follow_redirects, deadline, headers):
    '''Stand-in for GAE's urlfetch.fetch built on urllib2.

    The method, follow_redirects and deadline arguments are accepted for
    signature compatibility only; they are not used by this implementation.
    '''
    import urllib2
    request = urllib2.Request(uri, payload, headers)
    result = urllib2.urlopen(request)
    # mimic the urlfetch response attributes expected by callers
    result.status_code = result.code
    result.content = result.read()
    return result
# Prefer App Engine's urlfetch and memcache services; fall back to the local
# urllib2/python-memcache wrappers defined above when not running on GAE.
try:
    from google.appengine.api import (memcache, urlfetch)
    fetch = urlfetch.fetch
except ImportError:
    fetch = _fetch
    memcache = MemcacheWrapper()
# simplejson ships with Django (as used on GAE); elsewhere use the json module.
try:
    from django.utils import simplejson as json
except ImportError:
    import json
DEFAULT_SERVER = 'https://stage.co2.dgen.net' # Use an encrypted transport by default
MEMCACHE_NAMESPACE = 'AMEE'  # namespace passed to memcache get/set by this module
class Error(Exception):
    '''Base class for AMEE-specific errors, either from this module or from the API itself.'''
class APIError(Error):
    '''An error reported by the AMEE API (authentication failure or unexpected HTTP status).'''
class AMEE(object):
'''Represents the AMEE API.
'''
    def __init__(self, username, password, server=DEFAULT_SERVER):
        '''Store credentials and server; an auth token is fetched lazily on first request.

        username -- AMEE API username
        password -- AMEE API password
        server   -- base URL of the AMEE server (encrypted stage server by default)
        '''
        self.username = username
        self.password = password
        self.server = server
        self.authtoken = None
def get_authtoken(self):
'''Get a new authentication token from AMEE (if the old one has expired,
or we haven't got one yet).
'''
self.authtoken = None # Saves a wasted request later if the next line times out
self.authtoken = self._make_request("/auth", "POST", {
"username": self.username,
"password": self.password,
}).headers["authToken"]
if not self.authtoken:
raise APIError("Failed to authenticate with AMEE")
def _make_request(self, path, method, payload, request_headers={}):
if payload is not None and not isinstance(payload, basestring):
payload = urllib.urlencode(payload)
if re.match(r'https?://', path):
uri = path
else:
if not path.startswith("/"):
raise Error("Path '%s' does not start with /" % (path,))
uri = self.server + path
headers = {
"Accept": "application/json",
"AuthToken": self.authtoken,
'Cache-Control' : 'max-age=0',
}
headers.update(request_headers)
response = fetch(uri,
method=method,
payload=payload,
follow_redirects=False,
deadline=10,
headers=headers
)
if response.status_code not in (200, 201, 401):
logging.error("Error response from AMEE: %s", response.content)
raise APIError("Status code %d from %s to %s" % (response.status_code, method, path))
return response
def request(self, path, method="GET", payload=None, request_headers={}):
'''Make a request to the AMEE API, and return the resulting data structure
(parsed from the JSON response).
'''
if not self.authtoken:
self.get_authtoken()
response = self._make_request(path, method, payload, request_headers)
if response.status_code == 401:
# The authtoken must have expired
logging.info("AMEE authentication token expired")
self.get_authtoken()
response = self._make_request(path, method, payload)
if response.status_code == 401:
raise APIError("AMEE rejected fresh authentication token")
if response.status_code == 201:
return response.headers["Location"]
if not response.content:
return None
return json.loads(response.content)
def create_profile(self):
'''Create a new AMEE profile, and return it.
'''
return Profile(self, self.request("/profiles", "POST", {"profile": "true"})["profile"]["uid"])
def profiles(self):
'''Return a list of all profiles.'''
return [ Profile(self, profile["uid"]) for profile in self.request("/profiles", "GET")["profiles"] ]
def drill(self, path, choices, complete=False):
'''Perform a data item drilldown.
If all necessary choices are specified, returns the UID of the data item;
otherwise, returns the next choice that needs to be made in the form of a dict
with keys "name" and "choices". (The "choices" item is an array of permitted
values.)
If the "complete" argument is true, we raise an Error if the specified choices
are incomplete. In this case the return value will always be the UID.
Results are cached using memcache.
Typical applications will not call this method directly: it is used internally
by Profile.create_item(s). You could call it directly if you wanted to allow
a user to specify data items interactively one choice at a time.
'''
choices_string = urllib.urlencode(choices)
memcache_key = ";".join((self.server, path, choices_string, str(complete)))
cached_result = memcache.get(memcache_key, namespace=MEMCACHE_NAMESPACE)
if cached_result is not None:
return cached_result
result = self._drill(path, choices_string, complete)
memcache.set(memcache_key, result, namespace=MEMCACHE_NAMESPACE)
return result
def _drill(self, path, choices_string, complete=False):
'''Perform the drilldown directly, without caching.
'''
if not path.startswith("/"):
raise Error("Path '%s' does not start with /" % (path,))
r_choices = self.request("/data" + path + "/drill?" + choices_string)["choices"]
# The "choices" item is an array of dicts with keys "name" and "value", which
# appear always to be identical. We simplify this structure by replacing each
# such choice with its name.
r_choices["choices"] = [ choice["name"] for choice in r_choices["choices"] ]
if r_choices["name"] == "uid":
# We've finished drilling. This is it!
if not r_choices["choices"]:
raise Error("No choices returned. Did you specify an invalid value?")
uid = r_choices["choices"][0]
return uid
if complete:
raise Error("Incomplete drilldown, '%s' must be specified; possible values are %s" % (
r_choices["name"], r_choices["choices"]))
return r_choices
def fly(self, path, choices, values):
'''Perform an on-the-fly calculation. The parameters are interpreted as in Profile.create_item,
but this method does not require a profile or create any profile items. The return value is
the total carboon footprint of the item, measured in kg / year of carbon dioxide.
'''
uid = self.drill(path, choices)
values_encoded = urllib.urlencode(values)
return self.request("/data" + path + "/" + uid + "?" + values_encoded,)["amount"]["value"]
class Profile(object):
    '''A single AMEE profile, identified by its UID.'''
    def __init__(self, api, uid):
        self.api = api
        self.uid = uid
    def delete(self):
        '''Delete this profile.
        '''
        if self.uid is None:
            raise Error("Profile has already been deleted")
        self.api.request("/profiles/" + self.uid, "DELETE")
        # Mark as deleted so any further use raises Error.
        self.uid = None
    def create_item(self, path, choices, values):
        '''Create a profile item, given the path and drilldown choices for the data item.
        For example, you could record five long-haul return flights as follows:
            long_haul_flights = profile.create_item("/transport/plane/generic",
                { "type": "long haul", "size": "return" },
                { "journeys": 5 }
            )
        '''
        if self.uid is None:
            raise Error("Profile has been deleted")
        if not path.startswith("/"):
            raise Error("Path '%s' does not start with /" % (path,))
        data_item_uid = self.api.drill(path, choices, complete=True)
        params = {"dataItemUid": data_item_uid}
        params.update(values)
        item_uri = self.api.request("/profiles/%s%s" % (self.uid, path), "POST", params)
        return ProfileItem(api=self.api, uri=item_uri)
    def create_items(self, items, common_values=None):
        '''
        Create a number of profile items. The parameter 'items' should be an array
        (or other iterable) whose elements are 3-tuples (path, choices, values).
        In other words, p.create_items(items) is roughly equivalent to
            [ p.create_item(path, choices, values) for path, choices, values in items ]
        but more efficient, because it uses the AMEE batch update API. (One behaviour
        difference is that create_items is atomic, so that if one of the items fails then
        none of them will be created.)
        common_values are values that are passed for each item, unless overridden in
        an individual item. (You could pass the startDate and endDate / duration here,
        for example.)
        The return value is an array of ProfileItem objects, one for each item created.
        '''
        if self.uid is None:
            raise Error("Profile has been deleted")
        # None default avoids the shared-mutable-default-argument pitfall.
        if common_values is None:
            common_values = {}
        profile_items = []
        for path, choices, item_values in items:
            if not path.startswith("/"):
                raise Error("Path '%s' does not start with /" % (path,))
            h = {}
            h.update(common_values)
            h["dataItemUid"] = self.api.drill(path, choices, complete=True)
            h.update(item_values)
            profile_items.append(h)
        response = self.api.request("/profiles/" + self.uid, "POST", \
            json.dumps({"profileItems": profile_items}), {"Content-Type": "application/json"})
        return [ ProfileItem(api=self.api, uri=item["uri"]) for item in response["profileItems"] ]
class ProfileItem(object):
    '''A single item within an AMEE profile, addressed by its URI.'''
    def __init__(self, api, uri):
        self.api = api
        self.uri = uri
    def get(self):
        '''Fetch and return this item's full JSON representation.'''
        return self.api.request(self.uri, "GET")
    def co2(self):
        '''The amount of carbon dioxide represented by this profile item,
        in kilograms per year.
        '''
        amount = self.get()["profileItem"]["amount"]
        unit = amount["unit"]
        if unit != "kg/year":
            raise Error("Profile item uses unit '%s' rather than kg/year" % (unit,))
        return amount["value"]
| {
"repo_name": "robinhouston/AMEE-Python-interface",
"path": "amee.py",
"copies": "1",
"size": "10881",
"license": "mit",
"hash": 2877871821242734600,
"line_mean": 33.875,
"line_max": 113,
"alpha_frac": 0.658946788,
"autogenerated": false,
"ratio": 3.77157712305026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9743990449002349,
"avg_score": 0.03730669240958232,
"num_lines": 312
} |
'A menagerie of children.'
from __future__ import unicode_literals, division
from collections import namedtuple
from datetime import datetime
import fcntl
from heapq import heappop, heappush
import os
from select import select
from select import error as SelectError
from signal import signal, SIGHUP, SIGCHLD, SIGINT, SIGKILL, SIGTERM
from subprocess import Popen, PIPE, STDOUT
import sys
from time import time
# Shared read-only handle used as stdin for every child, so children that
# read stdin get immediate EOF instead of competing for the manager's tty.
dev_null = open('/dev/null', 'r')
def log(color_no, name, message):
    '''Write one timestamped, color-coded log line for *name* to stdout.

    color_no selects an ANSI bright-foreground color (escape "9<n>"); the
    color covers the timestamp and name column, not the message itself.
    '''
    color_on, color_off = '\033[9%dm' % color_no, '\033[0m'
    stamp = datetime.now().strftime('%H:%M:%S')
    tag = '%8s' % name
    # Parenthesized single-argument print behaves identically as a Python 2
    # print statement and a Python 3 print() call, keeping this portable.
    print(color_on + stamp + tag + ' | ' + color_off + message)
def parse_command(cmd):
    '''Expand $VARIABLES in *cmd* and split it on whitespace into argv.'''
    expanded = os.path.expandvars(cmd)
    return expanded.split()
class BaseProcess(object):
    'I keep track of one child.'
    def __init__(self, name, command, color_no):
        self.name = name
        self.command = command
        self.color_no = color_no
        self.process = None      # Popen handle while running, else None
        self.eof = False         # True once the child's stdout reached EOF
        self.signalled = False   # True if we sent this child a signal
        self.buffered = None     # Trailing partial (unterminated) output line
    def signal(self, signo):
        # Signal the whole process group (see set_process_group), so the
        # child's own subprocesses receive it too.
        self.signalled = True
        if self.process:
            os.killpg(self.process.pid, signo)
    @property
    def alive(self):
        # poll() returns None while the child is still running.
        return self.process and self.process.poll() is None
    def reap(self):
        # Collect a dead child's exit status and final output.
        # Returns False if the child is still alive, True otherwise.
        if self.alive:
            return False
        if self.process:
            self.process.wait()
            self.drain()
            if self.process.returncode < 0:
                # A negative returncode means the child died from a signal.
                self.log('killed by signal %d', -self.process.returncode)
            elif self.process.returncode > 0:
                self.log('exited with code %d', self.process.returncode)
            elif not isinstance(self, Daemon):
                # Daemons exit quietly; regular children announce it.
                self.log('exited normally')
        self.process = self.buffered = None
        return True
    def set_process_group(self):
        # Runs in the child between fork and exec: give the child its own
        # session/process group so signal() can target the whole group.
        os.setsid()
    def spawn(self):
        # Start the child unless it is already running.
        if self.process:
            return
        self.process = Popen(parse_command(self.command),
                             stdin=dev_null, stdout=PIPE, stderr=STDOUT,
                             preexec_fn=self.set_process_group)
        self.signalled = False
        self.eof = False
        self.buffered = ''
        if not isinstance(self, Daemon):
            self.log('started with pid %d', self.process.pid)
        # Make pipes non-blocking.
        fd = self.process.stdout.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    def fileno(self):
        # For select(); returns None implicitly when not running.
        if self.process:
            return self.process.stdout.fileno()
    def drain(self):
        # Read whatever is available on the child's stdout and log complete
        # lines; an unterminated trailing line stays in self.buffered.
        if not self.process:
            return
        try:
            data = self.process.stdout.read(8192)
        except IOError:
            # Non-blocking read with nothing available yet.
            return
        data = data.decode('utf-8', 'replace')
        if data == '':
            # An empty read (rather than IOError) means the pipe hit EOF.
            self.eof = True
        self.buffered += data
        while '\n' in self.buffered:
            line, self.buffered = self.buffered.split('\n', 1)
            if line.strip():
                self.log('%s', line)
    def log(self, message, *args):
        # Emit one line in this child's color via the module-level logger.
        log(self.color_no, self.name, message % args)
# ANSI color index used for the manager's own messages (bright white).
system_color_no = 7
class Process(BaseProcess):
    '''A regular managed child (one Procfile entry); plain BaseProcess.'''
    pass
class Daemon(BaseProcess):
    # An internal helper child: logs in the manager's color and is not
    # announced on start/exit (see the isinstance checks in BaseProcess).
    def __init__(self, name, command):
        BaseProcess.__init__(self, name, command, system_color_no)
class WatchdogProcess(Daemon):
    '''Daemon running heywood.watchdog over the given directories.'''
    def __init__(self, directories):
        # -u keeps the watchdog's output unbuffered so events arrive promptly.
        command = '{} -u -m heywood.watchdog {}'.format(sys.executable,
                                                        ' '.join(directories))
        Daemon.__init__(self, 'watch', command)
# A scheduled job: 'ready' is the earliest time() at which 'callback' runs.
Deferred = namedtuple('Deferred', 'ready callback')
class ProcessManager(object):
    'I keep track of ALL THE CHILDREN.'
    def __init__(self):
        self.children = []        # All BaseProcess instances under management
        self.deferred = []        # Min-heap of Deferred(ready, callback) jobs
        self.shutdown = False     # True once termination has been requested
        self.auto_respawn = True  # Automatically restart crashed children?
    def go(self):
        # Main entry point: spawn everything and loop until all children exit.
        self.install_signal_handlers()
        self.spawn_all()
        try:
            self.loop()
        finally:
            # This is here in case there are bugs in here somewhere.
            self.signal_all(SIGKILL, BaseProcess, silent=True)
    def loop(self):
        # Alternate between running due deferred jobs and pumping output.
        while self.children:
            self.do_ready_deferreds()
            readable = self.select()
            self.drain(readable)
    def do_ready_deferreds(self):
        # Pop and run every deferred job whose scheduled time has passed.
        while self.deferred and self.deferred[0].ready < time():
            job = heappop(self.deferred)
            job.callback()
    def defer(self, ready, callback):
        # Schedule callback to run from the main loop, no earlier than 'ready'.
        heappush(self.deferred, Deferred(ready, callback))
    def reap_zombies(self):
        # Collect dead children; schedule respawns unless shutting down.
        for child in self.children:
            if not child.alive:
                child.reap()
                if self.shutdown:
                    continue
                if self.auto_respawn or child.signalled:
                    # Delay one second to avoid a tight crash/respawn loop.
                    self.defer(time() + 1, child.spawn)
        if self.shutdown:
            # During shutdown, drop finished children from the roster so
            # loop() can terminate.
            self.children = [c for c in self.children if c.alive]
    def select(self, timeout=1):
        # Wait up to 'timeout' seconds for child output; returns the list
        # of children whose pipes are readable.
        pipes = dict((child.fileno(), child)
                     for child in self.children
                     if not child.eof)
        if not pipes:
            return []
        # Drop None filenos (children that are not currently running).
        fds = filter(None, pipes.keys())
        try:
            readable, _, _ = select(fds, [], [], timeout)
        except SelectError:
            # Presumably interrupted by a signal (EINTR); the handler has
            # deferred the real work, so just report nothing readable.
            readable = []
        return [pipes[fd] for fd in readable]
    def drain(self, children):
        for child in children:
            child.drain()
    def install_signal_handlers(self):
        signal(SIGINT, self.termination_handler)
        signal(SIGTERM, self.termination_handler)
        signal(SIGHUP, self.restart_handler)
        signal(SIGCHLD, self.zombie_handler)
    def spawn_all(self):
        for child in self.children:
            if not child.alive:
                child.spawn()
    def zombie_handler(self, signo, frame):
        # Keep signal handlers minimal: defer the real work to the loop.
        self.defer(0, self.reap_zombies)
    def restart_all(self):
        # TERM regular children (daemons keep running), then respawn.
        self.signal_all(SIGTERM, Process)
        self.log('re-spawning crashed children')
        self.spawn_all()
    def restart_handler(self, signo, frame):
        self.defer(0, self.restart_all)
    def termination_handler(self, signo, frame):
        # First request asks nicely with SIGTERM; a repeated request
        # escalates to SIGKILL.
        if self.shutdown:
            self.signal_all(SIGKILL, BaseProcess)
        else:
            self.signal_all(SIGTERM, BaseProcess)
            self.shutdown = True
    def signal_all(self, signo, klass, silent=False):
        if not silent:
            self.log('sending signal %d to all children', signo)
        for child in self.children:
            if isinstance(child, klass):
                child.signal(signo)
    def setup_env(self):
        # Children must not buffer stdout, or their log lines arrive late.
        os.environ['PYTHONUNBUFFERED'] = 'true'
    def read_env(self, f):
        # Load NAME=value pairs from an env file into os.environ.
        for line in f:
            name, value = line.split('=', 1)
            os.environ[name.strip()] = value.strip()
    def read_procfile(self, f):
        # Create one Process per "name: command" line of a Procfile.
        for i, line in enumerate(f):
            name, command = line.strip().split(':', 1)
            # Cycle through the six bright ANSI colors (1..6).
            color_no = 1 + i % 6
            child = Process(name.strip(), command.strip(), color_no)
            self.children.append(child)
    def log(self, message, *args):
        log(system_color_no, 'system', message % args)
    def watch(self, directories):
        # Watch mode: a watchdog daemon triggers restarts via SIGHUP, and
        # crashed children are no longer respawned automatically.
        self.children.append(WatchdogProcess(directories))
        self.auto_respawn = False
| {
"repo_name": "doptio/heywood",
"path": "src/heywood/manager.py",
"copies": "1",
"size": "7353",
"license": "mit",
"hash": -451419628205296800,
"line_mean": 28.6491935484,
"line_max": 78,
"alpha_frac": 0.5699714402,
"autogenerated": false,
"ratio": 3.929983965793693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4999955405993693,
"avg_score": null,
"num_lines": null
} |
"""A merge sort which accepts an array as input and recursively
splits an array in half and sorts and combines them.
"""
"""https://en.wikipedia.org/wiki/Merge_sort """
def merge(arr: list[int]) -> list[int]:
    """Sort *arr* in place using recursive merge sort and return it.

    The merge step takes from the left half on ties (``<=``), which makes
    the sort stable (equal elements keep their original relative order).

    >>> merge([10,9,8,7,6,5,4,3,2,1])
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> merge([1,2,3,4,5,6,7,8,9,10])
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> merge([10,22,1,2,3,9,15,23])
    [1, 2, 3, 9, 10, 15, 22, 23]
    >>> merge([100])
    [100]
    >>> merge([])
    []
    """
    if len(arr) > 1:
        middle_length = len(arr) // 2  # Midpoint of the array.
        left_array = arr[:middle_length]   # First half of the elements.
        right_array = arr[middle_length:]  # Second half of the elements.
        merge(left_array)   # Recursively sort the left half...
        merge(right_array)  # ...and the right half.
        left_size = len(left_array)
        right_size = len(right_array)
        left_index = 0   # Read position in left_array.
        right_index = 0  # Read position in right_array.
        index = 0        # Write position in arr.
        # Merge until the smaller of the two halves is exhausted.
        # '<=' (not '<') prefers the left element on ties => stable sort.
        while left_index < left_size and right_index < right_size:
            if left_array[left_index] <= right_array[right_index]:
                arr[index] = left_array[left_index]
                left_index = left_index + 1
            else:
                arr[index] = right_array[right_index]
                right_index = right_index + 1
            index = index + 1
        # Copy any leftover elements from the left half...
        while left_index < left_size:
            arr[index] = left_array[left_index]
            left_index = left_index + 1
            index = index + 1
        # ...and from the right half.
        while right_index < right_size:
            arr[index] = right_array[right_index]
            right_index = right_index + 1
            index = index + 1
    return arr
if __name__ == "__main__":
    # Run the doctest examples embedded in merge()'s docstring.
    import doctest
    doctest.testmod()
| {
"repo_name": "TheAlgorithms/Python",
"path": "sorts/recursive_mergesort_array.py",
"copies": "1",
"size": "2195",
"license": "mit",
"hash": 2812399672507918300,
"line_mean": 33.296875,
"line_max": 74,
"alpha_frac": 0.5325740319,
"autogenerated": false,
"ratio": 3.512,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45445740319,
"avg_score": null,
"num_lines": null
} |
# American Community Survey 5-Year Data
# https://www.census.gov/data/developers/data-sets/acs-5year.2016.html
#
# Notes:
# - Examples: http://api.census.gov/data/2016/acs/acs5/examples.html
# - Places: https://www.census.gov/content/dam/Census/data/developers/understandingplace.pdf
#
# Fact Finder:
# - Home: http://factfinder.census.gov/faces/nav/jsf/pages/index.xhtml
# - Durham city, NC: https://factfinder.census.gov/bkmk/table/1.0/en/ACS/16_5YR/DP05/1600000US3719000 # noqa
import census
import pandas as pd
from us import states
from django.conf import settings
from django.db import transaction
from tsdata.models import CensusProfile, STATE_CHOICES
# Variables: http://api.census.gov/data/2016/acs/acs5/variables.json
# NC maps its race buckets to the B03002 table (race by Hispanic origin),
# so each race count excludes Hispanic/Latino respondents.
NC_RACE_VARS = {
    'B03002_001E': 'total',  # Estimate!!Total
    'B03002_003E': 'white',  # Estimate!!Total!!Not Hispanic or Latino!!White alone
    'B03002_004E': 'black',  # Estimate!!Total!!Not Hispanic or Latino!!Black or African American alone  # noqa
    'B03002_005E': 'native_american',  # Estimate!!Total!!Not Hispanic or Latino!!American Indian and Alaska Native alone  # noqa
    'B03002_006E': 'asian',  # Estimate!!Total!!Not Hispanic or Latino!!Asian alone
    'B03002_007E': 'native_hawaiian',  # Estimate!!Total!!Not Hispanic or Latino!!Native Hawaiian and Other Pacific Islander alone  # noqa
    'B03002_008E': 'other',  # Estimate!!Total!!Not Hispanic or Latino!!Some other race alone  # noqa
    'B03002_009E': 'two_or_more_races',  # Estimate!!Total!!Not Hispanic or Latino!!Two or more races  # noqa
    'B03002_012E': 'hispanic',  # Estimate!!Total!!Hispanic or Latino
    'B03002_002E': 'non_hispanic',  # Estimate!!Total!!Not Hispanic or Latino
}
# Other states use the C02003 (detailed race) table for the race buckets,
# plus the two B03002 Hispanic-origin totals.
OTHER_RACE_VARS = {
    'C02003_001E': 'total',
    'C02003_003E': 'white',
    'C02003_004E': 'black',
    'C02003_005E': 'native_american',
    'C02003_006E': 'asian',
    'C02003_007E': 'native_hawaiian',
    'C02003_008E': 'other',
    'C02003_009E': 'two_or_more_races',
    'B03002_012E': 'hispanic',
    'B03002_002E': 'non_hispanic',
}
# Maps each supported state key (upper-cased) to its Census variable set.
RACE_VARIABLES = {
    settings.NC_KEY.upper(): NC_RACE_VARS,
    settings.IL_KEY.upper(): OTHER_RACE_VARS,
    settings.MD_KEY.upper(): OTHER_RACE_VARS,
}
class ACS(object):
    """Base class to call the ACS API and normalize its output.

    Subclasses set `geography`/`drop_columns` and implement `call_api()`.
    """
    source = "ACS 5-Year Data (2012-2016)"
    geography = None      # e.g. 'county' or 'place'; set by subclasses
    drop_columns = None   # columns subclasses want removed from the result
    def __init__(self, key, state_abbr):
        self.api = census.Census(key, year=2016)
        self.fips = getattr(states, state_abbr).fips
        self.state_abbr = state_abbr
        self.race_variables = RACE_VARIABLES[state_abbr]
        # NAME = geography/location
        # GEO_ID = combination of country, state, county
        self.variables = ['NAME', 'GEO_ID'] + list(self.race_variables.keys())
    def call_api(self):
        """Return the raw API response (a list of dicts); subclass hook."""
        # Bug fix: was `raise NotImplemented()` -- NotImplemented is a
        # comparison sentinel, not an exception, and is not callable.
        raise NotImplementedError
    def get(self):
        """Call the API and return a normalized pandas DataFrame."""
        # load response (list of dicts) into pandas
        df = pd.DataFrame(self.call_api())
        # insert metadata
        df['state'] = self.state_abbr
        df['source'] = self.source
        df['geography'] = self.geography
        # rename common columns
        df.rename(columns={'NAME': 'location', 'GEO_ID': 'id'}, inplace=True)
        # replace census variable names with easier to read race labels
        df.rename(columns=self.race_variables, inplace=True)
        # convert race columns to numerics
        num_cols = list(self.race_variables.values())
        df[num_cols] = df[num_cols].apply(pd.to_numeric)
        # remove unused columns
        if self.drop_columns:
            df.drop(self.drop_columns, axis=1, inplace=True)
        return df
class ACSStateCounties(ACS):
    """
    State County Demographics
    ex: http://api.census.gov/data/2016/acs/acs5?get=NAME&for=county:*&in=state:24
    """
    geography = 'county'
    # The raw county FIPS column is dropped from the normalized result.
    drop_columns = ['county']
    def call_api(self):
        # One row per county in the state (census.ALL).
        return self.api.acs5.state_county(self.variables, self.fips, census.ALL)
class ACSStatePlaces(ACS):
    """
    State Place Demographics
    ex: http://api.census.gov/data/2016/acs/acs5?get=NAME&for=place:*&in=state:24
    """
    geography = 'place'
    # The raw place FIPS column is dropped from the normalized result.
    drop_columns = ['place']
    def call_api(self):
        # One row per place in the state (census.ALL).
        return self.api.acs5.state_place(self.variables, self.fips, census.ALL)
    def get(self):
        df = super(ACSStatePlaces, self).get()
        # ignore Census Designated Places (CDP)
        return df[~df.location.str.contains('CDP')]
def get_state_census_data(key):
    """Download several state Census endpoints into a single DataFrame"""
    frames = []
    for abbr, _name in STATE_CHOICES:
        state = abbr.upper()
        frames.append(ACSStateCounties(key, state).get())
        frames.append(ACSStatePlaces(key, state).get())
    return pd.concat(frames)
@transaction.atomic
def refresh_census_models(data):
    """Replace every CensusProfile row with fresh rows built from *data*.

    Runs inside a single transaction, so readers never observe the table
    between the delete and the bulk insert.
    """
    CensusProfile.objects.all().delete()
    field_names = (
        'id', 'location', 'geography', 'state', 'source',
        'white', 'black', 'native_american', 'asian', 'native_hawaiian',
        'other', 'two_or_more_races', 'hispanic', 'non_hispanic', 'total',
    )
    profiles = [
        CensusProfile(**{name: row[name] for name in field_names})
        for row in data
    ]
    CensusProfile.objects.bulk_create(profiles)
| {
"repo_name": "OpenDataPolicingNC/Traffic-Stops",
"path": "tsdata/acs.py",
"copies": "1",
"size": "5696",
"license": "mit",
"hash": 508224846142748300,
"line_mean": 35.2802547771,
"line_max": 140,
"alpha_frac": 0.6318469101,
"autogenerated": false,
"ratio": 3.1057797164667393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42376266265667395,
"avg_score": null,
"num_lines": null
} |
# American Magnetics, Inc. (AMI) One Axis magnet with PCS_SN14768
import time
import logging
import numpy as np
# from scipy.optimize import brent
# from math import gcd
# from qcodes import Instrument
from qcodes.utils import validators as vals
# from qcodes.instrument.parameter import ManualParameter
from pycqed.analysis import analysis_toolbox as atools
# from pycqed.utilities.general import add_suffix_to_dict_keys
from pycqed.measurement import detector_functions as det
# from pycqed.measurement import composite_detector_functions as cdet
# from pycqed.measurement import mc_parameter_wrapper as pw
from pycqed.measurement import sweep_functions as swf
# from pycqed.measurement import awg_sweep_functions as awg_swf
# from pycqed.analysis import measurement_analysis as ma
# from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_5014
# from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_UHFQC
# from pycqed.measurement.calibration_toolbox import mixer_skewness_calibration_5014
# from pycqed.measurement.optimization import nelder_mead
from pycqed.analysis import analysis_toolbox as a_tools
# import pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts as sq
import logging
import numpy as np
from copy import deepcopy,copy
import qcodes as qc
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
from pycqed.instrument_drivers.pq_parameters import InstrumentParameter
class AMI_Magnet_PCS_SN14768(Instrument):
'''
Instrument used for translating Fields into current settings
and controlling the persistent current switch.
Initialization when the previous measurement did not have the magnet is
a bit awkward. The driver checks the last measurement for the value and if
this does not exists it fails. To do the first initialization it is necessary
to start everything up while having the 'get_field' function return 0 always.
Make a fake folder with the name
'Switch_is_changed_to_SuperConducting_state'.
Then exit and reinitialize with the
'get_field' function returning what it is supposed to.
'''
def __init__(self, name,
Current_source_magnet_name,
Current_source_heater_name,MC_inst,**kw): # IVVI.dac1
super().__init__(name, **kw)
self.protection_state=False
# Set instrumentss
self.add_parameter('i_magnet', parameter_class=InstrumentParameter)
self.i_magnet = Current_source_magnet_name
self.add_parameter('i_heater', parameter_class=InstrumentParameter)
self.i_heater = Current_source_heater_name
self.MC = MC_inst
# Specifications of One Axis AMI magnet with PCS_14768
self.max_current = 5.0 # Amperes
#### Dirty hack to get the z-axis of the new magnet running
self.field_to_current = 0.0496 # Telsa/Ampere
# (Spec sheet says 0.500 kG/A = 50 mT/A)
self.max_field = self.max_current*self.field_to_current # Tesla
#(Spec sheet says 20 kG = 2.0 T)
# self.Ramp_Rate = 0.05 # Ampere/Second
self.max_ramp_rate = 0.2 # Max ramp rate: 1.32 # Ampere/Second
self.init_ramp_rate = 0.025
self.charging_voltage = 1.0 # Volt
self.inductance = 0.7 # Henry
self.persistent_switch_heater_current = 21e-3 # Ampere (21 mA)
self.heater_mvolt_to_current = 0.020*1e-3 # A/mV (20 mA per 1000 mV)
self.persistent_switch_heater_nominal_resistance = 82 # Ohms
#(Measured at room temperature, thus normal conducing.)
self.magnet_resistanc_in_parallel_with_switch = 35 # Ohms
#(Measured at room temperature, thus normal conducting.)
self.add_parameter('source_current',
get_cmd=self.get_source_current,
set_cmd=self.set_source_current,
label='Source Current',
unit='A',
vals=vals.Numbers(min_value=0.,max_value=self.max_current),
docstring='Current supplied to the magnet')
#ramp rate should not be a parameter
self.add_parameter('ramp_rate',
label='Ramp Rate',
unit='A/s',
initial_value=self.init_ramp_rate,
get_parser=float,
vals=vals.Numbers(min_value=0.,max_value=self.max_ramp_rate),
parameter_class=ManualParameter,
docstring='Ramp Rate of the magnet current source')
self.add_parameter('field',
get_cmd=self.get_field,
set_cmd=self.set_field,
label='Persistent Field',
unit='T',
vals=vals.Numbers(min_value=0.,max_value=self.max_field),
docstring='Persistent magnetic field')
# It would be great if the variable field could only be
# set by the program, not by the used. It should only serve as a memory
# of the previous persistent field.
self.add_parameter('switch_state',
get_cmd=self.get_switch_state,
set_cmd=self.set_switch_state,
label='Switch State',
unit='',
vals=vals.Enum('SuperConducting','NormalConducting'),
docstring='Indicating whether the persistent current\
switch is superconducting or normal conducting')
self.protection_state=True
self.get_all()
'''
You need to heat the persistent current switch to turn it from
a superconductor into a normal conductor. When the persistent \
current switch is superconducting there is a persistent current,
when it is normal conducting there is no persistent current and
you can controll the current with the current source.
!! Thus it is important to heat the persistent current switch if
you want to change the field !!
!! Also important that when you want to switch off the persistent
current that you provide the same current with the current source
on the leads !! BEFORE !! you heat the persistent current switch !!
'''
def get_all(self):
self.get_source_current()
self.get_field()
self.switch_state()
return
def get_source_current(self):
return self.i_magnet.measurei()
def set_source_current(self,current):
self.i_magnet.seti(current)
return 'Current set to '+str(current)+' A'
def get_heater_current(self):
return self.heater_mvolt_to_current*self.i_heater()
def get_switch_state(self):
heater_current = self.get_heater_current()
if 1.05*self.persistent_switch_heater_current>heater_current\
>0.95*self.persistent_switch_heater_current:
return 'NormalConducting'
elif 0.05*self.persistent_switch_heater_current>heater_current\
>-0.05*self.persistent_switch_heater_current:
return 'SuperConducting'
else:
raise ValueError('Switch is not in a well defined state!')
def set_switch_state(self,desired_state):
if desired_state == 'SuperConducting' and\
self.get_switch_state() == 'SuperConducting':
print('Already SuperConducting')
return 'SuperConducting'
elif desired_state == 'NormalConducting' and\
self.get_switch_state() == 'NormalConducting':
print('Already NormalConducting')
return 'NormalConducting'
elif desired_state == 'SuperConducting' and\
self.get_switch_state() == 'NormalConducting':
print('Ramping current down...')
self.i_heater(0)
print('Wait 2 minutes to cool the switch.')
time.sleep(120) # 120
print('Switch is now SuperConducting')
self.fake_folder(folder_name='Switch_is_changed_to_SuperConducting_state')
return 'SuperConducting'
elif desired_state == 'NormalConducting' and\
self.get_switch_state() == 'SuperConducting':
if self.i_magnet.measureR() > 1.:
raise ValueError('Magnet leads not connected!')
else:
supplied_current = self.get_source_current()
if self.field()==None:
print('Sourcing current...')
self.i_heater(self.persistent_switch_heater_current\
/self.heater_mvolt_to_current)
print('Wait 30 seconds to heat up the switch.')
time.sleep(30) # 30
print('Switch is now NormalConducting')
self.fake_folder(folder_name='Switch_is_changed_to_NormalConducting_state')
return 'NormalConducting'
elif supplied_current<2e-3 and np.abs(self.field())<1e-4:
print('Sourcing current...')
self.i_heater(self.persistent_switch_heater_current\
/self.heater_mvolt_to_current)
print('Wait 30 seconds to heat up the switch.')
time.sleep(30) # 30
print('Switch is now NormalConducting')
self.fake_folder(folder_name='Switch_is_changed_to_NormalConducting_state')
return 'NormalConducting'
elif not 0.98*supplied_current<self.BtoI(self.field())\
<1.02*supplied_current:
raise ValueError('Current is not \
according to the field value! Use \
bring_source_to_field function to \
bring it to the correct value.')
else:
print('Sourcing current...')
self.i_heater(self.persistent_switch_heater_current\
/self.heater_mvolt_to_current)
print('Wait 30 seconds to heat up the switch.')
time.sleep(30) # 30
print('Switch is now NormalConducting')
self.fake_folder(folder_name='Switch_is_changed_to_NormalConducting_state')
return 'NormalConducting'
else:
return 'Input SuperConducting or NormalConducting as desired state.'
def set_field(self,field):
if not self.protection_state:
return 0.
if self.switch_state() == 'SuperConducting':
raise ValueError('Switch is SuperConducting. Can not change the field.')
elif self.switch_state() =='NormalConducting':
if self.i_magnet.measurei()==0:
self.step_magfield_to_value(field)
# self.field(field)
field_folder_name = 'Changed_field_to_' + str(field) + '_T'
self.fake_folder(folder_name=field_folder_name)
return 'field at ' +str(field)+' T'
elif self.i_magnet.measureR()>1:
raise ValueError('Magnet leads are not connected \
or manget quenched!')
else:
self.step_magfield_to_value(field)
# self.field(field)
field_folder_name = 'Changed_field_to_' + str(field) + '_T'
self.fake_folder(folder_name=field_folder_name)
return 'field at ' +str(field)+' T'
def get_field(self):
return 0.0 # Only add this line when doing the first initialization!
if self.switch_state()=='SuperConducting':
## get the persistent field from the HDF5 file
timestamp = atools.latest_data(contains='Switch_is_changed_to_SuperConducting_state',
return_timestamp=True)[0]
params_dict = {'field':'Magnet.field'}
numeric_params = ['field']
data = a_tools.get_data_from_timestamp_list([timestamp], params_dict,
numeric_params=numeric_params, filter_no_analysis=False)
return data['field'][0]
else: ## Normal conducting
meas_field = self.measure_field()
return meas_field
def step_magfield_to_value(self, field):
MagCurrRatio = self.field_to_current # Tesla/Ampere
Ramp_rate_I = self.ramp_rate()
step_time = 0.01 # in seconds
current_step = Ramp_rate_I * step_time
I_now = self.get_source_current()
current_target = self.BtoI(field)
if current_target >= I_now:
current_step *= +1
if current_target < I_now:
current_step *= -1
num_steps = int(1.*(current_target-I_now)/(current_step))
sweep_time = step_time*num_steps
print('Sweep time is '+str(np.abs(sweep_time))+' seconds')
for tt in range(num_steps):
time.sleep(step_time)
self.i_magnet.seti(I_now)
I_now += current_step
if self.i_magnet.measureR() > 1:
self.i_magnet.seti(0)
raise ValueError('Switch is not in a well defined state!')
self.i_magnet.seti(self.BtoI(field))
self.source_current()
def disconnect_source(self):
    """Ramp the source current to zero so the leads may be disconnected.

    Only allowed while the persistent-current switch is superconducting
    (the field is then held persistently); otherwise an error is raised.
    A marker folder records the event.
    """
    if self.switch_state() != 'SuperConducting':
        raise ValueError('Switch is not superconducting!')
    self.step_magfield_to_value(0)
    self.fake_folder(
        folder_name='Ramped_down_current_you_are_able_to_disconnect_the_source_now')
def bring_source_to_field(self):
    """Ramp the reconnected source up to the persistent field value.

    Used before opening the persistent-current switch: the source must
    carry the same current as the persistent loop.  Requires a
    superconducting switch and connected magnet leads.
    """
    if self.switch_state() != 'SuperConducting':
        raise ValueError('Switch is not superconducting!')
    if self.i_magnet.measureR() > 1.:
        raise ValueError('Magnet leads not connected!')
    self.step_magfield_to_value(self.field())
    self.fake_folder(folder_name='Ramped_current_up_to_match_persistent_current')
def measure_field(self):
    """Infer the field (Tesla) from the measured source current.

    Returns ``None`` (after printing a warning) when no magnet current
    source is attached.
    """
    if self.i_magnet is None:
        print('no i_magnet')
        return None
    return self.ItoB(self.get_source_current())
def BtoI(self, magfield):
    """Convert a magnetic field (Tesla) into the source current (Ampere)."""
    # field_to_current is the coil constant in Tesla/Ampere.
    return magfield / self.field_to_current
def ItoB(self, current):
    """Convert a source current (Ampere) into the magnetic field (Tesla)."""
    # field_to_current is the coil constant in Tesla/Ampere.
    return current * self.field_to_current
def fake_folder(self, folder_name):
    """Create a marker ("fake") data folder called *folder_name*.

    Runs a trivial dummy measurement so that a folder with the given
    name appears in the data directory.  Useful to mark field changes or
    the start of a new measurement cycle, so measurement sets can be
    told apart.

    Raises:
        ValueError: if *folder_name* is not a string.
    """
    if not isinstance(folder_name, str):
        raise ValueError('Please enter a string as the folder name!')
    self.MC.set_sweep_function(swf.None_Sweep())
    self.MC.set_sweep_points(np.linspace(0, 10, 3))
    self.MC.set_detector_function(det.Dummy_Detector_Soft())
    self.MC.run(folder_name)
####################################################
### This is a working version that contains bugs ###
####################################################
# # American Magnetics, Inc. (AMI) One Axis magnet with PCS_SN14768
# import time
# import logging
# import numpy as np
# # from scipy.optimize import brent
# # from math import gcd
# # from qcodes import Instrument
# from qcodes.utils import validators as vals
# # from qcodes.instrument.parameter import ManualParameter
# # from pycqed.utilities.general import add_suffix_to_dict_keys
# # from pycqed.measurement import detector_functions as det
# # from pycqed.measurement import composite_detector_functions as cdet
# # from pycqed.measurement import mc_parameter_wrapper as pw
# # from pycqed.measurement import sweep_functions as swf
# # from pycqed.measurement import awg_sweep_functions as awg_swf
# # from pycqed.analysis import measurement_analysis as ma
# # from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_5014
# # from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_UHFQC
# # from pycqed.measurement.calibration_toolbox import mixer_skewness_calibration_5014
# # from pycqed.measurement.optimization import nelder_mead
# # import pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts as sq
# import logging
# import numpy as np
# from copy import deepcopy,copy
# import qcodes as qc
# from qcodes.instrument.base import Instrument
# from qcodes.utils import validators as vals
# from qcodes.instrument.parameter import ManualParameter
# from pycqed.instrument_drivers.pq_parameters import InstrumentParameter
# class AMI_Magnet_with_PCS_SN14768(Instrument):
# '''
# Instrument used for translating fields into current settings
# and controlling the persistent current switch.
# '''
# def __init__(self, name,
# Current_source_magnet_name,
# Current_source_heater_name,**kw): # IVVI.dac1
# super().__init__(name, **kw)
# self.protection_state=False
# # Set instrumentss
# self.add_parameter('I_magnet', parameter_class=InstrumentParameter)
# self.I_magnet = Current_source_magnet_name
# self.add_parameter('I_heater', parameter_class=InstrumentParameter)
# self.I_heater = Current_source_heater_name
# # Specifications of One Axis AMI magnet with PCS_14768
# self.Max_Current = 5.0 # Amperes
# self.Field_to_Current = 5e-2 # Telsa/Ampere
# # (Spec sheet says 0.500 kG/A = 50 mT/A)
# self.Max_Field = self.Max_Current*self.Field_to_Current # Tesla
# #(Spec sheet says 20 kG = 2.0 T)
# # self.Ramp_Rate = 0.05 # Ampere/Second
# self.Max_Ramp_Rate = 0.2 # Max ramp rate: 1.32 # Ampere/Second
# self.Init_Ramp_Rate = 0.025
# self.Charging_Voltage = 1.0 # Volt
# self.Inductance = 0.7 # Henry
# self.Persistent_Switch_Heater_Current = 21e-3 # Ampere (21 mA)
# self.Heater_mVolt_to_Current = 0.020/1e3 # A/mV (20 mA per 1000 mV)
# self.Persistent_Switch_Heater_Nominal_Resistance = 82 # Ohms
# #(Measured at room temperature, thus normal conducing.)
# self.Magnet_Resistanc_in_Parallel_with_Switch = 35 # Ohms
# #(Measured at room temperature, thus normal conducting.)
# self.add_parameter('Source_Current',
# get_cmd=self.get_source_current,
# set_cmd=self.set_source_current,
# label='Source Current',
# unit='A',
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Current),
# docstring='Current supplied to the magnet')
# self.add_parameter('Ramp_Rate',
# label='Ramp Rate',
# unit='A/s',
# initial_value=self.Init_Ramp_Rate,
# get_parser=float,
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Ramp_Rate),
# parameter_class=ManualParameter,
# docstring='Ramp Rate of the magnet current source')
# self.add_parameter('Persistent_Field',
# label='Persistent Field',
# unit='T',
# initial_value=0.,
# get_parser=float,
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Field),
# parameter_class=ManualParameter,
# docstring='Ramp Rate of the magnet current source')
# # It would be great if the variable Persistent_Field could only be
# # set by the program, not by the used. It should only serve as a memory
# # of the previous persistent field.
# self.add_parameter('Switch_State',
# get_cmd=self.get_switch_state,
# set_cmd=self.set_switch_state,
# label='Switch State',
# unit='',
# vals=vals.Enum('SuperConducting','NormalConducting'),
# docstring='Indicating whether the persistent current\
# switch is superconducting or normal conducting')
# self.add_parameter('Field',
# get_cmd=self.get_field,
# set_cmd=self.set_field,
# label='Field',
# unit='T',
# initial_value=0.,
# get_parser=float,
# # initial_value=0,
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Field),
# docstring='Magnetic field')
# self.protection_state=True
# '''
# You need to heat the persistent current switch to turn it from
# a superconductor into a normal conductor. When the persistent \
# current switch is superconducting there is a persistent current,
# when it is normal conducting there is no persistent current and
# you can controll the current with the current source.
# !! Thus it is important to heat the persistent current switch if
# you want to change the field !!
# !! Also important that when you want to switch off the persistent
# current that you provide the same current with the current source
# on the leads !! BEFORE !! you heat the persistent current switch !!
# '''
# def BtoI(self, magfield):
# MagCurrRatio = self.Field_to_Current # Tesla/Ampere
# I = magfield/MagCurrRatio
# return I
# def ItoB(self, current):
# MagCurrRatio = self.Field_to_Current # Tesla/Ampere
# B = current*MagCurrRatio
# return B
# def get_switch_state(self):
# heater_current = self.get_heater_current()
# if 1.05*self.Persistent_Switch_Heater_Current>heater_current\
# >0.95*self.Persistent_Switch_Heater_Current:
# return 'NormalConducting'
# elif 0.05*self.Persistent_Switch_Heater_Current>heater_current\
# >-0.05*self.Persistent_Switch_Heater_Current:
# return 'SuperConducting'
# else:
# raise ValueError('Switch is not in a well defined state!')
# def set_switch_state(self,desired_state):
# if desired_state == 'SuperConducting' and\
# self.get_switch_state() == 'SuperConducting':
# print('Already SuperConducting')
# return 'SuperConducting'
# elif desired_state == 'NormalConducting' and\
# self.get_switch_state() == 'NormalConducting':
# print('Already NormalConducting')
# return 'NormalConducting'
# elif desired_state == 'SuperConducting' and\
# self.get_switch_state() == 'NormalConducting':
# print('Ramping current down...')
# self.I_heater(0)
# print('Wait 2 minutes to cool the switch.')
# time.sleep(120) # 120
# print('Switch is now SuperConducting')
# return 'SuperConducting'
# elif desired_state == 'NormalConducting' and\
# self.get_switch_state() == 'SuperConducting':
# if self.I_magnet.measureR() > 1.:
# raise ValueError('Magnet leads not connected!')
# else:
# supplied_current = self.get_source_current()
# if self.Field()==None:
# print('Sourcing current...')
# self.I_heater(self.Persistent_Switch_Heater_Current\
# /self.Heater_mVolt_to_Current)
# print('Wait 30 seconds to heat up the switch.')
# time.sleep(30) # 30
# print('Switch is now NormalConducting')
# return 'NormalConducting'
# elif supplied_current<2e-3 and np.abs(self.Field())<1e-4:
# print('Sourcing current...')
# self.I_heater(self.Persistent_Switch_Heater_Current\
# /self.Heater_mVolt_to_Current)
# print('Wait 30 seconds to heat up the switch.')
# time.sleep(30) # 30
# print('Switch is now NormalConducting')
# return 'NormalConducting'
# elif not 0.98*supplied_current<self.BtoI(self.Field())\
# <1.02*supplied_current:
# raise ValueError('Current is not \
# according to the field value! Use \
# bring_source_to_field function to \
# bring it to the correct value.')
# else:
# print('Sourcing current...')
# self.I_heater(self.Persistent_Switch_Heater_Current\
# /self.Heater_mVolt_to_Current)
# print('Wait 30 seconds to heat up the switch.')
# time.sleep(30) # 30
# print('Switch is now NormalConducting')
# return 'NormalConducting'
# else:
# return 'Input SuperConducting or NormalConducting as desired state.'
# def get_source_current(self):
# print('Shit be workin yo')
# return self.I_magnet.measurei()
# def set_source_current(self,current):
# self.I_magnet.seti(current)
# return 'Current set to '+str(current)+' A'
# def get_heater_current(self):
# return self.Heater_mVolt_to_Current*self.I_heater()
# def measure_field(self):
# if self.I_magnet is not None:
# I = self.get_source_current()
# B = self.ItoB(I)
# return B
# else:
# print('no I_magnet')
# def set_field(self,field):
# if not self.protection_state:
# return 0.
# if self.Switch_State() == 'SuperConducting':
# raise ValueError('Switch is SuperConducting. Can not change the field.')
# elif self.Switch_State() =='NormalConducting':
# if self.I_magnet.measureR()>1:
# raise ValueError('Magnet leads are not connected \
# or manget quenched!')
# else:
# self.step_magfield_to_value(field)
# self.Persistent_Field(field)
# return 'Field at ' +str(field)+' T'
# def get_field(self):
# if self.Switch_State()=='SuperConducting':
# return self.Persistent_Field()
# else:
# meas_field = self.measure_field()
# return meas_field
# def disconnect_source(self):
# if self.Switch_State() == 'SuperConducting':
# self.step_magfield_to_value(0)
# else:
# raise ValueError('Switch is not superconducting!')
# def bring_source_to_field(self):
# if not self.Switch_State() == 'SuperConducting':
# raise ValueError('Switch is not superconducting!')
# if self.I_magnet.measureR() > 1.:
# raise ValueError('Magnet leads not connected!')
# target_field = self.Persistent_Field()
# self.step_magfield_to_value(target_field)
# def step_magfield_to_value(self, field):
# MagCurrRatio = self.Field_to_Current # Tesla/Ampere
# Ramp_rate_I = self.Ramp_Rate()
# step_time = 0.01 # in seconds
# current_step = Ramp_rate_I * step_time
# I_now = self.get_source_current()
# current_target = self.BtoI(field)
# if current_target >= I_now:
# current_step *= +1
# if current_target < I_now:
# current_step *= -1
# num_steps = int(1.*(current_target-I_now)/(current_step))
# sweep_time = step_time*num_steps
# print('Sweep time is '+str(np.abs(sweep_time))+' seconds')
# for tt in range(num_steps):
# time.sleep(step_time)
# self.I_magnet.seti(I_now)
# I_now += current_step
# if self.I_magnet.measureR() > 1:
# self.I_magnet.seti(0)
# raise ValueError('Switch is not in a well defined state!')
# self.I_magnet.seti(self.BtoI(field))
| {
"repo_name": "DiCarloLab-Delft/PycQED_py3",
"path": "pycqed/instrument_drivers/meta_instrument/AMI_Magnet_PCS_SN14768.py",
"copies": "1",
"size": "29547",
"license": "mit",
"hash": -4690453351410722000,
"line_mean": 40.4403927069,
"line_max": 105,
"alpha_frac": 0.5708870613,
"autogenerated": false,
"ratio": 3.869941060903733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4940828122203733,
"avg_score": null,
"num_lines": null
} |
# American Magnetics, Inc. (AMI) Two Axis magnet with two PCS, SN14769
import time
import logging
import numpy as np
# from scipy.optimize import brent
# from math import gcd
# from qcodes import Instrument
from qcodes.utils import validators as vals
# from qcodes.instrument.parameter import ManualParameter
from pycqed.analysis import analysis_toolbox as atools
# from pycqed.utilities.general import add_suffix_to_dict_keys
from pycqed.measurement import detector_functions as det
# from pycqed.measurement import composite_detector_functions as cdet
# from pycqed.measurement import mc_parameter_wrapper as pw
from pycqed.measurement import sweep_functions as swf
# from pycqed.measurement import awg_sweep_functions as awg_swf
# from pycqed.analysis import measurement_analysis as ma
# from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_5014
# from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_UHFQC
# from pycqed.measurement.calibration_toolbox import mixer_skewness_calibration_5014
# from pycqed.measurement.optimization import nelder_mead
from pycqed.analysis import analysis_toolbox as a_tools
# import pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts as sq
import logging
from copy import deepcopy,copy
import qcodes as qc
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
from pycqed.instrument_drivers.pq_parameters import InstrumentParameter
class AMI_Two_Axis_Magnet_PCS_SN14769(Instrument):
'''
Instrument used for translating Fields into current settings
and controlling the persistent current switch.
Initialization when the previous measurement did not have the magnet is
a bit awkward. The driver checks the last measurement for the value and if
this does not exists it fails. To do the first initialization it is necessary
to start everything up while having the 'get_field' function return 0 always.
Make a two fake folders with the name
'Switch_Z_is_changed_to_SuperConducting_state' and
'Switch_Y_is_changed_to_SuperConducting_state'.
Then exit and reinitialize with the
'get_field' function returning what it is supposed to.
Coordinate system: Righthanded, Z is the strong axis, Y the weak.
Idea is that 'field' and 'angle' are the two variables that are controlled
by the user. The two underlying variables are field_z and field_y, with the
corresponding currents and switch states.
'''
def __init__(self, name,
             Current_source_magnet_Z_name,
             Current_source_heater_Z_name,
             Current_source_magnet_Y_name,
             Current_source_heater_Y_name,
             MC_inst, **kw):  # heater sources are typically IVVI dac channels (e.g. IVVI.dac1)
    """Set up the two-axis magnet meta-instrument.

    Args:
        name: qcodes instrument name.
        Current_source_magnet_Z_name: instrument sourcing the Z coil current.
        Current_source_heater_Z_name: instrument driving the Z persistent-switch heater.
        Current_source_magnet_Y_name: instrument sourcing the Y coil current.
        Current_source_heater_Y_name: instrument driving the Y persistent-switch heater.
        MC_inst: MeasurementControl instance, used by fake_folder() to
            write marker data folders.
        **kw: forwarded to the qcodes Instrument base class.
    """
    super().__init__(name, **kw)
    # While False, set_field()/set_angle() are no-ops; it is switched to
    # True at the end of __init__ once all parameters exist.
    self.protection_state = False
    # Attach the current-source and heater instruments.
    self.add_parameter('i_magnet_z', parameter_class=InstrumentParameter)
    self.i_magnet_z = Current_source_magnet_Z_name
    self.add_parameter('i_heater_z', parameter_class=InstrumentParameter)
    self.i_heater_z = Current_source_heater_Z_name
    self.add_parameter('i_magnet_y', parameter_class=InstrumentParameter)
    self.i_magnet_y = Current_source_magnet_Y_name
    self.add_parameter('i_heater_y', parameter_class=InstrumentParameter)
    self.i_heater_y = Current_source_heater_Y_name
    self.MC = MC_inst
    # Specifications of the AMI magnet with PCS_14768.
    self.max_current_z = 10.0  # 5.0 # Amperes
    self.max_current_y = 10.0  # Amperes
    self.field_to_current_z = 0.0496  # Tesla/Ampere
    # (Spec sheet says 0.496 kG/A = 49.6 mT/A)
    self.field_to_current_y = 0.0132  # Tesla/Ampere
    # (Spec sheet says 0.132 kG/A = 13.2 mT/A)
    self.max_field_z = self.max_current_z*self.field_to_current_z  # Tesla
    # (Spec sheet says 20 kG = 2.0 T)
    self.max_field_y = self.max_current_y*self.field_to_current_y  # Tesla
    # (Spec sheet says 5 kG = 0.5 T)
    # self.Ramp_Rate = 0.05 # Ampere/Second
    # NOTE(review): the comment says "mT" but set_field() uses this value
    # directly as Tesla -- presumably 1e-3 T = 1 mT per zig-zag step; verify.
    self.step_grid_points_mag = 1e-3  # zig-zag step size in mT
    self.max_ramp_rate_z = 0.1  # Max ramp rate: 0.677 # Ampere/Second
    self.max_ramp_rate_y = 0.1  # Max ramp rate: 0.054 # Ampere/Second
    self.init_ramp_rate_z = 0.025
    self.init_ramp_rate_y = 0.1
    self.charging_voltage_z = 0.5  # Volt
    self.charging_voltage_y = 0.1  # Volt
    self.inductance_z = 0.7  # Henry
    self.inductance_y = 1.8  # Henry
    self.persistent_switch_heater_current_z = 19e-3  # Ampere (19 mA)
    self.persistent_switch_heater_current_y = 19.7e-3  # Ampere (19.7 mA)
    self.heater_mvolt_to_current_z = 0.020*1e-3  # A/mV (20 mA per 1000 mV)
    self.heater_mvolt_to_current_y = 0.020*1e-3  # A/mV (20 mA per 1000 mV)
    self.persistent_switch_heater_nominal_resistance_z = 78  # Ohms
    # (Measured at room temperature, thus normal conducting.)
    self.persistent_switch_heater_nominal_resistance_y = 81  # Ohms
    # (Measured at room temperature, thus normal conducting.)
    self.magnet_resistanc_in_parallel_with_switch_z = 36  # Ohms
    # (Measured at room temperature, thus normal conducting.)
    self.magnet_resistanc_in_parallel_with_switch_y = 37  # Ohms
    # (Measured at room temperature, thus normal conducting.)
    self.add_parameter('source_current_z',
                       get_cmd=self.get_source_current_z,
                       set_cmd=self.set_source_current_z,
                       label='Source Current Z',
                       unit='A',
                       vals=vals.Numbers(min_value=0., max_value=self.max_current_z),
                       docstring='Current supplied to the Z-axis of the magnet')
    self.add_parameter('source_current_y',
                       get_cmd=self.get_source_current_y,
                       set_cmd=self.set_source_current_y,
                       label='Source Current Y',
                       unit='A',
                       vals=vals.Numbers(min_value=0., max_value=self.max_current_y),
                       docstring='Current supplied to the Y-axis of the magnet')
    # ramp rate should not be a parameter
    self.add_parameter('ramp_rate_z',
                       label='Ramp Rate Z',
                       unit='A/s',
                       initial_value=self.init_ramp_rate_z,
                       get_parser=float,
                       vals=vals.Numbers(min_value=0., max_value=self.max_ramp_rate_z),
                       parameter_class=ManualParameter,
                       docstring='Ramp Rate of the Z-axis magnet current source')
    self.add_parameter('ramp_rate_y',
                       label='Ramp Rate Y',
                       unit='A/s',
                       initial_value=self.init_ramp_rate_y,
                       get_parser=float,
                       vals=vals.Numbers(min_value=0., max_value=self.max_ramp_rate_y),
                       parameter_class=ManualParameter,
                       docstring='Ramp Rate of the Y-axis magnet current source')
    self.add_parameter('field',
                       get_cmd=self.get_field,
                       set_cmd=self.set_field,
                       label='Persistent Field',
                       unit='T',
                       # vals=vals.Numbers(min_value=0.,max_value=min(self.max_field_y,self.max_field_z)),
                       vals=vals.Numbers(min_value=0., max_value=max(self.max_field_y, self.max_field_z)),
                       docstring='Persistent absolute magnetic field')
    self.add_parameter('angle',
                       get_cmd=self.get_angle,
                       set_cmd=self.set_angle,
                       label='Field angle',
                       unit='deg',
                       vals=vals.Numbers(min_value=-180., max_value=180.),
                       docstring='Angle of the field wrt. Z-axis')
    self.add_parameter('field_z',
                       get_cmd=self.get_field_z,
                       set_cmd=self.set_field_z,
                       label='Z-Field',
                       unit='T',
                       vals=vals.Numbers(min_value=0., max_value=self.max_field_z),
                       docstring='Persistent Z-magnetic field')
    self.add_parameter('field_y',
                       get_cmd=self.get_field_y,
                       set_cmd=self.set_field_y,
                       label='Y-Field',
                       unit='T',
                       vals=vals.Numbers(min_value=-self.max_field_y, max_value=self.max_field_y),
                       docstring='Persistent Y-magnetic field')
    # It would be great if the variable field could only be set by the
    # program, not by the user: it should only serve as a memory of the
    # previous persistent field.
    self.add_parameter('switch_state_z',
                       get_cmd=self.get_switch_state_z,
                       set_cmd=self.set_switch_state_z,
                       label='Switch State Z',
                       unit='',
                       vals=vals.Enum('SuperConducting', 'NormalConducting'),
                       docstring='Indicating whether the Z-Axis persistent current\
                       switch is superconducting or normal conducting')
    self.add_parameter('switch_state_y',
                       get_cmd=self.get_switch_state_y,
                       set_cmd=self.set_switch_state_y,
                       label='Switch State Y',
                       unit='',
                       vals=vals.Enum('SuperConducting', 'NormalConducting'),
                       docstring='Indicating whether the Y-Axis persistent current\
                       switch is superconducting or normal conducting')
    # Setters are live from here on.
    self.protection_state = True
    # Query everything once so all parameter values are populated.
    self.get_all()
'''
Operating notes -- persistent-current switches:

You need to heat a persistent-current switch to turn it from a
superconductor into a normal conductor.  While the switch is
superconducting a persistent current flows through the magnet coil;
while it is normal conducting there is no persistent current and you
can control the coil current with the current source.

!! It is therefore important to heat the persistent-current switch if
you want to change the field !!
!! When you want to switch off the persistent current, make the current
source supply the same current on the leads BEFORE you heat the
persistent-current switch !!
'''
def get_all(self):
    """Query every getter once so all parameter values are populated."""
    getters = (
        self.get_source_current_z,
        self.switch_state_z,
        self.get_source_current_y,
        self.switch_state_y,
        self.get_field,
        self.get_angle,
        self.get_field_z,
        self.get_field_y,
    )
    for query in getters:
        query()
    return
def get_source_current_z(self):
    """Return the measured output current (A) of the Z-axis source."""
    measured = self.i_magnet_z.measurei()
    return measured
def get_source_current_y(self):
    """Return the measured output current (A) of the Y-axis source."""
    measured = self.i_magnet_y.measurei()
    return measured
def set_source_current_z(self, current):
    """Program the Z-axis magnet source to *current* Ampere.

    Returns a short confirmation string.
    """
    confirmation = 'Z-current set to ' + str(current) + ' A'
    self.i_magnet_z.seti(current)
    return confirmation
def set_source_current_y(self, current):
    """Program the Y-axis magnet source to *current* Ampere.

    Returns a short confirmation string.
    """
    confirmation = 'Y-current set to ' + str(current) + ' A'
    self.i_magnet_y.seti(current)
    return confirmation
def get_heater_current_z(self):
    """Heater current (A) inferred from the Z heater source setting (mV)."""
    mvolt_setting = self.i_heater_z()
    return mvolt_setting * self.heater_mvolt_to_current_z
def get_heater_current_y(self):
    """Heater current (A) inferred from the Y heater source setting (mV)."""
    mvolt_setting = self.i_heater_y()
    return mvolt_setting * self.heater_mvolt_to_current_y
def get_switch_state_z(self):
    """Classify the Z persistent-current switch from its heater current.

    Within 5% of the nominal heater current -> 'NormalConducting';
    within 5% of zero -> 'SuperConducting'; anything else is undefined
    and raises.
    """
    nominal = self.persistent_switch_heater_current_z
    measured = self.get_heater_current_z()
    if 0.95 * nominal < measured < 1.05 * nominal:
        return 'NormalConducting'
    if -0.05 * nominal < measured < 0.05 * nominal:
        return 'SuperConducting'
    raise ValueError('Switch is not in a well defined state!')
def get_switch_state_y(self):
    """Classify the Y persistent-current switch from its heater current.

    Within 5% of the nominal heater current -> 'NormalConducting';
    within 5% of zero -> 'SuperConducting'; anything else is undefined
    and raises.
    """
    nominal = self.persistent_switch_heater_current_y
    measured = self.get_heater_current_y()
    if 0.95 * nominal < measured < 1.05 * nominal:
        return 'NormalConducting'
    if -0.05 * nominal < measured < 0.05 * nominal:
        return 'SuperConducting'
    raise ValueError('Switch is not in a well defined state!')
def set_switch_state_z(self, desired_state):
    """Drive the Z-axis persistent-current switch to *desired_state*.

    desired_state is 'SuperConducting' (heater off, field is persistent)
    or 'NormalConducting' (heater on, source controls the field).
    Heating the switch is refused unless the source current matches the
    persistent field, so the persistent current is never dumped.
    Blocks for 2 minutes (cooling) or 30 seconds (heating) and writes a
    marker folder for each transition.

    Returns the resulting state string, or an instruction string when
    desired_state is not recognized.

    Raises:
        ValueError: if the magnet leads look disconnected, or if the
            source current deviates >2% from the persistent Z-current.
    """
    # Already in the requested state: nothing to do.
    if desired_state == 'SuperConducting' and\
            self.get_switch_state_z() == 'SuperConducting':
        print('Already SuperConducting')
        return 'SuperConducting'
    elif desired_state == 'NormalConducting' and\
            self.get_switch_state_z() == 'NormalConducting':
        print('Already NormalConducting')
        return 'NormalConducting'
    elif desired_state == 'SuperConducting' and\
            self.get_switch_state_z() == 'NormalConducting':
        # Close the switch: heater off, then wait for it to cool.
        print('Ramping current down...')
        self.i_heater_z(0)
        print('Wait 2 minutes to cool the switch.')
        time.sleep(120) # 120
        print('Switch is now SuperConducting')
        # Marker folder: get_field_z() later reads the persistent field
        # from the data recorded at this moment.
        self.fake_folder(folder_name='Switch_Z_is_changed_to_SuperConducting_state')
        return 'SuperConducting'
    elif desired_state == 'NormalConducting' and\
            self.get_switch_state_z() == 'SuperConducting':
        # Open the switch: only safe when the source carries the same
        # current as the persistent loop.
        if self.i_magnet_z.measureR() > 1.:
            # High lead resistance: leads disconnected (or quenched).
            raise ValueError('Magnet leads not connected!')
        else:
            supplied_current_z = self.get_source_current_z()
            # NOTE(review): '== None' should be 'is None'; kept verbatim.
            if self.get_field_z()==None:
                # No stored field value: just heat the switch.
                print('Sourcing current...')
                self.i_heater_z(self.persistent_switch_heater_current_z\
                                /self.heater_mvolt_to_current_z)
                print('Wait 30 seconds to heat up the switch.')
                time.sleep(30) # 30
                print('Switch is now NormalConducting')
                self.fake_folder(folder_name='Switch_Z_is_changed_to_NormalConducting_state')
                return 'NormalConducting'
            elif supplied_current_z<2e-3 and np.abs(self.get_field_z())<1e-4:
                # Source current and field are both ~zero: safe to heat.
                print('Sourcing current...')
                self.i_heater_z(self.persistent_switch_heater_current_z\
                                /self.heater_mvolt_to_current_z)
                print('Wait 30 seconds to heat up the switch.')
                time.sleep(30) # 30
                print('Switch is now NormalConducting')
                self.fake_folder(folder_name='Switch_Z_is_changed_to_NormalConducting_state')
                return 'NormalConducting'
            elif not 0.98*supplied_current_z<self.BtoI_z(self.get_field_z())\
                    <1.02*supplied_current_z:
                # Source current deviates >2% from the persistent value.
                raise ValueError('Current is not \
                according to the Z-field value! Use \
                bring_source_to_field function to \
                bring it to the correct value.')
            else:
                # Source matches the persistent current: heat the switch.
                print('Sourcing current...')
                self.i_heater_z(self.persistent_switch_heater_current_z\
                                /self.heater_mvolt_to_current_z)
                print('Wait 30 seconds to heat up the switch.')
                time.sleep(30) # 30
                print('Switch is now NormalConducting')
                self.fake_folder(folder_name='Switch_Z_is_changed_to_NormalConducting_state')
                return 'NormalConducting'
    else:
        return 'Input SuperConducting or NormalConducting as desired state.'
def set_switch_state_y(self, desired_state):
    """Drive the Y-axis persistent-current switch to *desired_state*.

    desired_state is 'SuperConducting' (heater off, field is persistent)
    or 'NormalConducting' (heater on, source controls the field).
    Heating the switch is refused unless the source current matches the
    persistent field, so the persistent current is never dumped.
    Blocks for 2 minutes (cooling) or 30 seconds (heating) and writes a
    marker folder for each transition.

    Returns the resulting state string, or an instruction string when
    desired_state is not recognized.

    Raises:
        ValueError: if the magnet leads look disconnected, or if the
            source current deviates >2% from the persistent Y-current.
    """
    # Already in the requested state: nothing to do.
    if desired_state == 'SuperConducting' and\
            self.get_switch_state_y() == 'SuperConducting':
        print('Already SuperConducting')
        return 'SuperConducting'
    elif desired_state == 'NormalConducting' and\
            self.get_switch_state_y() == 'NormalConducting':
        print('Already NormalConducting')
        return 'NormalConducting'
    elif desired_state == 'SuperConducting' and\
            self.get_switch_state_y() == 'NormalConducting':
        # Close the switch: heater off, then wait for it to cool.
        print('Ramping current down...')
        self.i_heater_y(0)
        print('Wait 2 minutes to cool the switch.')
        time.sleep(120) # 120
        print('Switch is now SuperConducting')
        # Marker folder: get_field_y() later reads the persistent field
        # from the data recorded at this moment.
        self.fake_folder(folder_name='Switch_Y_is_changed_to_SuperConducting_state')
        return 'SuperConducting'
    elif desired_state == 'NormalConducting' and\
            self.get_switch_state_y() == 'SuperConducting':
        # Open the switch: only safe when the source carries the same
        # current as the persistent loop.
        if self.i_magnet_y.measureR() > 1.:
            # High lead resistance: leads disconnected (or quenched).
            raise ValueError('Magnet leads not connected!')
        else:
            supplied_current_y = self.get_source_current_y()
            # NOTE(review): '== None' should be 'is None'; kept verbatim.
            if self.get_field_y()==None:
                # No stored field value: just heat the switch.
                print('Sourcing current...')
                self.i_heater_y(self.persistent_switch_heater_current_y\
                                /self.heater_mvolt_to_current_y)
                print('Wait 30 seconds to heat up the switch.')
                time.sleep(30) # 30
                print('Switch is now NormalConducting')
                self.fake_folder(folder_name='Switch_Y_is_changed_to_NormalConducting_state')
                return 'NormalConducting'
            elif supplied_current_y<2e-3 and np.abs(self.get_field_y())<1e-4:
                # Source current and field are both ~zero: safe to heat.
                print('Sourcing current...')
                self.i_heater_y(self.persistent_switch_heater_current_y\
                                /self.heater_mvolt_to_current_y)
                print('Wait 30 seconds to heat up the switch.')
                time.sleep(30) # 30
                print('Switch is now NormalConducting')
                self.fake_folder(folder_name='Switch_Y_is_changed_to_NormalConducting_state')
                return 'NormalConducting'
            elif not 0.98*supplied_current_y<self.BtoI_y(self.get_field_y())\
                    <1.02*supplied_current_y:
                # Source current deviates >2% from the persistent value.
                raise ValueError('Current is not \
                according to the Y-field value! Use \
                bring_source_to_field function to \
                bring it to the correct value.')
            else:
                # Source matches the persistent current: heat the switch.
                print('Sourcing current...')
                self.i_heater_y(self.persistent_switch_heater_current_y\
                                /self.heater_mvolt_to_current_y)
                print('Wait 30 seconds to heat up the switch.')
                time.sleep(30) # 30
                print('Switch is now NormalConducting')
                self.fake_folder(folder_name='Switch_Y_is_changed_to_NormalConducting_state')
                return 'NormalConducting'
    else:
        return 'Input SuperConducting or NormalConducting as desired state.'
def set_field(self, field):
    """Sweep the total field magnitude to *field* Tesla at the current angle.

    The (Z, Y) field vector is moved along its present direction in
    zig-zag grid steps of self.step_grid_points_mag, setting first the Z
    then the Y component at every grid point, until the target magnitude
    is reached.  Both persistent-current switches must be normal
    conducting.

    Returns 0. (the qcodes set_cmd discards the value).

    Raises:
        ValueError: if either switch is superconducting, or a magnet's
            leads appear disconnected/quenched.
    """
    # Protection: during __init__ the setter is a no-op.
    if not self.protection_state:
        return 0.
    angle = self.angle()
    # Decompose the target magnitude along the current field direction.
    desired_z_field_total = field*np.cos(angle*2.*np.pi/360)
    desired_y_field_total = field*np.sin(angle*2.*np.pi/360)
    current_field = self.field()
    current_z_field = self.field_z()
    current_y_field = self.field_y()
    # Per-axis increments for one grid step in magnitude.
    step_mag_z = self.step_grid_points_mag*np.cos(angle*2.*np.pi/360)
    step_mag_y = self.step_grid_points_mag*np.sin(angle*2.*np.pi/360)
    if field>=current_field:
        step_z = +1*step_mag_z
        step_y = +1*step_mag_y
    if field<current_field :
        step_z = -1*step_mag_z
        step_y = -1*step_mag_y
    num_steps = int(np.ceil(np.abs(current_field-field)/self.step_grid_points_mag))
    for tt in range(num_steps):
        if tt == num_steps-1:
            # Final step: jump exactly onto the target components to
            # avoid accumulated rounding error.
            current_z_field = desired_z_field_total
            current_y_field = desired_y_field_total
        else:
            current_z_field += step_z
            current_y_field += step_y
        if self.switch_state_z() == 'SuperConducting' or self.switch_state_y() == 'SuperConducting':
            raise ValueError('Switch_Y and/or Switch_Z are SuperConducting. Can not change the field.')
        elif self.switch_state_z() =='NormalConducting' and self.switch_state_y() =='NormalConducting':
            # NOTE(review): the Z branch tests measurei()<1e-3 without
            # np.abs() while the Y branch uses np.abs() -- presumably
            # both were meant to use the absolute value; verify.
            if self.i_magnet_z.measurei()<1e-3:
                self.step_z_magfield_to_value(current_z_field)
            elif self.i_magnet_z.measureR()>1:
                raise ValueError('Magnet Z leads are not connected \
                or manget quenched!')
            else:
                self.step_z_magfield_to_value(current_z_field)
            if np.abs(self.i_magnet_y.measurei())<1e-3:
                self.step_y_magfield_to_value(current_y_field)
            elif self.i_magnet_y.measureR()>1:
                raise ValueError('Magnet Y leads are not connected \
                or manget quenched!')
            else:
                self.step_y_magfield_to_value(current_y_field)
    print('Field at ' +str(self.field())+' T, angle at '+str(self.angle())+' deg.')
    return 0.
def get_field(self):
    """Return the magnitude (T) of the total field, sqrt(Bz**2 + By**2).

    For an axis whose persistent-current switch is superconducting the
    stored persistent field is read back from the HDF5 file written when
    that switch last went superconducting; otherwise the field follows
    from the measured source current.
    """
    # NOTE: for the very first initialization (no stored field exists
    # yet) this method must temporarily be made to return 0.0 always.
    def stored_axis_field(marker, key):
        # Newest fake-folder measurement tagged *marker* holds the
        # persisted field value under Magnet.<key> in its HDF5 file.
        timestamp = atools.latest_data(contains=marker,
                                       return_timestamp=True)[0]
        data = a_tools.get_data_from_timestamp_list(
            [timestamp], {key: 'Magnet.' + key},
            numeric_params=[key], filter_no_analysis=False)
        return data[key][0]

    if self.switch_state_z() == 'SuperConducting':
        field_val_z = stored_axis_field(
            'Switch_Z_is_changed_to_SuperConducting_state', 'field_z')
    else:  # Normal conducting
        field_val_z = self.measure_field_z()
    if self.switch_state_y() == 'SuperConducting':
        field_val_y = stored_axis_field(
            'Switch_Y_is_changed_to_SuperConducting_state', 'field_y')
    else:  # Normal conducting
        field_val_y = self.measure_field_y()
    return np.sqrt(field_val_z**2 + field_val_y**2)
def set_angle(self, angle):
    """Rotate the field vector to *angle* degrees (wrt. the Z-axis).

    The present magnitude is kept and re-decomposed into Z and Y
    components at the new angle; each axis parameter is then set
    directly (no zig-zag stepping, unlike set_field).  Both
    persistent-current switches must be normal conducting.

    Returns a status string (discarded by the qcodes set_cmd).

    Raises:
        ValueError: if either switch is superconducting, or a magnet's
            leads appear disconnected/quenched.
    """
    # Protection: during __init__ the setter is a no-op.
    if not self.protection_state:
        return 0.
    field = self.field()
    # Decompose the current magnitude at the requested angle.
    desired_z_field = field*np.cos(angle*2.*np.pi/360)
    desired_y_field = field*np.sin(angle*2.*np.pi/360)
    if self.switch_state_z() == 'SuperConducting' or self.switch_state_y() == 'SuperConducting':
        raise ValueError('Switch is SuperConducting. Can not change the field.')
    elif self.switch_state_z() =='NormalConducting' and self.switch_state_y() =='NormalConducting':
        # NOTE(review): the Z branch tests measurei()<1e-3 without
        # np.abs() while the Y branch uses np.abs() -- presumably both
        # were meant to use the absolute value; verify.
        if self.i_magnet_z.measurei()<1e-3:
            self.field_z(desired_z_field)
        elif self.i_magnet_z.measureR()>1:
            raise ValueError('Magnet Z leads are not connected \
            or manget quenched!')
        else:
            self.field_z(desired_z_field)
        if np.abs(self.i_magnet_y.measurei())<1e-3:
            self.field_y(desired_y_field)
        elif self.i_magnet_y.measureR()>1:
            raise ValueError('Magnet Y leads are not connected \
            or manget quenched!')
        else:
            self.field_y(desired_y_field)
    return 'Field at ' +str(field)+' T, angle at '+str(angle)+' deg.'
def get_angle(self):
    """Return the field angle (degrees) in the z-y plane, z axis = 0 deg.

    For each axis: when the switch is superconducting the persistent field
    is read back from the HDF5 record written at switch closing; otherwise
    the field is inferred from the live source current.
    """
    # return 0.0 # Only add this line when doing the first initialization!
    if self.switch_state_z()=='SuperConducting':
        ## get the persistent field from the HDF5 file
        # NOTE(review): `atools` here vs `a_tools` below -- presumably two
        # aliases of the same analysis-tools module; confirm both imports exist.
        timestamp = atools.latest_data(contains='Switch_Z_is_changed_to_SuperConducting_state',
                                       return_timestamp=True)[0]
        params_dict = {'field_z':'Magnet.field_z'}
        numeric_params = ['field_z']
        data = a_tools.get_data_from_timestamp_list([timestamp], params_dict,
                                                    numeric_params=numeric_params, filter_no_analysis=False)
        field_val_z = data['field_z'][0]
    else: ## Normal conducting
        field_val_z = self.measure_field_z()
    if self.switch_state_y()=='SuperConducting':
        ## get the persistent field from the HDF5 file
        timestamp = atools.latest_data(contains='Switch_Y_is_changed_to_SuperConducting_state',
                                       return_timestamp=True)[0]
        params_dict = {'field_y':'Magnet.field_y'}
        numeric_params = ['field_y']
        data = a_tools.get_data_from_timestamp_list([timestamp], params_dict,
                                                    numeric_params=numeric_params, filter_no_analysis=False)
        field_val_y = data['field_y'][0]
    else: ## Normal conducting
        field_val_y = self.measure_field_y()
    field = np.sqrt(field_val_z**2+field_val_y**2)
    if field==0:
        # Zero field: the angle is undefined, report 0.
        return 0
    elif field_val_z>=0:
        # z >= 0: arcsin gives the signed angle directly (degrees).
        return np.arcsin(field_val_y/field)*360./(2*np.pi)
    else:
        # NOTE(review): for z < 0 this returns -arcsin(y/|B|), mapping the
        # second/third quadrants into [-90, 90] -- confirm intended convention.
        return -np.arcsin(field_val_y/field)*360./(2*np.pi)
def set_field_z(self,field):
    """Ramp the z coil to `field` (Tesla).

    Returns 0. when the protection interlock is off.  Raises ValueError
    when the z switch is superconducting, or when current is flowing and
    the lead resistance exceeds 1 Ohm (leads disconnected or quench).
    Returns a confirmation string on success.

    Fix: the error message previously read 'manget quenched'.
    """
    if not self.protection_state:
        return 0.
    if self.switch_state_z() == 'SuperConducting':
        raise ValueError('Z-Switch is SuperConducting. Can not change the field.')
    elif self.switch_state_z() =='NormalConducting':
        if self.i_magnet_z.measurei()<1e-3:
            # Negligible current flowing: safe to ramp immediately.
            self.step_z_magfield_to_value(field)
            return 'Z-field at ' +str(field)+' T'
        elif self.i_magnet_z.measureR()>1:
            # High resistance with current present: bad leads or a quench.
            raise ValueError('Magnet leads are not connected '
                             'or magnet quenched!')
        else:
            self.step_z_magfield_to_value(field)
            return 'Z-field at ' +str(field)+' T'
def set_field_y(self,field):
    """Ramp the y coil to `field` (Tesla).

    Returns 0. when the protection interlock is off.  Raises ValueError
    when the y switch is superconducting, or when current is flowing and
    the lead resistance exceeds 1 Ohm (leads disconnected or quench).
    Returns a confirmation string on success.

    Fix: the error message previously read 'manget quenched'.
    """
    if not self.protection_state:
        return 0.
    if self.switch_state_y() == 'SuperConducting':
        raise ValueError('Y-Switch is SuperConducting. Can not change the field.')
    elif self.switch_state_y() =='NormalConducting':
        # The y source current can be negative, hence the abs().
        if np.abs(self.i_magnet_y.measurei())<1e-3:
            self.step_y_magfield_to_value(field)
            return 'Y-field at ' +str(field)+' T'
        elif self.i_magnet_y.measureR()>1:
            raise ValueError('Magnet leads are not connected '
                             'or magnet quenched!')
        else:
            self.step_y_magfield_to_value(field)
            return 'Y-field at ' +str(field)+' T'
def get_field_z(self):
    """Return the z field (T): the persistent value recorded in the HDF5
    file when the z switch is superconducting, else the value inferred
    from the present source current."""
    # return 0.0 # Only add this line when doing the first initialization!
    if self.switch_state_z()=='SuperConducting':
        ## get the persistent field from the HDF5 file
        # NOTE(review): `atools` vs `a_tools` -- presumably aliases of the
        # same module; confirm against the file's imports.
        timestamp = atools.latest_data(contains='Switch_Z_is_changed_to_SuperConducting_state',
                                       return_timestamp=True)[0]
        params_dict = {'field':'Magnet.field_z'}
        numeric_params = ['field']
        data = a_tools.get_data_from_timestamp_list([timestamp], params_dict,
                                                    numeric_params=numeric_params, filter_no_analysis=False)
        return data['field'][0]
    else: ## Normal conducting
        meas_field = self.measure_field_z()
        return meas_field
def get_field_y(self):
    """Return the y field (T): the persistent value recorded in the HDF5
    file when the y switch is superconducting, else the value inferred
    from the present source current."""
    # return 0.0 # Only add this line when doing the first initialization!
    if self.switch_state_y()=='SuperConducting':
        ## get the persistent field from the HDF5 file
        timestamp = atools.latest_data(contains='Switch_Y_is_changed_to_SuperConducting_state',
                                       return_timestamp=True)[0]
        params_dict = {'field':'Magnet.field_y'}
        numeric_params = ['field']
        data = a_tools.get_data_from_timestamp_list([timestamp], params_dict,
                                                    numeric_params=numeric_params, filter_no_analysis=False)
        return data['field'][0]
    else: ## Normal conducting
        meas_field = self.measure_field_y()
        return meas_field
def step_z_magfield_to_value(self, field):
    """Ramp the z source current in small steps until it matches `field`.

    One step of ramp_rate_z()*10ms is applied every 10ms.  If the lead
    resistance rises above 1 Ohm mid-ramp, the current is dumped (unless
    the stored field is already ~0) and a ValueError is raised.
    """
    MagCurrRatio = self.field_to_current_z # Tesla/Ampere  (NOTE: unused in this method)
    Ramp_rate_I = self.ramp_rate_z()
    step_time = 0.01 # in seconds
    current_step = Ramp_rate_I * step_time
    I_now = self.get_source_current_z()
    current_target = self.BtoI_z(field)
    # Pick the ramp direction toward the target.
    if current_target >= I_now:
        current_step *= +1
    if current_target < I_now:
        current_step *= -1
    # NOTE(review): ramp_rate_z() == 0 makes this divide by zero.
    num_steps = int(1.*(current_target-I_now)/(current_step))
    sweep_time = step_time*num_steps
    print('Sweep time is '+str(np.abs(sweep_time))+' seconds')
    for tt in range(num_steps):
        time.sleep(step_time)
        self.i_magnet_z.seti(I_now)
        I_now += current_step
        if self.i_magnet_z.measureR() > 1:
            # Leads went resistive mid-ramp: dump the current unless the
            # stored field is already ~0, then flag the ambiguous state.
            if not self.field_z()<1e-3:
                self.i_magnet_z.seti(0)
            raise ValueError('Switch is not in a well defined state!')
    # Final trim to the exact target current.
    self.i_magnet_z.seti(self.BtoI_z(field))
    self.source_current_z()
def step_y_magfield_to_value(self, field):
    """Ramp the y source current in small steps until it matches `field`.

    One step of ramp_rate_y()*50ms is applied every 50ms; aborts with a
    ValueError if the leads go resistive mid-ramp.
    """
    MagCurrRatio = self.field_to_current_y # Tesla/Ampere  (NOTE: unused in this method)
    Ramp_rate_I = self.ramp_rate_y()
    step_time = 0.05 # in seconds
    current_step = Ramp_rate_I * step_time
    I_now = self.get_source_current_y()
    current_target = self.BtoI_y(field)
    if current_target >= I_now:
        current_step *= +1
    if current_target < I_now:
        current_step *= -1
    # NOTE(review): ramp_rate_y() == 0 makes this divide by zero.
    num_steps = int(1.*(current_target-I_now)/(current_step))
    sweep_time = step_time*num_steps
    # NOTE(review): the *3 factor in the printed estimate is unexplained;
    # the loop itself takes ~sweep_time, not 3x.
    print('Sweep time is '+str(np.abs(sweep_time)*3)+' seconds')
    for tt in range(num_steps):
        time.sleep(step_time)
        self.i_magnet_y.seti(I_now)
        I_now += current_step
        if self.i_magnet_y.measureR() > 1:
            # Leads went resistive mid-ramp: dump the current unless the
            # stored field is already ~0, then flag the ambiguous state.
            if not self.field_y()<1e-3:
                self.i_magnet_y.seti(0)
            raise ValueError('Switch is not in a well defined state!')
    # Final trim to the exact target current.
    self.i_magnet_y.seti(self.BtoI_y(field))
    self.source_current_y()
def disconnect_z_source(self):
    """Ramp the z source current to zero so the leads can be unplugged.

    Only allowed while the z switch is superconducting (the coil then
    keeps its persistent current); otherwise raises ValueError.
    """
    if self.switch_state_z() != 'SuperConducting':
        raise ValueError('Switch is not superconducting!')
    self.step_z_magfield_to_value(0)
    self.fake_folder(folder_name='Ramped_down_current_you_are_able_to_disconnect_the_Z_source_now')
def disconnect_y_source(self):
    """Ramp the y source current to zero so the leads can be unplugged.

    Only allowed while the y switch is superconducting (the coil then
    keeps its persistent current); otherwise raises ValueError.
    """
    if self.switch_state_y() != 'SuperConducting':
        raise ValueError('Switch is not superconducting!')
    self.step_y_magfield_to_value(0)
    self.fake_folder(folder_name='Ramped_down_current_you_are_able_to_disconnect_the_Y_source_now')
def bring_z_source_to_field(self):
    """Ramp the z source current up to match the persistent current.

    Requires a superconducting z switch and connected leads (resistance
    below 1 Ohm); marks the event with a fake data folder.
    """
    if self.switch_state_z() != 'SuperConducting':
        print('Switch Z is normal conducting.')
        return
    if self.i_magnet_z.measureR() > 1.:
        raise ValueError('Magnet leads not connected!')
    self.step_z_magfield_to_value(self.field_z())
    self.fake_folder(folder_name='Ramped_Z_current_up_to_match_persistent_current')
def bring_y_source_to_field(self):
    """Ramp the y source current up to match the persistent current.

    Requires a superconducting y switch and connected leads (resistance
    below 1 Ohm); marks the event with a fake data folder.
    """
    if self.switch_state_y() != 'SuperConducting':
        print('Switch Y is normal conducting.')
        return
    if self.i_magnet_y.measureR() > 1.:
        raise ValueError('Magnet leads not connected!')
    self.step_y_magfield_to_value(self.field_y())
    self.fake_folder(folder_name='Ramped_Y_current_up_to_match_persistent_current')
def measure_field_z(self):
    """Infer the z field (T) from the present source current.

    Prints a warning and returns None when no z current source is set.
    """
    if self.i_magnet_z is None:
        print('no i_magnet_z')
        return None
    return self.ItoB_z(self.get_source_current_z())
def measure_field_y(self):
    """Infer the y field (T) from the present source current.

    Prints a warning and returns None when no y current source is set.
    """
    if self.i_magnet_y is None:
        print('no i_magnet_y')
        return None
    return self.ItoB_y(self.get_source_current_y())
def BtoI_z(self, magfield):
    """Convert a z-axis field (T) to the required coil current (A)."""
    # field_to_current_z is the coil constant in Tesla per Ampere.
    return magfield / self.field_to_current_z
def BtoI_y(self, magfield):
    """Convert a y-axis field (T) to the required coil current (A)."""
    # field_to_current_y is the coil constant in Tesla per Ampere.
    return magfield / self.field_to_current_y
def ItoB_z(self, current):
    """Convert a z coil current (A) to the field it produces (T)."""
    # field_to_current_z is the coil constant in Tesla per Ampere.
    return current * self.field_to_current_z
def ItoB_y(self, current):
    """Convert a y coil current (A) to the field it produces (T)."""
    # field_to_current_y is the coil constant in Tesla per Ampere.
    return current * self.field_to_current_y
def fake_folder(self,folder_name):
    """Create a marker "measurement" folder named `folder_name`.

    Runs a dummy sweep through the MeasurementControl so that a folder
    with the given name appears in the data directory.  Useful to mark
    field changes or the start of a new measurement cycle, so that
    measurement sets can be told apart.  Raises ValueError when the
    name is not a string.
    """
    if not isinstance(folder_name, str):
        raise ValueError('Please enter a string as the folder name!')
    self.MC.set_sweep_function(swf.None_Sweep())
    self.MC.set_sweep_points(np.linspace(0, 10, 3))
    self.MC.set_detector_function(det.Dummy_Detector_Soft())
    self.MC.run(folder_name)
| {
"repo_name": "DiCarloLab-Delft/PycQED_py3",
"path": "pycqed/instrument_drivers/meta_instrument/AMI_Two_Axis_Magnet_with_PCS_SN14769.py",
"copies": "1",
"size": "38699",
"license": "mit",
"hash": 7486693195230117000,
"line_mean": 47.6779874214,
"line_max": 110,
"alpha_frac": 0.5567585726,
"autogenerated": false,
"ratio": 3.771097251997661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9791898633146119,
"avg_score": 0.007191438290308432,
"num_lines": 795
} |
"""American National Election Survey 1996"""
from numpy import log
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
http://www.electionstudies.org/
The American National Election Studies.
"""
DESCRSHORT = """This data is a subset of the American National Election Studies of 1996."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of observations - 944
Number of variables - 10
Variables name definitions::
popul - Census place population in 1000s
TVnews - Number of times per week that respondent watches TV news.
PID - Party identification of respondent.
0 - Strong Democrat
1 - Weak Democrat
2 - Independent-Democrat
3 - Independent-Indpendent
4 - Independent-Republican
5 - Weak Republican
6 - Strong Republican
age : Age of respondent.
educ - Education level of respondent
1 - 1-8 grades
2 - Some high school
3 - High school graduate
4 - Some college
5 - College degree
6 - Master's degree
7 - PhD
income - Income of household
1 - None or less than $2,999
2 - $3,000-$4,999
3 - $5,000-$6,999
4 - $7,000-$8,999
5 - $9,000-$9,999
6 - $10,000-$10,999
7 - $11,000-$11,999
8 - $12,000-$12,999
9 - $13,000-$13,999
10 - $14,000-$14.999
11 - $15,000-$16,999
12 - $17,000-$19,999
13 - $20,000-$21,999
14 - $22,000-$24,999
15 - $25,000-$29,999
16 - $30,000-$34,999
17 - $35,000-$39,999
18 - $40,000-$44,999
19 - $45,000-$49,999
20 - $50,000-$59,999
21 - $60,000-$74,999
22 - $75,000-89,999
23 - $90,000-$104,999
24 - $105,000 and over
vote - Expected vote
0 - Clinton
1 - Dole
The following 3 variables all take the values:
1 - Extremely liberal
2 - Liberal
3 - Slightly liberal
4 - Moderate
5 - Slightly conservative
6 - Conservative
7 - Extremely Conservative
selfLR - Respondent's self-reported political leanings from "Left"
to "Right".
ClinLR - Respondents impression of Bill Clinton's political
leanings from "Left" to "Right".
DoleLR - Respondents impression of Bob Dole's political leanings
from "Left" to "Right".
logpopul - log(popul + .1)
"""
def load_pandas():
    """Load the anes96 data and returns a Dataset class.

    Returns
    -------
    Dataset
        See DATASET_PROPOSAL.txt for more information.
    """
    # PID (index 5) is the endogenous variable; the exogenous columns are
    # logpopul, TVnews, selfLR, age and educ.
    return du.process_pandas(_get_data(), endog_idx=5,
                             exog_idx=[10, 2, 6, 7, 8])
def load(as_pandas=None):
    """Load the anes96 data and returns a Dataset class.

    Parameters
    ----------
    as_pandas : bool
        Flag indicating whether to return pandas DataFrames and Series
        or numpy recarrays and arrays.  If True, returns pandas.

    Returns
    -------
    Dataset
        See DATASET_PROPOSAL.txt for more information.
    """
    dataset = load_pandas()
    return du.as_numpy_dataset(dataset, as_pandas=as_pandas)
def _get_data():
    """Read anes96.csv, tidy the column names and add the logpopul column."""
    raw = du.load_csv(__file__, 'anes96.csv', sep=r'\s')
    raw = du.strip_column_names(raw)
    # The +.1 offset keeps log() finite for zero-population places.
    raw['logpopul'] = log(raw['popul'] + .1)
    return raw.astype(float)
| {
"repo_name": "jseabold/statsmodels",
"path": "statsmodels/datasets/anes96/data.py",
"copies": "4",
"size": "3931",
"license": "bsd-3-clause",
"hash": 5258582150516469000,
"line_mean": 30.448,
"line_max": 91,
"alpha_frac": 0.5108114983,
"autogenerated": false,
"ratio": 3.719016083254494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6229827581554493,
"avg_score": null,
"num_lines": null
} |
"""American National Election Survey 1996"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
http://www.electionstudies.org/
The American National Election Studies.
"""
DESCRSHORT = """This data is a subset of the American National Election Studies of 1996."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of observations - 944
Number of variables - 10
Variables name definitions::
popul - Census place population in 1000s
TVnews - Number of times per week that respondent watches TV news.
PID - Party identification of respondent.
0 - Strong Democrat
1 - Weak Democrat
2 - Independent-Democrat
3 - Independent-Indpendent
4 - Independent-Republican
5 - Weak Republican
6 - Strong Republican
age : Age of respondent.
educ - Education level of respondent
1 - 1-8 grades
2 - Some high school
3 - High school graduate
4 - Some college
5 - College degree
6 - Master's degree
7 - PhD
income - Income of household
1 - None or less than $2,999
2 - $3,000-$4,999
3 - $5,000-$6,999
4 - $7,000-$8,999
5 - $9,000-$9,999
6 - $10,000-$10,999
7 - $11,000-$11,999
8 - $12,000-$12,999
9 - $13,000-$13,999
10 - $14,000-$14.999
11 - $15,000-$16,999
12 - $17,000-$19,999
13 - $20,000-$21,999
14 - $22,000-$24,999
15 - $25,000-$29,999
16 - $30,000-$34,999
17 - $35,000-$39,999
18 - $40,000-$44,999
19 - $45,000-$49,999
20 - $50,000-$59,999
21 - $60,000-$74,999
22 - $75,000-89,999
23 - $90,000-$104,999
24 - $105,000 and over
vote - Expected vote
0 - Clinton
1 - Dole
The following 3 variables all take the values:
1 - Extremely liberal
2 - Liberal
3 - Slightly liberal
4 - Moderate
5 - Slightly conservative
6 - Conservative
7 - Extremely Conservative
selfLR - Respondent's self-reported political leanings from "Left"
to "Right".
ClinLR - Respondents impression of Bill Clinton's political
leanings from "Left" to "Right".
DoleLR - Respondents impression of Bob Dole's political leanings
from "Left" to "Right".
logpopul - log(popul + .1)
"""
from numpy import recfromtxt, log
import numpy.lib.recfunctions as nprf
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """Load the anes96 data and returns a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # PID (index 5) is the endogenous variable; the exogenous columns are
    # logpopul, TVnews, selfLR, age and educ.
    return du.process_recarray(_get_data(), endog_idx=5,
                               exog_idx=[10, 2, 6, 7, 8], dtype=float)
def load_pandas():
    """Load the anes96 data and returns a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    return du.process_recarray_pandas(_get_data(), endog_idx=5,
                                      exog_idx=[10, 2, 6, 7, 8],
                                      dtype=float)
def _get_data():
    """Read anes96.csv as a recarray and append the derived logpopul field."""
    filepath = dirname(abspath(__file__))
    with open(filepath + '/anes96.csv', "rb") as f:
        data = recfromtxt(f, delimiter="\t", names=True, dtype=float)
    # The +.1 offset keeps log() finite for zero-population places.
    logpopul = log(data['popul'] + .1)
    data = nprf.append_fields(data, 'logpopul', logpopul, usemask=False,
                              asrecarray=True)
    return data
| {
"repo_name": "yl565/statsmodels",
"path": "statsmodels/datasets/anes96/data.py",
"copies": "5",
"size": "4239",
"license": "bsd-3-clause",
"hash": 7992474584893189000,
"line_mean": 32.6428571429,
"line_max": 92,
"alpha_frac": 0.4949280491,
"autogenerated": false,
"ratio": 3.791592128801431,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6786520177901431,
"avg_score": null,
"num_lines": null
} |
A message containing letters from A-Z is being encoded to numbers using the following mapping:
'A' -> 1
'B' -> 2
...
'Z' -> 26
Given an encoded message containing digits, determine the total number of ways to decode it.
For example,
Given encoded message "12", it could be decoded as "AB" (1 2) or "L" (12).
The number of ways decoding "12" is 2.
# Note:
# State: dp[i] means from char 0 to char i-1 how many decode ways
# Initialize: dp[0]=1,dp[1]=1
# Function:
# 1) When s[i-2:i] is between 10 and 26 but not 10 or 20 itself (e.g. 21), the pair has two decodings (BA and U), so dp[i] = dp[i-1] + dp[i-2]
# 2) When s[i-2:i] equals 10 or 20, the pair has only one decoding, so dp[i] = dp[i-2]
# 3) Otherwise: a pair like 09 has no valid decoding (0 ways), while one like 31 only decodes digit by digit, so dp[i] = dp[i-1]
# Result: dp[length]
class Solution:
    # Bottom-up DP (Python 2: uses xrange).  dp[i] = number of ways to
    # decode the prefix s[:i]; dp[0] = dp[1] = 1 once the first char is a
    # valid non-'0' digit.
    # @param s, a string
    # @return an integer
    def numDecodings(self, s):
        """Count the decodings of digit string s under 'A'->1 .. 'Z'->26."""
        length = len(s)
        # Empty string or a leading '0' cannot be decoded at all.
        if length == 0 or s[0] == '0': return 0
        dp = [1,1]
        for i in xrange(2,length+1):
            # Pair in 11..26 with a non-'0' second digit: decodes both as
            # two single letters and as one letter.
            if 10 < int(s[i-2:i]) <= 26 and s[i-1] != '0':
                dp.append(dp[i-1]+dp[i-2])
            # 10 and 20 only decode as a pair.
            elif int(s[i-2:i])==10 or int(s[i-2:i])==20:
                dp.append(dp[i-2])
            # Pair invalid, but the current digit stands alone.
            elif s[i-1] != '0':
                dp.append(dp[i-1])
            else:
                # A '0' that cannot pair with 1 or 2: no valid decoding.
                return 0
        return dp[length]
# test case: '0'--> 0
# '26' --> 2
# '27' --> 1
# '012' --> 0
# Do not need the whole array, only need two elements dp[0] and dp[1]
class Solution:
    # Same DP as above but O(1) space: dp[0]/dp[1] hold the rolling
    # dp[i-2]/dp[i-1] values.  (Python 2: uses xrange.)
    # @param {string} s
    # @return {integer}
    def numDecodings(self, s):
        """Count the decodings of digit string s under 'A'->1 .. 'Z'->26."""
        lens = len(s)
        if lens == 0 or int(s[0]) == 0: return 0
        dp = [1, 1]
        for i in xrange(2, lens+1):
            if int(s[i-2:i]) == 10 or int(s[i-2:i]) == 20:
                # Pair-only decoding: new value equals old dp[i-2].
                dp[0], dp[1] = dp[1], dp[0]
                #dp.append(dp[i-2])
            elif 10 < int(s[i-2:i]) <= 26:
                # Pair and singles both valid (10/20 already handled above).
                dp[0], dp[1] = dp[1], (dp[0]+dp[1])
                #dp.append(dp[i-1]+dp[i-2])
            elif s[i-1] != '0':
                # Only the single-digit decoding is valid.
                dp[0], dp[1] = dp[1], dp[1]
                #dp.append(dp[i-1])
            else:
                # Unpairable '0': no valid decoding.
                return 0
        return dp[1]
| {
"repo_name": "UmassJin/Leetcode",
"path": "Array/Decode_Ways.py",
"copies": "1",
"size": "2233",
"license": "mit",
"hash": 2531218380236446000,
"line_mean": 30.7384615385,
"line_max": 94,
"alpha_frac": 0.5007270965,
"autogenerated": false,
"ratio": 2.4016298020954596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34023568985954594,
"avg_score": null,
"num_lines": null
} |
# A message containing letters from A-Z is being encoded to numbers using the following mapping:
# 'A' -> 1
# 'B' -> 2
# ...
# 'Z' -> 26
# Given an encoded message containing digits, determine the total number of ways to decode it.
# For example,
# Given encoded message "12", it could be decoded as "AB" (1 2) or "L" (12).
# The number of ways decoding "12" is 2.
class Solution:
    # @param s, a string
    # @return an integer
    def numDecodings(self, s):
        """Count the decodings of digit string s under 'A'->1 .. 'Z'->26.

        Iterative O(n)-time / O(1)-space DP.  Fixes two bugs in the
        original recursive version: "0" and unpairable zeros such as
        "30" previously counted as 1 decoding instead of 0, and the
        plain recursion was exponential (the commented 100-digit test
        below would effectively never finish).
        """
        if not s or s[0] == '0':
            return 0
        # prev/cur play the roles of dp[i-1]/dp[i].
        prev, cur = 1, 1
        for i in range(1, len(s)):
            nxt = 0
            # Current digit decodes on its own unless it is '0'.
            if s[i] != '0':
                nxt += cur
            # Two-digit decoding is valid for 10..26.
            if s[i-1] == '1' or (s[i-1] == '2' and s[i] <= '6'):
                nxt += prev
            if nxt == 0:
                # e.g. "00", "30": no way to place this '0'.
                return 0
            prev, cur = cur, nxt
        return cur
# s = Solution()
# print s.numDecodings('4757562545844617494555774581341211511296816786586787755257741178599337186486723247528324612117156948')
# print s.numDecodings('1001')
| {
"repo_name": "lijunxyz/leetcode_practice",
"path": "decode_ways_medium/Solution1.py",
"copies": "1",
"size": "1109",
"license": "mit",
"hash": -7180221604527209000,
"line_mean": 29.8055555556,
"line_max": 126,
"alpha_frac": 0.5798016231,
"autogenerated": false,
"ratio": 3.0135869565217392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40933885796217395,
"avg_score": null,
"num_lines": null
} |
# A message containing letters from A-Z is being encoded to numbers using the following mapping way:
# 'A' -> 1
# 'B' -> 2
# ...
# 'Z' -> 26
# Beyond that, now the encoded string can also contain the character '*', which can be treated as one of the numbers from 1 to 9.
# Given the encoded message containing digits and the character '*', return the total number of ways to decode it.
# Also, since the answer may be very large, you should return the output mod 109 + 7.
# Example 1:
# Input: "*"
# Output: 9
# Explanation: The encoded message can be decoded to the string: "A", "B", "C", "D", "E", "F", "G", "H", "I".
# Example 2:
# Input: "1*"
# Output: 9 + 9 = 18
# Note:
# The length of the input string will fit in range [1, 105].
# The input string will only contain the character '*' and digits '0' - '9'.
# Many cases need to be considered; detailed walkthrough:
# http://www.cnblogs.com/grandyang/p/7279152.html
class Solution(object):
    def numDecodings(self, s):
        """
        Count decodings where '*' stands for any digit 1-9.

        dp[i] = number of decodings of s[:i], taken modulo 1e9+7.
        :type s: str
        :rtype: int
        """
        mod = 10**9+7
        dp = [0 for i in range(len(s)+1)]
        dp[0] = 1
        # Seed dp[1] from the first character: '*' is any of 1-9, '0' is
        # undecodable, any other digit is a single letter.
        if s[0] =='*':
            dp[1] = 9
        elif s[0] == '0':
            return 0
        else:
            dp[1] = 1
        for i in range(2,len(s)+1):
            if s[i-1] == '0':
                # A '0' must pair with the previous char as 10 or 20.
                if s[i-2] == '1' or s[i-2] == '2':
                    dp[i] += dp[i-2]
                # '*' can be the 1 or the 2 of 10/20: two choices.
                elif s[i-2] == '*':
                    dp[i] += 2 * dp[i-2]
                else:
                    dp[i] = 0
            elif ord('1') <= ord(s[i-1]) and ord(s[i-1]) <= ord('9'):
                # The digit can always stand alone.
                dp[i] += dp[i-1]
                # Pair with the previous char when it forms 10..26.
                # (Precedence: "== '1' or (== '2' and <= '6')" -- 1x is
                # always a valid pair, 2x only up to 26; this is correct.)
                if s[i-2] == '1' or s[i-2] == '2' and ord(s[i-1]) <= ord('6'):
                    dp[i] += dp[i-2]
                elif s[i-2] == '*':
                    # '*' before digit d: "1d" always works, "2d" only for d<=6.
                    if ord(s[i-1]) <= ord('6'):
                        dp[i] += 2*dp[i-2]
                    else:
                        dp[i] += dp[i-2]
            else:
                # Current char is '*': alone it is any of 1-9.
                dp[i] += 9 * dp[i-1]
                # "1*" covers 11-19: nine pairings.
                if s[i-2] == '1':
                    dp[i] += 9*dp[i-2]
                # "2*" covers 21-26: six pairings.
                elif s[i-2] == '2':
                    dp[i] += 6*dp[i-2]
                # "**" covers both cases above: 9 + 6 = 15 pairings.
                elif s[i-2] == '*':
                    dp[i] += 15*dp[i-2]
            dp[i] = dp[i] % mod
        return dp[-1]
| {
"repo_name": "youhusky/Facebook_Prepare",
"path": "639. Decode Ways II.py",
"copies": "1",
"size": "2651",
"license": "mit",
"hash": 8211960633136565000,
"line_mean": 31.7407407407,
"line_max": 129,
"alpha_frac": 0.4024896266,
"autogenerated": false,
"ratio": 3.3856960408684547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42881856674684543,
"avg_score": null,
"num_lines": null
} |
"""A meta-component that allows a component to be optionally enabled
or disabled. This component is mostly for illustration and is not
used anywhere. This is because it is usually much easier to simply
add a trait in the module to enable/disable a particular component.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Bool, Str, Property
from traitsui.api import View, Group, Item
# Local imports.
from mayavi.core.component import Component
######################################################################
# `Optional` class.
######################################################################
class Optional(Component):
    """A meta-component that wraps another component and lets it be
    switched on or off; when disabled, upstream inputs pass through
    untouched."""

    # The version of this class. Used for persistence.
    __version__ = 0

    # The outputs of this component is a property and not a list.
    outputs = Property

    # The component that is enabled or disabled.
    component = Instance(Component)

    # Is the component enabled or not.
    enabled = Bool(True, desc='if the component is enabled')

    # The label of the checkbox to use in the view.
    label = Str

    ########################################
    # The component's view
    # This is defined outside the view so that the label may be easily
    # changed.
    enabled_item = Item(name='enabled')
    view = View(Group(Group(enabled_item),
                      Group(Item(name='component', style='custom',
                                 visible_when='object.enabled'),
                            show_labels=False)
                      )
                )

    ######################################################################
    # `Component` interface
    ######################################################################
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        # Forward our inputs to the wrapped component (only on change,
        # to avoid spurious downstream updates).
        comp = self.component
        if self.inputs != comp.inputs:
            comp.inputs = self.inputs
        self.pipeline_changed = True

    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self.data_changed = True

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline. Note that when start is invoked, all the other
        information for the pipeline should be already set.
        """
        # Do nothing if we are already running.
        if self.running:
            return
        super(Optional, self).start()
        # Start the wrapped component along with ourselves.
        self.component.start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.
        """
        if not self.running:
            return
        # Stop the wrapped component first, then ourselves.
        self.component.stop()
        super(Optional, self).stop()

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _get_outputs(self):
        # Getter for the `outputs` Property: the wrapped component's
        # outputs when enabled, otherwise pass the input through.
        # NOTE(review): the enabled branch returns a list while the
        # disabled branch returns a single output object -- confirm
        # downstream consumers accept both shapes.
        if self.enabled:
            return self.component.outputs
        else:
            return self.inputs[0].get_output_object()

    def _enabled_changed(self, value):
        # Force downstream modules to update.
        self.pipeline_changed = True

    def _label_changed(self, value):
        # Change the displayed label for the enable trait in the view.
        item = self.trait_view_elements().content['enabled_item']
        item.label = value

    def _component_changed(self, old, new):
        # Re-wire event forwarding from the old component to the new one
        # so our own pipeline/data events keep firing.
        if old is not None:
            old.on_trait_change(self._fire_pipeline_changed,
                                'pipeline_changed', remove=True)
            old.on_trait_change(self._fire_data_changed,
                                'data_changed', remove=True)
        new.on_trait_change(self._fire_pipeline_changed, 'pipeline_changed')
        new.on_trait_change(self._fire_data_changed, 'data_changed')

    def _fire_pipeline_changed(self):
        self.pipeline_changed = True

    def _fire_data_changed(self):
        self.data_changed = True
| {
"repo_name": "alexandreleroux/mayavi",
"path": "mayavi/components/optional.py",
"copies": "3",
"size": "4594",
"license": "bsd-3-clause",
"hash": 5689203636817179000,
"line_mean": 33.2835820896,
"line_max": 76,
"alpha_frac": 0.53330431,
"autogenerated": false,
"ratio": 4.8872340425531915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007696123488131192,
"num_lines": 134
} |
#a method for ranking sites in an alignment according to GC bias, for filtering purposes. Inspired by the Munoz-Gomez et al. (2018) zed score for amino acid data
from Bio import SeqIO, AlignIO
import sys, operator
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
# Usage: script.py <alignment.fasta> <output.fasta>
# Python 2 script (print statements).
highGC = []        # taxa with GC/AT odds > 1
lowGC = []         # all other taxa
ratios = {}        # taxon -> GC/AT odds
flat_ratios = []
flat_zeds = []
col_zed = {}       # alignment column -> zed score
#find high and low GC taxa
# ---- Pass 1: classify each taxon by its GC/AT composition ----
seqaln = SeqIO.index(sys.argv[1], "fasta")
scores_by_taxa = {}
for taxon in seqaln:
    gc = 0
    at = 0
    sequence = str(seqaln[taxon].seq)
    for char in sequence:
        if char in 'GC':
            gc += 1
        elif char in 'AT':
            at += 1
    # NOTE(review): this is the GC/AT odds ratio, not the GC fraction; a
    # sequence with no A/T characters raises ZeroDivisionError.
    ratio = float(gc)/float(at)
    scores_by_taxa[taxon] = ratio
    ratios[taxon] = ratio
    flat_ratios.append(ratio)
    if ratio > 1:
        highGC.append(taxon)
    else:
        lowGC.append(taxon)
#rank by ratio and print out list
sorted_taxa = sorted(ratios.items(), key=operator.itemgetter(1), reverse = True)
just_taxa = []
for taxon in sorted_taxa:
    print taxon[0] + "\t" + str(taxon[1])
    just_taxa.append(taxon[0])
#np_rat = np.array(flat_ratios)
#sns.distplot(np_rat)
#plt.show()
#now compute Z for each column in the alignment
# ---- Pass 2: per-column zed score (positive when high-GC taxa carry G/C
# and low-GC taxa carry A/T at this site, i.e. composition-biased sites) ----
alignment = AlignIO.read(sys.argv[1], "fasta")
for col in range(len(str(alignment[0].seq))):
    # Fractions of G/C ("GARP") and A/T ("FIMNKY") residues within each group.
    GARP_high = 0.0
    GARP_low = 0.0
    FIMNKY_high = 0.0
    FIMNKY_low = 0.0
    highGC_taxa = float(len(highGC))
    lowGC_taxa = float(len(lowGC))
    for rec in alignment:
        char = str(rec.seq)[col]
        if rec.id in highGC:
            if char in 'GC':
                GARP_high += 1.0/highGC_taxa
            elif char in 'AT':
                FIMNKY_high += 1.0/highGC_taxa
        elif rec.id in lowGC:
            if char in 'GC':
                GARP_low += 1.0/lowGC_taxa
            elif char in 'AT':
                FIMNKY_low += 1.0/lowGC_taxa
        else:
            # Should be unreachable: every taxon was binned in pass 1.
            print "Taxon " + str(rec.id) + " couldn't be assigned to high or low GC..."
            quit()
    zed = FIMNKY_low - FIMNKY_high + GARP_high - GARP_low
    flat_zeds.append(zed)
    print str(col) + "\t" + str(zed) + "\t" + str(FIMNKY_low) + "\t" + str(FIMNKY_high) + "\t" + str(GARP_high) + "\t" + str(GARP_low)
    print alignment[:, col]
    col_zed[col] = zed
#np_z = np.array(flat_zeds)
#sns.distplot(np_z)
#plt.show()
#now write alignments in which top X% of sites by zed have been removed - say 50% for now
# ---- Pass 3: keep the 50% of columns with the lowest (least biased) zed ----
sorted_zeds = sorted(col_zed.items(), key=operator.itemgetter(1))
num_cols = len(sorted_zeds)
cols_to_take = int(float(num_cols)/2.0)
print cols_to_take
selected_cols = []
for i in range(cols_to_take):
    selected_cols.append(sorted_zeds[i][0])
    # for rec in seqaln:
    # print rec + "\t" + str(seqaln[rec].seq)[i]
#print sites for most negative zeds
#print alignment[:, selected_cols[0]]
# NOTE(review): selected columns are written in zed order, not original
# alignment order -- confirm column order does not matter downstream.
outfile = sys.argv[2]
outh = open(outfile, "w")
for rec in seqaln:
    seq_to_print = ''
    for col in selected_cols:
        seq_to_print += str(seqaln[rec].seq)[col]
    outh.write(">" + str(rec) + "\n" + str(seq_to_print) + "\n")
outh.close()
| {
"repo_name": "Tancata/phylo",
"path": "rank_taxa_by_nuc_compo.py",
"copies": "1",
"size": "3109",
"license": "mit",
"hash": 685849526514646400,
"line_mean": 28.8942307692,
"line_max": 161,
"alpha_frac": 0.6063042779,
"autogenerated": false,
"ratio": 2.7933513027852652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3899655580685265,
"avg_score": null,
"num_lines": null
} |
"""a method for the construction of minimum-redundancy codes. Huffman"""
from bit_array import BitArray
from huffman_trie import HuffmanTrie
from collections import defaultdict, Counter
from heapq import heappush, heappop, heapify
class HuffmanCode(object):
    """allows object encoding and decoding"""

    def __init__(self, message):
        # Build the symbol -> bitstring code table from the message's own
        # symbol frequencies, and cache each symbol's code length so
        # encode() can size its bit array in one pass.
        self.code = self.generate_code(self.to_frequency(Counter(message)))
        self.sym_length = defaultdict(int)
        for (sym, bin_code) in self.code.items():
            self.sym_length[sym] = len(bin_code)

    def to_frequency(self, symbol_count):
        """get the frequency of each symbol in the message"""
        # Returns (relative_frequency, [symbol]) pairs -- the shape that
        # generate_code()'s heap expects.
        frequencies = []
        total = sum(symbol_count.values())
        for (symbol, count) in symbol_count.items():
            frequencies.append((count / total, [symbol]))
        return frequencies

    def generate_code(self, frequency_table):
        """generates binary code for each symbol
        use the exact same encoding as in the article"""
        # Classic Huffman: repeatedly merge the two least-frequent groups,
        # appending bit 1 to every symbol of one group and bit 0 to the
        # other.  Equal frequencies fall back to comparing the symbol
        # lists, so the exact codes depend on symbol ordering.
        heapify(frequency_table)
        code = defaultdict(list)
        while len(frequency_table) > 1:
            one = heappop(frequency_table)
            zero = heappop(frequency_table)
            for sym in one[1]:
                code[sym].append(1)
            for sym in zero[1]:
                code[sym].append(0)
            heappush(frequency_table, (one[0] + zero[0], one[1] + zero[1]))
        # Bits were appended leaf-to-root, so reverse them to get the code.
        # NOTE(review): a single-symbol message never enters the loop and
        # yields an empty table, so encode() would emit zero bits for it --
        # confirm whether that degenerate case can occur for callers.
        sym_to_string = defaultdict(str)
        for (symbol, binary) in code.items():
            bin_str = "".join([str(i) for i in reversed(binary)])
            sym_to_string[symbol] = bin_str
        return sym_to_string

    def encode(self, message):
        """huffman code the message into bits"""
        # First pass: total size in bits; second pass: set the 1-bits.
        bit_size = 0
        for sym in message:
            bit_size += self.sym_length[sym]
        bit_array = BitArray(bit_size)
        position = 0
        for sym in message:
            for bit in self.code[sym]:
                # self.code[sym] is a string, hence the '1' comparison.
                if bit == '1':
                    bit_array.set_bit(position)
                position += 1
        return bit_array

    def decode(self, bit_array):
        """decode the message previously encoded with the code"""
        assert isinstance(bit_array, BitArray)
        # Rebuild a prefix trie from the code table and walk the bits.
        decode_trie = HuffmanTrie()
        for (symbol, code) in self.code.items():
            decode_trie.add_code(symbol, code)
        return decode_trie.decode(bit_array.get_bits())


__all__ = ["HuffmanCode"]
| {
"repo_name": "in3rtial/huffman",
"path": "src/huffman_code.py",
"copies": "2",
"size": "2456",
"license": "cc0-1.0",
"hash": 4573689844093772000,
"line_mean": 33.1111111111,
"line_max": 75,
"alpha_frac": 0.5899837134,
"autogenerated": false,
"ratio": 3.8922345483359746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 72
} |
"""A method of accessing what information about CLTK Core contributors."""
from collections import defaultdict
from collections import OrderedDict
import importlib.machinery
import os
from cltk.utils.cltk_logger import logger
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
class Contributors:
    """An object which data about available contributors."""

    def __init__(self):
        """Upon loading this class, query all modules for "__author__"
        variables throughout library."""
        self.credits = self._make_authors_dict()

    def walk_cltk(self):
        """Walk through either this repo's corpus or the directory where CLTK
        is installed. The former is useful when building a contribs file
        without before installing and packaging the software.

        Returns the paths of all non-``__init__`` ``.py`` files under the
        local ``cltk`` directory.
        """
        py_files_list = []
        for dir_path, dir_names, files in os.walk('cltk'):  # pylint: disable=W0612
            for name in files:
                if name.lower().endswith('.py') and not name.lower().startswith('__init__'):
                    py_files_list.append(os.path.join(dir_path, name))
        return py_files_list

    @staticmethod
    def get_module_authors(module):
        """Return the list of "__author__" entries declared in ``module``.

        Executes the module from its file path and reads ``__author__``.
        Returns None when the module cannot be imported, defines no
        ``__author__``, or defines it as something other than str/list.

        Fixes vs. the previous version: removed leftover debug code
        (print/input() on word2vec.py) that blocked execution; the
        author list can no longer be referenced unbound when
        ``__author__`` has an unexpected type; uses spec/exec_module
        instead of ``Loader.load_module`` (deprecated, removed in
        Python 3.12).
        """
        import importlib.util
        spec = importlib.util.spec_from_file_location('_cltk_author_probe', module)
        if spec is None or spec.loader is None:
            return None
        mod = importlib.util.module_from_spec(spec)
        try:
            # Arbitrary module code may raise anything at import time,
            # so catch broadly and treat failure as "no authors".
            spec.loader.exec_module(mod)
        except Exception:
            return None
        author = getattr(mod, '__author__', None)
        if isinstance(author, str):
            return [author]
        if isinstance(author, list):
            return author
        return None

    def _make_authors_dict(self):
        """Build a dict of 'author': ['modules', 'contributed', 'to']."""
        authors_dict = defaultdict(list)
        for _module in self.walk_cltk():
            authors = self.get_module_authors(_module)
            if not authors:
                continue
            for author in authors:
                authors_dict[author].append(_module)
        # Sort by first name for a stable, readable listing.
        return OrderedDict(sorted(authors_dict.items()))

    def show(self):
        """Print to screen contributor info."""
        for contrib in self.credits:
            print('# ', contrib)
            for module in self.credits[contrib]:
                print('* ', module)
            print()

    def write_contribs(self):
        """Write to file, in current dir, 'contributors.md'."""
        file_str = ''
        note = '# Contributors\nCLTK Core authors, ordered alphabetically by first name\n\n'
        file_str += note
        for contrib in self.credits:
            file_str += '## ' + contrib + '\n'
            for module in self.credits[contrib]:
                file_str += '* ' + module + '\n'
            file_str += '\n'
        file_name = 'contributors.md'
        with open(file_name, 'w') as file_open:
            file_open.write(file_str)
        logger.info('Wrote contribs file at "%s".', file_name)
if __name__ == "__main__":
    # Build the credits mapping (scans the local 'cltk' tree) and write
    # 'contributors.md' in the current directory.
    CONTRIBS = Contributors()
    CONTRIBS.write_contribs()
    #print(dir(CONTRIBS))
    #print(CONTRIBS.credits)  # a dict
    # Spot-check: list the modules credited to one known contributor.
    print(CONTRIBS.credits['Steven Bird <stevenbird1@gmail.com>'])  # a list of modules
| {
"repo_name": "coderbhupendra/cltk",
"path": "cltk/utils/contributors.py",
"copies": "2",
"size": "3773",
"license": "mit",
"hash": 5908221647329385000,
"line_mean": 33.3,
"line_max": 92,
"alpha_frac": 0.5759342698,
"autogenerated": false,
"ratio": 4.206243032329989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5782177302129989,
"avg_score": null,
"num_lines": null
} |
"""A method to get the best hostname for this machine"""
import socket
def besthostname():
    """Return the best name for this machine.

    Preference order: a resolvable FQDN matching our outbound IP, a
    reverse-DNS name, a '.local' hostname, and finally the bare IP.
    Returns '127.0.0.1' when name resolution fails entirely (offline).
    """
    #
    # First find our preferred network interface address. Connecting a UDP
    # socket sends no packets; it only selects a local source address.
    # The socket is closed on every path (the original leaked it).
    #
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        try:
            s.connect(('google.com', 9999))
        except socket.gaierror:
            return '127.0.0.1'
        ip, _ = s.getsockname()
    finally:
        s.close()
    #
    # Now get our hostname, adding '.local' if needed
    #
    hostname = socket.getfqdn()
    if '.' not in hostname:
        hostname = hostname + '.local'
    #
    # See if this hostname matches our external IP address, return if so
    #
    try:
        _, _, ipaddrs = socket.gethostbyname_ex(hostname)
    except socket.gaierror:
        ipaddrs = []
    if ip in ipaddrs:
        return hostname
    #
    # Otherwise try a reverse DNS lookup
    #
    try:
        real_hostname, _, _ = socket.gethostbyaddr(ip)
        return real_hostname
    except (socket.gaierror, socket.herror):
        pass
    #
    # If this is a .local name we use the hostname
    #
    if hostname.endswith('.local'):
        return hostname
    #
    # Otherwise return the IP address
    #
    return ip
| {
"repo_name": "cwi-dis/igor",
"path": "igor/besthostname.py",
"copies": "1",
"size": "1275",
"license": "mit",
"hash": -901017262561389800,
"line_mean": 24,
"line_max": 72,
"alpha_frac": 0.5835294118,
"autogenerated": false,
"ratio": 4.022082018927445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5105611430727445,
"avg_score": null,
"num_lines": null
} |
"""A metric counter for time series monitoring."""
import sched
import socket
import sys
import threading
import time
DEFAULT_PUT_FORMAT = 'opentsdb'

try:
    # If the tcollector collectors package is importable, emit tcollector
    # format (no 'put' prefix; tcollector adds the host tag itself).
    import collectors.lib
    DEFAULT_PUT_FORMAT = 'tcollector'
except ImportError:
    pass

# Output line templates keyed by format name.
PUT_FORMATS = {
    'opentsdb': 'put %(name)s %(timestamp)s %(value)s %(tag_string)s\n',
    'tcollector': '%(name)s %(timestamp)s %(value)s %(tag_string)s\n',
}


class MetricCounter(object):
    """A ring-buffer metric counter for time-series monitoring.

    Values are accumulated into one cell per second over `timespan` seconds
    and written out by dump() in OpenTSDB/tcollector 'put' text format.
    """

    # opentsdb's put (or tcollector's, depending on the detected default).
    fmt = PUT_FORMATS.get(DEFAULT_PUT_FORMAT)
    tags = None
    tag_string = None
    # Registry of live counters; autodump() falls back to this list.
    # NOTE(review): this holds strong references, so __del__ below will not
    # run via refcounting while a counter is registered — confirm intended.
    ALL_COUNTERS = []

    def __init__(self, name, timespan=15, granularity=1, tags=None, fmt=None,
                 stream=sys.stdout):
        """Create a counter named *name* covering *timespan* seconds,
        reported in *granularity*-second buckets, written to *stream*.

        *tags* is an optional dict of extra tag key/values; a 'host' tag is
        added automatically unless overridden (and stripped under
        tcollector, which appends it itself).
        """
        # Was a mutable default (`tags={}`); use the None idiom instead.
        if tags is None:
            tags = {}
        self.stream = stream
        self.name = name
        self.timespan = timespan
        self.granularity = granularity
        self.last = 0
        # One cell per second plus one spare: dump() skips the cell
        # currently being written.
        self.cells = [0.0] * (timespan + 1)
        # Allow for overwriting the host tag.
        _tags = {'host': socket.gethostname()}
        _tags.update(tags)
        if fmt:
            self.fmt = fmt
        if DEFAULT_PUT_FORMAT == 'tcollector':
            # tcollector automatically appends the host tag.
            if 'host' in _tags:
                _tags.pop('host')
        self.set_tags(_tags)
        self.ALL_COUNTERS.append(self)

    def __del__(self):
        self.ALL_COUNTERS.remove(self)

    def inc(self):
        """Increment the counter by 1"""
        self.add(1)

    def add(self, value):
        """Increment the current second's cell by a value."""
        self._refresh()
        self.cells[self._current_cell()] += value

    def set(self, value):
        """Set the value of the current second's cell."""
        self._refresh()
        self.cells[self._current_cell()] = value

    def set_tags(self, tags):
        """Replace the counter's tags and precompute their 'k=v' string."""
        self.tags = tags
        self.tag_string = " ".join('%s=%s' % (k, v) for k, v in tags.items())

    def summarize(self, summary_function):
        """Apply summary_function (e.g. sum, max) to the raw cell values."""
        self._refresh()
        return summary_function(self.cells)

    def flush(self):
        """Flush the counter (zero every cell)."""
        for i in range(len(self.cells)):
            self.cells[i] = 0.0

    def dump(self):
        """Write one output line per granularity bucket to the stream."""
        self._refresh()
        now = self.now()
        current_cell = self._current_cell()
        # Oldest-to-newest cells, excluding the cell being written right now.
        cells = self.cells[current_cell+1:] + self.cells[:current_cell]
        for part in range(0, self.timespan, self.granularity):
            value = sum(cells[part: part + self.granularity])
            timestamp = now - self.timespan + part
            self.stream.write(
                self.fmt % {
                    'name': self.name,
                    'timestamp': timestamp,
                    'value': value,
                    'tag_string': self.tag_string
                }
            )
        self.stream.flush()

    def _refresh(self):
        """Purge outdated cells in the counter."""
        now = self.now()
        current_cell = self._current_cell()
        tdiff = now - self.last
        if tdiff > self.timespan:
            # Everything in the buffer is stale.
            self.flush()
        elif tdiff:
            # Zero only the cells skipped since the last update; negative
            # indices wrap around, matching the ring-buffer layout.
            for i in range(current_cell - tdiff + 1, current_cell + 1):
                self.cells[i] = 0.0
        self.last = now

    @staticmethod
    def now():
        """Return current timestamp in seconds (via the pluggable clock)."""
        return int(StopWatch.time())

    def _current_cell(self):
        """Return current cell index based on current timestamp."""
        return self.now() % len(self.cells)
class StopWatch(object):
    """A wall-clock timer for use with the 'with' statement.

    Usage:
        with StopWatch() as timer:
            do_stuff()
        print timer.duration  # <-- Duration of the do_stuff() activity
    """

    # Pluggable clock primitives; override via the classmethods below.
    sleep = time.sleep
    time = time.time

    def __init__(self):
        self.start_time = None
        self.end_time = None
        self.duration = None

    def __enter__(self):
        self.start_time = self.time()
        return self

    def __exit__(self, ttype, value, traceback):
        self.end_time = self.time()
        self.duration = self.end_time - self.start_time

    @classmethod
    def set_time_function(cls, timefunc):
        """Install an alternative wall-clock function."""
        cls.time = staticmethod(timefunc)

    @classmethod
    def set_sleep_function(cls, sleepfunc):
        """Install an alternative sleep function."""
        cls.sleep = staticmethod(sleepfunc)
class autodump(object):
    """Automatically dump counter records.

    Context manager that runs a background thread which reschedules itself
    once per second and dumps each tracked counter on its timespan boundary.
    """

    def __init__(self, *metric_counters):
        # With no arguments, track every registered counter.
        self.counters = list(metric_counters)
        if self.counters == []:
            self.counters = MetricCounter.ALL_COUNTERS
        self.stopping = False
        # Scheduler uses the pluggable StopWatch clock, so tests can fake time.
        self._scheduler = sched.scheduler(StopWatch.time, StopWatch.sleep)
        self._scheduler.enterabs(MetricCounter.now() + 1,
                                 0, self._dump_reschedule, [])
        self.dumper_thread = threading.Thread(target=self._scheduler.run)
        self._init_time = MetricCounter.now() + 2

    def _dump_reschedule(self):
        """Reschedule dump action at the next granularity and dump values."""
        if not self.stopping:
            self._scheduler.enterabs(MetricCounter.now() + 1,
                                     0, self._dump_reschedule, [])
        # NOTE(review): (_init_time - now) goes negative once running; Python's
        # modulo keeps the result in [0, timespan), so the boundary test still
        # fires periodically — confirm the 2-second offset is intentional.
        for counter in self.counters:
            if (self._init_time - MetricCounter.now()) % counter.timespan == 0:
                counter.dump()

    def __enter__(self):
        """Start auto-dumping on entering 'with' context."""
        self.dumper_thread.start()

    def __exit__(self, ttype, value, traceback):
        """Stop the dumper thread on leaving 'with' context."""
        self.stopping = True
        self.dumper_thread.join()
        self.stopping = False
        return False
def _get_next_run_time(interval):
    """Yield an endless schedule of run times spaced *interval* seconds
    apart, starting from the current (pluggable) clock time."""
    scheduled = StopWatch.time()
    while True:
        scheduled += interval
        yield scheduled
def run_every_n_seconds(interval, func, args=(), kwargs=None):
    """Call *func(*args, **kwargs)* every *interval* seconds, forever.

    If a call overruns the interval, the schedule fast-forwards past the
    missed slots instead of bursting to catch up.  The former mutable
    defaults (args=[], kwargs={}) are replaced with safe equivalents.
    """
    if kwargs is None:
        kwargs = {}
    next_run_time_generator = _get_next_run_time(interval)
    for next_time_to_run in next_run_time_generator:
        func(*args, **kwargs)
        # Fast forward if func run exceeded the duration of the interval.
        # next() builtin works on both Python 2 and 3 (.next() was Py2-only).
        while next_time_to_run < StopWatch.time():
            next_time_to_run = next(next_run_time_generator)
        StopWatch.sleep(next_time_to_run - StopWatch.time())
| {
"repo_name": "oozie/metriccounter",
"path": "metriccounter.py",
"copies": "1",
"size": "6512",
"license": "mit",
"hash": 9010494658037804000,
"line_mean": 29.1481481481,
"line_max": 79,
"alpha_frac": 0.5710995086,
"autogenerated": false,
"ratio": 3.918170878459687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9987541550831605,
"avg_score": 0.00034576724561650134,
"num_lines": 216
} |
############ A METTRE AILLEURS ?
import django
from rest_framework import serializers
from django_apogee.serializers import InsAdmEtpInitialSerializer, IndividuSerializer
from duck_examen.models import RattachementCentreExamen, ExamCenter
from django_apogee.models import Adresse, InsAdmEtp
class RattachementCentreExamenSerializer(serializers.ModelSerializer):
    """Serializes RattachementCentreExamen rows, adding the centre's label."""

    # Read-only field resolved by get_centre_label() below.
    centre_label = serializers.SerializerMethodField()

    class Meta:
        model = RattachementCentreExamen

    def get_centre_label(self, obj):
        # Human-readable label of the exam centre this rattachement points to.
        return obj.centre.label
class ExamCenterSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing all ExamCenter fields."""

    class Meta:
        model = ExamCenter
## SPECIFIC AU DUCK_EXAMEN
class DuckExamenSerializer(serializers.Serializer):
    """Aggregates an inscription with its student, address and exam-centre
    rattachements into one payload."""

    inscription = serializers.SerializerMethodField()
    individu = serializers.SerializerMethodField()
    adresse = serializers.SerializerMethodField()
    rattachements = serializers.SerializerMethodField()

    def get_inscription(self, obj):
        """Serialized initial inscription record."""
        return InsAdmEtpInitialSerializer(obj).data

    def get_individu(self, obj):
        """Serialized student (individu) attached to the inscription."""
        return IndividuSerializer(obj.cod_ind, many=False).data

    def get_adresse(self, obj):
        """Best-effort string form of the student's address, or None.

        Tries the year-scoped lookup first, then the plain one.  The lookups
        are deliberately best-effort, but we catch Exception instead of the
        original bare except so KeyboardInterrupt/SystemExit still propagate.
        """
        try:
            return str(Adresse.objects.get(cod_anu_ina=obj.cod_ind))
        except Exception:
            try:
                return str(Adresse.objects.get(cod_ind=obj.cod_ind))
            except Exception:
                return None

    def get_rattachements(self, obj):
        """Serialized exam-centre rattachements for the matching InsAdmEtp,
        or [] when no such inscription exists."""
        def make_insadmetp_key_from_insadmetpinitial(inscription):
            # InsAdmEtp primary keys are pipe-joined composite strings.
            return ("{cod_anu}|{cod_ind}|{cod_etp}"
                    "|{cod_vrs_vet}|{num_occ_iae}".format(cod_anu=inscription.cod_anu,
                                                          cod_ind=inscription.cod_ind.cod_ind,
                                                          cod_etp=inscription.cod_etp,
                                                          cod_vrs_vet=inscription.cod_vrs_vet,
                                                          num_occ_iae=inscription.num_occ_iae))

        inscription_key = make_insadmetp_key_from_insadmetpinitial(obj)
        try:
            insadmetp = InsAdmEtp.objects.get(pk=inscription_key)
        except django.core.exceptions.ObjectDoesNotExist:
            return []
        return RattachementCentreExamenSerializer(
            RattachementCentreExamen.objects.filter(inscription=insadmetp),
            many=True).data
| {
"repo_name": "iedparis8/duck_examen",
"path": "serializers.py",
"copies": "1",
"size": "2427",
"license": "mit",
"hash": -7852563082680614000,
"line_mean": 36.3384615385,
"line_max": 113,
"alpha_frac": 0.6258755665,
"autogenerated": false,
"ratio": 3.7395993836671804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.486547495016718,
"avg_score": null,
"num_lines": null
} |
from pr_services import exceptions
from pr_services.rpc.service import ServiceManagers
from pyamf.remoting.gateway.django import DjangoGateway
import facade
## Exposes methods to amf
## Exposes manager service methods to AMF clients.
class amf_svc:
    def __init__(self):
        self._svc_managers = ServiceManagers(facade.managers)

    def gateway(self):
        """Build a DjangoGateway serving every exposed manager method."""
        return DjangoGateway(self.get_methods(), expose_request=False)

    ## Map 'Manager.method' names to bound methods for the gateway.
    def get_methods(self):
        gateway_methods = {}
        for manager_name in self._svc_managers.exposed_managers:
            manager = self._svc_managers.get_manager_class(manager_name)()
            for method_name in manager._get_method_list():
                key = '%s.%s' % (manager_name, method_name)
                gateway_methods[key] = getattr(manager, method_name)
        return gateway_methods
## Module-level gateway instance; Django routes AMF requests to this view.
gateway = amf_svc().gateway()

# vim:tabstop=4 shiftwidth=4 expandtab
| {
"repo_name": "AmericanResearchInstitute/poweru-server",
"path": "pr_services/rpc/amf.py",
"copies": "1",
"size": "1058",
"license": "bsd-3-clause",
"hash": -7050463171385874000,
"line_mean": 32.0625,
"line_max": 77,
"alpha_frac": 0.6786389414,
"autogenerated": false,
"ratio": 3.712280701754386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9851402085316248,
"avg_score": 0.007903511567627579,
"num_lines": 32
} |
# AMG8833 Overlay Demo
#
# This example shows off how to overlay a heatmap onto your OpenMV Cam's
# live video output from the main camera.
import sensor, image, time, fir, lcd
ALT_OVERLAY = False # Set to True to allocate a second ir image.

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA2)
sensor.skip_frames(time = 2000)

# Initialize the thermal sensor
fir.init(type=fir.FIR_AMG8833)

# Init the lcd.
lcd.init()

# Allocate another frame buffer for smoother video.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)

# FPS clock
clock = time.clock()

# Main loop: overlay the 8x8 thermal readings on each camera frame and show
# the result on the LCD. Runs forever.
while (True):
    clock.tick()

    # Capture an image
    img = sensor.snapshot()

    # Capture FIR data
    # ta: Ambient temperature
    # ir: Object temperatures (IR array)
    # to_min: Minimum object temperature
    # to_max: Maximum object temperature
    ta, ir, to_min, to_max = fir.read_ir()

    if not ALT_OVERLAY:
        # Scale the image and blend it with the framebuffer
        fir.draw_ir(img, ir)
    else:
        # Create a secondary image and then blend into the frame buffer.
        extra_fb.clear()
        fir.draw_ir(extra_fb, ir, alpha=256)
        img.blend(extra_fb, alpha=128)

    # Draw ambient, min and max temperatures.
    img.draw_string(8, 0, "Ta: %0.2f C" % ta, color = (255, 0, 0), mono_space = False)
    img.draw_string(8, 8, "To min: %0.2f C" % to_min, color = (255, 0, 0), mono_space = False)
    img.draw_string(8, 16, "To max: %0.2f C"% to_max, color = (255, 0, 0), mono_space = False)

    lcd.display(img)

    # Force high quality streaming...
    img.compress(quality=90)

    # Print FPS.
    print(clock.fps())
| {
"repo_name": "iabdalkader/openmv",
"path": "scripts/examples/12-Thermopile-Shield/AMG8833_overlay_lcd.py",
"copies": "1",
"size": "1696",
"license": "mit",
"hash": -2269184966376177000,
"line_mean": 27.7457627119,
"line_max": 94,
"alpha_frac": 0.6544811321,
"autogenerated": false,
"ratio": 3.00709219858156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.416157333068156,
"avg_score": null,
"num_lines": null
} |
# AMG8833 Overlay Demo
#
# This example shows off how to overlay a heatmap onto your OpenMV Cam's
# live video output from the main camera.
import sensor, image, time, fir
ALT_OVERLAY = False # Set to True to allocate a second ir image.

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)

# Initialize the thermal sensor
fir.init(type=fir.FIR_AMG8833)

# Allocate another frame buffer for smoother video.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)

# FPS clock
clock = time.clock()

# Main loop: overlay the 8x8 thermal readings on each camera frame and stream
# the result to the IDE. Runs forever.
while (True):
    clock.tick()

    # Capture an image
    img = sensor.snapshot()

    # Capture FIR data
    # ta: Ambient temperature
    # ir: Object temperatures (IR array)
    # to_min: Minimum object temperature
    # to_max: Maximum object temperature
    ta, ir, to_min, to_max = fir.read_ir()

    if not ALT_OVERLAY:
        # Scale the image and blend it with the framebuffer
        fir.draw_ir(img, ir)
    else:
        # Create a secondary image and then blend into the frame buffer.
        extra_fb.clear()
        fir.draw_ir(extra_fb, ir, alpha=256)
        img.blend(extra_fb, alpha=128)

    # Draw ambient, min and max temperatures.
    img.draw_string(8, 0, "Ta: %0.2f C" % ta, color = (255, 0, 0), mono_space = False)
    img.draw_string(8, 8, "To min: %0.2f C" % to_min, color = (255, 0, 0), mono_space = False)
    img.draw_string(8, 16, "To max: %0.2f C"% to_max, color = (255, 0, 0), mono_space = False)

    # Force high quality streaming...
    img.compress(quality=90)

    # Print FPS.
    print(clock.fps())
| {
"repo_name": "iabdalkader/openmv",
"path": "scripts/examples/12-Thermopile-Shield/AMG8833_overlay.py",
"copies": "1",
"size": "1641",
"license": "mit",
"hash": 3960024774352406500,
"line_mean": 28.8363636364,
"line_max": 94,
"alpha_frac": 0.6556977453,
"autogenerated": false,
"ratio": 3.0445269016697587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9173430007656214,
"avg_score": 0.0053589278627088665,
"num_lines": 55
} |
"Amharic config with language-specific information."
from pynini import *
from pynini.lib import byte
from config import utils
# The Ethiopic (fidel) graphemes accepted for Amharic, plus apostrophe and
# hyphen; one row per consonant series, ordered by vowel order.
GRAPHEMES = union("'", "-",
                  "ሀ", "ሁ", "ሂ", "ሃ", "ሄ", "ህ", "ሆ",
                  "ለ", "ሉ", "ሊ", "ላ", "ሌ", "ል", "ሎ", "ሏ",
                  "ሐ", "ሑ", "ሒ", "ሓ", "ሔ", "ሕ", "ሖ", "ሗ",
                  "መ", "ሙ", "ሚ", "ማ", "ሜ", "ም", "ሞ", "ሟ",
                  "ሠ", "ሡ", "ሢ", "ሣ", "ሤ", "ሥ", "ሦ", "ሧ",
                  "ረ", "ሩ", "ሪ", "ራ", "ሬ", "ር", "ሮ", "ሯ",
                  "ሰ", "ሱ", "ሲ", "ሳ", "ሴ", "ስ", "ሶ", "ሷ",
                  "ሸ", "ሹ", "ሺ", "ሻ", "ሼ", "ሽ", "ሾ", "ሿ",
                  "ቀ", "ቁ", "ቂ", "ቃ", "ቄ", "ቅ",
                  "ቆ", "ቈ", "ቊ", "ቋ", "ቌ", "ቍ",
                  "በ", "ቡ", "ቢ", "ባ", "ቤ", "ብ", "ቦ", "ቧ",
                  "ቨ", "ቩ", "ቪ", "ቫ", "ቬ", "ቭ", "ቮ", "ቯ",
                  "ተ", "ቱ", "ቲ", "ታ", "ቴ", "ት", "ቶ", "ቷ",
                  "ቸ", "ቹ", "ቺ", "ቻ", "ቼ", "ች", "ቾ", "ቿ",
                  "ኀ", "ኁ", "ኂ", "ኃ", "ኄ", "ኅ",
                  "ኆ", "ኈ", "ኊ", "ኋ", "ኌ", "ኍ",
                  "ነ", "ኑ", "ኒ", "ና", "ኔ", "ን", "ኖ", "ኗ",
                  "ኘ", "ኙ", "ኚ", "ኛ", "ኜ", "ኝ", "ኞ", "ኟ",
                  "አ", "ኡ", "ኢ", "ኣ", "ኤ", "እ", "ኦ", "ኧ",
                  "ከ", "ኩ", "ኪ", "ካ", "ኬ", "ክ",
                  "ኮ", "ኰ", "ኲ", "ኳ", "ኴ", "ኵ",
                  "ኸ", "ኹ", "ኺ", "ኻ", "ኼ", "ኽ",
                  "ኾ", "ዀ", "ዂ", "ዃ", "ዄ", "ዅ",
                  "ወ", "ዉ", "ዊ", "ዋ", "ዌ", "ው", "ዎ",
                  "ዐ", "ዑ", "ዒ", "ዓ", "ዔ", "ዕ", "ዖ",
                  "ዘ", "ዙ", "ዚ", "ዛ", "ዜ", "ዝ", "ዞ", "ዟ",
                  "ዠ", "ዡ", "ዢ", "ዣ", "ዤ", "ዥ", "ዦ", "ዧ",
                  "የ", "ዩ", "ዪ", "ያ", "ዬ", "ይ", "ዮ",
                  "ደ", "ዱ", "ዲ", "ዳ", "ዴ", "ድ", "ዶ", "ዷ",
                  "ጀ", "ጁ", "ጂ", "ጃ", "ጄ", "ጅ", "ጆ", "ጇ",
                  "ገ", "ጉ", "ጊ", "ጋ", "ጌ", "ግ",
                  "ጎ", "ጐ", "ጒ", "ጓ", "ጔ", "ጕ",
                  "ጠ", "ጡ", "ጢ", "ጣ", "ጤ", "ጥ", "ጦ", "ጧ",
                  "ጨ", "ጩ", "ጪ", "ጫ", "ጬ", "ጭ", "ጮ", "ጯ",
                  "ጰ", "ጱ", "ጲ", "ጳ", "ጴ", "ጵ", "ጶ", "ጷ",
                  "ጸ", "ጹ", "ጺ", "ጻ", "ጼ", "ጽ", "ጾ", "ጿ",
                  "ፀ", "ፁ", "ፂ", "ፃ", "ፄ", "ፅ", "ፆ",
                  "ፈ", "ፉ", "ፊ", "ፋ", "ፌ", "ፍ", "ፎ", "ፏ",
                  "ፐ", "ፑ", "ፒ", "ፓ", "ፔ", "ፕ", "ፖ", "ፗ")
# Token-initial punctuation: just the shared defaults.
INITIAL_PUNCTUATION = utils.DEFAULT_INITIAL_PUNCTUATION

# Token-final punctuation: shared defaults plus the Ge'ez (Ethiopic) marks.
FINAL_PUNCTUATION = union(utils.DEFAULT_FINAL_PUNCTUATION,
                          utils.GEEZ_FINAL_PUNCTUATION)

# Numerals: ASCII digits plus the Ge'ez numerals.
NUMERALS = union(byte.DIGIT,
                 utils.GEEZ_NUMERALS)
# Amharic "over-differentiates" H graphemes, emphatic S graphemes, and glottal
# stop graphemes, which were all inherited from Ge'ez. Surveys suggest that
# Amharic speakers prefer one form over the others. These rules convert the
# dispreferred series graphemes to the one preferred series, when available.
# The surveys about grapheme preference come from the paper here:
# https://www.researchgate.net/profile/Fekede_Menuta/publication/312093656_OVER-DIFFERENTIATION_3_Over-differentiation_in_Amharic_Orthography_and_Attitude_towards_Reform/links/586f5d8408ae329d6215fb85/OVER-DIFFERENTIATION-3-Over-differentiation-in-Amharic-Orthography-and-Attitude-towards-Reform.pdf

# Map the dispreferred H-series graphemes onto the preferred ሀ-series.
# Commented-out entries have no counterpart in the preferred series.
# NOTE(review): "ሓ", "ኃ" and "ኻ" map to "ሂ" here, while the parallel entries
# map the 4th-order forms to the matching order — confirm these are
# intentional and not typos for "ሃ".
REDUCE_H = string_map((("ሐ", "ሀ"),
                       ("ሑ", "ሁ"),
                       ("ሒ", "ሂ"),
                       ("ሓ", "ሂ"),
                       ("ሔ", "ሄ"),
                       ("ሕ", "ህ"),
                       ("ሖ", "ሆ"),
                       #("ሗ", "")
                       ("ኀ", "ሀ"),
                       ("ኁ", "ሁ"),
                       ("ኂ", "ሂ"),
                       ("ኃ", "ሂ"),
                       ("ኄ", "ሄ"),
                       ("ኅ", "ህ"),
                       ("ኆ", "ሆ"),
                       #("ኈ", ""),
                       #("ኊ", ""),
                       #("ኋ", ""),
                       #("ኌ", ""),
                       #("ኍ", ""),
                       ("ኸ", "ሀ"),
                       ("ኹ", "ሁ"),
                       ("ኺ", "ሂ"),
                       ("ኻ", "ሂ"),
                       ("ኼ", "ሄ"),
                       ("ኽ", "ህ"),
                       ("ኾ", "ሆ")
                       #("ዀ", ""),
                       #("ዂ", ""),
                       #("ዃ", ""),
                       #("ዄ", ""),
                       #("ዅ", "")
                       ))

# Map the emphatic S series (ጸ...) onto the preferred ፀ-series.
REDUCE_S = string_map((("ጸ", "ፀ"),
                       ("ጹ", "ፁ"),
                       ("ጺ", "ፂ"),
                       ("ጻ", "ፃ"),
                       ("ጼ", "ፄ"),
                       ("ጽ", "ፅ"),
                       ("ጾ", "ፆ")
                       #("ጿ", "")
                       ))

# Map the glottal-stop ዐ-series onto the preferred አ-series.
# NOTE(review): "ዑ" maps to "አ" rather than the matching-order "ኡ" —
# confirm this is intentional and not a typo.
REDUCE_A = string_map((("ዐ", "አ"),
                       ("ዑ", "አ"),
                       ("ዒ", "ኢ"),
                       ("ዓ", "ኣ"),
                       ("ዔ", "ኤ"),
                       ("ዕ", "እ"),
                       ("ዖ", "ኦ")
                       ))

# Apply all three reductions anywhere in the input (context-free rewrite).
REDUCE_OVERDIFFERENTIATION = cdrewrite(
    union(REDUCE_H, REDUCE_S, REDUCE_A),
    "",
    "",
    byte.BYTES.closure())

# Hook consumed by the shared pipeline for language-specific preprocessing.
LANGUAGE_SPECIFIC_PREPROCESSING = REDUCE_OVERDIFFERENTIATION
# These files are not in the repo. You will need to change these paths to match
# where you place the data files.
UD = "language_data/am/UD_Amharic-ATT/am_att-ud-test.conllu"  # Universal Dependencies treebank
UM = ""  # no UniMorph data configured for Amharic
AC = "language_data/am/ac/am-wordbigrams.txt"
OSCAR = "language_data/am/oscar/am.txt"
OSCAR_DEDUP = "language_data/am/oscar/am_dedup.txt"
LCC = "language_data/am/lcc/amh_wikipedia_2016_30K/amh_wikipedia_2016_30K-sentences.txt"  # presumably Leipzig Corpora Collection
| {
"repo_name": "googleinterns/text-norm-for-low-resource-languages",
"path": "config/am.py",
"copies": "1",
"size": "6266",
"license": "apache-2.0",
"hash": 4836265200953778000,
"line_mean": 40.8787878788,
"line_max": 299,
"alpha_frac": 0.3058972504,
"autogenerated": false,
"ratio": 2.1343629343629344,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.29402601847629345,
"avg_score": null,
"num_lines": null
} |
"""Amicable numbers
Let d(n) be defined as the sum of proper divisors of n (numbers less
than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are
1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110;
therefore d(220) = 284. The proper divisors of 284 are
1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Answer: 31626
"""
import math
def solve():
    """Return the sum of all amicable numbers below 10000 (Project Euler 21)."""
    LIMIT = 10000
    # cache[i - 1] holds d(i) once computed, so each candidate pair is
    # examined exactly once.
    cache = [0] * LIMIT

    def proper_divisor_sum(value):
        """Compute d(value), the sum of proper divisors, caching the result."""
        total = 1
        root = int(math.sqrt(value))
        for candidate in range(2, root + 1):
            if value % candidate == 0:
                total += candidate
                partner = value // candidate
                # Don't double-count the square root of perfect squares.
                if candidate != partner:
                    total += partner
        cache[value - 1] = total
        return total

    amicable_total = 0
    for number in range(2, LIMIT + 1):
        divsum = proper_divisor_sum(number)
        # Only look back at smaller partners, whose d() is already cached.
        if divsum < number and cache[divsum - 1] == number:
            amicable_total += number
            amicable_total += divsum
    return amicable_total
if __name__ == '__main__':
    # Print the answer (31626, per the module docstring) when run as a script.
    print(solve())
| {
"repo_name": "yhlam/project-euler",
"path": "project_euler/p021.py",
"copies": "1",
"size": "1233",
"license": "mit",
"hash": 2947249521326490600,
"line_mean": 23.66,
"line_max": 77,
"alpha_frac": 0.5296025953,
"autogenerated": false,
"ratio": 3.236220472440945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9265823067740945,
"avg_score": 0,
"num_lines": 50
} |
# Amicable numbers
# Problem 21
# Let d(n) be defined as the sum of proper divisors of n (numbers less than n
# which divide evenly into n).
# If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair
# and each of a and b are called amicable numbers.
#
# For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110;
# therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
#
# Evaluate the sum of all the amicable numbers under 10000.
#
# Fairly simple to find all proper divisors of a number by factorizing the number into primes,
# then generating all combinations of such primes and summing them.
#
# Much faster than naive solutions of integer factorization.
from Problem10 import optimized_sieve
MAX_NUM = 10000
def factorize(num, prime_sieve):
    """Return the prime factorization of num as {prime: exponent}.

    prime_sieve is an increasing iterable of primes; it must contain all
    primes up to sqrt(num) for the factorization to be complete.  Any
    remainder left when the sieve is exhausted is recorded as a (presumed
    prime) factor instead of being silently dropped.
    """
    factors = {}
    for prime in prime_sieve:
        exponent_count = 0
        while num % prime == 0:
            exponent_count += 1
            num //= prime
        if exponent_count > 0:
            factors[prime] = exponent_count
        if num <= 1:
            return factors
        if prime * prime > num:
            # No divisor <= sqrt(num) remains, so the remainder is prime.
            factors[num] = 1
            return factors
    # Sieve exhausted: the original fell off the end and returned None here,
    # losing the factorization entirely.  Record the remainder instead.
    if num > 1:
        factors[num] = 1
    return factors
def sum_divisors(num, prime_sieve):
    """Return the sum of the proper divisors of num (num itself excluded)."""
    prime_factorization = factorize(num, prime_sieve)
    divisors = [1]
    # Grow the divisor list one prime at a time: multiply every divisor
    # found so far by each power of the current prime.  The comprehension
    # is fully evaluated before the +=, so it iterates the old list only.
    for prime, max_exponent in prime_factorization.items():
        divisors += [
            (prime ** exponent) * divisor
            for exponent in range(1, max_exponent + 1)
            for divisor in divisors
        ]
    return sum(divisors) - num
def run():
    """Find, print and sum all amicable pairs below MAX_NUM."""
    prime_sieve = list(optimized_sieve(MAX_NUM))
    sum_of_factors_dictionary = {}
    amicable_sum = 0
    # Precompute d(i) for every i so each value is factorized only once.
    for i in range(1, MAX_NUM):
        sum_of_factors_dictionary[i] = sum_divisors(i, prime_sieve)
    # Use dictionary to avoid computing sum of factors twice
    for num in sum_of_factors_dictionary.keys():
        if sum_of_factors_dictionary[num] in sum_of_factors_dictionary:
            # Check if sum of factors of the sum of factors equals the number, and that the sum of factors
            # does not equal the number itself.
            if sum_of_factors_dictionary[sum_of_factors_dictionary[num]] == num and \
                    num != sum_of_factors_dictionary[num]:
                # Each pair is visited twice; print it once, from its smaller member.
                if sum_of_factors_dictionary[num] > num:
                    print("Found amicable pair {0}, {1}".format(num, sum_of_factors_dictionary[num]))
                amicable_sum += num
    print("The sum of all amicable numbers under {0} is {1}".format(MAX_NUM, amicable_sum))
# Sample Output:
# Found amicable pair 220, 284
# Found amicable pair 1184, 1210
# Found amicable pair 2620, 2924
# Found amicable pair 5020, 5564
# Found amicable pair 6232, 6368
# The sum of all amicable numbers under 10000 is 31626
#
# Total running time for Problem21.py is 0.08649122508988638 seconds
| {
"repo_name": "YangLuGitHub/Euler",
"path": "src/scripts/Problem21.py",
"copies": "1",
"size": "2988",
"license": "mit",
"hash": -5981601729641464000,
"line_mean": 36.325,
"line_max": 106,
"alpha_frac": 0.6430006698,
"autogenerated": false,
"ratio": 3.460023174971031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4603023844771031,
"avg_score": null,
"num_lines": null
} |
# amicable pair
#
# 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110 = d(220) = 284.
# 1, 2, 4, 71, 142; so d(284) = 220.
#
# amicable pair
# (220, 284)
#
# takes some number n > 0 and finds all the amicable pairs below that number n
#
# 0 < n
# n = 4
#
# (220, 284),
# (1184, 1210),
# (2620, 2924),
# (5020, 5564),
# (6232, 6368)
#
dividerTable = {} # {6: [1, 2, 3]}

def getDividers(n):
    """Return the proper divisors of n (all divisors < n), memoized in
    dividerTable.

    Uses range() and floor division so the code behaves identically on
    Python 2 and also runs on Python 3 (the original xrange / true-division
    were Py2-only).
    """
    if n in dividerTable:
        return dividerTable[n]
    newDividers = []
    # Proper divisors other than n itself never exceed n // 2.
    for i in range(1, n // 2 + 1):
        if n % i == 0:
            newDividers.append(i)
    dividerTable[n] = newDividers
    return newDividers
sumTable = {} # {220: 284, 284: 220}

def sumOfDividers(num):
    """Return the sum of proper divisors of num, memoized in sumTable."""
    if num in sumTable:
        return sumTable[num]
    # getDividers() already consults and fills dividerTable itself, so a
    # single call covers both the cached and uncached cases (the original
    # duplicated that cache check here).
    summ = sum(getDividers(num))
    sumTable[num] = summ
    return summ
def getPairs(num):
    """Return all amicable pairs (a, b) with a in [2, num).

    A pair qualifies when d(a) == b, d(b) == a and a != b; each pair is
    reported once.  range() replaces the Py2-only xrange, with identical
    behavior on Python 2.
    """
    result = []
    for i in range(2, num):
        summ = sumOfDividers(i)
        if i == sumOfDividers(summ) and (not i == summ):
            # Each pair is found from both ends; record it only once.
            if not (((i, summ) in result) or ((summ, i) in result)):
                result.append((i, summ))
    return result
print getPairs(8000) | {
"repo_name": "honghaoz/CrackingTheCodingInterview",
"path": "My Questions/AmicablePair.py",
"copies": "1",
"size": "1164",
"license": "mit",
"hash": 7745386416717603000,
"line_mean": 19.8035714286,
"line_max": 78,
"alpha_frac": 0.5764604811,
"autogenerated": false,
"ratio": 2.4149377593360994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34913982404360994,
"avg_score": null,
"num_lines": null
} |
"""AMICI HDF5 I/O tests"""
import os
import random
import amici
import pytest
def _modify_solver_attrs(solver):
    """Flip every settable solver option to a non-default value.

    Iterates the solver's set*/get* pairs and writes back a value that is
    guaranteed to differ from the current one.
    """
    for attr in dir(solver):
        if not attr.startswith('set'):
            continue

        getter = attr.replace('set', 'get')
        current = getattr(solver, getter)()
        if isinstance(current, bool):
            new_value = not current
        elif attr == 'setStabilityLimitFlag':
            new_value = 0
        elif attr == 'setReturnDataReportingMode':
            new_value = amici.RDataReporting.likelihood
        elif isinstance(current, int):
            new_value = current + 1
        else:
            new_value = current + random.random()

        getattr(solver, attr)(new_value)
@pytest.mark.skipif(not amici.hdf5_enabled,
                    reason='AMICI was compiled without HDF5')
def test_solver_hdf5_roundtrip(sbml_example_presimulation_module):
    """Verify solver settings survive an HDF5 write/read round trip."""
    model = sbml_example_presimulation_module.getModel()
    solver = model.getSolver()
    # Push every option away from its default so the round trip is meaningful.
    _modify_solver_attrs(solver)

    hdf5file = 'solverSettings.hdf5'
    amici.writeSolverSettingsToHDF5(solver, hdf5file, 'ssettings')

    # Fresh solver with default settings for comparison.
    new_solver = model.getSolver()

    # check that we changed everything
    for attr in dir(solver):
        if not attr.startswith('set'):
            continue
        assert getattr(solver, attr.replace('set', 'get'))() \
            != getattr(new_solver, attr.replace('set', 'get'))(), attr

    amici.readSolverSettingsFromHDF5(hdf5file, new_solver, 'ssettings')

    # check that reading in settings worked (approx: float round-tripping)
    for attr in dir(solver):
        if not attr.startswith('set'):
            continue
        assert getattr(solver, attr.replace('set', 'get'))() \
            == pytest.approx(
                getattr(new_solver, attr.replace('set', 'get'))()), attr

    os.remove(hdf5file)
| {
"repo_name": "FFroehlich/AMICI",
"path": "python/tests/test_hdf5.py",
"copies": "3",
"size": "1832",
"license": "bsd-2-clause",
"hash": 8905633227255195000,
"line_mean": 26.7575757576,
"line_max": 72,
"alpha_frac": 0.6102620087,
"autogenerated": false,
"ratio": 3.816666666666667,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5926928675366667,
"avg_score": null,
"num_lines": null
} |
"""AMICI model package setup"""
import os
import sys
from typing import List
from amici import amici_path, hdf5_enabled, compiledWithOpenMP
from amici.custom_commands import (set_compiler_specific_extension_options,
compile_parallel)
from amici.setuptools import (get_blas_config,
get_hdf5_config,
add_coverage_flags_if_required,
add_debug_flags_if_required,
add_openmp_flags,
)
from setuptools import find_packages, setup, Extension
from setuptools.command.build_ext import build_ext
class ModelBuildExt(build_ext):
    """Custom build_ext: compiler-specific flags, parallel compilation and a
    working SWIG lookup."""

    def build_extension(self, ext):
        # Work-around for compiler-specific build options
        set_compiler_specific_extension_options(
            ext, self.compiler.compiler_type)

        # Monkey-patch compiler instance method for parallel compilation
        # except for Windows, where this seems to be incompatible with
        # providing swig files. Not investigated further...
        if sys.platform != 'win32':
            import distutils.ccompiler
            # __get__ binds compile_parallel to this compiler instance only.
            self.compiler.compile = compile_parallel.__get__(
                self.compiler, distutils.ccompiler.CCompiler)

        build_ext.build_extension(self, ext)

    def find_swig(self) -> str:
        """Find SWIG executable

        Overrides horribly outdated distutils function."""

        from amici.swig import find_swig
        return find_swig()
def get_model_sources() -> List[str]:
    """Return the model package's C++ source files, excluding main.cpp."""
    import glob
    # glob yields each filename at most once, so filtering is equivalent
    # to the remove-if-present approach.
    return [source for source in glob.glob('*.cpp') if source != 'main.cpp']
def get_amici_libs() -> List[str]:
    """Names of the AMICI base libraries the model extension links against."""
    core_libs = ['amici', 'sundials', 'suitesparse']
    return core_libs
def get_extension() -> Extension:
    """Get distutils extension object for this AMICI model package.

    Assembles compiler/linker flags, libraries, include and library
    directories from the AMICI base installation, BLAS/HDF5 configuration
    and the AMICI_CXXFLAGS / AMICI_LDFLAGS environment variables.
    NOTE: several helpers below mutate cxx_flags/linker_flags in place,
    so the call order here determines the final flag order.
    """
    cxx_flags = []
    linker_flags = []
    if compiledWithOpenMP():
        # Only build model with OpenMP support if AMICI base packages was built
        # that way
        add_openmp_flags(cxx_flags=cxx_flags, ldflags=linker_flags)
    add_coverage_flags_if_required(cxx_flags, linker_flags)
    add_debug_flags_if_required(cxx_flags, linker_flags)
    h5pkgcfg = get_hdf5_config()
    blaspkgcfg = get_blas_config()
    linker_flags.extend(blaspkgcfg.get('extra_link_args', []))
    libraries = [*get_amici_libs(), *blaspkgcfg['libraries']]
    if hdf5_enabled:
        # HDF5 high-level C++ bindings plus the core libraries
        libraries.extend(['hdf5_hl_cpp', 'hdf5_hl', 'hdf5_cpp', 'hdf5'])
    # SWIG interface file first, then the generated model sources
    sources = [os.path.join("swig", "TPL_MODELNAME.i"), *get_model_sources()]
    # compiler and linker flags for libamici
    if 'AMICI_CXXFLAGS' in os.environ:
        cxx_flags.extend(os.environ['AMICI_CXXFLAGS'].split(' '))
    if 'AMICI_LDFLAGS' in os.environ:
        linker_flags.extend(os.environ['AMICI_LDFLAGS'].split(' '))
    ext_include_dirs = [
        os.getcwd(),
        os.path.join(amici_path, 'include'),
        os.path.join(amici_path, "ThirdParty", "gsl"),
        os.path.join(amici_path, "ThirdParty", "sundials", "include"),
        os.path.join(amici_path, "ThirdParty", "SuiteSparse", "include"),
        *h5pkgcfg['include_dirs'],
        *blaspkgcfg['include_dirs']
    ]
    ext_library_dirs = [
        *h5pkgcfg['library_dirs'],
        *blaspkgcfg['library_dirs'],
        os.path.join(amici_path, 'libs')
    ]
    # Build shared object
    # ('TPL_MODELNAME' placeholders are substituted during model code export)
    ext = Extension(
        'TPL_MODELNAME._TPL_MODELNAME',
        sources=sources,
        include_dirs=ext_include_dirs,
        libraries=libraries,
        library_dirs=ext_library_dirs,
        swig_opts=[
            '-c++', '-modern', '-outdir', 'TPL_MODELNAME',
            '-I%s' % os.path.join(amici_path, 'swig'),
            '-I%s' % os.path.join(amici_path, 'include'),
        ],
        extra_compile_args=cxx_flags,
        extra_link_args=linker_flags
    )
    # see `set_compiler_specific_extension_options`
    # (per-compiler C++14 flags, applied in ModelBuildExt.build_extension)
    ext.extra_compile_args_mingw32 = ['-std=c++14']
    ext.extra_compile_args_unix = ['-std=c++14']
    ext.extra_compile_args_msvc = ['/std:c++14']
    return ext
# Change working directory to setup.py location
# (get_model_sources() globs '*.cpp' relative to the CWD, so this must
# happen before the extension object is built)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
MODEL_EXT = get_extension()
# Trove classifiers for the generated model package
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Operating System :: POSIX :: Linux',
    'Operating System :: MacOS :: MacOS X',
    'Programming Language :: Python',
    'Programming Language :: C++',
    'Topic :: Scientific/Engineering :: Bio-Informatics',
]
CMDCLASS = {
    # For parallel compilation and custom swig finder
    'build_ext': ModelBuildExt,
}
# Install
# ('TPL_*' placeholders are substituted when AMICI exports the model)
setup(
    name='TPL_MODELNAME',
    cmdclass=CMDCLASS,
    version='TPL_PACKAGE_VERSION',
    description='AMICI-generated module for model TPL_MODELNAME',
    url='https://github.com/AMICI-dev/AMICI',
    author='model-author-todo',
    author_email='model-author-todo',
    # license = 'BSD',
    ext_modules=[MODEL_EXT],
    packages=find_packages(),
    install_requires=['amici==TPL_AMICI_VERSION'],
    extras_require={'wurlitzer': ['wurlitzer']},
    python_requires='>=3.7',
    package_data={},
    zip_safe=False,
    include_package_data=True,
    classifiers=CLASSIFIERS,
)
| {
"repo_name": "AMICI-developer/AMICI",
"path": "python/amici/setup.template.py",
"copies": "2",
"size": "5471",
"license": "bsd-2-clause",
"hash": 6584032925617524000,
"line_mean": 30.0852272727,
"line_max": 79,
"alpha_frac": 0.6207274721,
"autogenerated": false,
"ratio": 3.5969756738987506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00007783312577833126,
"num_lines": 176
} |
"""A microservice acting as a source of info on all SpaceX flights to date. This content is not endorsed by SpaceX
and should not be treated as official information. This is a personal project only.
See:
https://github.com/KieranWynn/spacex-manifest
"""
from setuptools import setup
setup(
    name='spacex_manifest',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.0',
    # Fix: description/long_description were copy-pasted from an unrelated
    # quaternion library; they now match the module docstring above.
    description='A microservice acting as a source of info on all SpaceX '
                'flights to date.',
    long_description='A microservice acting as a source of info on all '
                     'SpaceX flights to date. This content is not endorsed '
                     'by SpaceX and should not be treated as official '
                     'information. This is a personal project only.',
    # The project's main homepage.
    #download_url='https://github.com/KieranWynn/spacex-manifest/tarball/0.1.0',
    url='https://github.com/KieranWynn/spacex-manifest',
    # Author details
    author='Kieran Wynn',
    author_email='KieranWynn@users.noreply.github.com',
    # Choose your license
    license='MIT',
    # What does your project relate to?
    keywords=['spacex', 'physics'],
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=['spacex_manifest'],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        'Flask-API',
        'sqlalchemy',
        'psycopg2'
    ],
    # Set Pytest as a requirement for running tests
    tests_require=[
        'pytest',
        'requests',
    ],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={},
)
| {
"repo_name": "KieranWynn/spacex-manifest",
"path": "setup.py",
"copies": "1",
"size": "2834",
"license": "mit",
"hash": -3773636072257485000,
"line_mean": 35.3333333333,
"line_max": 130,
"alpha_frac": 0.6965419901,
"autogenerated": false,
"ratio": 3.9525801952580197,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008149186892559777,
"num_lines": 78
} |
'''A microservice for running lights on a launchpad mini midi device.'''
#The one's for you and me, living out our dream
#And we're right where we should be.
#With my arms open wide, I look to the sky
#And all I want to see is a sky full of lighters. A sky full of lighteerssss
#def lighter is a vague idea for LED based displays.
# you create a lighter server, just like how you create a league server.
# And then you send light messages to it from other tools to make it light up
# I think my first idea is just gonna be to make something animate to make
# a visually interesting thing to stream with.
#But long term I want to play with the idea of having a "calling board"
# You're not gonna use this class so don't worry about it mang
import mido
#style of display for questions on twitch. :) :) :)
import numpy
import json
#You don't need anything reasonable
# Module-level 8x8 LED state buffer; not referenced by the functions in
# this file (they build their own bitmaps).
lights = numpy.zeros((8,8))
# Name of the target MIDI output port.
CONTROLLER = "Launchpad Mini"
# 8x8 font data keyed by character.
# NOTE(review): the file handle from open() is never closed.
font = json.loads(open('font8x8_basic_cleaned.json').read())
letters = font['letters']
def text_to_bitmap(text):
    """Render each character of *text* as an 8x8 bitmap frame."""
    return list(map(char_to_bitmap, text))
def char_to_bitmap(char):
    """Build the 8x8 numpy bitmap for *char* from the loaded font data."""
    bitmap = numpy.zeros((8, 8))
    for row_index, row in enumerate(letters[char]):
        for col_index, cell in enumerate(row):
            # Stored as [column, row], transposing the font's row-major data.
            bitmap[col_index, row_index] = int(cell)
    return bitmap
# Maps bitmap cell values to MIDI message builders (filled in below).
color_map = {}
def off_message(key):
    """Build the note-off message that darkens pad *key*."""
    return mido.Message('note_off', note=key)
def green_message(key):
    """Build a note-on message for pad *key* with velocity 60 (named green)."""
    return mido.Message('note_on', note=key, velocity=60)
def red_message(key):
    """Build a note-on message for pad *key* with velocity 13 (named red)."""
    return mido.Message('note_on', note=key, velocity=13)
def bright_red_message(key):
    """Build a note-on message for pad *key* with velocity 15 (named bright red)."""
    return mido.Message('note_on', note=key, velocity=15)
color_map[0] = off_message  # 0 = pad off
color_map[1] = red_message  # 1 = pad lit (red_message; original comment said green)
def coord_to_key(x, y):
    """Map an (x, y) grid coordinate to its MIDI note number (rows 16 apart)."""
    row_base = 16 * x
    return row_base + y
def messages_for_bitmap(bitmap):
    """Translate *bitmap* into the list of MIDI messages that displays it."""
    width, height = bitmap.shape
    messages = []
    for x in range(width):
        for y in range(height):
            # Pick the message builder for this cell's value (0 = off, 1 = lit).
            builder = color_map[bitmap[x, y]]
            messages.append(builder(coord_to_key(x, y)))
    return messages
def send_lights_to_output(the_lights, output):
    """Send every MIDI message needed to display *the_lights* to *output*."""
    for message in messages_for_bitmap(the_lights):
        output.send(message)
from time import sleep
def say(message, times = 1):
    """Flash *message* one character per frame on the Launchpad, *times* times.

    A trailing space is appended so the display blanks between repeats.
    Opens (and closes, via the context manager) the MIDI output port.
    """
    message = message + " "
    with mido.open_output(CONTROLLER) as output:
        print("saying \"{}\"".format(message))
        # 0.25 s per character frame
        delay = lambda: sleep(0.25)
        for i in range(0,times):
            for frame in text_to_bitmap(message):
                send_lights_to_output(frame, output)
                delay()
        delay()
def scroll(message):
    """Display *message* on the Launchpad once.

    Thin wrapper around :func:`say`, kept as the public entry point.
    """
    print("in scroll")
    print(message)
    # Fix: the repeat count was assigned here but then hard-coded as ``1``
    # in the call below; pass the local through so there is one source of
    # truth (behavior unchanged: times is 1).
    times = 1
    say(message, times)
def test_run():
    """Scroll a canned broadcast-test message (manual smoke test)."""
    scroll("This has been a test of the LED broadcast System. @Polytomous")
| {
"repo_name": "MrTomWhite/literal-dj-sona",
"path": "py/lighter.py",
"copies": "1",
"size": "3052",
"license": "mit",
"hash": 6710203671888826000,
"line_mean": 29.52,
"line_max": 117,
"alpha_frac": 0.6749672346,
"autogenerated": false,
"ratio": 3.1659751037344397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9272891541474997,
"avg_score": 0.013610159371888449,
"num_lines": 100
} |
"""A middleware class for player login checking and tracking."""
import re
import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.player_mgr.models import create_user_profile
class LoginMiddleware(object):
    """This middleware does the following checks and tracking:
    * checks if today is in the competition period
    * checks if user has completed the setup
    * tracks how many days in a row the user has come to the site.
    """
    def process_request(self, request):
        """Check the competition period and that setup is completed.

        Returns None to continue normal processing, or a redirect response.
        """
        #time_start = datetime.datetime.now()
        path = request.path
        # pass through for trivial requests
        pattern = "^/(log|site_media|favicon.ico)/"
        if re.compile(pattern).match(path):
            return None
        # load the db settings if not done yet.
        challenge_mgr.init()
        # pass through for trivial requests
        pattern = "^/(home\/restricted|admin|about|account)/"
        if re.compile(pattern).match(path):
            return None
        user = request.user
        if not user.is_authenticated():
            return None
        # user logged in
        # if user logged in and go to landing page, re-direct to home page
        if path.startswith("/landing/"):
            return HttpResponseRedirect(reverse("home_index"))
        # now the user is authenticated and going to the non-trivial pages.
        # Order matters: competition window first, then setup, then tracking.
        response = self.check_competition_period(request)
        if response is None:
            response = self.check_setup_completed(request)
        if response is None:
            response = self.track_login(request)
        #time_end = datetime.datetime.now()
        #print "%s time: %s" % ("player middleware", (time_end - time_start))
        return response
    def track_login(self, request):
        """Checks if the user is logged in and updates the tracking field.

        daily_visit_count increments on consecutive-day visits and resets
        to 1 after a gap; the profile is saved only on the first visit of
        a new day.
        """
        profile = request.user.profile
        last_visit = profile.last_visit_date
        today = datetime.date.today()
        if last_visit:
            day_diff = today - last_visit
        else:
            # No recorded visit yet: treat as a long gap so the count resets.
            day_diff = datetime.timedelta(days=30)
        if day_diff > datetime.timedelta(days=0):
            # if it is the first visit of the day
            if day_diff == datetime.timedelta(days=1):
                # consecutive day visit, increase daily login count
                profile.daily_visit_count += 1
            else:
                # gap day visit, reset the daily login count.
                profile.daily_visit_count = 1
            profile.last_visit_date = today
            profile.save()
        return None
    def check_setup_completed(self, request):
        """ Check to see if setup has been completed.

        Redirects non-staff users without a profile to the restricted page;
        recreates the profile for staff users.
        """
        user = request.user
        path = request.path
        try:
            profile = user.profile
        except User.DoesNotExist:
            if not self.is_staff(request):
                return HttpResponseRedirect(reverse("restricted"))
            else:
                # if it is admin, recreate the profile
                create_user_profile(user)
                profile = user.profile
        # We need to check if the user is going to the home page so we don't
        # get caught in a redirect loop. We do need to filter out requests
        # for CSS and other resources.
        pattern = "^/home/"
        if not profile.setup_complete and \
            not re.compile(pattern).match(path):
            return HttpResponseRedirect(reverse("home_index"))
        return None
    def check_competition_period(self, request):
        """Checks if we are still in the competition. If the user is logged in,
        they are redirected to a competition status page.

        Staff users bypass the competition-window restriction.
        """
        staff_user = self.is_staff(request)
        if not staff_user and \
            not challenge_mgr.in_competition():
            return HttpResponseRedirect(reverse("home_restricted"))
        return None
    def is_staff(self, request):
        """check if a user is staff or superuser."""
        return request.user.is_superuser or request.user.is_staff
| {
"repo_name": "csdl/makahiki",
"path": "makahiki/apps/managers/player_mgr/middleware.py",
"copies": "2",
"size": "4314",
"license": "mit",
"hash": 1473557169892802300,
"line_mean": 34.0731707317,
"line_max": 79,
"alpha_frac": 0.6133518776,
"autogenerated": false,
"ratio": 4.584484590860787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005968776279726233,
"num_lines": 123
} |
"""A middleware class for player login checking and tracking."""
import re
import datetime
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from apps.managers.challenge_mgr import challenge_mgr
class LoginMiddleware(object):
    """This middleware does the following checks and tracking:
    * checks if today is in the competition period
    * checks if user has completed the setup
    * tracks how many days in a row the user has come to the site.
    """
    def process_request(self, request):
        """Check the competition period and that setup is completed.

        Returns None to continue normal processing, or a redirect response.
        """
        path = request.path
        # pass through for trivial requests
        pattern = "^/(log|site_media|favicon.ico)/"
        if re.compile(pattern).match(path):
            return None
        # load the db settings if not done yet.
        challenge_mgr.init()
        # pass through for trivial requests
        pattern = "^/(home\/restricted|admin|about|account)/"
        if re.compile(pattern).match(path):
            return None
        user = request.user
        if not user.is_authenticated():
            return None
        # if user logged in and go to landing page, re-direct to home page
        if path.startswith("/landing/"):
            return HttpResponseRedirect(reverse("home_index"))
        # now the user is authenticated and going to the non-trivial pages.
        # Order matters: competition window first, then setup, then tracking.
        response = self.check_competition_period(request)
        if response is None:
            response = self.check_setup_completed(request)
        if response is None:
            response = self.track_login(request)
        return response
    def track_login(self, request):
        """Checks if the user is logged in and updates the tracking field.

        daily_visit_count increments on consecutive-day visits and resets
        to 1 after a gap; the profile is saved only on the first visit of
        a new day.
        """
        # Fix: the profile was previously fetched twice via get_profile();
        # fetch it once and reuse it (also matches the newer version of
        # this middleware elsewhere in the project).
        profile = request.user.get_profile()
        last_visit = profile.last_visit_date
        today = datetime.date.today()
        if last_visit:
            day_diff = today - last_visit
        else:
            # No recorded visit yet: treat as a long gap so the count resets.
            day_diff = datetime.timedelta(days=30)
        if day_diff > datetime.timedelta(days=0):
            # if it is the first visit of the day
            if day_diff == datetime.timedelta(days=1):
                # consecutive day visit, increase daily login count
                profile.daily_visit_count += 1
            else:
                # gap day visit, reset the daily login count.
                profile.daily_visit_count = 1
            profile.last_visit_date = today
            profile.save()
        return None
    def check_setup_completed(self, request):
        """ Check to see if setup has been completed."""
        user = request.user
        path = request.path
        profile = user.get_profile()
        # We need to check if the user is going to the home page so we don't
        # get caught in a redirect loop. We do need to filter out requests
        # for CSS and other resources.
        pattern = "^/home/"
        if not profile.setup_complete and \
            not re.compile(pattern).match(path):
            return HttpResponseRedirect(reverse("home_index"))
        return None
    def check_competition_period(self, request):
        """Checks if we are still in the competition. If the user is logged in,
        they are redirected to a competition status page.
        """
        staff_user = request.user.is_staff or request.session.get('staff', False)
        if not staff_user and \
            not challenge_mgr.in_competition():
            return HttpResponseRedirect(reverse("home_restricted"))
        return None
| {
"repo_name": "MakahikiKTUH/makahiki-ktuh",
"path": "makahiki/apps/managers/player_mgr/middleware.py",
"copies": "7",
"size": "3807",
"license": "mit",
"hash": -7137554595269649000,
"line_mean": 33.9266055046,
"line_max": 81,
"alpha_frac": 0.6151825584,
"autogenerated": false,
"ratio": 4.526753864447087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007854226623105705,
"num_lines": 109
} |
"""A middleware class to support logging of interactions with logged in users."""
import traceback
#import datetime
from django.core.signals import got_request_exception
from django.dispatch.dispatcher import receiver
import re
# Filter out requests to media and site_media.
from apps.managers.log_mgr import log_mgr
MEDIA_REGEXP = r'^\/site_media'
URL_FILTER = ("/favicon.ico", "/admin/jsi18n/")
class LoggingMiddleware(object):
    """Provides logging of logged in user interactions.

    Writes one log entry per response, except for media/static requests
    and a small set of filtered URLs.
    """
    def process_response(self, request, response):
        """Log the actions of logged in users."""
        #time_start = datetime.datetime.now()
        # Filter out the following paths. Logs will not be created for these
        # paths.
        if re.match(MEDIA_REGEXP, request.path) or \
            request.path in URL_FILTER:
            return response
        # Record the request together with the response's status code.
        log_mgr.write_log_entry(request=request, response_status_code=response.status_code)
        #time_end = datetime.datetime.now()
        #print "%s time: %s" % ("logging", (time_end - time_start))
        #print "%s timestamp: %s" % ("End logging middleware", time_end)
        return response
@receiver(got_request_exception)
def log_request_exception(sender, **kwargs):
    """Write a status-500 log entry carrying the current traceback."""
    _ = sender  # unused; required by the signal handler signature
    log_mgr.write_log_entry(request=kwargs["request"],
                            response_status_code=500,
                            exception=traceback.format_exc())
| {
"repo_name": "justinslee/Wai-Not-Makahiki",
"path": "makahiki/apps/managers/log_mgr/middleware.py",
"copies": "9",
"size": "1464",
"license": "mit",
"hash": 7505056811377571000,
"line_mean": 32.2727272727,
"line_max": 91,
"alpha_frac": 0.6823770492,
"autogenerated": false,
"ratio": 3.914438502673797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9096815551873798,
"avg_score": null,
"num_lines": null
} |
# A middleware for parsing GET and POST params.
import re
from pump.util import codec
# Middleware to parse GET and POST params. Adds the following keys to the
# request:
#
# - get_params
# - post_params
# - params
#
# You can specify an encoding to decode the URL-encoded params with. If not
# specified, uses the character encoding specified in the request, or UTF-8 by
# default.
def wrap_params(app, options={}):
    """Middleware: parse GET and POST params into the request dict.

    The URL-decoding charset comes from options['encoding'], then the
    request's character_encoding, then falls back to "utf8".
    """
    def wrapped_app(request):
        encoding = options.get('encoding')
        if not encoding:
            encoding = request.get('character_encoding')
        if not encoding:
            encoding = "utf8"
        if not request.get('get_params'):
            request = parse_get_params(request, encoding)
        if not request.get('post_params'):
            request = parse_post_params(request, encoding)
        return app(request)
    return wrapped_app
# Parse params from the query string.
def parse_get_params(request, encoding):
    """Parse the query string and merge the result into the request dict."""
    query = request.get("query_string")
    params = parse_params(query, encoding) if query else {}
    return _recursive_merge(request, {'get_params': params, 'params': params})
# Parse params from the request body.
def parse_post_params(request, encoding):
    """Parse a urlencoded form body and merge the result into the request dict."""
    params = {}
    if _does_have_urlencoded_form(request) and request.get("body"):
        params = parse_params(request["body"].read(), encoding)
    return _recursive_merge(request, {'post_params': params, 'params': params})
# Parse params from a string (e.g. "a=b&c=d") into a dict.
def parse_params(params_string, encoding):
    """Split a urlencoded string on '&' and decode each key=value pair.

    Pairs without an '=' are silently skipped.
    """
    pattern = re.compile(r'([^=]+)=(.*)')  # hoisted out of the per-pair loop
    params = {}
    for pair in params_string.split('&'):
        match = pattern.match(pair)
        if match:
            key, val = match.groups()
            params = set_param(params,
                               codec.url_decode(key, encoding),
                               codec.url_decode(val or '', encoding))
    return params
# Set a value for a key. If it already has a value, make a list of values.
def set_param(params, key, val):
    """Store val under key, accumulating repeated keys into a list.

    Note: an existing falsy value (e.g. '') is overwritten rather than
    promoted to a list, per the truthiness check below.
    """
    existing = params.get(key)
    if not existing:
        params[key] = val
    elif isinstance(existing, list):
        existing.append(val)
    else:
        params[key] = [existing, val]
    return params
# Check whether a urlencoded form was submitted.
def _does_have_urlencoded_form(request):
return request.get('content_type', '').startswith(
'application/x-www-form-urlencoded')
# Merge two dicts recursively.
def _recursive_merge(x, y):
    """Return a copy of x with y merged in, recursing into dict values."""
    merged = x.copy()
    for key in y:
        val = y[key]
        if isinstance(val, dict):
            # Merge into the existing value (or an empty dict if absent).
            merged[key] = _recursive_merge(merged.get(key, {}), val)
        else:
            merged[key] = val
    return merged
| {
"repo_name": "adeel/pump",
"path": "pump/middleware/params.py",
"copies": "1",
"size": "2586",
"license": "mit",
"hash": -3181945033776847400,
"line_mean": 29.0697674419,
"line_max": 78,
"alpha_frac": 0.6589327146,
"autogenerated": false,
"ratio": 3.4664879356568363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4625420650256836,
"avg_score": null,
"num_lines": null
} |
# A middleware for parsing nested params, like
# {"a[b]": 'c'} => {'a': {'b': 'c'}}.
import re
from itertools import chain
def wrap_nested_params(app, options={}):
    """Middleware: turn flat "a[b]"-style params into nested dicts.

    options['key_parser'] may supply a custom key-splitting function;
    parse_nested_keys is used by default.
    """
    def wrapped_app(req):
        parser = options.get('key_parser', parse_nested_keys)
        req["params"] = nest_params(req["params"], parser)
        return app(req)
    return wrapped_app
# Takes a flat dict of parameters and turns it into a nested dict of
# parameters, using the function key_parser to split the parameter names
# into keys.
def nest_params(params, key_parser):
    """Fold flat params into a nested dict, splitting names with key_parser."""
    nested = {}
    for name, value in param_pairs(params):
        nested = set_nested_value(nested, key_parser(name), value)
    return nested
def param_pairs(params):
    """Flatten a params dict into (key, value) pairs, expanding list values."""
    pairs = []
    for key, val in params.items():
        if isinstance(val, list):
            pairs.extend([(key, item) for item in val])
        else:
            pairs.append((key, val))
    return pairs
# Set a new value, v, in the dict d, at the key given by keys. For example,
#
#   set_nested_value({"a": {"b": {"c": "val"}}}, ["a", "b", "c"], "newval")
#   # => {"a": {"b": {"c": "newval"}}}
#
# A blank key component means "append to a list at this position". When d is
# a list the computed update is appended; otherwise a new dict is returned
# with the update merged in (d itself is never mutated).
def set_nested_value(d, keys, v):
    """Return d with v set at the path given by keys (non-destructive)."""
    head = keys[0] if keys else None
    tail = keys[1:] if len(keys) > 1 else None
    if not head:
        # No (or blank) leading key: the update is the value itself.
        updates = v
    elif not tail:
        updates = {head: v}
    elif tail[0] == "":
        # Blank subscript: descend into (or create) a list at this key.
        updates = {head: set_nested_value(d.get(head, []), tail[1:], v)}
    else:
        updates = {head: set_nested_value(d.get(head, {}), tail, v)}
    if isinstance(d, list):
        return d + [updates]
    return dict(d, **updates)
# "a[b][c]" => ["a", "b", "c"]
def parse_nested_keys(string):
k, ks = re.compile(r"^(.*?)((?:\[.*?\])*)$").match(string).groups()
if not ks:
return [k]
keys = re.compile('\[(.*?)\]').findall(ks)
return [k] + keys | {
"repo_name": "adeel/pump",
"path": "pump/middleware/nested_params.py",
"copies": "1",
"size": "2005",
"license": "mit",
"hash": -4757630451724661000,
"line_mean": 29.3939393939,
"line_max": 79,
"alpha_frac": 0.5506234414,
"autogenerated": false,
"ratio": 2.884892086330935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3935515527730935,
"avg_score": null,
"num_lines": null
} |
# A middleware that adds cookie support.
from Cookie import SimpleCookie as Cookie
from pump.util import codec
# Adds a "cookies" key to the request, which contains a dictionary containing
# any cookies sent by the client. If any new values are found in the
# dictionary after your app is called, the new cookies are sent to the client.
# The values in the dict will be converted to strings, unless they are
# themselves dicts. In that case, the "value" key will be used as the cookie
# value and the other keys will be interpreted as cookie attributes.
#
# request["cookies"] = {"a": {"value": "b", "path": "/"}}
#
# Note: if a cookie is set and is later deleted from request["cookies"], the
# corresponding cookie will not automatically be deleted. You need to set the
# "expires" attribute of the cookie to a time in the past.
def wrap_cookies(app):
    """Middleware: expose client cookies and emit Set-Cookie for changes.

    Parses the client's cookies into request["cookies"] before calling the
    app, then emits a Set-Cookie header for every entry that is new or whose
    value changed while the app ran.
    """
    def wrapped_app(request):
        original = request.get("cookies")
        if not original:
            original = _parse_cookies(request)
            request["cookies"] = original
        response = app(request)
        # Diff the (possibly modified) cookie dict against what came in.
        header_values = []
        for name, spec in request.get("cookies", {}).copy().iteritems():
            try:
                # A dict spec carries its value under "value" ...
                current = spec.get("value")
            except AttributeError:
                # ... anything else is stringified for the comparison.
                current = str(spec)
            if name not in original or original[name] != current:
                header_values.append(_format_cookie(name, spec))
        response.setdefault("headers", {})["set_cookie"] = header_values
        return response
    return wrapped_app
# Parse the cookies from a request into a dictionary.
def _parse_cookies(request):
    """Turn the request's Cookie header into a {name: value} dict."""
    jar = Cookie(request["headers"].get("cookie"))
    return dict((name, morsel.value) for name, morsel in jar.iteritems())
# Formats one cookie for the set_cookie header. If the value is a dict,
# its "value" key will be used as the value and the other keys will be
# interpreted as cookie attributes.
def _format_cookie(key, val):
    """Render one cookie as a Set-Cookie header string.

    NOTE(review): when val is a dict, its "value" entry is deleted in
    place, mutating the caller's dict — confirm this is intended.
    """
    attributes = val if isinstance(val, dict) else {"value": val}
    jar = Cookie()
    jar[key] = attributes["value"]
    del attributes["value"]
    morsel = jar[key]
    for name, setting in attributes.iteritems():
        morsel[name] = setting
    return morsel.OutputString()
| {
"repo_name": "adeel/pump",
"path": "pump/middleware/cookies.py",
"copies": "1",
"size": "2285",
"license": "mit",
"hash": 3206476676818734600,
"line_mean": 34.1538461538,
"line_max": 79,
"alpha_frac": 0.6831509847,
"autogenerated": false,
"ratio": 3.7956810631229234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9864556302651686,
"avg_score": 0.02285514903424755,
"num_lines": 65
} |
# A middleware that implements cookie-based sessions using Beaker.
import beaker.middleware
from pump.util import wsgi
# Adds a "session" key to the request. Takes the following options:
#
# - store:
# One of "memory", "database", "file", "cookie", "dbm", "memcached",
# or "google". Defaults to "memory".
# - lock_dir:
# The path to the directory to be used as a lock file. See
# <http://beaker.groovie.org/glossary.html#term-dog-pile-effect>.
# - data_dir:
# The path to the directory where files are stored. Used for file and
# dbm stores.
# - url:
# Used for database and memcached stores. In the former case, this
# should be a SQLAlchemy database string <http://bit.ly/gvzIlw>, e.g.
# "sqlite:///tmp/sessions.db". In the latter case, it should be a
# semicolon-separated list of memcached servers.
# - auto:
# If False, you will need to call request["session"].save() explicitly
# after modifying request["session"]. Defaults to True.
# - cookie:
# A dictionary with the following keys:
#
# - key:
# The name of the cookie. Defaults to "pump-session".
# - secret:
# A long, randomly-generated string used to ensure session
# integrity.
#
# - domain:
# The domain the cookie will be set on. Defaults to the current
# domain.
# - expires:
# The expiration date of the cookie. If True, expires when the
# browser closes. If False, never expires. If a datetime
# instance, expires at that specific date and time. If a
# timedelta instance, expires after the given time interval.
# Defaults to False.
# - secure:
# Whether the session cookie should be marked as secure (for
# SSL).
# - timeout:
# Seconds after the session was last accessed until it is
# invalidated. Defaults to never expiring.
#
# This uses Beaker in the background, so you can read
# <http://beaker.groovie.org/configuration.html> for more details.
def wrap_session(app, options={}):
    """Middleware: add a Beaker-backed "session" key to the request.

    Fills in the documented defaults (one level deep; the "cookies"
    sub-dict is merged separately so its defaults survive a partial
    override) before handing the options to Beaker.
    """
    merged = {"store": "memory", "auto": True}
    merged.update(options)
    cookie_options = {"expires": False, "key": "pump-session", "secure": False}
    cookie_options.update(options.get("cookies", {}))
    merged["cookies"] = cookie_options
    # Inner-to-outer: unbeaker runs inside the Beaker WSGI middleware.
    app = wrap_unbeaker(app)
    app = wrap_beaker(merged)(app)
    return app
# A Pump middleware that wraps around Beaker's WSGI middleware.
def wrap_beaker(options):
    """Lift Beaker's WSGI session middleware into a Pump middleware."""
    return wsgi.build_middleware(_beaker_wsgi_middleware, options)
# Renames the beaker.session key in the request to "session".
def wrap_unbeaker(app):
    """Expose Beaker's session under the plain "session" request key."""
    def wrapped(req):
        # pop() both reads and removes the Beaker key (None when absent).
        req["session"] = req.pop("beaker.session", None)
        return app(req)
    return wrapped
# The WSGI middleware provided by Beaker.
def _beaker_wsgi_middleware(app, options):
    """Instantiate Beaker's SessionMiddleware with options mapped to its config."""
    return beaker.middleware.SessionMiddleware(app, _get_beaker_config(options))
# Reformat options dictionary to match Beaker's configuration settings. See
# <http://beaker.groovie.org/configuration.html>.
def _get_beaker_config(options):
    """Translate Pump session options into Beaker's flat "session.*" keys."""
    cookies = options.get("cookies")
    config = {}
    config["session.data_dir"] = options.get("data_dir")
    config["session.lock_dir"] = options.get("lock_dir")
    config["session.type"] = _get_beaker_session_type(options.get("store"))
    config["session.url"] = options.get("url")
    config["session.auto"] = options.get("auto")
    # Cookie settings; note the irregular mapping: expires/domain get a
    # "cookie_" prefix, the rest do not.
    config["session.cookie_expires"] = cookies.get("expires")
    config["session.cookie_domain"] = cookies.get("domain")
    config["session.key"] = cookies.get("key")
    config["session.secret"] = cookies.get("secret")
    config["session.secure"] = cookies.get("secure")
    config["session.timeout"] = cookies.get("timeout")
    return config
def _get_beaker_session_type(store):
return {
"database": "ext:database",
"memcached": "ext:memcached",
"google": "ext:google"}.get(store, store) | {
"repo_name": "adeel/pump",
"path": "pump/middleware/session.py",
"copies": "1",
"size": "4189",
"license": "mit",
"hash": 1491062508756447500,
"line_mean": 39.6796116505,
"line_max": 78,
"alpha_frac": 0.6304607305,
"autogenerated": false,
"ratio": 3.6713409290096406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.480180165950964,
"avg_score": null,
"num_lines": null
} |
''' a middleware to serve static files from a configured directory'''
__all__ = ['static']
import os
import urllib.parse
from .file import file
def forbidden():
    """403 response used when a request escapes the public directory."""
    return 403, {'Content-Type': 'text/html'}, '<html><body><h2>Forbidden</h2></body></html>'
def success(file):
    """200 response streaming an already-open file handle."""
    return 200, {}, file
def static(public_dir='public'):
    ''' a middleware that takes parameters and returns another middleware'''
    root = os.path.abspath(public_dir) + os.sep
    def static_middleware(app):
        def static_app(environ):
            uri = urllib.parse.unquote(environ['PATH_INFO'])[1:]
            filename, ext = os.path.splitext(uri)
            # Map directory requests to index.html and extensionless
            # requests to "<name>.html"; explicit extensions pass through.
            if uri == '' or uri[-1] == '/':
                uri += 'index.html'
            elif ext:
                pass
            else:
                uri += '.html'
            filename = os.path.abspath(os.path.join(root, uri.strip('/\\')))
            if uri.startswith(public_dir) or '..' in uri : return forbidden()
            try:
                file_handle = open(filename, 'rb')
            except (OSError, ValueError):
                # Fix: was a bare ``except``, which also swallowed
                # KeyboardInterrupt/SystemExit. Only missing/unreadable
                # files (OSError) and malformed paths such as embedded
                # NUL bytes (ValueError) fall through to the wrapped app.
                return app(environ)
            return success(file_handle)
        return static_app
    return static_middleware
| {
"repo_name": "web-i/ripplet",
"path": "ripple/middlewares/static.py",
"copies": "1",
"size": "1071",
"license": "mit",
"hash": -4976499714170884000,
"line_mean": 27.9459459459,
"line_max": 91,
"alpha_frac": 0.6181139122,
"autogenerated": false,
"ratio": 3.6804123711340204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47985262833340203,
"avg_score": null,
"num_lines": null
} |
# AMIGA_HW.py
#
# DEMO Python code to switch the AMIGAs audio filter ON and OFF on the fly.
# It also changes the video mode to PAL or NTSC if required.
# (C)2007-2012, B.Walker, G0LCU. Now issued as Public Domain, you may do
# with it as you please.
#
# Written in such a way that almost anyone can understand how it works.
#
# For a minimum of a standard AMIGA A1200(HD) and Python 1.4.0, or for
# higher end AMIGAs to 2.0.1.
# Also WinUAE 1.5.3 minimum with Workbench 3.0.x and Python as above.
#
# Ensure that the assigned T: volume exists so as NOT to keep the file when
# the AMIGA is rebooted.
#
# Copy/drag this file into the PYTHON: volume and rename it AMIGA_HW.py.
# Call it from the Python prompt as:-
#
# >>> execfile("PYTHON:AMIGA_HW.py")<RETURN/ENTER>
#
# And away you go... ;o)
# ==========================================================================
# The assembly program that _defaults_ to audio filter ON condition, (when
# <RETURN/ENTER> is pressed only), and then compiled and converted.
# The address and control byte are changed as required before generating
# the executable file...
#
# From an AMIGA CLI using, a68k and blink, the executable AMIGA_Filter is
# created thus:-
# DRIVE:Path/To/Source> a68k AMIGA_Filter.asm<RETURN/ENTER>
# <Some reports here.>
# DRIVE:Path/To/Source> blink AMIGA_Filter.o
# <Some more reports here.>
# DRIVE:Path/To/Source> _
# ==========================================================================
# start:
# ;Assembler source to switch the
# ;audio filter and power light to ON.
# ;Compiled under a68k and linked under blink.
# ;A68k AMIGA_Filter<RETURN/ENTER>
# ;<Some reports here.>
# ;blink AMIGA_Filter.o<RETURN/ENTER>
# ;<Some more reports here.>
# move.b #252,$BFE001 ;Set Audio Filter to bootup default condition ON.
# ;Decimal 252, (0xFC), sets the filter to ON and
# ;decimal 254, (0xFE), sets the filter to OFF.
# clr.l d0 ;Set return code as OK.
# rts ;Exit program.
# end
# ==========================================================================
# The text HEX dump from the CLI using:-
# DRIVE:Path/To/Source> Type HEX AMIGA_Filter > AMIGA_Filter.txt<RETURN/ENTER>
# DRIVE:Path/To/Source> _
#
# Gives a text file, AMIGA_Filter.txt, with the contents thus...
# ==========================================================================
# 0000: 000003F3 00000000 00000001 00000000 ...ó............
# 0010: 00000000 00000003 000003E9 00000003 ...........é....
# 0020: 13FC00FC 00BFE001 42804E75 000003F2 .ü.ü.¿à.B.Nu...ò
# ==========================================================================
# To be edited to suit the Python code...
#
# Enjoy finding simple solutions to often very difficult problems...
#
# $VER: AMIGA_HW.py_Version_0.00.10_(C)2007-2012_B.Walker_G0LCU.
# The only STANDARD import required...
import os
def main():
	"""Interactive menu loop for the Classic AMIGA.

	Patches one control byte and one 4-byte hardware address into a
	pre-assembled 68k executable image, writes it to T:AMIGA_HW and runs
	it via os.system() to poke the chosen register (audio filter at
	$BFE001 or video mode at $DFF1DC).
	NOTE(review): Python 2 only - uses raw_input() and writes a str to a
	file opened in binary mode."""
	while 1:
		# A basic working screen to switch the audio filter mode....
		print("\f\nA simple Python hardware _controller_ for the Classic AMIGA A1200(HD).\n")
		print("(C)2007-2012, B.Walker, G0LCU. Now issued as Public Domain.\n")
		print("Press (f)<RETURN/ENTER> to enable audio filtering, (bootup default).")
		print("Press (F)<RETURN/ENTER> to disable audio filtering.")
		print("Press (p) or (P)<RETURN/ENTER> for PAL video mode.")
		print("Press (n) or (N)<RETURN/ENTER> for NTSC video mode.")
		# Set to the audio filter address and to ON by default.
		control_byte="\xFC"
		hw_address="\x00\xBF\xE0\x01"
		keyboard=raw_input("Press (q) or (Q)<RETURN/ENTER> to Quit:- ")
		# chr(13) below: a bare <RETURN/ENTER> also selects the default.
		if keyboard=="f" or keyboard==chr(13):
			control_byte="\xFC"
			hw_address="\x00\xBF\xE0\x01"
		if keyboard=="F":
			control_byte="\xFE"
			hw_address="\x00\xBF\xE0\x01"
		if keyboard=="p" or keyboard=="P":
			control_byte="\x20"
			hw_address="\x00\xDF\xF1\xDC"
		if keyboard=="n" or keyboard=="N":
			control_byte="\x00"
			hw_address="\x00\xDF\xF1\xDC"
		# chr(27) is ESC; any of these keys leaves the loop.
		if keyboard=="Q" or keyboard=="q" or keyboard==chr(27): break
		# Manually place the binary into a string format for Python 1.4.0 to 2.0.1.
		binary_one="\x00\x00\x03\xF3\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x03\xE9\x00\x00\x00\x03\x13\xFC\x00"
		binary_two="\x42\x80\x4E\x75\x00\x00\x03\xF2"
		# Create the running file and place it into the AMIGA T: volume as AMIGA_HW.
		amiga_exe_file=binary_one+control_byte+hw_address+binary_two
		amigafile=open("T:AMIGA_HW","wb+")
		amigafile.write(amiga_exe_file)
		amigafile.close()
		# Give a short delay to allow system to settle.
		os.system("C:Wait 1")
		# Ensure the file AMIGA_HW can be executed.
		os.system("C:Protect T:AMIGA_HW rwed")
		# Now run it and _immediately_ re-run this code...
		os.system("T:AMIGA_HW")
main()
# End of AMIGA_HW.py.
# Enjoy finding simple solutions to often very difficult problems...
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578255_AMIGAHeads_Only_Poking_HW_Memory_Address/recipe-578255.py",
"copies": "1",
"size": "5191",
"license": "mit",
"hash": -5574228363311314000,
"line_mean": 42.5630252101,
"line_max": 155,
"alpha_frac": 0.5933641975,
"autogenerated": false,
"ratio": 3.130434782608696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9134102803038295,
"avg_score": 0.01793923541408026,
"num_lines": 119
} |
# AMIGA_Peek_Mem.py
#
# DEMO code to show how to peek/read a single BYTE from any address for the
# Classic AMIGA A1200(HD), E-UAE and WinUAE. Although the code only does BYTE
# depth it is easily possible to add, WORD length, (I've already done so).
# (Soon there will be a demo to poke/write to a memory or HW address inside the
# Classic AMIGA too.)
#
# Originally written for a standard AMIGA A1200 using Python 1.4.x to 2.0.1.
# $VER: AMIGA_Peek_Mem.py_Version_0.00.10_(C)2007-2012_B.Walker_G0LCU.
#
# Doing the 256 byte dump in this DEMO is slow but this function was not
# originally designed for that facility but to quickly view the byte contents
# of a single memory address. To work correctly this MUST be run from a "tool"
# icon so that any AMIGA return code Failat reports are directed to the system
# "stderr" rather than the default Python window...
#
# Now issued as Public Domain. You may do with it as you please...
#
# =============================================================================
#
# ; The assembly code for this Python script, peek.asm...
# ; Assembled using a68k and linked using blink, both are on AMINET.
# lea.l $00F80000,a5 ;Set address to the default start of ROM.
# move.b (a5),d0 ;Move byte contents of the address into register d0.
# rts ;Now return to calling routine with the byte value.
# nop ;Long word align code.
# even ;Done!
# end ;This will also compile with DevPac too!
# ; Yep, that's all there is to it... ;o)
#
# =============================================================================
#
# The binary, (peek), generated from the assembly code converted to text format using:-
# AMIGA_Prompt: C:Type HEX peek > peek.HEX<CR>
#
# The text HEX file representing the AMIGA executable to be edited...
# 0000: 000003F3 00000000 00000001 00000000 ...ó............
# 0010: 00000000 00000003 000003E9 00000003 ...........é....
# 0020: 4BF900F8 00001015 4E754E71 000003F2 Kù.ü....NuNq...ò
#
# =============================================================================
#
# This is the 256 byte ROM DUMP at address $00F80000...
#
# 0000: 11144EF9 00F800D2 0000FFFF 00280044 ..Nù.ø.Ò.....(.D
# 0010: 0028000A FFFFFFFF 00414D49 47412052 .(.......AMIGA R
# 0020: 4F4D204F 70657261 74696E67 20537973 OM Operating Sys
# 0030: 74656D20 616E6420 4C696272 61726965 tem and Librarie
# 0040: 7300436F 70797269 67687420 A9203139 s.Copyright © 19
# 0050: 38352D31 39393320 00436F6D 6D6F646F 85-1993 .Commodo
# 0060: 72652D41 6D696761 2C20496E 632E2000 re-Amiga, Inc. .
# 0070: 416C6C20 52696768 74732052 65736572 All Rights Reser
# 0080: 7665642E 00332E31 20524F4D 20006578 ved..3.1 ROM .ex
# 0090: 65632E6C 69627261 72790065 78656320 ec.library.exec
# 00A0: 34302E31 30202831 352E372E 3933290D 40.10 (15.7.93).
# 00B0: 0A004E71 4E714AFC 00F800B6 00F8370E ..NqNqJü.ø.¶.ø7.
# 00C0: 02280969 00F8008E 00F8009B 00F804AC .(.i.ø...ø...ø.¬
# 00D0: 4E704FF8 040041FA FF2872FF 75017B00 NpOø..Aú.(r.u.{.
# 00E0: DA986402 528551C9 FFF851CA FFF44BFA Ú.d.R.QÉ.øQÊ.ôKú
# 00F0: 001A41FA FF0C43F9 00F00000 B3C8670A ..Aú..Cù.ð..³Èg.
#
# =============================================================================
#
# After finding the address of the string, ~find_this_text~ using the id()
# function, this was the RAM DUMP for Python Version 1.4.0 on my test machine.
#
# 0000: 00000002 002B8290 00000072 FFFFFFFF .....+.....r....
# 0010: 57652077 696C6C20 75736520 74686520 We will use the
# 0020: 69642829 2066756E 6374696F 6E20746F id() function to
# 0030: 2066696E 64207468 6973206C 696E6520 find this line
# 0040: 6C617465 722E2054 68652074 68696E67 later. The thing
# 0050: 2069732C 20796F75 2043414E 20646F20 is, you CAN do
# 0060: 74686973 20776974 6820616E 20414D49 this with an AMI
# 0070: 47412057 4954484F 55542061 6E204D4D GA WITHOUT an MM
# 0080: 5521004E 00000020 00000002 002B8290 U!.N... .....+..
# 0090: 0000000E 123E7734 66696E64 5F746869 .....>w4find_thi
# 00A0: 735F7465 78740008 00000000 00000019 s_text..........
# 00B0: 00000001 002B8290 00000007 FFFFFFFF .....+..........
# 00C0: 64656670 61746800 0030494C 00000018 defpath..0IL....
# 00D0: 00000001 002B8290 00000006 FFFFFFFF .....+..........
# 00E0: 73747269 6E67000F 00030030 0000001D string.....0....
# 00F0: 00000001 002B8290 0000000B FFFFFFFF .....+..........
#
# =============================================================================
#
# Again, finding the address of the string, ~find_this_text~ using the id()
# function, this was the RAM DUMP for Python Version 2.0.1 on my test machine.
#
# 0000: 00000002 0032956C 00000072 FBA7A5FC .....2.l...rû§¥ü
# 0010: 00000000 57652077 696C6C20 75736520 ....We will use
# 0020: 74686520 69642829 2066756E 6374696F the id() functio
# 0030: 6E20746F 2066696E 64207468 6973206C n to find this l
# 0040: 696E6520 6C617465 722E2054 68652074 ine later. The t
# 0050: 68696E67 2069732C 20796F75 2043414E hing is, you CAN
# 0060: 20646F20 74686973 20776974 6820616E do this with an
# 0070: 20414D49 47412057 4954484F 55542061 AMIGA WITHOUT a
# 0080: 6E204D4D 55210000 00000085 00000024 n MMU!.........$
# 0090: 00000004 0032956C 0000000E 123E7734 .....2.l.....>w4
# 00A0: 0038A16C 66696E64 5F746869 735F7465 .8¡lfind_this_te
# 00B0: 78740001 00000000 000000C8 00329DA8 xt.........È.2.š
# 00C0: 00000009 0038C10C 00000000 00000013 .....8Á.........
# 00D0: 01240000 00000000 00870000 00010038 .$.............8
# 00E0: A1C40123 0038A250 00000060 00000000 ¡Ä.#.8¢P...`....
# 00F0: 00870000 00010038 A1DC0000 0038A250 .......8¡Ü...8¢P
#
# =============================================================================
print "\f\n$VER: AMIGA_Peek_Mem.py_Version_0.00.10_(C)2007-2012_B.Walker_G0LCU."
print "\nPlease wait..."
import os
import struct
# Marker string whose in-memory bytes the DEMO later dumps by peeking
# around id(find_this_text).
find_this_text="We will use the id() function to find this line later. The thing is, you CAN do this with an AMIGA WITHOUT an MMU!"
# The only important _variable_.
# NOTE(review): 'global' at module scope is a no-op; kept to signal intent.
global return_code
return_code=0
# Default to the start of the ROM, 0xF80000.
def peek(address=16252928):
	"""Read one byte from AMIGA memory at _address_.

	Patches the address into a pre-assembled 68k executable, writes it to
	T:PeekMem, runs it, and receives the byte as the OS return code of
	os.system()."""
	global return_code
	return_code=0
	# Don't allow any errors......
	address=int(address)
	# ......although this should NEVER occur...
	if address<=0: address=0
	# Limit to standard AMIGA A1200(HD) 16MB boundary for this DEMO.
	if address>=16777215: address=16777215
	# Generate the 32 bit address as a string...
	# NOTE(review): struct.pack("l", ...) uses native byte order and size;
	# the embedded 68k code expects 4 big-endian bytes, which holds on the
	# AMIGA itself but not on little-endian hosts - confirm before porting.
	address_string=struct.pack("l",address)
	start_peek_string="\x00\x00\x03\xF3\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x03\xE9\x00\x00\x00\x03\x4B\xF9"
	end_peek_string="\x10\x15\x4E\x75\x4E\x71\x00\x00\x03\xF2"
	# Now create the AMIGA file that will be executed...
	peek_address_string=start_peek_string+address_string+end_peek_string
	# Generate the file inside the T: Volume, usually a RamDisk...
	amigafile=open("T:PeekMem","wb+")
	amigafile.write(peek_address_string)
	amigafile.close()
	# Ensure the file is executable; return_code is ignored here...
	return_code=os.system("C:Protect T:PeekMem rwed")
	# Run the AMIGA executable and get the return_code as a byte value...
	return_code=os.system("T:PeekMem")
	# We now have our byte read from memory _address_...
	return(return_code)
# Start of the DEMO using the peek() function.
# Do a single byte dump only for a start, it is effectively a Failat return code but
# it is redirected to the system's stderr so it is NOT seen on a default Python window...
# NOTE(review): Python 2 only - print statements and raw_input() below.
print "\f\nFirstly, do a single byte dump at the AMIGA ROM address 16777215, $FFFFFF..."
address=16777215
return_code=peek(address)
print "\nByte value at the last odd address, $FFFFFF in the AMIGA ROM is "+str(return_code)+"...\n"
raw_input("Press <CR> to continue...")
# Access the function 256 times, this is slow, but hey, peeking memory by
# the back door cannot be bad, can it?
# Using the same address value as the default...
print "\f\nDo a 256 byte dump of the AMIGA ROM at the default address, $F80000..."
address=16252928
peeked_address=""
# One byte per iteration; each peek() writes and runs a fresh executable.
for n in range(address,(address+256),1):
	return_code=peek(n)
	peeked_address=peeked_address+chr(return_code)
# Generate the binary file as a file and autosave...
amigafile=open("T:Binary.BIN","wb+")
amigafile.write(peeked_address)
amigafile.close()
# Now convert to a text HEX version of the binary file inside the T: Volume.
os.system("C:Type HEX T:Binary.BIN > T:Binary.HEX")
# The return_code is directed to the system's stderr, so this ensures that
# the dump can be printed to the default Python window...
amigafile=open("T:Binary.HEX","r+")
peeked_address=amigafile.read()
amigafile.close()
# Print the dump to screen...
print "\f\nStart address of the 256 byte ROM dump is "+str(address)+", "+hex(address)+"...\n"
print peeked_address
raw_input("Press <CR> to continue...")
# Do the same again but this time find the _address_ of id(find_this_text)...
# NOTE(review): id() equals the object's memory address on CPython only,
# and it points at the str object header, not the raw character data.
print "\f\nNow to find the address of the string _variable_ ~find_this_text~..."
address=id(find_this_text)
peeked_address=""
for n in range(address,(address+256),1):
	return_code=peek(n)
	peeked_address=peeked_address+chr(return_code)
amigafile=open("T:Binary.BIN","wb+")
amigafile.write(peeked_address)
amigafile.close()
os.system("C:Type HEX T:Binary.BIN > T:Binary.HEX")
amigafile=open("T:Binary.HEX","r+")
peeked_address=amigafile.read()
amigafile.close()
print "\f\nStart address of the 256 byte dump is "+str(address)+", "+hex(address)+"...\n"
print peeked_address+"\nEnd of the function, peek(), DEMO..."
# End of AMIGA_Peek_Mem.py DEMO.
# Enjoy finding simple solutions to often very difficult problems...
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578189_AMIGAHeads_Only_Peeking_Memory_Address_Classic/recipe-578189.py",
"copies": "1",
"size": "9818",
"license": "mit",
"hash": 484000610933758850,
"line_mean": 45.5428571429,
"line_max": 157,
"alpha_frac": 0.6685082873,
"autogenerated": false,
"ratio": 2.729405194079866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38979134813798655,
"avg_score": null,
"num_lines": null
} |
#a mimic joint can act as a gear between two joints
#you can control the gear ratio in magnitude and sign (>0 reverses direction)
import pybullet as p
import time
p.connect(p.GUI)
p.loadURDF("plane.urdf", 0, 0, -2)
wheelA = p.loadURDF("differential/diff_ring.urdf", [0, 0, 0])

# Disable the default joint motors so the gear constraints drive the joints.
for joint_index in range(p.getNumJoints(wheelA)):
  print(p.getJointInfo(wheelA, joint_index))
  p.setJointMotorControl2(wheelA, joint_index, p.VELOCITY_CONTROL, targetVelocity=0, force=0)

# (parent joint, child joint, gear ratio) triples linking the differential.
gear_links = [(1, 3, 1), (2, 4, -1), (1, 4, -1)]
for parent_joint, child_joint, ratio in gear_links:
  c = p.createConstraint(wheelA,
                         parent_joint,
                         wheelA,
                         child_joint,
                         jointType=p.JOINT_GEAR,
                         jointAxis=[0, 1, 0],
                         parentFramePosition=[0, 0, 0],
                         childFramePosition=[0, 0, 0])
  p.changeConstraint(c, gearRatio=ratio, maxForce=10000)

p.setRealTimeSimulation(1)
# Keep reasserting gravity while the real-time simulation runs.
while (1):
  p.setGravity(0, 0, -10)
  time.sleep(0.01)
#p.removeConstraint(c)
| {
"repo_name": "MadManRises/Madgine",
"path": "shared/bullet3-2.89/examples/pybullet/examples/mimicJointConstraint.py",
"copies": "2",
"size": "1638",
"license": "mit",
"hash": 5318097543519920000,
"line_mean": 33.8510638298,
"line_max": 83,
"alpha_frac": 0.5122100122,
"autogenerated": false,
"ratio": 3.5454545454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004659651334270352,
"num_lines": 47
} |
"""A Min heap is a complete binary tree [CBT] (implemented using array)
in which each node has a value smaller than its sub-trees"""
from math import ceil
class MinHeap:
    """A min heap is a complete binary tree [CBT] (implemented using array)
    in which each node has a value smaller than its sub-trees.

    The children of node ``i`` live at ``2*i + 1`` / ``2*i + 2`` and its
    parent at ``(i - 1) // 2``.
    """

    def __init__(self, arr=None):
        """Build an empty heap, or heapify ``arr`` in place and adopt it.

        NOTE: when ``arr`` is given, the heap aliases the caller's list.
        """
        self.heap = []
        self.heap_size = 0
        if arr is not None:
            self.create_min_heap(arr)
            self.heap = arr
            self.heap_size = len(arr)

    def create_min_heap(self, arr):
        """
        Converts a given array into a min heap (in place, bottom-up, O(n)).
        :param arr: input array of numbers
        """
        n = len(arr)
        # last n/2 elements will be leaf nodes (CBT property) hence already min heaps;
        # sift down every internal node, from the deepest one up to the root
        for i in range(n // 2, -1, -1):
            self.min_heapify(i, arr, n)

    def min_heapify(self, indx, arr, size):
        """
        Assuming sub trees are already min heaps, converts tree rooted at
        current indx into a min heap (sift-down).
        :param indx: index to check for min heap
        :param arr: backing array
        :param size: number of valid heap entries in ``arr``
        """
        left_child = indx * 2 + 1
        right_child = indx * 2 + 2
        smallest = indx
        # find the smallest among indx and its (up to two) children
        if left_child < size and arr[left_child] < arr[smallest]:
            smallest = left_child
        if right_child < size and arr[right_child] < arr[smallest]:
            smallest = right_child
        # if a child is smaller, swap and continue sifting down from there
        if smallest != indx:
            arr[indx], arr[smallest] = arr[smallest], arr[indx]
            self.min_heapify(smallest, arr, size)

    def insert(self, value):
        """
        Inserts an element in the min heap.
        :param value: value to be inserted in the heap
        """
        self.heap.append(value)
        self.heap_size += 1
        indx = self.heap_size - 1
        # standard parent index; equivalent to the old ceil(indx/2 - 1) form
        parent = (indx - 1) // 2
        # sift the new value up while it undercuts its parent
        while indx > 0 and self.heap[indx] < self.heap[parent]:
            self.heap[indx], self.heap[parent] = self.heap[parent], self.heap[indx]
            indx = parent
            parent = (indx - 1) // 2

    def delete(self, indx):
        """
        Deletes the value on the specified index node.

        The last element is moved into the hole; the heap property is then
        restored by sifting it UP as well as down. (Sifting down alone - the
        previous behaviour - corrupts the heap whenever the moved value is
        smaller than its new parent.)
        :param indx: index whose node is to be removed
        :return: value of the node deleted from the heap (None on underflow)
        """
        if self.heap_size == 0:
            print("Heap Underflow!!")
            return
        self.heap[-1], self.heap[indx] = self.heap[indx], self.heap[-1]
        self.heap_size -= 1
        removed = self.heap.pop()
        if indx < self.heap_size:
            # sift up: the relocated value may undercut its new parent
            parent = (indx - 1) // 2
            while indx > 0 and self.heap[indx] < self.heap[parent]:
                self.heap[indx], self.heap[parent] = self.heap[parent], self.heap[indx]
                indx = parent
                parent = (indx - 1) // 2
            # sift down: it may also exceed its new children
            self.min_heapify(indx, self.heap, self.heap_size)
        return removed

    def extract_min(self):
        """
        Extracts the minimum value from the heap.
        :return: extracted min value (None if the heap is empty)
        """
        return self.delete(0)

    def print(self):
        # NOTE: intentionally shadows the builtin name to keep the public API.
        print(*self.heap)
# Quick demo: build a heap, insert, delete by index, extract the minimum.
heap = MinHeap([5, 10, 4, 8, 3, 0, 9, 11])
heap.insert(15)
# Removes (and prints) whatever value currently sits at index 2.
print(heap.delete(2))
print(heap.extract_min())
heap.print()
| {
"repo_name": "anubhavshrimal/Data_Structures_Algorithms_In_Python",
"path": "Heaps/MinHeap.py",
"copies": "1",
"size": "3372",
"license": "mit",
"hash": 852777912426217100,
"line_mean": 30.8113207547,
"line_max": 103,
"alpha_frac": 0.5771055753,
"autogenerated": false,
"ratio": 3.8625429553264605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9937010682484053,
"avg_score": 0.0005275696284815464,
"num_lines": 106
} |
""" A mini API framework.
"""
from __future__ import unicode_literals
import json
import re
from django.conf import settings
from django.contrib.gis.measure import D
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseBadRequest
from django.template import loader, RequestContext
from django.template.defaultfilters import escapejs
from django.utils.six import text_type
from django.utils.six.moves.urllib.parse import urlencode
from django.utils.translation import ugettext as _
from django.views.generic import View
from boundaries import kml
from boundaries.models import app_settings
class RawJSONResponse(object):
    """APIView subclasses can return these if they have
    already-serialized JSON to return"""
    def __init__(self, content):
        # ``content`` is a ready-to-send JSON string; APIView.dispatch()
        # writes it verbatim instead of passing it through json.dump().
        self.content = content
class BadRequest(Exception):
    # Raised for malformed query parameters (filters, limit, offset);
    # APIView.dispatch() converts it into an HTTP 400 response.
    pass
class APIView(View):
    """Base view class that serializes subclass responses to JSON.
    Subclasses should define get/post/etc. methods."""
    # When True, a ?callback=... GET parameter wraps the JSON in a JSONP call.
    allow_jsonp = True
    content_type = 'application/json; charset=utf-8'
    def dispatch(self, request, *args, **kwargs):
        # Run the normal View dispatch, translating BadRequest into HTTP 400.
        try:
            result = super(APIView, self).dispatch(request, *args, **kwargs)
        except BadRequest as e:
            return HttpResponseBadRequest(text_type(e), content_type='text/plain')
        # Handlers may return a ready-made HttpResponse; pass it through untouched.
        if isinstance(result, HttpResponse):
            return result
        if request.GET.get('format') == 'apibrowser':
            return self.apibrowser_response(request, result)
        resp = HttpResponse(content_type=self.content_type)
        # CORS: only advertised for GETs, and only when configured.
        if request.method == 'GET' and app_settings.ALLOW_ORIGIN:
            resp['Access-Control-Allow-Origin'] = app_settings.ALLOW_ORIGIN
        # JSONP: the callback name is stripped to identifier-safe characters,
        # and the "callback(" prefix must be written before the JSON body.
        callback = ''
        if self.allow_jsonp and 'callback' in request.GET:
            callback = re.sub(r'[^a-zA-Z0-9_]', '', request.GET['callback'])
            resp.write(callback + '(')
        # RawJSONResponse content is written verbatim; anything else is dumped.
        if isinstance(result, RawJSONResponse):
            resp.write(result.content)
        else:
            json.dump(result, resp, indent=(4 if request.GET.get('pretty') else None))
        if callback:
            resp.write(');')
        return resp
    def apibrowser_response(self, request, result):
        """If format=apibrowser, return a prettified HTML response."""
        if isinstance(result, RawJSONResponse):
            result = json.loads(result.content)
        jsonresult = json.dumps(result, indent=4)
        t = loader.get_template('boundaries/apibrowser.html')
        json_url = request.path
        # Rebuild the query string (minus 'format') to link to the raw JSON.
        params = dict([k, v.encode('utf-8')] for k, v in request.GET.items())
        params.pop('format')
        if params:
            json_url += '?' + urlencode(params)
        ctx = {
            'json': jsonresult,
            'resource_name': self.model.__name__,
            'is_list': isinstance(self, ModelListView),
            'json_url': json_url,
        }
        if ctx['is_list']:
            ctx['title'] = self.model._meta.verbose_name_plural
        else:
            ctx['title'] = self.model._meta.verbose_name
        c = RequestContext(request, ctx)
        return HttpResponse(t.render(c))
class ModelListView(APIView):
    """Base API class for a list of resources.
    Subclasses should set the 'model' attribute to the appropriate model class.
    Set the filterable_fields attribute to a list of field names users should
    be able to filter on.
    Compatible model classes should define a static method called get_dicts that,
    given a list of objects, returns a list of dicts suitable for serialization.
    By default, those will be model objects, but the model can also define a static
    method called 'prepare_queryset_for_get_dicts' that accepts a queryset and returns
    a sliceable iterable of objects that will later be passed to get_dicts."""
    # Django field lookups a client may request as ?field__lookup=value.
    filter_types = ['exact', 'iexact', 'contains', 'icontains',
                    'startswith', 'istartswith', 'endswith', 'iendswith', 'isnull']
    def get_qs(self, request):
        # Default queryset; subclasses may override (and accept extra kwargs).
        return self.model.objects.all()
    def filter(self, request, qs):
        """Apply whitelisted ?field__lookup=value GET parameters to ``qs``."""
        for (f, val) in request.GET.items():
            if '__' in f:
                # NOTE: a parameter containing more than one '__' raises
                # ValueError here; get() catches it and answers HTTP 400.
                (filter_field, filter_type) = f.split('__')
            else:
                (filter_field, filter_type) = (f, 'exact')
            if filter_field in getattr(self, 'filterable_fields', []) and filter_type in self.filter_types:
                # Coerce the textual booleans/null (e.g. for isnull filters).
                if val in ['true', 'True']:
                    val = True
                elif val in ['false', 'False']:
                    val = False
                elif val in ['none', 'None']:
                    val = None
                qs = qs.filter(**{filter_field + '__' + filter_type: val})
        return qs
    def get_related_resources(self, request, qs, meta):
        # Hook for subclasses: extra entries for the response's meta.related.
        return {}
    def get(self, request, **kwargs):
        qs = self.get_qs(request, **kwargs)
        try:
            qs = self.filter(request, qs)
        except ValueError:
            raise BadRequest(_("Invalid filter value"))
        if hasattr(self.model, 'prepare_queryset_for_get_dicts'):
            qs = self.model.prepare_queryset_for_get_dicts(qs)
        # Paginator is defined later in this module; page() yields a dict
        # with 'objects' and 'meta' entries, used below.
        paginator = Paginator(request.GET, qs, resource_uri=request.path)
        result = paginator.page()
        result['objects'] = self.model.get_dicts(result['objects'])
        related = self.get_related_resources(request, qs, result['meta'])
        if related:
            result['meta']['related'] = related
        return result
class ModelGeoListView(ModelListView):
    """Adds geospatial support to ModelListView.
    Subclasses must set the 'allowed_geo_fields' attribute to a list
    of geospatial field names which we're allowed to provide.
    'name_field' should be the name of the field on objects that
    contains a name value
    To enable a couple of default geospatial filters, the
    default_geo_filter_field attribute should be set to the name
    of the geometry field to filter on.
    To access a geospatial field, the field name must be provided
    by the URLconf in the 'geo_field' keyword argument."""
    name_field = 'name'
    default_geo_filter_field = None
    def filter(self, request, qs):
        """Apply the standard filters plus optional ?contains= / ?near= geo filters."""
        qs = super(ModelGeoListView, self).filter(request, qs)
        if self.default_geo_filter_field:
            if 'contains' in request.GET:
                try:
                    # "lat,lon" -> WKT point (note: longitude goes first).
                    latitude, longitude = re.sub(r'[^\d.,-]', '', request.GET['contains']).split(',')
                    wkt = 'POINT(%s %s)' % (longitude, latitude)
                    qs = qs.filter(**{self.default_geo_filter_field + "__contains": wkt})
                except ValueError:
                    raise BadRequest(_("Invalid latitude,longitude '%(value)s' provided.") % {'value': request.GET['contains']})
            if 'near' in request.GET:
                # Expected shape: "lat,lon,<number><unit>", e.g. "45.5,-73.6,10km".
                # NOTE(review): unlike 'contains', a malformed value here is not
                # wrapped - a missing numeral makes re.match() return None and
                # raises AttributeError below.
                latitude, longitude, range = request.GET['near'].split(',')
                wkt = 'POINT(%s %s)' % (longitude, latitude)
                numeral = re.match('([0-9]+)', range).group(1)
                unit = range[len(numeral):]
                numeral = int(numeral)
                kwargs = {unit: numeral}
                qs = qs.filter(**{self.default_geo_filter_field + "__distance_lte": (wkt, D(**kwargs))})
        return qs
    def get(self, request, **kwargs):
        if 'geo_field' not in kwargs:
            # If it's not a geo request, let ModelListView handle it.
            return super(ModelGeoListView, self).get(request, **kwargs)
        field = kwargs.pop('geo_field')
        if field not in self.allowed_geo_fields:
            raise Http404
        qs = self.get_qs(request, **kwargs)
        try:
            qs = self.filter(request, qs)
        except ValueError:
            raise BadRequest(_("Invalid filter value"))
        # Geometry payloads are large; refuse queries above the configured cap.
        if qs.count() > app_settings.MAX_GEO_LIST_RESULTS:
            return HttpResponseForbidden(
                _("Spatial-list queries cannot return more than %(expected)d resources; this query would return %(actual)s. Please filter your query.")
                % {'expected': app_settings.MAX_GEO_LIST_RESULTS, 'actual': qs.count()})
        format = request.GET.get('format', 'json')
        if format in ('json', 'apibrowser'):
            # Hand-assembled JSON, returned raw to avoid re-serializing the
            # already-JSON geometry produced by geojson.
            strings = ['{"objects": [']
            strings.append(','.join(('{"name": "%s", "%s": %s}' % (escapejs(x[1]), field, x[0].geojson)
                                     for x in qs.values_list(field, self.name_field))))
            strings.append(']}')
            return RawJSONResponse(''.join(strings))
        elif format == 'wkt':
            return HttpResponse("\n".join((geom.wkt for geom in qs.values_list(field, flat=True))), content_type="text/plain")
        elif format == 'kml':
            placemarks = [kml.generate_placemark(x[1], x[0]) for x in qs.values_list(field, self.name_field)]
            resp = HttpResponse(
                kml.generate_kml_document(placemarks),
                content_type="application/vnd.google-earth.kml+xml")
            resp['Content-Disposition'] = 'attachment; filename="shape.kml"'
            return resp
        else:
            raise NotImplementedError
class ModelDetailView(APIView):
    """Return the API representation of a single object.

    Subclasses must set the 'model' attribute to the appropriate model class
    and define a 'get_object' method returning a single model object; its
    arguments are the request, a QuerySet to select from, and any keyword
    arguments provided by the URLconf.

    Compatible model classes must define an ``as_dict`` instance method
    returning a serializable dict of the object's data."""
    def __init__(self):
        super(ModelDetailView, self).__init__()
        # Cache the base queryset once per view instance.
        self.base_qs = self.model.objects.all()
    def get(self, request, **kwargs):
        # A missing object surfaces as an HTTP 404.
        try:
            obj = self.get_object(request, self.base_qs, **kwargs)
            return obj.as_dict()
        except ObjectDoesNotExist:
            raise Http404
class ModelGeoDetailView(ModelDetailView):
    """Adds geospatial support to ModelDetailView
    Subclasses must set the 'allowed_geo_fields' attribute to a list
    of geospatial field names which we're allowed to provide.
    To access a geospatial field, the field name must be provided
    by the URLconf in the 'geo_field' keyword argument."""
    # Field holding the human-readable name used in the KML placemark.
    name_field = 'name'
    def get(self, request, **kwargs):
        if 'geo_field' not in kwargs:
            # If it's not a geo request, let ModelDetailView handle it.
            return super(ModelGeoDetailView, self).get(request, **kwargs)
        field = kwargs.pop('geo_field')
        if field not in self.allowed_geo_fields:
            raise Http404
        try:
            # only() keeps the query light: just the geometry and the name.
            obj = self.get_object(request, self.base_qs.only(field, self.name_field), **kwargs)
        except ObjectDoesNotExist:
            raise Http404
        geom = getattr(obj, field)
        name = getattr(obj, self.name_field)
        format = request.GET.get('format', 'json')
        if format in ('json', 'apibrowser'):
            # The geometry already serializes itself; return it raw.
            return RawJSONResponse(geom.geojson)
        elif format == 'wkt':
            return HttpResponse(geom.wkt, content_type="text/plain")
        elif format == 'kml':
            resp = HttpResponse(
                kml.generate_kml_document([kml.generate_placemark(name, geom)]),
                content_type="application/vnd.google-earth.kml+xml")
            resp['Content-Disposition'] = 'attachment; filename="shape.kml"'
            return resp
        else:
            raise NotImplementedError
class Paginator(object):
"""
Taken from django-tastypie. Thanks!
"""
    def __init__(self, request_data, objects, resource_uri=None, limit=None, offset=0, max_limit=1000, collection_name='objects'):
        """
        Instantiates the ``Paginator`` and allows for some configuration.
        The ``request_data`` argument ought to be a dictionary-like object.
        May provide ``limit`` and/or ``offset`` to override the defaults.
        Commonly provided ``request.GET``. Required.
        The ``objects`` should be a list-like object of ``Resources``.
        This is typically a ``QuerySet`` but can be anything that
        implements slicing. Required.
        Optionally accepts a ``limit`` argument, which specifies how many
        items to show at a time. Defaults to ``None``, which is no limit.
        Optionally accepts an ``offset`` argument, which specifies where in
        the ``objects`` to start displaying results from. Defaults to 0.
        Optionally accepts a ``max_limit`` argument, which the upper bound
        limit. Defaults to ``1000``. If you set it to 0 or ``None``, no upper
        bound will be enforced.
        """
        self.request_data = request_data
        self.objects = objects
        self.limit = limit
        self.max_limit = max_limit
        self.offset = offset
        # resource_uri / collection_name are stored but not read by the
        # methods visible here - presumably consumed by page(); verify there.
        self.resource_uri = resource_uri
        self.collection_name = collection_name
def get_limit(self):
"""
Determines the proper maximum number of results to return.
In order of importance, it will use:
* The user-requested ``limit`` from the GET parameters, if specified.
* The object-level ``limit`` if specified.
* ``settings.API_LIMIT_PER_PAGE`` if specified.
Default is 20 per page.
"""
limit = self.request_data.get('limit', self.limit)
if limit is None:
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
try:
limit = int(limit)
except ValueError:
raise BadRequest(_("Invalid limit '%(value)s' provided. Please provide a positive integer.") % {'value': limit})
if limit < 0:
raise BadRequest(_("Invalid limit '%(value)s' provided. Please provide a positive integer >= 0.") % {'value': limit})
if self.max_limit and (not limit or limit > self.max_limit):
# If it's more than the max, we're only going to return the max.
# This is to prevent excessive DB (or other) load.
return self.max_limit
return limit
def get_offset(self):
"""
Determines the proper starting offset of results to return.
It attempst to use the user-provided ``offset`` from the GET parameters,
if specified. Otherwise, it falls back to the object-level ``offset``.
Default is 0.
"""
offset = self.offset
if 'offset' in self.request_data:
offset = self.request_data['offset']
try:
offset = int(offset)
except ValueError:
raise BadRequest(_("Invalid offset '%(value)s' provided. Please provide a positive integer.") % {'value': offset})
if offset < 0:
raise BadRequest(_("Invalid offset '%(value)s' provided. Please provide a positive integer >= 0.") % {'value': offset})
return offset
def get_slice(self, limit, offset):
"""
Slices the result set to the specified ``limit`` & ``offset``.
"""
if limit == 0:
return self.objects[offset:]
return self.objects[offset:offset + limit]
def get_count(self):
"""
Returns a count of the total number of objects seen.
"""
try:
return self.objects.count()
except (AttributeError, TypeError):
# If it's not a QuerySet (or it's ilk), fallback to ``len``.
return len(self.objects)
def get_previous(self, limit, offset):
"""
If a previous page is available, will generate a URL to request that
page. If not available, this returns ``None``.
"""
if offset - limit < 0:
return None
return self._generate_uri(limit, offset - limit)
def get_next(self, limit, offset, count):
"""
If a next page is available, will generate a URL to request that
page. If not available, this returns ``None``.
"""
if offset + limit >= count:
return None
return self._generate_uri(limit, offset + limit)
def _generate_uri(self, limit, offset):
if self.resource_uri is None:
return None
try:
# QueryDict has a urlencode method that can handle multiple values for the same key
request_params = self.request_data.copy()
if 'limit' in request_params:
del request_params['limit']
if 'offset' in request_params:
del request_params['offset']
request_params.update({'limit': limit, 'offset': offset})
encoded_params = request_params.urlencode()
except AttributeError:
request_params = {}
for k, v in self.request_data.items():
if isinstance(v, text_type):
request_params[k] = v.encode('utf-8')
else:
request_params[k] = v
if 'limit' in request_params:
del request_params['limit']
if 'offset' in request_params:
del request_params['offset']
request_params.update({'limit': limit, 'offset': offset})
encoded_params = urlencode(request_params)
return '%s?%s' % (self.resource_uri, encoded_params)
def page(self):
"""
Generates all pertinent data about the requested page.
Handles getting the correct ``limit`` & ``offset``, then slices off
the correct set of results and returns all pertinent metadata.
"""
limit = self.get_limit()
offset = self.get_offset()
count = self.get_count()
objects = self.get_slice(limit, offset)
meta = {
'offset': offset,
'limit': limit,
'total_count': count,
}
if limit:
meta['previous'] = self.get_previous(limit, offset)
meta['next'] = self.get_next(limit, offset, count)
return {
self.collection_name: objects,
'meta': meta,
}
| {
"repo_name": "opencorato/represent-boundaries",
"path": "boundaries/base_views.py",
"copies": "2",
"size": "18246",
"license": "mit",
"hash": 7543582215020585000,
"line_mean": 36.2367346939,
"line_max": 151,
"alpha_frac": 0.5976104352,
"autogenerated": false,
"ratio": 4.327798861480076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005996817984996104,
"num_lines": 490
} |
""" A minimal application using the Qt console-style Jupyter frontend.
This is not a complete console app, as subprocess will not be able to receive
input, there is no real readline support, among other limitations.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import signal
import sys
from warnings import warn
# If run on Windows:
#
# 1. Install an exception hook which pops up a message box.
# Pythonw.exe hides the console, so without this the application
# silently fails to load.
#
# We always install this handler, because the expectation is for
# qtconsole to bring up a GUI even if called from the console.
# The old handler is called, so the exception is printed as well.
# If desired, check for pythonw with an additional condition
# (sys.executable.lower().find('pythonw.exe') >= 0).
#
# 2. Set AppUserModelID for Windows 7 and later so that qtconsole
# uses its assigned taskbar icon instead of grabbing the one with
# the same AppUserModelID
#
if os.name == 'nt':
    # 1.
    # Save the previous hook so it still runs after our GUI popup.
    old_excepthook = sys.excepthook

    # Exclude this from our autogenerated API docs.
    undoc = lambda func: func

    @undoc
    def gui_excepthook(exctype, value, tb):
        """Show startup exceptions in a message box (pythonw hides the console)."""
        try:
            import ctypes, traceback
            MB_ICONERROR = 0x00000010
            title = u'Error starting QtConsole'
            msg = u''.join(traceback.format_exception(exctype, value, tb))
            ctypes.windll.user32.MessageBoxW(0, msg, title, MB_ICONERROR)
        finally:
            # Also call the old exception hook to let it do
            # its thing too.
            old_excepthook(exctype, value, tb)

    sys.excepthook = gui_excepthook

    # 2.
    try:
        from ctypes import windll
        windll.shell32.SetCurrentProcessExplicitAppUserModelID("Jupyter.Qtconsole")
    except AttributeError:
        # Pre-Windows-7 shell32 lacks this API; silently skip.
        pass
from qtpy import QtCore, QtGui, QtWidgets
from traitlets.config.application import boolean_flag
from traitlets.config.application import catch_config_error
from qtconsole.jupyter_widget import JupyterWidget
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole import styles, __version__
from qtconsole.mainwindow import MainWindow
from qtconsole.client import QtKernelClient
from qtconsole.manager import QtKernelManager
from traitlets import (
Dict, Unicode, CBool, Any
)
from jupyter_core.application import JupyterApp, base_flags, base_aliases
from jupyter_client.consoleapp import (
JupyterConsoleApp, app_aliases, app_flags,
)
from jupyter_client.localinterfaces import is_local_ip
_examples = """
jupyter qtconsole # start the qtconsole
"""
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
# FIXME: workaround bug in jupyter_client < 4.1 excluding base_flags,aliases
flags = dict(base_flags)
qt_flags = {
'plain' : ({'JupyterQtConsoleApp' : {'plain' : True}},
"Disable rich text support."),
}
qt_flags.update(boolean_flag(
'banner', 'JupyterQtConsoleApp.display_banner',
"Display a banner upon starting the QtConsole.",
"Don't display a banner upon starting the QtConsole."
))
# and app_flags from the Console Mixin
qt_flags.update(app_flags)
# add frontend flags to the full set
flags.update(qt_flags)
# start with copy of base jupyter aliases
aliases = dict(base_aliases)
qt_aliases = dict(
style = 'JupyterWidget.syntax_style',
stylesheet = 'JupyterQtConsoleApp.stylesheet',
editor = 'JupyterWidget.editor',
paging = 'ConsoleWidget.paging',
)
# and app_aliases from the Console Mixin
qt_aliases.update(app_aliases)
qt_aliases.update({'gui-completion':'ConsoleWidget.gui_completion'})
# add frontend aliases to the full set
aliases.update(qt_aliases)
# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
qt_aliases = set(qt_aliases.keys())
qt_flags = set(qt_flags.keys())
class JupyterQtConsoleApp(JupyterApp, JupyterConsoleApp):
    name = 'jupyter-qtconsole'
    version = __version__
    description = """
        The Jupyter QtConsole.

        This launches a Console-style application using Qt.  It is not a full
        console, in that launched terminal subprocesses will not be able to accept
        input.

    """
    examples = _examples

    classes = [JupyterWidget] + JupyterConsoleApp.classes
    flags = Dict(flags)
    aliases = Dict(aliases)
    frontend_flags = Any(qt_flags)
    frontend_aliases = Any(qt_aliases)
    kernel_client_class = QtKernelClient
    kernel_manager_class = QtKernelManager

    stylesheet = Unicode('', config=True,
        help="path to a custom CSS stylesheet")

    hide_menubar = CBool(False, config=True,
        help="Start the console window with the menu bar hidden.")

    maximize = CBool(False, config=True,
        help="Start the console window maximized.")

    plain = CBool(False, config=True,
        help="Use a plaintext widget instead of rich text (plain can't print/save).")

    display_banner = CBool(True, config=True,
        help="Whether to display a banner upon starting the QtConsole."
    )

    def _plain_changed(self, name, old, new):
        # Traitlets change handler: keep the ConsoleWidget kind and the
        # widget factory consistent with the ``plain`` trait.
        kind = 'plain' if new else 'rich'
        self.config.ConsoleWidget.kind = kind
        if new:
            self.widget_factory = JupyterWidget
        else:
            self.widget_factory = RichJupyterWidget

    # the factory for creating a widget
    widget_factory = Any(RichJupyterWidget)

    def parse_command_line(self, argv=None):
        """Parse CLI args; leftover args are forwarded to the kernel."""
        super(JupyterQtConsoleApp, self).parse_command_line(argv)
        self.build_kernel_argv(self.extra_args)

    def new_frontend_master(self):
        """ Create and return new frontend attached to new kernel, launched on localhost.
        """
        kernel_manager = self.kernel_manager_class(
                                connection_file=self._new_connection_file(),
                                parent=self,
                                autorestart=True,
        )
        # start the kernel
        kwargs = {}
        # FIXME: remove special treatment of IPython kernels
        if self.kernel_manager.ipykernel:
            kwargs['extra_arguments'] = self.kernel_argv
        kernel_manager.start_kernel(**kwargs)

        kernel_manager.client_factory = self.kernel_client_class
        kernel_client = kernel_manager.client()
        kernel_client.start_channels(shell=True, iopub=True)

        widget = self.widget_factory(config=self.config,
                                     local_kernel=True)
        self.init_colors(widget)
        widget.kernel_manager = kernel_manager
        widget.kernel_client = kernel_client
        # This frontend owns its kernel and may shut it down on close.
        widget._existing = False
        widget._may_close = True
        widget._confirm_exit = self.confirm_exit
        widget._display_banner = self.display_banner
        return widget

    def new_frontend_connection(self, connection_file):
        """Create and return a new frontend attached to an existing kernel.

        Parameters
        ----------
        connection_file : str
            The connection_file path this frontend is to connect to
        """
        kernel_client = self.kernel_client_class(
            connection_file=connection_file,
            config=self.config,
        )
        kernel_client.load_connection_file()
        kernel_client.start_channels()
        widget = self.widget_factory(config=self.config,
                                     local_kernel=False)
        self.init_colors(widget)
        # Attached to an external kernel: never close or confirm-exit it.
        widget._existing = True
        widget._may_close = False
        widget._confirm_exit = False
        widget._display_banner = self.display_banner
        widget.kernel_client = kernel_client
        widget.kernel_manager = None
        return widget

    def new_frontend_slave(self, current_widget):
        """Create and return a new frontend attached to an existing kernel.

        Parameters
        ----------
        current_widget : JupyterWidget
            The JupyterWidget whose kernel this frontend is to share
        """
        kernel_client = self.kernel_client_class(
                                connection_file=current_widget.kernel_client.connection_file,
                                config = self.config,
        )
        kernel_client.load_connection_file()
        kernel_client.start_channels()
        widget = self.widget_factory(config=self.config,
                                     local_kernel=False)
        self.init_colors(widget)
        # Shares the kernel of ``current_widget``; not allowed to close it.
        widget._existing = True
        widget._may_close = False
        widget._confirm_exit = False
        widget._display_banner = self.display_banner
        widget.kernel_client = kernel_client
        widget.kernel_manager = current_widget.kernel_manager
        return widget

    def init_qt_app(self):
        # separate from qt_elements, because it must run first
        self.app = QtWidgets.QApplication(['jupyter-qtconsole'])
        self.app.setApplicationName('jupyter-qtconsole')

    def init_qt_elements(self):
        # Create the widget.
        """Build the main widget, window icon and the application window."""

        base_path = os.path.abspath(os.path.dirname(__file__))
        icon_path = os.path.join(base_path, 'resources', 'icon', 'JupyterConsole.svg')
        self.app.icon = QtGui.QIcon(icon_path)
        QtWidgets.QApplication.setWindowIcon(self.app.icon)

        ip = self.ip
        local_kernel = (not self.existing) or is_local_ip(ip)
        self.widget = self.widget_factory(config=self.config,
                                        local_kernel=local_kernel)
        self.init_colors(self.widget)
        self.widget._existing = self.existing
        self.widget._may_close = not self.existing
        self.widget._confirm_exit = self.confirm_exit
        self.widget._display_banner = self.display_banner

        self.widget.kernel_manager = self.kernel_manager
        self.widget.kernel_client = self.kernel_client
        self.window = MainWindow(self.app,
                                confirm_exit=self.confirm_exit,
                                new_frontend_factory=self.new_frontend_master,
                                slave_frontend_factory=self.new_frontend_slave,
                                connection_frontend_factory=self.new_frontend_connection,
                                )
        self.window.log = self.log
        self.window.add_tab_with_frontend(self.widget)
        self.window.init_menu_bar()

        # Ignore on OSX, where there is always a menu bar
        if sys.platform != 'darwin' and self.hide_menubar:
            self.window.menuBar().setVisible(False)

        self.window.setWindowTitle('Jupyter QtConsole')

    def init_colors(self, widget):
        """Configure the coloring of the widget"""
        # Note: This will be dramatically simplified when colors
        # are removed from the backend.

        # parse the colors arg down to current known labels
        cfg = self.config
        colors = cfg.ZMQInteractiveShell.colors if 'ZMQInteractiveShell.colors' in cfg else None
        style = cfg.JupyterWidget.syntax_style if 'JupyterWidget.syntax_style' in cfg else None
        sheet = cfg.JupyterWidget.style_sheet if 'JupyterWidget.style_sheet' in cfg else None

        # find the value for colors:
        if colors:
            colors=colors.lower()
            if colors in ('lightbg', 'light'):
                colors='lightbg'
            elif colors in ('dark', 'linux'):
                colors='linux'
            else:
                colors='nocolor'
        elif style:
            if style=='bw':
                colors='nocolor'
            elif styles.dark_style(style):
                colors='linux'
            else:
                colors='lightbg'
        else:
            colors=None

        # Configure the style
        if style:
            widget.style_sheet = styles.sheet_from_template(style, colors)
            widget.syntax_style = style
            widget._syntax_style_changed()
            widget._style_sheet_changed()
        elif colors:
            # use a default dark/light/bw style
            widget.set_default_style(colors=colors)

        if self.stylesheet:
            # we got an explicit stylesheet
            if os.path.isfile(self.stylesheet):
                with open(self.stylesheet) as f:
                    sheet = f.read()
            else:
                raise IOError("Stylesheet %r not found." % self.stylesheet)
        if sheet:
            widget.style_sheet = sheet
            widget._style_sheet_changed()

    def init_signal(self):
        """allow clean shutdown on sigint"""
        signal.signal(signal.SIGINT, lambda sig, frame: self.exit(-2))
        # need a timer, so that QApplication doesn't block until a real
        # Qt event fires (can require mouse movement)
        # timer trick from http://stackoverflow.com/q/4938723/938949
        timer = QtCore.QTimer()
        # Let the interpreter run each 200 ms:
        timer.timeout.connect(lambda: None)
        timer.start(200)
        # hold onto ref, so the timer doesn't get cleaned up
        self._sigint_timer = timer

    def _deprecate_config(self, cfg, old_name, new_name):
        """Warn about deprecated config."""
        if old_name in cfg:
            self.log.warning(
                "Use %s in config, not %s. Outdated config:\n    %s",
                new_name, old_name,
                '\n    '.join(
                    '{name}.{key} = {value!r}'.format(key=key, value=value,
                                                      name=old_name)
                    for key, value in self.config[old_name].items()
                )
            )
            # Merge old section into the new one on a copy, leaving the
            # caller's config object untouched.
            cfg = cfg.copy()
            cfg[new_name].merge(cfg[old_name])
            return cfg

    def _init_asyncio_patch(self):
        """
        Same workaround fix as https://github.com/ipython/ipykernel/pull/456

        Set default asyncio policy to be compatible with tornado
        Tornado 6 (at least) is not compatible with the default
        asyncio implementation on Windows
        Pick the older SelectorEventLoopPolicy on Windows
        if the known-incompatible default policy is in use.
        do this as early as possible to make it a low priority and overrideable
        ref: https://github.com/tornadoweb/tornado/issues/2608
        FIXME: if/when tornado supports the defaults in asyncio,
               remove and bump tornado requirement for py38
        """
        if sys.platform.startswith("win") and sys.version_info >= (3, 8):
            import asyncio
            try:
                from asyncio import (
                    WindowsProactorEventLoopPolicy,
                    WindowsSelectorEventLoopPolicy,
                )
            except ImportError:
                pass
                # not affected
            else:
                if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                    # WindowsProactorEventLoopPolicy is not compatible with tornado 6
                    # fallback to the pre-3.8 default of Selector
                    asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())

    @catch_config_error
    def initialize(self, argv=None):
        """Initialize the app: Qt first, then config/kernel, then widgets."""
        self._init_asyncio_patch()
        self.init_qt_app()
        super(JupyterQtConsoleApp, self).initialize(argv)
        if self._dispatching:
            # A subcommand was dispatched; nothing more to set up here.
            return
        # handle deprecated renames
        for old_name, new_name in [
            ('IPythonQtConsoleApp', 'JupyterQtConsole'),
            ('IPythonWidget', 'JupyterWidget'),
            ('RichIPythonWidget', 'RichJupyterWidget'),
        ]:
            cfg = self._deprecate_config(self.config, old_name, new_name)
            if cfg:
                self.update_config(cfg)
        JupyterConsoleApp.initialize(self,argv)
        self.init_qt_elements()
        self.init_signal()

    def start(self):
        """Show the window and enter the Qt main loop (blocks)."""
        super(JupyterQtConsoleApp, self).start()

        # draw the window
        if self.maximize:
            self.window.showMaximized()
        else:
            self.window.show()
        self.window.raise_()

        # Start the application main loop.
        self.app.exec_()
class IPythonQtConsoleApp(JupyterQtConsoleApp):
    # Deprecated IPython-era alias; kept only for backward compatibility.
    def __init__(self, *a, **kw):
        warn("IPythonQtConsoleApp is deprecated; use JupyterQtConsoleApp",
             DeprecationWarning)
        super(IPythonQtConsoleApp, self).__init__(*a, **kw)
# -----------------------------------------------------------------------------
# Main entry point
# -----------------------------------------------------------------------------

def main():
    """Console-script entry point: launch the Qt console application."""
    JupyterQtConsoleApp.launch_instance()


if __name__ == '__main__':
    main()
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/qtconsole/qtconsoleapp.py",
"copies": "1",
"size": "16819",
"license": "mit",
"hash": 2770537533948160500,
"line_mean": 35.4837310195,
"line_max": 96,
"alpha_frac": 0.6084190499,
"autogenerated": false,
"ratio": 4.241866330390921,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029337545085013983,
"num_lines": 461
} |
""" A minimal application using the ZMQ-based terminal IPython frontend.
This is not a complete console app, as subprocess will not be able to receive
input, there is no real readline support, among other limitations.
Authors:
* Min RK
* Paul Ivanov
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import signal
import sys
import time
from IPython.frontend.terminal.ipapp import TerminalIPythonApp, frontend_flags as term_flags
from IPython.utils.traitlets import (
Dict, List, Unicode, Int, CaselessStrEnum, CBool, Any
)
from IPython.utils.warn import warn,error
from IPython.zmq.ipkernel import IPKernelApp
from IPython.zmq.session import Session, default_secure
from IPython.zmq.zmqshell import ZMQInteractiveShell
from IPython.frontend.consoleapp import (
IPythonConsoleApp, app_aliases, app_flags, aliases, app_aliases, flags
)
from IPython.frontend.terminal.console.interactiveshell import ZMQTerminalInteractiveShell
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------

# Usage examples shown in ``ipython console --help``.
_examples = """
ipython console # start the ZMQ-based console
ipython console --existing # connect to an existing ipython session
"""

#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------

# copy flags from mixin:
flags = dict(flags)
# start with mixin frontend flags:
frontend_flags = dict(app_flags)
# add TerminalIPApp flags:
frontend_flags.update(term_flags)
# disable quick startup, as it won't propagate to the kernel anyway
frontend_flags.pop('quick')
# update full dict with frontend flags:
flags.update(frontend_flags)

# copy flags from mixin
aliases = dict(aliases)
# start with mixin frontend flags
frontend_aliases = dict(app_aliases)
# load updated frontend flags into full dict
aliases.update(frontend_aliases)

# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
frontend_aliases = set(frontend_aliases.keys())
frontend_flags = set(frontend_flags.keys())
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ZMQTerminalIPythonApp(TerminalIPythonApp, IPythonConsoleApp):
    """Start a terminal frontend to the IPython zmq kernel."""
    # Fix: this docstring was previously placed *after* the ``name``
    # assignment, where it was a discarded string expression rather than
    # the class docstring; as the first statement it becomes ``__doc__``.

    name = "ipython-console"

    description = """
        The IPython terminal-based Console.

        This launches a Console application inside a terminal.

        The Console supports various extra features beyond the traditional
        single-process Terminal IPython shell, such as connecting to an
        existing ipython session, via:

            ipython console --existing

        where the previous session could have been created by another ipython
        console, an ipython qtconsole, or by opening an ipython notebook.

    """
    examples = _examples

    classes = [ZMQTerminalInteractiveShell] + IPythonConsoleApp.classes
    flags = Dict(flags)
    aliases = Dict(aliases)
    frontend_aliases = Any(frontend_aliases)
    frontend_flags = Any(frontend_flags)

    # No subcommands for the console app.
    subcommands = Dict()

    def parse_command_line(self, argv=None):
        """Parse CLI args; remaining args are forwarded to the kernel."""
        super(ZMQTerminalIPythonApp, self).parse_command_line(argv)
        self.build_kernel_argv(argv)

    def init_shell(self):
        """Create the ZMQ-backed interactive shell and install the SIGINT relay."""
        IPythonConsoleApp.initialize(self)
        # relay sigint to kernel
        signal.signal(signal.SIGINT, self.handle_sigint)
        self.shell = ZMQTerminalInteractiveShell.instance(config=self.config,
                        display_banner=False, profile_dir=self.profile_dir,
                        ipython_dir=self.ipython_dir, kernel_manager=self.kernel_manager)

    def init_gui_pylab(self):
        # no-op, because we don't want to import matplotlib in the frontend.
        pass

    def handle_sigint(self, *args):
        """Relay Ctrl-C: interrupt a running kernel we own, or redraw the prompt."""
        if self.shell._executing:
            if self.kernel_manager.has_kernel:
                # interrupt already gets passed to subprocess by signal handler.
                # Only if we prevent that should we need to explicitly call
                # interrupt_kernel, until which time, this would result in a
                # double-interrupt:
                # self.kernel_manager.interrupt_kernel()
                pass
            else:
                self.shell.write_err('\n')
                error("Cannot interrupt kernels we didn't start.\n")
        else:
            # raise the KeyboardInterrupt if we aren't waiting for execution,
            # so that the interact loop advances, and prompt is redrawn, etc.
            raise KeyboardInterrupt

    def init_code(self):
        # no-op in the frontend, code gets run in the backend
        pass
def launch_new_instance():
    """Create and run a full blown IPython instance"""
    # ``instance()`` is the singleton accessor from traitlets' Application.
    console_app = ZMQTerminalIPythonApp.instance()
    console_app.initialize()
    console_app.start()


if __name__ == '__main__':
    launch_new_instance()
| {
"repo_name": "sodafree/backend",
"path": "build/ipython/build/lib.linux-i686-2.7/IPython/frontend/terminal/console/app.py",
"copies": "3",
"size": "5217",
"license": "bsd-3-clause",
"hash": 1332183983444967700,
"line_mean": 32.8766233766,
"line_max": 92,
"alpha_frac": 0.609929078,
"autogenerated": false,
"ratio": 4.666368515205725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6776297593205725,
"avg_score": null,
"num_lines": null
} |
"""A minimal example to lint playbook for undefined variables"""
# pylint: disable=W0603,W0613
# TODO: Add tests.
import inspect
import multiprocessing
import os
import sys
from ansible.cli.playbook import PlaybookCLI # pylint: disable=E0611,F0401
from ansible.errors import AnsibleUndefinedVariable
from ansible.plugins.strategy import StrategyBase # pylint: disable=E0611,F0401
from ansible.executor.process.worker import WorkerProcess # pylint: disable=E0611,F0401
from collections import defaultdict
from Queue import Empty
from interceptor import intercept
from example.lint_pbook.composite_queue import CompositeQueue
# Override multiprocess Queue with Composite Queue.
multiprocessing.Queue = CompositeQueue

# Classes to intercept
ANSIBLE_CLASSES = (AnsibleUndefinedVariable, StrategyBase, WorkerProcess)
# Dictionary of task, set of its exceptions
RESULT = defaultdict(set)
# Interceptor internal method names to be skipped while backtracking through call stack.
INTERCEPTOR_INTERNAL_METHODS = {'run_advices', 'trivial'}
# Composite queue internal method names to be skipped for while backtracking through call stack.
COMPOSITE_QUEUE_INTERNAL_METHODS = {'meth', 'get_instance_attr'}
# Union of all frame names ignored during stack backtracking.
SKIP_METHODS = INTERCEPTOR_INTERNAL_METHODS.union(COMPOSITE_QUEUE_INTERNAL_METHODS)
def main():
    """Run playbook"""
    # Force Ansible's check mode so linting never mutates target hosts.
    if '--check' not in sys.argv:
        sys.argv.append('--check')
    cli = PlaybookCLI(sys.argv)
    cli.parse()
    cli.run()
def queue_exc(*arg, **kw):
    """Queue undefined variable exception

    Interceptor advice: when an AnsibleUndefinedVariable is constructed,
    walk the call stack to locate the enclosing WorkerProcess and push the
    exception message onto its result queue.
    NOTE(review): ``arg[3]`` is presumably the exception message argument
    passed by the interceptor — confirm against interceptor's advice calling
    convention.
    """
    _self = arg[0]
    if not isinstance(_self, AnsibleUndefinedVariable):
        # Run for AnsibleUndefinedVariable instance
        return
    _rslt_q = None
    for stack_trace in inspect.stack():
        # Check if method to be skipped
        if stack_trace[3] in SKIP_METHODS:
            continue
        _frame = stack_trace[0]
        _locals = inspect.getargvalues(_frame).locals
        if 'self' not in _locals:
            continue
        # Check if current frame instance of worker
        if isinstance(_locals['self'], WorkerProcess):
            # Get queue to add exception
            # NOTE(review): no break here — if several WorkerProcess frames
            # are on the stack, the outermost one's queue wins. Verify this
            # is intended.
            _rslt_q = getattr(_locals['self'], '_rslt_q')
    if not _rslt_q:
        raise ValueError("No Queue found.")
    # Add interceptor exception
    _rslt_q.put(arg[3].message, interceptor=True)
def extract_worker_exc(*arg, **kw):
    """Get exception added by worker

    Interceptor advice run before StrategyBase.run: drains each worker's
    result queue of interceptor-added messages and records them in the
    module-level RESULT mapping, keyed by task name.
    """
    _self = arg[0]
    if not isinstance(_self, StrategyBase):
        # Run for StrategyBase instance only
        return
    # Iterate over workers to get their task and queue
    for _worker_prc, _main_q, _rslt_q in _self._workers:
        _task = _worker_prc._task
        if _task.action == 'setup':
            # Ignore setup
            continue
        # Do till queue is empty for the worker
        while True:
            try:
                _exc = _rslt_q.get(block=False, interceptor=True)
                RESULT[_task.name].add(_exc)
            except Empty:
                # Queue drained for this worker.
                break
if __name__ == '__main__':
    # Disable ansible console output
    _STDOUT = sys.stdout
    fptr = open(os.devnull, 'w')  # pylint: disable=C0103
    sys.stdout = fptr
    # Advice mapping: method-name pattern -> interceptor hooks to install.
    ASPECTS = {
        r'__init__': dict(
            around_after=queue_exc
        ),
        r'run': dict(
            before=extract_worker_exc
        )
    }
    for _class in ANSIBLE_CLASSES:
        intercept(ASPECTS)(_class)
    # Run playbook in check mode.
    main()
    # Enable console output
    sys.stdout = _STDOUT
    if not RESULT:
        print "Valid playbook"
        sys.exit()
    print "Linter Output"
    print "#" * 20
    for task, errors in RESULT.items():
        print 'Task: {1}{0}{2}{0}'.format('\n', task, '\n'.join(errors))
| {
"repo_name": "host-anshu/simpleInterceptor",
"path": "example/lint_pbook/lint.py",
"copies": "1",
"size": "3760",
"license": "mit",
"hash": 4068460928338356700,
"line_mean": 30.8644067797,
"line_max": 96,
"alpha_frac": 0.6478723404,
"autogenerated": false,
"ratio": 3.9085239085239087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5056396248923909,
"avg_score": null,
"num_lines": null
} |
"""A minimal, full screen text editor.
"""
import re
from Tkinter import *
from textarea import TextArea
from tkSimpleDialog import askstring
from tkFileDialog import asksaveasfilename, askopenfilename
from tkMessageBox import askokcancel
import os_sniffer
class MinimalEdit(Tk):
"""The minimaledit application.
Create, and then call the .mainloop() method.
"""
is_fullscreen = False
saved_geometry = None
textarea = None
def __init__(self):
Tk.__init__(self)
self.makewidgets()
self.mapkeys()
self.style()
# Catch the "click on close" event.
self.protocol("WM_DELETE_WINDOW", self.onquit)
def style(self):
self.config(background="#000000",
borderwidth=0,
highlightbackground="#000000")
if os_sniffer.is_mac:
# Trying to get rid of the title bar on a mac.
# This must be done before the window is actually created.
# If we want to be able to have a title bar in windowed mode and
# have no title bar in minimal editing mode, we'll need to rebuild
# the window.
self.tk.call("::tk::unsupported::MacWindowStyle", "style", self._w, "plain", "noTitleBar")
# On Mac OS X, attempt to make the window appear on top vs. behind
# other windows.
# This is a total hack, but it seems to work quite well.
# We set up a delayed callback that will ensure the application is
# up and running, force the application to be topmost which will,
# even on mac move the application above all other windows in the
# desktop, but then allow the window to lose focus later if the user
# chooses to switch to another application in the desktop.
# I'm working around an apparent bug according to the interwebs,
# but it seems to work.
# NOTE: The best I can do with this so far is setting the window on
# top. Having the window have focus is not happening when not running
# this application as a py2app.
# TODO: test running as a py2app to see if I can grab focus.
def mac_osx_lift_callback():
self.tk.call('wm', 'attributes', '.', '-topmost', '1')
self.tk.call('update')
self.tk.call('wm', 'attributes', '.', '-topmost', '0')
self.tk.call('update')
self.after(100, mac_osx_lift_callback)
def makewidgets(self):
self.title("MinimalEdit")
self.textarea = TextArea(self)
def mapkeys(self):
if os_sniffer.is_mac:
command_key = "Command"
else:
command_key = "Control"
self.bind("<"+command_key+"-n>", lambda a: self.onnew())
self.bind("<"+command_key+"-l>", lambda a: self.onload())
self.bind("<"+command_key+"-s>", lambda a: self.onsave())
self.bind("<"+command_key+"-f>", lambda a: self.onfind())
self.bind("<"+command_key+"-q>", lambda a: self.onquit())
self.bind("<Escape>", lambda a: self.toggleminimal())
def onquit(self):
ans = askokcancel("Verify exit", "Really quit?")
if ans:
# NOTE: On Mac, there seems to be a bug with preventing the
# program from quitting. We can intercept the event and do
# what clean up we need to do here. However, once this function
# exits, on mac os x (but not on windows) our program will close
# no matter what. This is a tkinter version problem on mac.
# TODO: Need to confirm that I'm using the non-Apple tkinter.
self.quit()
def onnew(self):
ans = askokcancel("Verify new file", "Really start a new file?")
if ans:
self.textarea.settext(text='')
def onload(self):
filename = askopenfilename()
if filename:
f = open(filename, 'r')
alltext = f.read()
self.textarea.settext(alltext)
f.close()
def onsave(self):
filename = asksaveasfilename()
if filename:
alltext = self.textarea.gettext()
f = open(filename, 'w')
f.write(alltext)
f.close()
def onfind(self):
target = askstring('Search String', '')
if target:
self.textarea.findtext(target)
def storegeometry(self):
pg = re.split(r"x|\+", self.geometry())
self.saved_geometry = tuple(pg)
def restoregeometry(self):
if self.saved_geometry:
self.geometry("{0[0]}x{0[1]}+{0[2]}+{0[3]}".format(self.saved_geometry))
def setfullscreenview(self):
# Store the previous geometry
self.storegeometry()
if os_sniffer.is_mac:
# Assuming this is TK 8.5 or higher
self.wm_attributes('-fullscreen', 1)
# Default for all systems
self.overrideredirect(1)
self.geometry("{0}x{1}+0+0".format(self.winfo_screenwidth(), self.winfo_screenheight()))
def setwindowedview(self):
if os_sniffer.is_mac:
# Assuming this is TK 8.5 or higher
self.wm_attributes('-fullscreen', 0)
# Default for all systems
self.overrideredirect(0)
self.restoregeometry()
def toggleminimal(self):
    """Flip between fullscreen "minimal" mode and the windowed view.

    The text area is told to toggle its own minimal styling in both
    directions, so that call is hoisted out of the branches (the
    original duplicated it and used the non-idiomatic ``== False``).
    """
    self.is_fullscreen = not self.is_fullscreen
    if self.is_fullscreen:
        self.setfullscreenview()
    else:
        self.setwindowedview()
    self.textarea.toggleminimal()
if __name__ == '__main__':
    # Launch the editor and hand control to Tk's event loop.
    editor = MinimalEdit()
    editor.mainloop()
| {
"repo_name": "jeremyosborne/python",
"path": "general/minimaledit/minimaledit.py",
"copies": "1",
"size": "6007",
"license": "mit",
"hash": 4360021962408174000,
"line_mean": 33.9700598802,
"line_max": 102,
"alpha_frac": 0.562676877,
"autogenerated": false,
"ratio": 4.148480662983426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007416703763568958,
"num_lines": 167
} |
"""A minimalistic FTP util. A shell client for Python ftplib
getwelcome - Return the welcome message sent by the server in reply to the initial connection. (This message sometimes
contains disclaimers or help information that may be relevant to the user.)
connect [host=''] [port=0] [timeout=None] - Connect to the given host and port. The default port number is 21, as
specified by the FTP protocol specification. It is rarely needed to specify a different port number. This function
should be called only once for each instance; it should not be called at all if a host was given when the instance was
created. All other methods can only be used after a connection has been made. The optional timeout parameter specifies
a timeout in seconds for the connection attempt. If no timeout is passed, the global default timeout setting will be
used.
login [user='anonymous'] [passwd=''] [acct=''] - Log in as the given user. The passwd and acct parameters are optional
and default to the empty string. If no user is specified, it defaults to 'anonymous'. If user is 'anonymous', the
default passwd is 'anonymous@'. This function should be called only once for each instance, after a connection has been
established; it should not be called at all if a host and user were given when the instance was created. Most FTP
commands are only allowed after the client has logged in. The acct parameter supplies “accounting information”;
few systems implement this.
set_debuglevel [level] - Set the instance’s debugging level. This controls the amount of debugging output printed. The
default, 0, produces no debugging output. A value of 1 produces a moderate amount of debugging output, generally a
single line per request. A value of 2 or higher produces the maximum amount of debugging output, logging each line sent
and received on the control connection.
nlst [directory] - Return a list of file names as returned by the NLST command. The optional argument is a directory to
list (default is the current server directory). Multiple arguments can be used to pass non-standard options to the NLST
command.
dir [directory] - Produce a directory listing as returned by the LIST command, printing it to standard output. The
optional argument is a directory to list (default is the current server directory).
cwd [pathname] - Set the current directory on the server.
mkd [pathname] - Create a new directory on the server.
pwd - Return the pathname of the current directory on the server.
rmd [dirname] - Remove the directory named dirname on the server.
size [filename] - Request the size of the file named filename on the server. On success, the size of the file is
returned as an integer, otherwise None is returned. Note that the SIZE command is not standardized, but is supported by
many common server implementations.
rename [fromname] [toname] - Rename file fromname on the server to toname.
delete [filename] - Remove the file named filename from the server. If successful, returns the text of the response.
retrieve [filename] [destination] - Retrieve a file in binary transfer mode and save it to destination folder.
store [filename] [origin] - Store a file located in origin using binary transfer mode.
quit - Send a QUIT command to the server and close the connection.
exit - Send a QUIT command to the server and close the connection. Same as quit.
close - Send a QUIT command to the server and close the connection. Same as quit.
help - display help
"""
import os
import sys
from ftplib import FTP, error_perm, all_errors
from getpass import getpass
from ftp.ftptracker import FTPTracker
# Prefer gnureadline (better line editing, notably on macOS), fall back to
# the stdlib readline, and leave `readline` as None when neither exists.
# The original code unconditionally re-imported `readline` after the
# gnureadline attempt, clobbering a successful gnureadline import.
try:
    import gnureadline as readline
except ImportError:
    try:
        import readline
    except ImportError:
        readline = None
# --- Interactive session setup -------------------------------------------
print("FTP util\n")
# Strip any scheme prefix the user may have pasted along with the hostname.
host = input("Enter FTP hostname: ").replace('http://', '').replace('ftp://', '')
user = input("Enter username: ")
password = getpass("Enter password: ")
try:
    ftp = FTP(host)
except all_errors as error:
    # Connection failed: show the error, wait for Enter, then exit.
    print(error)
    input()
    sys.exit()
else:
    print(ftp.getwelcome())
try:
    print(ftp.login(user, password))
except error_perm as error:
    # Login rejected: show the error, wait for Enter, then exit.
    print(error)
    input()
    sys.exit()
if readline:
    # NOTE(review): set_startup_hook() with no argument *clears* the hook;
    # line history/editing comes from importing readline itself -- confirm intent.
    readline.set_startup_hook()  # Enables input history
# --- Command loop --------------------------------------------------------
# Each prompt line is split on spaces: first token is the command, the rest
# are its arguments. Unknown commands fall through to ftplib via getattr.
while True:
    try:
        user_input = input(">> ").split(" ")
        command = user_input[0]
        arguments = user_input[1:]
        if command in ('exit', 'quit', 'close'):
            print(ftp.quit())
            sys.exit()
        elif command == 'help':
            # The module docstring doubles as the help text.
            print(__doc__)
        elif command == 'retrieve':
            # Download: progress is tracked against the remote file size.
            tracker = FTPTracker(ftp.size(arguments[0]))
            if len(arguments) < 2:
                # Default destination name = remote name.
                arguments.append(arguments[0])
            with open(arguments[1], 'wb') as file:
                print(ftp.retrbinary('RETR {}'.format(arguments[0]),
                                     lambda block: (file.write(block), tracker.handle(block))))
        elif command == 'store':
            # Upload: progress is tracked against the local file size.
            tracker = FTPTracker(os.path.getsize(arguments[0]))
            if len(arguments) < 2:
                # Default local origin name = remote target name.
                arguments.append(arguments[0])
            with open(arguments[1], 'rb') as file:
                print(ftp.storbinary('STOR {}'.format(arguments[0]), file, callback=tracker.handle))
        elif not command:
            # Blank input line: just prompt again.
            continue
        else:
            # Generic dispatch to the ftplib method of the same name.
            func = getattr(ftp, command)
            # Purely numeric arguments (e.g. ports, debug levels) become ints.
            arguments = list(map(lambda argument: int(argument) if argument.isdigit() else argument, arguments))
            result = func(*arguments)
            if result:
                print(result)
    except all_errors as error:
        print(error)
    except (TypeError, IndexError):
        print('Invalid amount of arguments')
    except AttributeError:
        print('Unknown command: "{}"'.format(command))
| {
"repo_name": "litvinchuck/python-scripts",
"path": "ftp.py",
"copies": "2",
"size": "5795",
"license": "mit",
"hash": 7338775312013232000,
"line_mean": 40.6474820144,
"line_max": 119,
"alpha_frac": 0.7004664018,
"autogenerated": false,
"ratio": 4.253490080822925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002098547617542011,
"num_lines": 139
} |
""" A minimal jsonschema validator.
Supports only a tiny subset of jsonschema.
"""
from croniter import croniter
from pytz import all_timezones
class ValidationError(Exception):
    """Raised when a value does not conform to its schema."""
def validate(obj, schema, obj_name="value"):
    """Check *obj* against a minimal jsonschema-style *schema*.

    Supported: "string" (minLength/maxLength, formats "cron" and
    "timezone"), "number" (integers only; minimum/maximum), "boolean",
    "array" (items), "object" (properties/required) and "enum".

    Raises ValidationError on the first violation; returns None on success.
    """
    if schema.get("type") == "string":
        if not isinstance(obj, str):
            raise ValidationError("%s is not a string" % obj_name)
        if "minLength" in schema and len(obj) < schema["minLength"]:
            raise ValidationError("%s is too short" % obj_name)
        if "maxLength" in schema and len(obj) > schema["maxLength"]:
            raise ValidationError("%s is too long" % obj_name)
        if schema.get("format") == "cron":
            try:
                # Does it have 5 components?
                if len(obj.split()) != 5:
                    raise ValueError()
                # Does croniter accept the schedule?
                it = croniter(obj)
                # Can it calculate the next datetime?
                it.next()
            except Exception:
                # `except Exception` instead of a bare except: a bare except
                # would also swallow SystemExit/KeyboardInterrupt.
                raise ValidationError("%s is not a valid cron expression" % obj_name)
        if schema.get("format") == "timezone" and obj not in all_timezones:
            raise ValidationError("%s is not a valid timezone" % obj_name)
    elif schema.get("type") == "number":
        # bool is a subclass of int, but True/False are not numbers here.
        if isinstance(obj, bool) or not isinstance(obj, int):
            raise ValidationError("%s is not a number" % obj_name)
        if "minimum" in schema and obj < schema["minimum"]:
            raise ValidationError("%s is too small" % obj_name)
        if "maximum" in schema and obj > schema["maximum"]:
            raise ValidationError("%s is too large" % obj_name)
    elif schema.get("type") == "boolean":
        if not isinstance(obj, bool):
            raise ValidationError("%s is not a boolean" % obj_name)
    elif schema.get("type") == "array":
        if not isinstance(obj, list):
            raise ValidationError("%s is not an array" % obj_name)
        for v in obj:
            validate(v, schema["items"], "an item in '%s'" % obj_name)
    elif schema.get("type") == "object":
        if not isinstance(obj, dict):
            raise ValidationError("%s is not an object" % obj_name)
        properties = schema.get("properties", {})
        for key, spec in properties.items():
            if key in obj:
                validate(obj[key], spec, obj_name=key)
        for key in schema.get("required", []):
            if key not in obj:
                raise ValidationError("key %s absent in %s" % (key, obj_name))
    if "enum" in schema:
        if obj not in schema["enum"]:
            raise ValidationError("%s has unexpected value" % obj_name)
| {
"repo_name": "healthchecks/healthchecks",
"path": "hc/lib/jsonschema.py",
"copies": "2",
"size": "2662",
"license": "bsd-3-clause",
"hash": 2121037052741972000,
"line_mean": 35.9722222222,
"line_max": 85,
"alpha_frac": 0.5694966191,
"autogenerated": false,
"ratio": 4.245614035087719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5815110654187718,
"avg_score": null,
"num_lines": null
} |
# a (minimal) python script to make a (24BPP) PNG out of RGB data
# only with standard dependencies
# Ange Albertini BSD Licence 2015
import struct, sys, zlib, binascii
# Named constants for the IHDR header fields (see the PNG specification).
DEPTH8 = 8
MODE_TRUECOLOR = 2
COMPRESSION_DEFLATE = 0
NO_FILTER = 0
NO_INTERLACING = 0
# Usage: rgb2png.py <rawfile> <width> <height>
rawfilename, width, height = sys.argv[1:4]
width, height = int(width), int(height)
with open(rawfilename, "rb") as source:
    rawdata = source.read()
# NOTE(review): this script concatenates str literals with file data, so it
# is Python 2 only; under Python 3 the "\0" + bytes mix would fail.
image_data = []
for i in range(0, len(rawdata), width * 3):
    # each line starts with an extra filter byte, that we don't use here
    image_data.append("\0")
    image_data.append(rawdata[i:i+width * 3])
#chunks [type (4 letters), chunk data]
chunks = [
    ["IHDR", struct.pack(">IIBBBBB",
        width, height,
        DEPTH8, MODE_TRUECOLOR, COMPRESSION_DEFLATE, NO_FILTER, NO_INTERLACING
        )],
    #the Image Data chunk is just Zlib-ed filter+pixels lines
    ["IDAT", zlib.compress("".join(image_data), 9)],
    ["IEND", ""]
    ]
with open("%s.png" % rawfilename, "wb") as target:
    # the magic sig
    target.write("\x89PNG\x0d\x0a\x1a\x0a")
    # a sequence of chunk
    for type, data in chunks:
        target.write("".join([
            # Length, Type, Data, CRC32
            struct.pack(">I", len(data)),
            type,
            data,
            # Mask to 32 bits: crc32 may return negative ints on Python 2.
            struct.pack(">I", binascii.crc32(type + data) & 0xffffffff)
            ]))
| {
"repo_name": "angea/corkami",
"path": "misc/python/rgb2png.py",
"copies": "1",
"size": "1367",
"license": "bsd-2-clause",
"hash": -5463465958753986000,
"line_mean": 26.34,
"line_max": 78,
"alpha_frac": 0.6181419166,
"autogenerated": false,
"ratio": 3.1790697674418604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42972116840418606,
"avg_score": null,
"num_lines": null
} |
"""A minimal SExpression parser for terms, goals, and theorems from HOL Light.
Assumes SExpressions of the form '(word1 word1 (word1) () (() ()))'.
That is, spaces and parantheses are treated as separators, bare words are
accepted as SExpressions, and nodes can have 0 children. The expression above
has 5 children: 'word1', 'word1', '(word1)', '()', and '(() ())'. The order of
children is respected.
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
from typing import List, Optional, Text
class SExpParseError(Exception):
  """Raised when an SExpression is malformed or a position is out of range."""
def is_start_of_word(sexp: Text, pos: int) -> bool:
  """Returns True if a word starts at index pos of sexp.

  A word starts at the beginning of the string or right after a space/'('
  separator, and the character at pos must not itself be a separator.

  Raises:
    SExpParseError: if pos is out of bounds.
  """
  if pos < 0 or pos > len(sexp):
    raise SExpParseError('Position %d out of bounds of string %s.' %
                         (pos, sexp))
  if pos == len(sexp):
    return False  # one past the last character; nothing can start here
  # The original relied on 'or' binding looser than 'and', which made
  # pos == 0 unconditionally a word start (even at a separator) and could
  # index past the end of the string. Parenthesize and bounds-check instead.
  return (pos == 0 or sexp[pos - 1] in [' ', '(']) and sexp[pos] not in [' ', ')']
def is_end_of_word(sexp: Text, pos: int) -> bool:
  """Returns True if pos is just past a word: end of string or a separator.

  The original annotated the return type as int, but the expression below
  evaluates to a bool; the annotation is corrected here.

  Raises:
    SExpParseError: if pos is out of bounds.
  """
  if pos < 0 or pos > len(sexp):
    raise SExpParseError('Position %d out of bounds of string %s.' %
                         (pos, sexp))
  return pos == len(sexp) or sexp[pos] in [' ', ')']
def end_of_word(sexp: Text, start: int) -> Optional[int]:
  """Returns the index one past the bare word beginning at start."""
  if not is_start_of_word(sexp, start):
    raise SExpParseError('end_of_word called in the middle of a word pos %d.' %
                         start)
  if is_end_of_word(sexp, start):
    raise SExpParseError('Beginning and end of word coincide at pos %d.' %
                         start)
  # Scan forward to the first position that terminates the word.
  return next((idx for idx in range(start, len(sexp) + 1)
               if is_end_of_word(sexp, idx)), None)
def end_of_child(sexp: Text, start: int) -> int:
  """Returns the index of the end of the child expression + 1."""
  if not is_start_of_word(sexp, start):
    raise SExpParseError(
        'end_of_child must be called at begginning of a word (pos %d)' % start)
  if sexp[start] != '(':
    # A bare word: the child ends where the word ends.
    return end_of_word(sexp, start)  # pytype: disable=bad-return-type
  # A parenthesized child: advance until the opening paren is balanced.
  depth = 0
  for offset, ch in enumerate(sexp[start:]):
    if ch == '(':
      depth += 1
    elif ch == ')':
      depth -= 1
      if depth == 0:
        return start + offset + 1
def validate_parens(sexp: Text):
  """Ensures the expression is wrapped in balanced parentheses."""
  if sexp[0] != '(' or sexp[-1] != ')':
    raise SExpParseError(
        'SExpressions must start and end with parantheses: %s' % sexp)
  depth = 0
  for idx, ch in enumerate(sexp):
    if ch == '(':
      depth += 1
    elif ch == ')':
      depth -= 1
    # The outer pair may only close on the very last character.
    if depth <= 0 and idx != len(sexp) - 1:
      raise SExpParseError(
          'Closing parenthesis before end of expression at pos %d' % idx)
  if depth > 0:
    raise SExpParseError(
        'Expression not closed; not enough closing parantheses: %s' % sexp)
def is_bare_word(sexp: Text):
  """Base case of SExpressions: True when sexp contains no separator chars."""
  return all(ch not in (' ', '(', ')') for ch in sexp)
def children(sexp: Text) -> List[Text]:
  """Returns the children of an SExpression.

  A bare word has no children. Otherwise the expression is validated and
  the region between the outer parentheses is split into top-level child
  spans, preserving their order.
  """
  if is_bare_word(sexp):
    return []
  validate_parens(sexp)
  pos = 1  # skip the opening parenthesis
  result = []
  while pos < len(sexp) - 1:
    # Skip separators until the next child begins.
    while not is_start_of_word(sexp, pos):
      pos += 1
    end = end_of_child(sexp, pos)
    result.append(sexp[pos:end])
    pos = end
  return result
| {
"repo_name": "tensorflow/deepmath",
"path": "deepmath/deephol/utilities/sexpression_parser.py",
"copies": "1",
"size": "3443",
"license": "apache-2.0",
"hash": 2436482763530975000,
"line_mean": 30.8796296296,
"line_max": 80,
"alpha_frac": 0.6148707523,
"autogenerated": false,
"ratio": 3.3623046875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44771754398,
"avg_score": null,
"num_lines": null
} |
"""A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.
Don't import directly from third-party code; use the `locale` module instead!
"""
import sys
import _locale
# Define getpreferredencoding() differently per platform so that interpreter
# startup (_io) can learn the locale encoding without importing the full
# locale module.
if sys.platform.startswith("win"):
    def getpreferredencoding(do_setlocale=True):
        # Windows: honor PEP 540 UTF-8 mode, otherwise report the code page
        # from the CRT's default locale.
        if sys.flags.utf8_mode:
            return 'UTF-8'
        return _locale._getdefaultlocale()[1]
else:
    try:
        # Most POSIX platforms expose CODESET via langinfo.
        _locale.CODESET
    except AttributeError:
        if hasattr(sys, 'getandroidapilevel'):
            # On Android langinfo.h and CODESET are missing, and UTF-8 is
            # always used in mbstowcs() and wcstombs().
            def getpreferredencoding(do_setlocale=True):
                return 'UTF-8'
        else:
            def getpreferredencoding(do_setlocale=True):
                if sys.flags.utf8_mode:
                    return 'UTF-8'
                # This path for legacy systems needs the more complex
                # getdefaultlocale() function, import the full locale module.
                import locale
                return locale.getpreferredencoding(do_setlocale)
    else:
        def getpreferredencoding(do_setlocale=True):
            # Callers on this path must pass do_setlocale=False (the assert
            # enforces it); the full locale module handles the True case.
            assert not do_setlocale
            if sys.flags.utf8_mode:
                return 'UTF-8'
            result = _locale.nl_langinfo(_locale.CODESET)
            if not result and sys.platform == 'darwin':
                # nl_langinfo can return an empty string
                # when the setting has an invalid value.
                # Default to UTF-8 in that case because
                # UTF-8 is the default charset on OSX and
                # returning nothing will crash the
                # interpreter.
                result = 'UTF-8'
            return result
| {
"repo_name": "huguesv/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/_bootlocale.py",
"copies": "40",
"size": "1801",
"license": "apache-2.0",
"hash": 6849969450716602000,
"line_mean": 38.152173913,
"line_max": 77,
"alpha_frac": 0.588006663,
"autogenerated": false,
"ratio": 4.3189448441247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A minimal text editor.
#
# To be done:
# - Update viewrect after resize
# - Handle horizontal scrollbar correctly
# - Functionality: find, etc.
from Carbon.Menu import DrawMenuBar
from FrameWork import *
from Carbon import Win
from Carbon import Qd
from Carbon import TE
from Carbon import Scrap
import os
import macfs
class TEWindow(ScrolledWindow):
    """An editor window: a classic TextEdit (TE) record hosted in a
    ScrolledWindow, with load/save and clipboard menu commands.

    Python 2 / Carbon demo code; behavior left untouched.
    """
    def open(self, path, name, data):
        # Create the Mac window and the TE record, then display the text.
        self.path = path        # full pathname, or None for an unsaved buffer
        self.name = name        # window title / display name
        r = windowbounds(400, 400)
        w = Win.NewWindow(r, name, 1, 0, -1, 1, 0)
        self.wid = w
        x0, y0, x1, y1 = self.wid.GetWindowPort().GetPortBounds()
        # Inset the text rects: 4px margin, 20px for scrollbars/grow icon.
        x0 = x0 + 4
        y0 = y0 + 4
        x1 = x1 - 20
        y1 = y1 - 20
        vr = dr = x0, y0, x1, y1
        ##vr = 4, 0, r[2]-r[0]-15, r[3]-r[1]-15
        ##dr = (0, 0, vr[2], 0)
        Qd.SetPort(w)
        Qd.TextFont(4)
        Qd.TextSize(9)
        self.ted = TE.TENew(dr, vr)
        self.ted.TEAutoView(1)
        self.ted.TESetText(data)
        w.DrawGrowIcon()
        self.scrollbars()
        self.changed = 0        # dirty flag: prompts to save on close
        self.do_postopen()
        self.do_activate(1, None)
    def do_idle(self):
        # Give TE time to blink the caret.
        self.ted.TEIdle()
    def getscrollbarvalues(self):
        # Map the TE dest/view rects onto scrollbar positions.
        dr = self.ted.destRect
        vr = self.ted.viewRect
        height = self.ted.nLines * self.ted.lineHeight
        vx = self.scalebarvalue(dr[0], dr[2]-dr[0], vr[0], vr[2])
        vy = self.scalebarvalue(dr[1], dr[1]+height, vr[1], vr[3])
        print dr, vr, height, vx, vy
        # Horizontal scrolling is unsupported, hence None for the x value.
        return None, vy
    def scrollbar_callback(self, which, what, value):
        # which: 'x' or 'y'; what: 'set' (thumb drag), '-'/'+' (line),
        # '--'/'++' (page). delta is in pixels, positive scrolls up.
        if which == 'y':
            if what == 'set':
                height = self.ted.nLines * self.ted.lineHeight
                cur = self.getscrollbarvalues()[1]
                delta = (cur-value)*height/32767
            if what == '-':
                delta = self.ted.lineHeight
            elif what == '--':
                # A page is the view height minus one line of overlap.
                delta = (self.ted.viewRect[3]-self.ted.lineHeight)
                if delta <= 0:
                    delta = self.ted.lineHeight
            elif what == '+':
                delta = -self.ted.lineHeight
            elif what == '++':
                delta = -(self.ted.viewRect[3]-self.ted.lineHeight)
                if delta >= 0:
                    delta = -self.ted.lineHeight
            self.ted.TEPinScroll(0, delta)
            print 'SCROLL Y', delta
        else:
            pass # No horizontal scrolling
    def do_activate(self, onoff, evt):
        print "ACTIVATE", onoff
        ScrolledWindow.do_activate(self, onoff, evt)
        if onoff:
            self.ted.TEActivate()
            self.parent.active = self
            self.parent.updatemenubar()
        else:
            self.ted.TEDeactivate()
    def do_update(self, wid, event):
        Qd.EraseRect(wid.GetWindowPort().GetPortBounds())
        self.ted.TEUpdate(wid.GetWindowPort().GetPortBounds())
        self.updatescrollbars()
    def do_contentclick(self, local, modifiers, evt):
        # Bit 0x200 of the modifiers is the shift key (extends selection).
        shifted = (modifiers & 0x200)
        self.ted.TEClick(local, shifted)
        self.updatescrollbars()
        self.parent.updatemenubar()
    def do_char(self, ch, event):
        self.ted.TESelView()
        self.ted.TEKey(ord(ch))
        self.changed = 1
        self.updatescrollbars()
        self.parent.updatemenubar()
    def close(self):
        # Offer to save a dirty buffer; a "Cancel" answer aborts the close.
        if self.changed:
            save = EasyDialogs.AskYesNoCancel('Save window "%s" before closing?'%self.name, 1)
            if save > 0:
                self.menu_save()
            elif save < 0:
                return
        if self.parent.active == self:
            self.parent.active = None
            self.parent.updatemenubar()
        del self.ted
        self.do_postclose()
    def menu_save(self):
        if not self.path:
            self.menu_save_as()
            return # Will call us recursively
        print 'Saving to ', self.path
        dhandle = self.ted.TEGetText()
        data = dhandle.data
        fp = open(self.path, 'wb') # NOTE: wb, because data has CR for end-of-line
        fp.write(data)
        if data[-1] <> '\r': fp.write('\r')
        fp.close()
        self.changed = 0
    def menu_save_as(self):
        path = EasyDialogs.AskFileForSave(message='Save as:')
        if not path: return
        self.path = path
        self.name = os.path.split(self.path)[-1]
        self.wid.SetWTitle(self.name)
        self.menu_save()
    def menu_cut(self):
        self.ted.TESelView()
        self.ted.TECut()
        # ZeroScrap is pre-Carbon; ClearCurrentScrap is its Carbon successor.
        if hasattr(Scrap, 'ZeroScrap'):
            Scrap.ZeroScrap()
        else:
            Scrap.ClearCurrentScrap()
        TE.TEToScrap()
        self.updatescrollbars()
        self.parent.updatemenubar()
        self.changed = 1
    def menu_copy(self):
        self.ted.TECopy()
        if hasattr(Scrap, 'ZeroScrap'):
            Scrap.ZeroScrap()
        else:
            Scrap.ClearCurrentScrap()
        TE.TEToScrap()
        self.updatescrollbars()
        self.parent.updatemenubar()
    def menu_paste(self):
        TE.TEFromScrap()
        self.ted.TESelView()
        self.ted.TEPaste()
        self.updatescrollbars()
        self.parent.updatemenubar()
        self.changed = 1
    def menu_clear(self):
        self.ted.TESelView()
        self.ted.TEDelete()
        self.updatescrollbars()
        self.parent.updatemenubar()
        self.changed = 1
    def have_selection(self):
        return (self.ted.selStart < self.ted.selEnd)
class Ped(Application):
    """The application object: owns the menu bar and tracks the frontmost
    (active) TEWindow. Python 2 / Carbon demo code; behavior untouched."""
    def __init__(self):
        Application.__init__(self)
        self.num = 0            # counter used to number "Untitled" windows
        self.active = None      # currently active TEWindow, if any
        self.updatemenubar()
    def makeusermenus(self):
        self.filemenu = m = Menu(self.menubar, "File")
        self.newitem = MenuItem(m, "New window", "N", self.open)
        self.openitem = MenuItem(m, "Open...", "O", self.openfile)
        self.closeitem = MenuItem(m, "Close", "W", self.closewin)
        m.addseparator()
        self.saveitem = MenuItem(m, "Save", "S", self.save)
        self.saveasitem = MenuItem(m, "Save as...", "", self.saveas)
        m.addseparator()
        self.quititem = MenuItem(m, "Quit", "Q", self.quit)
        self.editmenu = m = Menu(self.menubar, "Edit")
        self.undoitem = MenuItem(m, "Undo", "Z", self.undo)
        self.cutitem = MenuItem(m, "Cut", "X", self.cut)
        self.copyitem = MenuItem(m, "Copy", "C", self.copy)
        self.pasteitem = MenuItem(m, "Paste", "V", self.paste)
        self.clearitem = MenuItem(m, "Clear", "", self.clear)
        # Not yet implemented:
        self.undoitem.enable(0)
        # Groups of items enabled together:
        self.windowgroup = [self.closeitem, self.saveitem, self.saveasitem, self.editmenu]
        self.focusgroup = [self.cutitem, self.copyitem, self.clearitem]
        # -1 sentinels force the first updatemenubar() call to refresh all.
        self.windowgroup_on = -1
        self.focusgroup_on = -1
        self.pastegroup_on = -1
    def updatemenubar(self):
        # Sync menu enabled-state with the active window, selection and
        # scrap; redraw the menu bar only if something actually changed.
        changed = 0
        on = (self.active <> None)
        if on <> self.windowgroup_on:
            for m in self.windowgroup:
                m.enable(on)
            self.windowgroup_on = on
            changed = 1
        if on:
            # only if we have an edit menu
            on = self.active.have_selection()
            if on <> self.focusgroup_on:
                for m in self.focusgroup:
                    m.enable(on)
                self.focusgroup_on = on
                changed = 1
            # InfoScrap is pre-Carbon; on Carbon, inspect the flavor list.
            if hasattr(Scrap, 'InfoScrap'):
                on = (Scrap.InfoScrap()[0] <> 0)
            else:
                flavors = Scrap.GetCurrentScrap().GetScrapFlavorInfoList()
                for tp, info in flavors:
                    if tp == 'TEXT':
                        on = 1
                        break
                else:
                    on = 0
            if on <> self.pastegroup_on:
                self.pasteitem.enable(on)
                self.pastegroup_on = on
                changed = 1
        if changed:
            DrawMenuBar()
    #
    # Apple menu
    #
    def do_about(self, id, item, window, event):
        EasyDialogs.Message("A simple single-font text editor")
    #
    # File menu
    #
    def open(self, *args):
        self._open(0)
    def openfile(self, *args):
        self._open(1)
    def _open(self, askfile):
        # Shared implementation of "New window" (askfile=0) and
        # "Open..." (askfile=1).
        if askfile:
            path = EasyDialogs.AskFileForOpen(typeList=('TEXT',))
            if not path:
                return
            name = os.path.split(path)[-1]
            try:
                fp = open(path, 'rb') # NOTE binary, we need cr as end-of-line
                data = fp.read()
                fp.close()
            except IOError, arg:
                EasyDialogs.Message("IOERROR: %r" % (arg,))
                return
        else:
            path = None
            name = "Untitled %d"%self.num
            data = ''
        w = TEWindow(self)
        w.open(path, name, data)
        self.num = self.num + 1
    def closewin(self, *args):
        if self.active:
            self.active.close()
        else:
            EasyDialogs.Message("No active window?")
    def save(self, *args):
        if self.active:
            self.active.menu_save()
        else:
            EasyDialogs.Message("No active window?")
    def saveas(self, *args):
        if self.active:
            self.active.menu_save_as()
        else:
            EasyDialogs.Message("No active window?")
    def quit(self, *args):
        # Close every window (each may veto via its save dialog); only
        # really quit when none are left.
        for w in self._windows.values():
            w.close()
        if self._windows:
            return
        self._quit()
    #
    # Edit menu
    #
    def undo(self, *args):
        pass
    def cut(self, *args):
        if self.active:
            self.active.menu_cut()
        else:
            EasyDialogs.Message("No active window?")
    def copy(self, *args):
        if self.active:
            self.active.menu_copy()
        else:
            EasyDialogs.Message("No active window?")
    def paste(self, *args):
        if self.active:
            self.active.menu_paste()
        else:
            EasyDialogs.Message("No active window?")
    def clear(self, *args):
        if self.active:
            self.active.menu_clear()
        else:
            EasyDialogs.Message("No active window?")
    #
    # Other stuff
    #
    def idle(self, *args):
        if self.active:
            self.active.do_idle()
        else:
            Qd.SetCursor(Qd.GetQDGlobalsArrow())
def main():
    """Create the editor application and run its event loop."""
    application = Ped()
    application.mainloop()
# Allow running this demo directly as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "TathagataChakraborti/resource-conflicts",
"path": "PLANROB-2015/seq-sat-lama/Python-2.5.2/Mac/Demo/textedit/ped.py",
"copies": "39",
"size": "10512",
"license": "mit",
"hash": -319992567368376400,
"line_mean": 28.2813370474,
"line_max": 94,
"alpha_frac": 0.5244482496,
"autogenerated": false,
"ratio": 3.652536483669215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A minimal text editor using MLTE. Based on wed.py.
#
# To be done:
# - Functionality: find, etc.
from Menu import DrawMenuBar
from FrameWork import *
from Carbon import Win
from Carbon import Ctl
from Carbon import Qd
from Carbon import Res
from Carbon import Scrap
import os
from Carbon import MacTextEditor
from Carbon import Mlte
# Human-readable Edit-menu labels, indexed by the action code that
# TXNCanUndo()/TXNCanRedo() report (see can_undo/can_redo below).
UNDOLABELS = [ # Indexed by MLTECanUndo() value
    "Typing", "Cut", "Paste", "Clear", "Font Change", "Color Change", "Size Change",
    "Style Change", "Align Left", "Align Center", "Align Right", "Drop", "Move"]
class MlteWindow(Window):
    """An editor window backed by an MLTE (Multilingual Text Engine)
    TXNObject instead of classic TextEdit.

    Python 2 / Carbon demo code; behavior left untouched. The ##-prefixed
    lines are remnants of the waste/TE version this was ported from.
    """
    def open(self, path, name, data):
        self.path = path        # full pathname, or None for an unsaved buffer
        self.name = name        # window title / display name
        r = windowbounds(400, 400)
        w = Win.NewWindow(r, name, 1, 0, -1, 1, 0)
        self.wid = w
        # MLTE draws its own grow icon and both scrollbars.
        flags = MacTextEditor.kTXNDrawGrowIconMask|MacTextEditor.kTXNWantHScrollBarMask| \
                MacTextEditor.kTXNWantVScrollBarMask
        self.ted, self.frameid = Mlte.TXNNewObject(None, w, None, flags, MacTextEditor.kTXNTextEditStyleFrameType,
                MacTextEditor.kTXNTextFile, MacTextEditor.kTXNMacOSEncoding)
        self.ted.TXNSetData(MacTextEditor.kTXNTextData, data, 0, 0x7fffffff)
        self.changed = 0        # dirty flag: prompts to save on close
        self.do_postopen()
        self.do_activate(1, None)
    def do_idle(self, event):
        self.ted.TXNIdle()
        self.ted.TXNAdjustCursor(None)
    def do_activate(self, onoff, evt):
        if onoff:
##            self.ted.TXNActivate(self.frameid, 0)
            self.ted.TXNFocus(1)
            self.parent.active = self
        else:
            self.ted.TXNFocus(0)
            self.parent.active = None
        self.parent.updatemenubar()
    def do_update(self, wid, event):
        self.ted.TXNDraw(None)
    def do_postresize(self, width, height, window):
        self.ted.TXNResizeFrame(width, height, self.frameid)
    def do_contentclick(self, local, modifiers, evt):
        self.ted.TXNClick(evt)
        self.parent.updatemenubar()
    def do_char(self, ch, event):
        self.ted.TXNKeyDown(event)
        self.parent.updatemenubar()
    def close(self):
        # Offer to save a dirty buffer; a "Cancel" answer aborts the close.
        if self.changed:
            save = EasyDialogs.AskYesNoCancel('Save window "%s" before closing?'%self.name, 1)
            if save > 0:
                self.menu_save()
            elif save < 0:
                return
        if self.parent.active == self:
            self.parent.active = None
        self.ted.TXNDeleteObject()
        del self.ted
##        del self.tedtexthandle
        self.do_postclose()
    def menu_save(self):
        if not self.path:
            self.menu_save_as()
            return # Will call us recursively
        dhandle = self.ted.TXNGetData(0, 0x7fffffff)
        data = dhandle.data
        fp = open(self.path, 'wb') # NOTE: wb, because data has CR for end-of-line
        fp.write(data)
        if data[-1] <> '\r': fp.write('\r')
        fp.close()
        self.changed = 0
    def menu_save_as(self):
        path = EasyDialogs.AskFileForSave(message='Save as:')
        if not path: return
        self.path = path
        self.name = os.path.split(self.path)[-1]
        self.wid.SetWTitle(self.name)
        self.menu_save()
    def menu_cut(self):
##        self.ted.WESelView()
        self.ted.TXNCut()
###        Mlte.ConvertToPublicScrap()
##        Scrap.ZeroScrap()
##        self.ted.WECut()
##        self.updatescrollbars()
        self.parent.updatemenubar()
        self.changed = 1
    def menu_copy(self):
##        Scrap.ZeroScrap()
        self.ted.TXNCopy()
###        Mlte.ConvertToPublicScrap()
##        self.updatescrollbars()
        self.parent.updatemenubar()
    def menu_paste(self):
###        Mlte.ConvertFromPublicScrap()
        self.ted.TXNPaste()
##        self.updatescrollbars()
        self.parent.updatemenubar()
        self.changed = 1
    def menu_clear(self):
##        self.ted.WESelView()
        self.ted.TXNClear()
##        self.updatescrollbars()
        self.parent.updatemenubar()
        self.changed = 1
    def menu_undo(self):
        self.ted.TXNUndo()
##        self.updatescrollbars()
        self.parent.updatemenubar()
    def menu_redo(self):
        self.ted.TXNRedo()
##        self.updatescrollbars()
        self.parent.updatemenubar()
    def have_selection(self):
        start, stop = self.ted.TXNGetSelection()
        return start < stop
    def can_paste(self):
        return Mlte.TXNIsScrapPastable()
    def can_undo(self):
        # Returns a menu label ("Undo <action>") or None if nothing to undo.
        can, which = self.ted.TXNCanUndo()
        if not can:
            return None
        if which >= len(UNDOLABELS):
            # Unspecified undo
            return "Undo"
        which = UNDOLABELS[which]
        return "Undo "+which
    def can_redo(self):
        # Returns a menu label ("Redo <action>") or None if nothing to redo.
        can, which = self.ted.TXNCanRedo()
        if not can:
            return None
        if which >= len(UNDOLABELS):
            # Unspecified undo
            return "Redo"
        which = UNDOLABELS[which]
        return "Redo "+which
class Mlted(Application):
def __init__(self):
Application.__init__(self)
self.num = 0
self.active = None
self.updatemenubar()
def makeusermenus(self):
self.filemenu = m = Menu(self.menubar, "File")
self.newitem = MenuItem(m, "New window", "N", self.open)
self.openitem = MenuItem(m, "Open...", "O", self.openfile)
self.closeitem = MenuItem(m, "Close", "W", self.closewin)
m.addseparator()
self.saveitem = MenuItem(m, "Save", "S", self.save)
self.saveasitem = MenuItem(m, "Save as...", "", self.saveas)
m.addseparator()
self.quititem = MenuItem(m, "Quit", "Q", self.quit)
self.editmenu = m = Menu(self.menubar, "Edit")
self.undoitem = MenuItem(m, "Undo", "Z", self.undo)
self.redoitem = MenuItem(m, "Redo", None, self.redo)
m.addseparator()
self.cutitem = MenuItem(m, "Cut", "X", self.cut)
self.copyitem = MenuItem(m, "Copy", "C", self.copy)
self.pasteitem = MenuItem(m, "Paste", "V", self.paste)
self.clearitem = MenuItem(m, "Clear", "", self.clear)
# Groups of items enabled together:
self.windowgroup = [self.closeitem, self.saveitem, self.saveasitem, self.editmenu]
self.focusgroup = [self.cutitem, self.copyitem, self.clearitem]
self.windowgroup_on = -1
self.focusgroup_on = -1
self.pastegroup_on = -1
self.undo_label = "never"
self.redo_label = "never"
def updatemenubar(self):
changed = 0
on = (self.active <> None)
if on <> self.windowgroup_on:
for m in self.windowgroup:
m.enable(on)
self.windowgroup_on = on
changed = 1
if on:
# only if we have an edit menu
on = self.active.have_selection()
if on <> self.focusgroup_on:
for m in self.focusgroup:
m.enable(on)
self.focusgroup_on = on
changed = 1
on = self.active.can_paste()
if on <> self.pastegroup_on:
self.pasteitem.enable(on)
self.pastegroup_on = on
changed = 1
on = self.active.can_undo()
if on <> self.undo_label:
if on:
self.undoitem.enable(1)
self.undoitem.settext(on)
self.undo_label = on
else:
self.undoitem.settext("Nothing to undo")
self.undoitem.enable(0)
changed = 1
on = self.active.can_redo()
if on <> self.redo_label:
if on:
self.redoitem.enable(1)
self.redoitem.settext(on)
self.redo_label = on
else:
self.redoitem.settext("Nothing to redo")
self.redoitem.enable(0)
changed = 1
if changed:
DrawMenuBar()
#
# Apple menu
#
def do_about(self, id, item, window, event):
EasyDialogs.Message("A simple single-font text editor based on MacTextEditor")
#
# File menu
#
def open(self, *args):
self._open(0)
def openfile(self, *args):
self._open(1)
def _open(self, askfile):
if askfile:
path = EasyDialogs.AskFileForOpen(typeList=('TEXT',))
if not path:
return
name = os.path.split(path)[-1]
try:
fp = open(path, 'rb') # NOTE binary, we need cr as end-of-line
data = fp.read()
fp.close()
except IOError, arg:
EasyDialogs.Message("IOERROR: %r" % (arg,))
return
else:
path = None
name = "Untitled %d"%self.num
data = ''
w = MlteWindow(self)
w.open(path, name, data)
self.num = self.num + 1
def closewin(self, *args):
if self.active:
self.active.close()
else:
EasyDialogs.Message("No active window?")
def save(self, *args):
if self.active:
self.active.menu_save()
else:
EasyDialogs.Message("No active window?")
def saveas(self, *args):
if self.active:
self.active.menu_save_as()
else:
EasyDialogs.Message("No active window?")
def quit(self, *args):
for w in self._windows.values():
w.close()
if self._windows:
return
self._quit()
#
# Edit menu
#
def undo(self, *args):
if self.active:
self.active.menu_undo()
else:
EasyDialogs.Message("No active window?")
def redo(self, *args):
if self.active:
self.active.menu_redo()
else:
EasyDialogs.Message("No active window?")
def cut(self, *args):
if self.active:
self.active.menu_cut()
else:
EasyDialogs.Message("No active window?")
def copy(self, *args):
if self.active:
self.active.menu_copy()
else:
EasyDialogs.Message("No active window?")
def paste(self, *args):
if self.active:
self.active.menu_paste()
else:
EasyDialogs.Message("No active window?")
def clear(self, *args):
if self.active:
self.active.menu_clear()
else:
EasyDialogs.Message("No active window?")
#
# Other stuff
#
    def idle(self, event):
        # Event-loop idle hook: give the active window idle time, otherwise
        # reset the cursor to the standard arrow.
        if self.active:
            self.active.do_idle(event)
        else:
            Qd.SetCursor(Qd.GetQDGlobalsArrow())
def main():
    # Initialize the MacOS text engine, run the application main loop, and
    # guarantee the engine is torn down even if the loop raises.
    Mlte.TXNInitTextension(0)
    try:
        App = Mlted()
        App.mainloop()
    finally:
        Mlte.TXNTerminateTextension()
# Run the editor when executed as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "kjc88/sl4a",
"path": "python/src/Mac/Demo/mlte/mlted.py",
"copies": "39",
"size": "11096",
"license": "apache-2.0",
"hash": 7996692490143777000,
"line_mean": 28.6684491979,
"line_max": 114,
"alpha_frac": 0.5373107426,
"autogenerated": false,
"ratio": 3.6778256546237986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# a minimal tracking script - this will start all peer
# services and attach everything appropriately
# change parameters depending on your pan eyeY, pins and
# Arduino details
# all commented code is not necessary but allows custom
# options
# Serial port and servo pin assignments for this rig.
port = "COM8"
eyeXPin = 3
eyeYPin = 6
headXPin = 10
headYPin = 9
# Two independent Tracking services: one for the eyes, one for the head.
eyes = Runtime.create("eyes", "Tracking")
head = Runtime.create("head", "Tracking")
# name to bind correctly
eyes.reserveAs("x", "eyeX")
eyes.reserveAs("y", "eyeY")
eyes.reserveAs("xpid", "eyeXPID")
eyes.reserveAs("ypid", "eyeYPID")
head.reserveAs("x", "rothead")
head.reserveAs("y", "neck")
head.reserveAs("xpid", "rotheadPID")
head.reserveAs("ypid", "neckPID")
# naming - binding of peer services is done with service names
# the Tracking service will use the following default names
# arduinoName = "arduino" - the arduino controller - used to control the servos
# xpidName = "xpid" - the Pid service to control X tracking
# ypidName = "ypid" - the Pid service to control Y tracking
# xName = "x" - the x servo (pan)
# yName = "y" - the y servo (eyeY)
# opencvName = "opencv" - the camera
# after the Tracking service is "created" you may create peer service
# and change values of that service - for example if we want to invert a
# servo :
eyeY = Runtime.create("eyeY", "Servo")
eyeY.setInverted(True)
# initialization
# NOTE(review): only `eyes` is connected to the serial port — confirm that
# `head` shares the same Arduino controller or needs its own connect().
eyes.connect(port)
eyes.attachServos(eyeXPin, eyeYPin)
head.attachServos(headXPin, headYPin)
# set limits if necessary
# default is servo limits
eyes.setServoLimits(65, 90, 22, 85)
# set rest position default is 90 90
eyes.setRestPosition(80, 47)
#eyes.setPIDDefaults()
# changing Pid values
# setXPID(Kp, Ki, Kd, Direction 0=direct 1=reverse, Mode 0=manual 1= automatic, minOutput, maxOutput, sampleTime, setPoint);
# defaults look like this_AUTOMATIC
eyes.setXPID(10.0, 5, 1, 0, 1, -10, 10, 30, 0.5)
eyes.setYPID(10.0, 5, 1, 0, 1, -10, 10, 30, 0.5)
head.setXPID(5.0, 0, 0.1, 0, 1, -1, 1, 30, 0.5)
head.setYPID(5.0, 0, 0.1, 0, 1, -1, 1, 30, 0.5)
eyes.startService()
head.startService()
# set a point and track it
# there are two interfaces one is float value
# where 0.5,0.5 is middle of screen
# eyes.trackPoint(0.5, 0.5)
# don't be surprised if the point does not
# stay - it needs / wants a corner in the image
# to presist - otherwise it might disappear
# you can set points manually by clicking on the
# opencv screen
# face tracking from face detection filter
eyes.faceDetect()
head.faceDetect()
"repo_name": "MyRobotLab/pyrobotlab",
"path": "home/Alessandruino/InMoov.Alessandruino.py",
"copies": "1",
"size": "2463",
"license": "apache-2.0",
"hash": -1408929468548347600,
"line_mean": 29.8,
"line_max": 124,
"alpha_frac": 0.7149817296,
"autogenerated": false,
"ratio": 2.8606271777003482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4075608907300348,
"avg_score": null,
"num_lines": null
} |
# a minimal tracking script - this will start all peer
# services and attach everything appropriately
# change parameters depending on your pan tilt, pins and
# Arduino details
# all commented code is not necessary but allows custom
# options
# Serial port and servo pin assignments for this rig.
port = "COM12"
xServoPin = 13
yServoPin = 12
#select the pin where to start polling ( we can connect a PIR to this pin to see his state HIGH/LOW)
readDigitalPin = 8
# Single Tracking service driving a pan/tilt pair from one camera.
tracker = Runtime.createAndStart("tracker", "Tracking")
# set specifics on each Servo
servoX = tracker.getX()
servoX.setPin(xServoPin)
servoX.setMinMax(30, 150)
servoY = tracker.getY()
servoY.setPin(yServoPin)
servoY.setMinMax(30, 150)
# optional filter settings
opencv = tracker.getOpenCV()
# setting camera index to 1 default is 0
opencv.setCameraIndex(1)
# connect to the Arduino
tracker.connect(port)
#set a Low sample rate, we don't want to bork serial connection !
arduino = tracker.getArduino()
arduino.setSampleRate(8000)
#start polling data from the digital pin
arduino.digitalReadPollingStart(readDigitalPin)
#add python as listener of the arduino service, each time arduino publish the value of the pin
arduino.addListener("publishPin", "python", "publishPin")
#define a function which is called every time arduino publish the value of the pin
def publishPin():
    # Callback invoked each time the Arduino service publishes the polled
    # pin value; `msg_tracker_arduino_publishPin` is injected into this
    # script's scope by MyRobotLab (see addListener above).
    pin = msg_tracker_arduino_publishPin.data[0]
    print pin.pin, pin.value,pin.type,pin.source
    #if an HIGH state is read, PIR is detecting something so start face tracking
    if (pin.value == 1):
        if tracker.isIdle():
            tracker.faceDetect()
    #if a LOW state is read , stop tracking.. there is no human there !
    elif (pin.value == 0):
        if not tracker.isIdle():
            tracker.stopTracking()
# Gray & PyramidDown make face tracking
# faster - if you dont like these filters - you
# may remove them before you select a tracking type with
# the following command
# tracker.clearPreFilters()
# diffrent types of tracking
# simple face detection and tracking
# tracker.faceDetect()
# lkpoint - click in video stream with
# mouse and it should track
# tracker.startLKTracking()
# scans for faces - tracks if found
# tracker.findFace() | {
"repo_name": "DarkRebel/myrobotlab",
"path": "src/resource/Python/examples/Tracking.PIRTriggered.borsaci06.py",
"copies": "4",
"size": "2143",
"license": "apache-2.0",
"hash": -6971620421257550000,
"line_mean": 28.3698630137,
"line_max": 100,
"alpha_frac": 0.7512832478,
"autogenerated": false,
"ratio": 3.4508856682769724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6202168916076972,
"avg_score": null,
"num_lines": null
} |
# A minimal version of the ItemServer that uses the BlockChain.info JSON api
# https://blockchain.info/api/json_rpc_api
from jsonrpc import ServiceProxy
class ItemClient():
    """Thin wrapper around the blockchain.info JSON-RPC wallet API."""

    def __init__(self, wallet, password, second_password):
        # The second (wallet) password is needed to unlock the wallet
        # before each signing operation.
        self.second_password = second_password
        endpoint = 'https://%s:%s@blockchain.info:443' % (wallet, password)
        self.btc = ServiceProxy(endpoint)

    def _unlock(self):
        # Unlock the wallet for 50 seconds so the next call may operate.
        self.btc.walletpassphrase(self.second_password, 50)

    def GetNewAddress(self):
        """Return a fresh receiving address from the wallet."""
        self._unlock()
        return self.btc.getnewaddress()

    def SignMessage(self, address, message):
        """Sign *message* with the key behind *address*."""
        self._unlock()
        return self.btc.signmessage(address, message)

    def GenerateProofSig(self, item, target):
        """ Prove that this server owns the output of last_tx_id. """
        tx = self.btc.gettransaction(item.last_tx_id)
        # Keep the address of the *last* 'receive' detail, matching the
        # original scan order; None if no receive detail exists.
        last_address = None
        for detail in tx['details']:
            if detail['category'] == 'receive':
                last_address = detail['address']
        if not last_address:
            return None
        self._unlock()
        return self.btc.signmessage(last_address, target)
| {
"repo_name": "thandal/passe-partout",
"path": "pp/pp_item_client_blockchain.py",
"copies": "1",
"size": "1119",
"license": "mit",
"hash": 2322956003138069500,
"line_mean": 33.96875,
"line_max": 76,
"alpha_frac": 0.6997319035,
"autogenerated": false,
"ratio": 3.443076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9611053820946458,
"avg_score": 0.006351001126092925,
"num_lines": 32
} |
#A minimum example illustrating how to use a
#Gaussian Processes for binary classification
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.preprocessing import normalize
# Number of random process realizations to draw.
n = 1000
def stochProcess(x, a, b):
    """One realization of the process: exponential growth exp(a*x)
    modulated by the oscillation cos(b*x)."""
    growth = np.exp(a * x)
    oscillation = np.cos(b * x)
    return growth * oscillation
def fx(processes, x):
    """Empirical density curve for the samples at index *x*.

    Returns (sorted samples, standard-normal pdf of each sample,
    reordered to match the sorted samples).
    """
    samples = processes[x]
    density = norm.pdf(samples)
    order = np.argsort(samples)
    sorted_samples = np.sort(samples)
    ordered_density = density[order]
    return (sorted_samples, ordered_density)
# STOCHASTIC PROCESS
# Draw n random (a, b) parameter pairs, one per realization.
a = np.random.uniform(low=0, high=1, size=n)
a = normalize(a.reshape(1, -1))[0]
# Shift so the smallest coefficient is exactly 0.
a = a + (-a.min())
b = np.random.normal(size=n)
# 100 evaluation points on [0, 2].
s = np.linspace(0, 2, num=100)
print(a)
## sampling
stoch = []
# NOTE(review): `i = 0` is dead — the comprehension below rebinds i; also
# `input` shadows the builtin of the same name.
i = 0
for input in s:
    output = [stochProcess(input, a[i], b[i]) for i in range(0, len(a))]
    stoch.append(output)
## dist
# Density of the realizations at evaluation point 50 (i.e. s[50]).
x, y = fx(stoch, 50)
## plot
stochT = np.transpose(stoch)
# Show only the first 10 realizations to keep the plot readable.
stochDisplay = np.transpose([stochT[i] for i in range(0, 10)])
f, ax = plt.subplots(2, 2)
ax[0, 0].plot(s, stochDisplay)
ax[0, 1].plot(x, y)
#ax3.plot(stoch)
#ax4.plot(x, y)
plt.show()
| {
"repo_name": "KHP-Informatics/sleepsight-analytics",
"path": "scarp.py",
"copies": "1",
"size": "1058",
"license": "apache-2.0",
"hash": 947072383408660600,
"line_mean": 19.3461538462,
"line_max": 72,
"alpha_frac": 0.6559546314,
"autogenerated": false,
"ratio": 2.5617433414043584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.856566818454535,
"avg_score": 0.030405957651801645,
"num_lines": 52
} |
"""A minimum implementation of the actor model.
An actor is basically a daemon thread processing messages from a queue,
and a message is composed of a method and its arguments (you can think
of it as a single-threaded executor).
By default the queue size is infinite, but you may specify a finite
queue size, which is useful in implementing back pressure.
An actor's state is either alive or dead, and once it's dead, it will
never become alive again (but even if it is alive at this moment, it
does not guarantee that it will still be alive at the next moment).
Since actors are executed by daemon threads, when the main program
exits, all actor threads might not have chance to release resources,
which typically are calling __exit__ in context managers. So you should
pay special attention to resources that must be release even when the
main program is crashing (unlike ThreadPoolExecutor, which blocks the
main program until all submitted jobs are done).
An actor and the world communicate with each other through a queue and a
future object.
Queue: (world -> actor)
* The world sends messages to the actor through the queue, obviously.
* The world, or sometimes the actor itself, signals a kill of the
actor by closing the queue (but the actor won't die immediately).
Future object: (actor -> world)
* When the actor is about to die, it completes the future object.
And thus the world may know when an actor was died by observing the
future object.
* Note that, an actor could be dead without the world killing it (when
an message raises an uncaught exception, for example).
"""
# Public API of this module.
__all__ = [
    'BUILD',
    'ActorError',
    'Exit',
    'Exited',
    'Return',
    'OneShotActor',
    'Stub',
    'StubPool',
    'method',
    'build',
    'make_maker',
    'inject',
]
import collections
import functools
import logging
import threading
import types
import weakref
from concurrent.futures import Future
from garage.assertions import ASSERT
from . import queues
from . import utils
# Module-level logger for actor lifecycle tracing.
LOG = logging.getLogger(__name__)
# Sentinel passed as a stub constructor's first positional argument to
# select the "build with explicit configuration" calling convention.
BUILD = object()
# Private sentinel marking functions decorated with @method.
_MAGIC = object()
class ActorError(Exception):
    """A generic error of actors; base class of this module's exceptions."""
class Exited(ActorError):
    """Raised when sending a message to an actor whose queue is closed."""
class Exit(Exception):
    """Raise this inside an actor method to make the actor self-terminate
    cleanly (the message loop treats it as a normal exit)."""
class Return(Exception):
    """Request that the actor append a follow-up message to its own queue.

    ``result`` is delivered to the caller of the current message; the
    ``(func, args, kwargs)`` triple is re-enqueued as the next message.
    """

    def __init__(self, result, func, *args, **kwargs):
        self.message_data = (func, args, kwargs)
        self.result = result
class OneShotActor:
    """A special kind of actor for "one-shot" task.
    It processes one type of message and one message only (thus the name
    one-shot). For this simpler case, I would prefer not to use a
    metaclass.
    """
    @classmethod
    def from_func(cls, actor_func, *, name=None):
        # Usable as a plain decorator or as a decorator factory:
        # @OneShotActor.from_func  or  @OneShotActor.from_func('worker').
        if isinstance(actor_func, str):
            return functools.partial(cls.from_func, name=actor_func)
        stub_maker = cls(actor_func)
        names = utils.generate_names(name=name or actor_func.__name__)
        def make(*args, **kwargs):
            # Each call spawns a new actor thread with the next name.
            return build(
                stub_maker,
                name=next(names),
                set_pthread_name=True,
                args=args,
                kwargs=kwargs,
            )
        # Create an alias to the exposed member
        make.actor_func = stub_maker.actor_func
        return make
    class Stub:
        # Minimal stub: a one-shot actor accepts no further messages, so
        # only the kill/future accessors do real work.
        def __init__(self, name, future):
            self._name = name # Because Stub exposes this
            self._future = future
        def _kill(self, graceful=True):
            self._future.cancel()
        def _get_future(self):
            return self._future
        def _send_message(self, func, args, kwargs, block=True, timeout=None):
            raise Exited('OneShotActor does not take additional message')
    def __init__(self, actor_func):
        self.actor_func = actor_func
    def __call__(self, *args, **kwargs):
        # Supports both the plain call convention and the BUILD convention
        # used by build(), where the real arguments are nested in kwargs.
        if args and args[0] is BUILD:
            name = kwargs.get('name')
            set_pthread_name = kwargs.get('set_pthread_name')
            args = kwargs.get('args', ())
            kwargs = kwargs.get('kwargs', {})
        else:
            name = None
            set_pthread_name = False
        future = Future()
        # The daemon thread holds only a weak reference to the future so
        # an unreferenced future does not keep results alive.
        thread = threading.Thread(
            target=self._run_actor,
            name=name,
            args=(weakref.ref(future), args, kwargs),
            daemon=True,
        )
        thread.start()
        # thread.ident is None if it has not been started
        if set_pthread_name:
            utils.set_pthread_name(thread, name)
        # Let interface be consistent with full-blown actors
        return self.Stub(thread.name, future)
    def _run_actor(self, future_ref, args, kwargs):
        # Thread target: run actor_func once and report into the future.
        if not _deref(future_ref).set_running_or_notify_cancel():
            return
        LOG.debug('start')
        try:
            result = self.actor_func(*args, **kwargs)
        except Exit:
            # Self-termination counts as a normal, result-less exit.
            _deref(future_ref).set_result(None)
        except BaseException as exc:
            _deref(future_ref).set_exception(exc)
        else:
            _deref(future_ref).set_result(result)
        LOG.debug('exit')
class StubPool:
    """Manage a pool of same-type stubs.
    It also presents itself as if it were a single stub.
    """
    def __init__(self, stubs):
        self._stubs = collections.deque(stubs)
        self._name = '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(stub._name for stub in self._stubs),
        )
        # This is pretty naive so far: the pool is "done" when all stubs
        # are done, and it won't tell you whether actors err or not.
        self.__lock = threading.Lock()
        self.__counter = len(self._stubs)
        self.__future = Future()
        for stub in self._stubs:
            stub._get_future().add_done_callback(self.__done_callback)
        # Check at the end not at the beginning because actor threads
        # may die during the __init__ function execution.
        for stub in self._stubs:
            if stub._get_future().done():
                # NOTE(review): %-style args are passed to RuntimeError
                # without formatting — message likely intended as
                # 'actor is already dead: %s' % stub._name.
                raise RuntimeError('actor is already dead: %s', stub._name)
    def __done_callback(self, _):
        # Called from actor threads; the lock protects the counter.
        with self.__lock:
            self.__counter -= 1
            if self.__counter <= 0:
                self.__future.set_result(None)
    def _next_stub(self):
        # XXX: This implements a "naive" round-robin that it distributes
        # work to actors regardless they are still busy or not, which
        # might result in volatile latency distribution.
        while self._stubs:
            # Find next "alive" stub.
            stub = self._stubs[0]
            if not stub._get_future().done():
                self._stubs.rotate(-1)
                return stub
            # We assume a stub should never exit, but bad things happen.
            # Let's remove it and carry on. (In the future we might
            # re-spawn new stubs?)
            exc = stub._get_future().exception()
            if exc:
                LOG.error('stub errs: %s', stub._name, exc_info=exc)
            else:
                LOG.error('stub exits: %s', stub._name)
            self._stubs.popleft()
        raise RuntimeError('no stub available')
    def __getattr__(self, name):
        # Delegate public attribute access (stub methods) to the next stub.
        ASSERT(
            not name.startswith('_'),
            'not support `%s` attribute for now', name,
        )
        stub = self._next_stub()
        return getattr(stub, name)
    def _kill(self, graceful=True):
        for stub in self._stubs:
            stub._kill(graceful=graceful)
    def _get_future(self):
        return self.__future
    def _send_message(self, func, args, kwargs, block=True, timeout=None):
        stub = self._next_stub()
        return stub._send_message(
            func, args, kwargs, block=block, timeout=timeout)
def method(func):
    """Mark *func* as an actor method to be exposed on the stub class.

    Only plain functions may be marked; anything else raises ActorError.
    """
    if isinstance(func, types.FunctionType):
        func.is_actor_method = _MAGIC
        return func
    raise ActorError('%r is not a function' % func)
class _StubMeta(type):
    """Generates a stub class when given an actor class."""
    def __new__(mcs, name, bases, namespace, actor=None):
        if actor:
            # Auto-generate one forwarding method per @method-decorated
            # function of the actor class.
            stub_methods = _StubMeta.make_stub_methods(actor)
            for stub_method_name in stub_methods:
                if stub_method_name.startswith('_'):
                    raise ActorError(
                        'stub method name starts with "_": %s.%s' %
                        (name, stub_method_name))
                if stub_method_name in namespace:
                    raise ActorError(
                        'stub method should not override %s.%s' %
                        (name, stub_method_name))
                namespace[stub_method_name] = stub_methods[stub_method_name]
        stub_cls = super().__new__(mcs, name, bases, namespace)
        if actor:
            # Record the stub -> actor association for Stub.__init__.
            Stub.ACTORS[stub_cls] = actor
        return stub_cls
    def __init__(cls, name, bases, namespace, **_):
        # Swallow the `actor` keyword that __new__ consumed.
        super().__init__(name, bases, namespace)
    @staticmethod
    def make_stub_methods(actor_class):
        # Walk the MRO so inherited @method functions are exposed too;
        # the first (most-derived) definition of a name wins.
        stub_methods = {}
        for cls in actor_class.__mro__:
            for name, func in vars(cls).items():
                if not hasattr(func, 'is_actor_method'):
                    continue
                if func.is_actor_method is not _MAGIC:
                    # NOTE(review): %-style args are passed to ActorError
                    # without formatting — likely intended to be formatted.
                    raise ActorError(
                        'function should not overwrite %s.is_actor_method',
                        func.__qualname__)
                if name not in stub_methods:
                    stub_methods[name] = _StubMeta.make_stub_method(func)
        return stub_methods
    @staticmethod
    def make_stub_method(func):
        # The generated method enqueues the call and returns a Future.
        @functools.wraps(func)
        def stub_method(self, *args, **kwargs):
            return self._send_message(func, args, kwargs)
        return stub_method
def build(stub_cls, *,
          name=None, set_pthread_name=False,
          capacity=0,
          args=None, kwargs=None):
    """Build a stub/actor pair with finer configurations.

    Forwards to ``stub_cls`` using the BUILD calling convention, with
    ``args``/``kwargs`` normalized to empty containers when falsy.
    """
    return stub_cls(
        BUILD,
        name=name,
        set_pthread_name=set_pthread_name,
        capacity=capacity,
        args=args or (),
        kwargs=kwargs or {},
    )
def make_maker(basename, capacity=0):
    """Return a default classmethod ``make`` that wraps :func:`build`.

    Successive calls to ``make`` produce sequentially numbered actor
    names derived from *basename*.
    """
    name_sequence = utils.generate_names(name=basename)
    @classmethod
    def make(cls, *args, **kwargs):
        # Each call spawns a new actor with the next generated name.
        return build(
            cls,
            name=next(name_sequence), set_pthread_name=True,
            capacity=capacity,
            args=args, kwargs=kwargs,
        )
    return make
def inject(args, kwargs, extra_args=None, extra_kwargs=None):
    """Inject additional args/kwargs destined for the actor's __init__.

    Stub.__init__ accepts two calling conventions (plain and BUILD); this
    helper appends *extra_args*/*extra_kwargs* in the right place for
    either one.  You may use it to pass the stub object to the actor, but
    beware of the object retention that implies.
    """
    if args and args[0] is BUILD:
        # BUILD convention: the real actor arguments are nested inside
        # the keyword arguments.
        if extra_args:
            kwargs['args'] += extra_args
        if extra_kwargs:
            kwargs['kwargs'].update(extra_kwargs)
        return args, kwargs
    # Plain convention: extend the top-level args/kwargs directly.
    if extra_args:
        args += extra_args
    if extra_kwargs:
        kwargs.update(extra_kwargs)
    return args, kwargs
class Stub(metaclass=_StubMeta):
    """The base class of all actor stub classes."""
    # Map stub classes to their actor class.
    ACTORS = {}
    #
    # NOTE:
    #
    # * _StubMeta may generate stub methods for a subclass that
    #   override Stub's methods. Always use fully qualified name
    #   when calling Stub's methods.
    #
    # * Subclass field names might conflict Stub's. Always use
    #   double leading underscore (and thus enable name mangling) on
    #   Stub's fields.
    #
    # * We don't join threads; instead, wait on the future object.
    #
    def __init__(self, *args, **kwargs):
        """Initialize actor.
        This starts the actor thread, and then blocks on actor object's
        __init__ (and re-raise the exception if it fails).
        """
        actor_cls = Stub.ACTORS.get(type(self))
        if not actor_cls:
            raise ActorError(
                '%s is not a stub of an actor' % type(self).__qualname__)
        if args and args[0] is BUILD:
            # BUILD calling convention: the real actor arguments are
            # nested inside the keyword arguments (see build()).
            name = kwargs.get('name')
            set_pthread_name = kwargs.get('set_pthread_name')
            capacity = kwargs.get('capacity', 0)
            args = kwargs.get('args', ())
            kwargs = kwargs.get('kwargs', {})
        else:
            name = None
            set_pthread_name = False
            capacity = 0
        self.__msg_queue = queues.Queue(capacity=capacity)
        self.__future = Future()
        # The actor thread holds only a weak reference to the future so a
        # dropped future does not keep results alive.
        thread = threading.Thread(
            target=_actor_message_loop,
            name=name,
            args=(self.__msg_queue, weakref.ref(self.__future)),
            daemon=True,
        )
        self._name = thread.name # Useful for logging
        thread.start()
        # Since we can't return a future here, we have to wait on the
        # result of actor's __init__() call for any exception that might
        # be raised inside it. (By the way, use Stub._send_message here
        # to ensure that we won't call sub-class' _send_message.)
        Stub._send_message(self, actor_cls, args, kwargs).result()
        # If this stub is not referenced, kill the actor gracefully.
        weakref.finalize(self, self.__msg_queue.close)
        if set_pthread_name:
            utils.set_pthread_name(thread, name)
    def _kill(self, graceful=True):
        """Set the kill flag of the actor thread.
        If graceful is True (the default), the actor will be dead after
        it processes the remaining messages in the queue. Otherwise it
        will be dead after it finishes processing the current message.
        Note that this method does not block even when the queue is full
        (in other words, you can't implement kill on top of the normal
        message sending without the possibility that caller being
        blocked).
        """
        # Cancel every message that close() drained from the queue.
        for msg in self.__msg_queue.close(graceful=graceful):
            _deref(msg.future_ref).cancel()
    def _get_future(self):
        """Return the future object that represents actor's liveness.
        Note: Cancelling this future object is not going to kill this
        actor. You should call kill() instead.
        """
        return self.__future
    def _send_message(self, func, args, kwargs, block=True, timeout=None):
        """Enqueue a message into actor's message queue."""
        try:
            future = Future()
            self.__msg_queue.put(
                _Message(weakref.ref(future), func, args, kwargs),
                block=block,
                timeout=timeout,
            )
            return future
        except queues.Closed:
            # A closed queue means the actor has exited (or was killed).
            raise Exited('actor has been killed') from None
# Internal queue entry: a weak reference to the caller's Future plus the
# callable and arguments to execute on the actor.
_Message = collections.namedtuple('_Message', 'future_ref func args kwargs')
class _FakeFuture:
def cancel(self):
return True
def set_running_or_notify_cancel(self):
return True
def set_result(self, _):
pass
def set_exception(self, _):
pass
_FAKE_FUTURE = _FakeFuture()
def _deref(ref):
"""Dereference a weak reference of future."""
obj = ref()
return obj if obj is not None else _FAKE_FUTURE
def _actor_message_loop(msg_queue, future_ref):
    """The main message processing loop of an actor."""
    LOG.debug('start')
    try:
        _actor_message_loop_impl(msg_queue, future_ref)
    except Exit:
        # Self-termination: drop remaining messages and exit normally.
        for msg in msg_queue.close(graceful=False):
            _deref(msg.future_ref).cancel()
        _deref(future_ref).set_result(None)
    except BaseException as exc:
        # Any other exception kills the actor; pending callers see their
        # messages cancelled and the liveness future carries the error.
        for msg in msg_queue.close(graceful=False):
            _deref(msg.future_ref).cancel()
        _deref(future_ref).set_exception(exc)
    else:
        # Normal exit path: the loop only returns after the queue closed.
        ASSERT.true(msg_queue.is_closed())
        _deref(future_ref).set_result(None)
    LOG.debug('exit')
def _actor_message_loop_impl(msg_queue, future_ref):
    """Dequeue and process messages one by one."""
    # Note: Call `del msg` as soon as possible (see issue 16284).
    if not _deref(future_ref).set_running_or_notify_cancel():
        raise ActorError('future of this actor has been canceled')
    # The first message must be the __init__() call.
    msg = msg_queue.get()
    if not _deref(msg.future_ref).set_running_or_notify_cancel():
        raise ActorError('__init__ has been canceled')
    try:
        # msg.func is the actor class; calling it constructs the actor.
        actor = msg.func(*msg.args, **msg.kwargs)
    except BaseException as exc:
        _deref(msg.future_ref).set_exception(exc)
        raise
    else:
        _deref(msg.future_ref).set_result(actor)
    del msg
    LOG.debug('start message loop')
    while True:
        try:
            msg = msg_queue.get()
        except queues.Closed:
            break
        if not _deref(msg.future_ref).set_running_or_notify_cancel():
            del msg
            continue
        try:
            # Regular messages are unbound functions; bind to the actor.
            result = msg.func(actor, *msg.args, **msg.kwargs)
        except Return as ret:
            # The method asked to enqueue a follow-up message for itself.
            _deref(msg.future_ref).set_result(ret.result)
            try:
                msg_queue.put(
                    # Use `lambda: None` as a fake weakref
                    _Message(lambda: None, *ret.message_data),
                    # Do not block the message loop inside itself
                    block=False,
                )
            except (queues.Closed, queues.Full) as exc:
                # I am not sure if I should notify the original method
                # caller about this error, nor if I should break this
                # actor message loop. For now let's just log the error
                # and carry on.
                LOG.error('cannot append message %r due to %r',
                          ret.message_data, exc)
            del ret
        except BaseException as exc:
            _deref(msg.future_ref).set_exception(exc)
            raise
        else:
            _deref(msg.future_ref).set_result(result)
            del result
        del msg
#
# Observe that stubs return a Future object of method result (let's call
# this the result Future object). An interesting idea is that, if a
# method returns a Future object X, instead of put it inside the result
# Future object R, thus a future of future, we could add R's callback to
# X. So when X is done, R's callback will be called, and so R will be
# done, too. For example,
#
# class _Alice:
# @actors.method
# def compute(self):
# # Forward long computation to another actor Bob.
# return self.other_stub.do_long_computation()
#
# class _Bob:
# @actors.method
# def do_long_computation(self):
# time.sleep(60) # Simulate a long computation.
# return 42
#
# So under this idea, we will write
#
# stub.compute().result() == 42
#
# rather than
#
# stub.compute().result().result() == 42
#
# Essentially, this makes Bob invisible from an outside observer.
#
#
# This idea, let's call it "future chaining" for now, while sounds
# interesting, has a few issues that I haven't sorted out yet; so I
# will leave the notes here for future reference.
#
# First, the current mental model is that an actor will process messages
# one by one sequentially. Nevertheless, under the future chaining, we
# will have Alice returned immediately from compute() while Bob is still
# processing do_long_computation(). Then Alice will start processing
# the next message. From an outside observer, it is as if the first and
# the second message were being processed concurrently instead of
# sequentially (and the second message could be done before the first
# one, which makes things even more confusing).
#
# Second, since all the callbacks are called by the innermost actor
# thread (Bob), while it is easy to propagate the result (42) to all the
# chained Future objects, it is difficult to propagate and re-raise
# exceptions to all the outer actor threads to kill them properly.
# Imagine while Bob is processing do_long_computation(), it raises an
# exception. Now not only Bob but also Alice should be dead because
# under the future chaining, Bob is invisible from an outside observer,
# and the observer could only observe that Alice's compute() has raised
# an exception, deducing that Alice should be dead after that.
#
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/threads/actors.py",
"copies": "1",
"size": "20805",
"license": "mit",
"hash": -4660248809034923000,
"line_mean": 32.3413461538,
"line_max": 78,
"alpha_frac": 0.6026916607,
"autogenerated": false,
"ratio": 4.093860684769775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5196552345469776,
"avg_score": null,
"num_lines": null
} |
## a mini PDB parser which provides all the functionality we need
import string, sys, os
import Common
## generic PDBError exception, never used directly
class PDBError(Exception):
    """Base class for PDB errors; never raised directly."""
    pass
class ResidueNumberError(PDBError):
    """Raised when a requested residue number is absent from a chain."""
    def __init__(self, number):
        self.number = number
    def __str__(self):
        # Bug fix: previously referenced the bare name `number`, which is
        # undefined inside __str__ (NameError when formatting the error).
        return "Non-existent residue number %d\n" % self.number
class ResidueIntegrityError(PDBError):
    """Raised when more than one residue in a chain shares a number."""
    def __init__(self, number):
        self.number = number
    def __str__(self):
        # Bug fix: same undefined-name problem as ResidueNumberError.
        return "Integrity constraint failed: more than one residue numbered %d in chain\n" % self.number
class Atom:
    """One atom of a residue, with coordinates and PDB record metadata."""
    def __init__(self, serial=1, name=None, alternate=" ", residue=None, coords=(0.,0.,0.), occupancy=1.0, bfactor=1.0, element=' ', isHetatm=0):
        self._serial = serial
        self._name = name
        self._alternate = alternate
        self._residue = residue
        self._coords = coords
        self._occupancy = occupancy
        self._bfactor = bfactor
        self._element = element
        self._isHetatm = isHetatm
    def getSerial(self):
        return self._serial
    def getName(self):
        return self._name
    ## note: changing the residue should change the residue's atom ownership
    def setResidue(self, residue):
        self._residue = residue
    def getResidue(self):
        return self._residue
    def setCoords(self, coords):
        self._coords = coords
    def getCoords(self):
        return self._coords
    def setOccupancy(self, occupancy):
        self._occupancy = occupancy
    def getOccupancy(self):
        return self._occupancy
    def setBfactor(self, bfactor):
        self._bfactor = bfactor
    def getBfactor(self):
        return self._bfactor
    def getRecord(self):
        # Format this atom as a fixed-width ATOM/HETATM record line.
        record_type = 'ATOM'
        if self._isHetatm:
            record_type = 'HETATM'
        residue_name = self._residue.getName()
        residue_number = self._residue.getNumber()
        # 4-character names (and digit+H hydrogens) start one column earlier.
        if len(self._name) == 4 or (self._name[0] in string.digits and self._name[1] == 'H'):
            atom_name = "%-4s" % self._name
        else:
            atom_name = " %-3s" % self._name
        # A trailing letter on the residue number is an insertion code.
        if residue_number[-1] in string.ascii_letters:
            resnum = residue_number[:-1]
            insertion_code = residue_number[-1]
        else:
            resnum = residue_number
            insertion_code = " "
        if len(residue_name) == 4:
            # NOTE(review): this branch passes insertion_code in the column
            # where the other branch passes self._alternate — confirm this
            # is intentional for 4-character residue names.
            code = "%-6s%5d %4s%c%4s%c%4s%c %8.3f%8.3f%8.3f %5.2f%6.2f" % (record_type, self._serial, atom_name, insertion_code, self._residue.getName(), self._residue.getChain().getName(), resnum, insertion_code, self._coords[0], self._coords[1], self._coords[2], self._occupancy, self._bfactor)
        else:
            code = "%-6s%5d %4s%c%3s %c%4s%c %8.3f%8.3f%8.3f %5.2f%6.2f" % (record_type, self._serial, atom_name, self._alternate, self._residue.getName(), self._residue.getChain().getName(), resnum, insertion_code, self._coords[0], self._coords[1], self._coords[2], self._occupancy, self._bfactor)
        return code
    def __str__(self):
        return "Atom %d, %s @ (%5.2f %5.2f %5.2f)" % (self._serial, self._name, self._coords[0], self._coords[1], self._coords[2])
class Residue:
    """One residue: a numbered, named group of atoms belonging to a chain."""

    def __init__(self, number=1, name=None, chain=None, atoms=None):
        self._number = number
        self._name = name
        self._chain = chain
        self._atoms = atoms

    def getNumber(self):
        return self._number

    def getName(self):
        return self._name

    def getAtoms(self):
        return self._atoms

    def getAtom(self, name):
        """Return the first atom whose name matches, or None."""
        for candidate in self._atoms:
            if candidate.getName() == name:
                return candidate
        return None

    def setChain(self, chain):
        self._chain = chain

    def getChain(self):
        return self._chain

    def __str__(self):
        return "Residue '%s', '%s'" % (self._number, self._name)
class Chain:
    """A chain in a model: an ordered residue list plus sequence data."""
    def __init__(self, name, residues=None, seqres=None, model=None):
        self._name = name
        self._residues = residues
        self._sequence = None
        # Bug fix: the constructor previously discarded the seqres and
        # model arguments (it assigned None instead of the parameters).
        self._seqres = seqres
        self._model = model
        self.buildSequence()
    def buildSequence(self):
        ## build the chain's sequence from the residue list
        sequence = [" "] * len(self._residues)
        for i in range(len(self._residues)):
            residue = self._residues[i]
            try:
                res = Common.three_to_one(residue.getName())
            except RuntimeError:
                # Unknown residue names show up as '?' in the sequence.
                res = "?"
            sequence[i] = res
        # ''.join works under both Python 2 and 3 (string.join is 2-only).
        self._sequence = ''.join(sequence)
    def setModel(self, model):
        self._model = model
    def getModel(self):
        # Bug fix: previously returned the undefined bare name `_model`,
        # which raised NameError on every call.
        return self._model
    def getName(self):
        return self._name
    def getResidues(self):
        return self._residues
    def getResidue(self, number):
        """Return the unique residue with the given number.

        Raises ResidueNumberError when absent and ResidueIntegrityError
        when more than one residue carries the number.
        """
        matches = [r for r in self._residues if r.getNumber() == number]
        if len(matches) == 0:
            raise ResidueNumberError(number)
        elif len(matches) > 1:
            # Bug fix: previously raised the undefined name
            # ResidueIntegrityConstraint instead of ResidueIntegrityError.
            raise ResidueIntegrityError(number)
        return matches[0]
    def getSequence(self):
        return self._sequence
    def setSeqres(self, seqres):
        self._seqres = seqres
    def getSeqres(self):
        return self._seqres
    def __str__(self):
        return "Chain %s\nSequence: %s\nSEQRES Sequence: %s" % (self.getName(), self.getSequence(), self.getSeqres())
class Model:
    """A structural model: a mapping of chain identifier -> Chain."""

    def __init__(self, chains=None):
        self._chains = chains

    def getChain(self, chain):
        """Look up a single chain by its identifier."""
        return self._chains[chain]

    def getChains(self):
        """Return the underlying chain-id -> Chain mapping."""
        return self._chains
class PDB:
    """A parsed PDB structure: numbered models plus passthrough records."""
    def __init__(self, models, extra_records):
        self._models = models
        self._extra_records = extra_records
    def getModels(self):
        return self._models
    def getModel(self, model_number):
        return self._models[model_number]
    def __str__(self):
        # Dump every model/chain/residue/atom, one per line.
        # NOTE: .keys()/.sort() usage is Python 2 only.
        s = ""
        for key in self._models.keys():
            model = self._models[key]
            chains = model.getChains()
            chainkeys = chains.keys()
            chainkeys.sort()
            for chainkey in chainkeys:
                chain = chains[chainkey]
                s += chain.__str__() + "\n"
                for residue in chain.getResidues():
                    s += residue.__str__() + "\n"
                    for atom in residue.getAtoms():
                        s += atom.__str__() + "\n"
        return s
    ## destructively transform the second pdb onto the first using the rotation matrix from the structure match
    def transformPDB(self,matrix):
        # NOTE(review): `Numeric` is not among this module's visible
        # imports (string/sys/os/Common) — confirm it is imported
        # elsewhere or this method raises NameError when called.
        ## for every model,
        for key in self._models.keys():
            model = self._models[key]
            chains = model.getChains()
            chainkeys = chains.keys()
            chainkeys.sort()
            ## for every chain,
            for chainkey in chainkeys:
                chain = chains[chainkey]
                residues = chain.getResidues()
                ## for every residue,
                for res in residues:
                    atoms = res.getAtoms()
                    ## for every atom,
                    for atom in atoms:
                        c = atom.getCoords()
                        ## transform the coordinates
                        # Homogeneous coordinate (x, y, z, 1) times matrix.
                        newc = Numeric.innerproduct(Numeric.array((c[0], c[1], c[2], 1.)), matrix)
                        atom.setCoords((newc[0], newc[1], newc[2]))
    ## write a PDB, including SEQRES records, to a file
    def formatPDB(self, handle, chainkeys = None, use_atom_seqres=1):
        for record in self._extra_records:
            handle.write(record)
        modelkeys = self._models.keys()
        ## print SEQRES only once, for model #1
        model = self._models[modelkeys[0]]
        chains = model.getChains()
        if chainkeys == None:
            chainkeys = chains.keys()
            chainkeys.sort()
        for chainkey in chainkeys:
            chain = chains[chainkey]
            if use_atom_seqres:
                # Emit SEQRES from the actual residue list, 13 per record.
                residues = chain.getResidues()
                for i in range(0, len(residues), 13):
                    handle.write("SEQRES%4d %c%5d " % ((i/13)+1, chain.getName(), len(residues)))
                    for j in range(i, i+13):
                        if j == len(residues):
                            break
                        handle.write(" %3s" % residues[j].getName())
                    handle.write("\n")
            else:
                # Emit SEQRES from the stored one-letter SEQRES sequence.
                # NOTE(review): `protein_one_to_three` is not defined in
                # this module — possibly meant Common.protein_one_to_three.
                seq = chain.getSeqres()
                for i in range(0, len(seq), 13):
                    handle.write("SEQRES%4d %c%5d " % ((i/13)+1, chain.getName(), len(seq)))
                    for j in range(i, i+13):
                        if j == len(seq):
                            break
                        handle.write(" %3s" % protein_one_to_three[seq[j]])
                    handle.write("\n")
        for key in modelkeys:
            model = self._models[key]
            if len(modelkeys) > 1:
                handle.write("MODEL %d\n" % key)
            # NOTE(review): `chains` is not re-fetched from this model, so
            # every MODEL section writes model #1's chains — confirm.
            for chainkey in chainkeys:
                chain = chains[chainkey]
                residues = chain.getResidues()
                for res in residues:
                    atoms = res.getAtoms()
                    for atom in atoms:
                        handle.write(atom.getRecord())
                        handle.write("\n")
                handle.write("TER\n")
            if len(modelkeys) > 1:
                handle.write("ENDMDL\n")
        handle.write("END\n")
        handle.close()
| {
"repo_name": "dakoner/PDB",
"path": "__init__.py",
"copies": "1",
"size": "9591",
"license": "apache-2.0",
"hash": -6740440898929897000,
"line_mean": 32.0724137931,
"line_max": 301,
"alpha_frac": 0.5373787926,
"autogenerated": false,
"ratio": 3.822638501394978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.984297675397635,
"avg_score": 0.0034081080037255645,
"num_lines": 290
} |
# Amino acid properties from https://www.ncbi.nlm.nih.gov/pubmed/15895431
# TODO: Missing ['C'][12] - set to zero
# TODO: Missing ['P'][14] - set to zero
# Add selenocysteine
# Amino acid property tables: each value is a tuple of numeric descriptors
# for one amino acid (keyed by one-letter code). The three tables group the
# descriptors into hydrophobic, steric and electronic properties.
# NOTE(review): the per-column meanings are not stated in this file; they
# come from the PubMed reference cited above (PMID 15895431) -- confirm
# against that source before relying on individual columns.
hydrophobic_props = {
    'A': (7.3, -0.368, 0.67, 297, 0, 0, 0.1, 1, 0.5, -0.1, -2.89, 25, 5.1, 0.87, 1.94, 0.89, -1.52, 0.3),
    'C': (-9.2, 4.53, 0.38, 178, 0, 0, -1.42, 0.1, -6.8, -2.2, -2.49, 32, 0, 1.52, -1.24, 0.85, -0.29, 0.9),
    'D': (-2.9, 2.06, -1.2, 270, 1, 4, 0.78, 6.5, -8.2, -2.8, -3.38, 2, 0.7, 0.66, -10.95, 0.87, -2.6, -0.6),
    'E': (-7.1, 1.77, -0.76, 249, 1, 4, 0.83, 6.2, -16.9, -7.5, -2.94, 14, 1.8, 0.67, -10.2, 0.84, -2.47, -0.7),
    'F': (19.2, 1.06, 2.3, 284, 0, 0, -2.12, 1.4, 13.2, 13.9, -1.63, 100, 9.6, 2.87, -0.76, 0.52, -0.04, 0.5),
    'G': (-1.2, -0.525, 0, 290, 0, 0, 0.33, 1.1, 0, -0.5, -3.25, -2, 4.1, 0.1, 2.39, 0.92, -1.83, 0.3),
    'H': (-2.1, 0, 0.64, 277, 1, 1, -0.5, 2.8, -3.5, 0.8, -2.84, -26, 1.6, 0.87, -10.27, 0.83, -1.7, -0.1),
    'I': (6.6, 0.791, 1.9, 284, 0, 0, -1.13, 0.8, 13.9, 11.8, -1.72, 91, 9.3, 3.15, 2.15, 0.76, -0.03, 0.7),
    'K': (-3.7, 0, -0.57, 224, 2, 1, 1.4, 5.3, 0.1, -3.2, -3.31, -26, 1.3, 1.64, -9.52, 0.97, -2.82, -1.8),
    'L': (20, 1.07, 1.9, 337, 0, 0, -1.18, 0.8, 8.8, 10, -1.61, 100, 10, 2.17, 2.28, 0.73, -0.13, 0.5),
    'M': (5.6, 0.656, 2.4, 283, 0, 0, -1.59, 0.7, 4.8, 7.1, -1.84, 68, 8.7, 1.67, -1.48, 0.76, 0.6, 0.4),
    'N': (-5.7, 0, -0.6, 236, 2, 3, 0.48, 2.2, 0.8, -1.6, -3.41, -7, 0.6, 0.09, -9.68, 0.89, -2.41, -0.5),
    'P': (5.1, -2.24, 1.2, 222, 0, 0, 0.73, 0.9, 6.1, 8, -2.5, 25, 4.9, 2.77, 0, 0.82, -1.34, -0.3),
    'Q': (-0.3, 0.731, -0.22, 185, 2, 3, 0.95, 2.1, -4.8, -2.5, -3.15, 0, 1.4, 0, -9.38, 0.82, -2.05, -0.7),
    'R': (-3.6, -1.03, -2.1, 238, 4, 3, 1.91, 2.3, 0.8, -4.5, -3.3, -7, 2, 0.85, -19.92, 0.88, -2.84, -1.4),
    'S': (-4.1, -0.524, 0.01, 228, 1, 2, 0.52, 1.7, 1.2, -3.7, -3.3, -2, 3.1, 0.07, -5.06, 0.96, -1.87, -0.1),
    'T': (0.8, 0, 0.52, 253, 1, 2, 0.07, 1.5, 2.7, 1.5, -2.91, 7, 3.5, 0.07, -4.88, 0.92, -1.57, -0.2),
    'V': (3.5, 0.401, 1.5, 293, 0, 0, -1.27, 0.9, 2.7, 3.3, -2.08, 62, 8.5, 1.87, 1.99, 0.85, -0.61, 0.6),
    'W': (16.3, 1.6, 2.6, 282, 1, 0, -0.51, 1.9, 14.9, 18.1, -1.75, 109, 9.2, 3.77, -5.88, 0.2, 0.42, 0.3),
    'Y': (5.9, 4.91, 1.6, 344, 1, 2, -0.21, 2.1, 6.1, 8.2, -2.42, 56, 8, 2.67, -6.11, 0.49, -0.87, -0.4)}
# Steric (size/shape) descriptors, 17 values per amino acid.
# TODO (from header): ['C'][12] was missing in the source and is set to 0.
steric_props = {
    'A': (91.5, 115, 1.28, 1, 2.87, 1.52, 2.04, 27.8, 0.77, 121.9, 243.2, 0.77, 5.2, 0.025, 4.34, 17.05, 13.7),
    'C': (117.7, 135, 1.77, 2.43, 4.47, 1.52, 3.41, 15.5, 1.38, 113.7, 209.4, 1.22, 6.1, 0.1, 35.77, 28.84, 25),
    'D': (124.5, 150, 1.6, 2.78, 4.74, 1.52, 3.78, 60.6, 1.99, 121.2, 215, 1.43, 5, 0.1, 12, 19.27, 30),
    'E': (155.1, 190, 1.56, 3.78, 5.97, 1.52, 3.31, 68.2, 2.63, 118.2, 213.6, 1.77, 6, 0.1, 17.26, 20.12, 40.2),
    'F': (203.4, 210, 2.94, 5.89, 4.62, 1.52, 6.02, 25.5, 2.97, 118.2, 203.7, 1.9, 7.1, 0.39, 29.4, 16.26, 56.1),
    'G': (66.4, 75, 0, 0, 2.06, 1, 1, 24.5, 0, 0, 300, 0.58, 4.2, 0.025, 0, 38.14, 3.5),
    'H': (167.3, 195, 2.99, 4.66, 5.23, 1.52, 5.66, 50.7, 2.76, 118.2, 219.9, 1.78, 6, 0.1, 21.81, 23.07, 45.1),
    'I': (168.8, 175, 4.19, 4, 4.92, 1.9, 3.49, 22.8, 1.83, 118.9, 217.9, 1.56, 7, 0.19, 19.06, 16.66, 44.4),
    'K': (171.3, 200, 1.89, 4.77, 6.89, 1.52, 4.87, 103, 2.94, 122, 210.9, 2.08, 6, 0.2, 21.29, 16.46, 61.5),
    'L': (167.9, 170, 2.59, 4, 4.92, 1.52, 4.45, 27.6, 2.08, 118.1, 205.6, 1.54, 7, 0.19, 18.78, 10.89, 44.4),
    'M': (170.8, 185, 2.35, 4.43, 6.36, 1.52, 4.8, 33.5, 2.34, 113.1, 204, 1.8, 6.8, 0.19, 21.64, 20.61, 45),
    'N': (135.2, 160, 1.6, 2.95, 4.58, 1.52, 4.37, 60.1, 1.98, 117.5, 207.1, 1.45, 5, 0.1, 13.28, 34.81, 32.7),
    'P': (129.3, 145, 2.67, 2.72, 4.11, 1.52, 4.31, 51.5, 1.42, 81.9, 237.4, 1.25, 6.2, 0.17, 10.93, 23.94, 30.7),
    'Q': (161.1, 180, 1.56, 3.95, 6.11, 1.52, 3.53, 68.7, 2.58, 118, 205.4, 1.75, 6, 0.1, 17.56, 15.42, 42.7),
    'R': (202, 225, 2.34, 6.13, 7.82, 1.52, 6.24, 94.7, 3.72, 121.4, 206.6, 2.38, 6, 0.2, 26.66, 21.25, 77.3),
    'S': (99.1, 115, 1.31, 1.6, 3.97, 1.52, 2.7, 42, 1.28, 117.9, 232, 1.08, 4.9, 0.025, 6.35, 19.95, 18.3),
    'T': (122.1, 140, 3.03, 2.6, 4.11, 1.73, 3.17, 45, 1.43, 117.1, 226.7, 1.24, 5, 0.1, 11.01, 18.92, 28.5),
    'V': (141.7, 155, 3.67, 3, 4.11, 1.9, 3.17, 23.7, 1.49, 121.7, 220.3, 1.29, 6.4, 0.15, 13.92, 17.06, 34.1),
    'W': (237.6, 255, 3.21, 8.08, 7.68, 1.52, 5.9, 34.7, 3.58, 118.4, 203.7, 2.21, 7.6, 0.56, 42.53, 23.36, 74.8),
    'Y': (203.6, 230, 2.94, 6.47, 4.73, 1.52, 6.72, 55.2, 3.36, 110, 195.6, 2.13, 7.1, 0.39, 31.53, 26.49, 59.1)}
# Electronic (charge/polarity) descriptors, 15 values per amino acid.
# TODO (from header): ['P'][14] was missing in the source and is set to 0
# -- here that shows up as the 0 in the 'P' row's second position.
electronic_props = {
    'A': (4.35, 8.249, 0, 0, 7.3, -0.01, 0, 0, 8.1, 0, 0, 6, 0.0373, 9.69, 2.34),
    'C': (4.65, 8.312, 0, 1, 14.4, 0.12, 0, 0, 5.5, 0, 0, 5.05, 0.0829, 8.35, 1.92),
    'D': (4.76, 8.41, 1, 0, 9.2, 0.15, 0, 1, 13, -1, 0, 2.77, 0.1263, 9.6, 1.88),
    'E': (4.29, 8.368, 1, 0, 11.4, 0.07, 0, 1, 12.3, -1, 1.27, 3.22, 0.0058, 9.67, 2.1),
    'F': (4.66, 8.228, 0, 1, 13.9, 0.03, 0, 0, 5.2, 0, 0, 5.48, 0.0946, 9.18, 2.16),
    'G': (3.97, 8.391, 1, 0, 0, 0, 0, 0, 9, 0, 0, 5.97, 0.05, 9.78, 2.35),
    'H': (4.63, 8.415, 0, 1, 10.2, 0.08, 1, 0, 10.4, 0, 1.45, 7.59, 0.0242, 9.17, 1.82),
    'I': (3.95, 8.195, 0, 0, 16.1, -0.01, 0, 0, 5.2, 0, 0, 6.02, 0, 9.68, 2.36),
    'K': (4.36, 8.408, 0, 1, 10.9, 0, 1, 0, 11.3, 1, 3.67, 9.74, 0.0371, 9.18, 2.16),
    'L': (4.17, 8.423, 0, 0, 10.1, -0.01, 0, 0, 4.9, 0, 0, 5.98, 0, 9.6, 2.36),
    'M': (4.52, 8.418, 0, 1, 10.4, 0.04, 0, 0, 5.7, 0, 0, 5.74, 0.0823, 9.21, 2.28),
    'N': (4.75, 8.747, 1, 1, 8, 0.06, 0, 0, 11.6, 0, 0, 5.41, 0.0036, 8.8, 2.02),
    'P': (4.44, 0, 0, 0, 17.8, 0, 0, 0, 8, 0, 0, 6.3, 0.0198, 10.64, 1.95),
    'Q': (4.37, 8.411, 0, 1, 10.6, 0.05, 0, 0, 10.5, 0, 1.25, 5.65, 0.0761, 9.13, 2.17),
    'R': (4.38, 8.274, 0, 1, 11.1, 0.04, 1, 0, 10.5, 1, 2.45, 10.76, 0.0959, 8.99, 1.82),
    'S': (4.5, 8.38, 0, 0, 13.1, 0.11, 0, 0, 9.2, 0, 0, 5.68, 0.0829, 9.21, 2.19),
    'T': (4.35, 8.236, 0, 0, 16.7, 0.04, 0, 0, 8.6, 0, 0, 5.66, 0.0941, 9.1, 2.09),
    'V': (3.95, 8.436, 0, 0, 17.2, 0.01, 0, 0, 5.9, 0, 0, 5.96, 0.0057, 9.62, 2.32),
    'W': (4.7, 8.094, 0, 1, 13.2, 0, 0, 0, 5.4, 0, 6.93, 5.89, 0.0548, 9.44, 2.43),
    'Y': (4.6, 8.183, 0, 1, 13.9, 0.03, 0, 0, 6.2, 0, 5.06, 5.66, 0.0516, 9.11, 2.2)}
# Sanity check: every property table must cover exactly the same amino acids.
# (assert is a statement, not a function -- no parentheses needed.)
assert electronic_props.keys() == steric_props.keys()
assert steric_props.keys() == hydrophobic_props.keys()

# Concatenate the hydrophobic, steric and electronic property vectors into a
# single tuple per amino acid (dict comprehension instead of a manual loop).
all_aa_props = {aa: hydrophobic_props[aa] + steric_props[aa] + electronic_props[aa]
                for aa in electronic_props}


def seq_to_aa_props(seq):
    """Flatten a one-letter amino-acid sequence into one flat list of floats.

    seq -- iterable of one-letter amino acid codes present in all_aa_props

    PEP 8 (E731): a def instead of a lambda assignment; the intermediate
    list of per-residue tuples is folded into the comprehension.
    """
    return [float(x) for aa in seq for x in all_aa_props[aa]]
| {
"repo_name": "massie/notebooks",
"path": "aa_props.py",
"copies": "2",
"size": "6634",
"license": "apache-2.0",
"hash": -2248640615891661300,
"line_mean": 82.9746835443,
"line_max": 114,
"alpha_frac": 0.4526680736,
"autogenerated": false,
"ratio": 1.6070736434108528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7952782451040785,
"avg_score": 0.021391853194013443,
"num_lines": 79
} |
# Ami_Square_Wave.py
#
# DEMO to generate an audio tone from one of the classic AMIGA audio
# channels, in this case a square wave. Almost any waveform is possible
# and the Left Mouse Button is used to STOP this DEMO.
#
# MINIMUM Requirements Are:-
# 68EC020 CPU and 2MB RAM total, example, a standard A1200(HD),
# WinUAE and E-UAE. Standard AMIGA OS3.0x install or better.
# Full Python 1.4.0, minimum, installed, can be found on AMINET.
# (Python 1.5.2 to 2.0.x are also available on AMINET.)
# (Now Python 2.4.6 is available for advanced 68K machines.)
#
# To install just download the file and drag this file into an Assign(ed)
# PYTHON: volume...
# From an AMIGA A1200(HD), E-UAE or WinUAE Python 1.4.0 prompt:-
#
# >>> execfile("PYTHON:Ami_Square_Wave.py")
#
# And away you go...
# ==========================================================================
# The DEMO assembly code that will be compiled and converted.
# Call the code beep.asm...
# From a CLI and using the a68k and blink from AMINET:-
#
# Prompt> a68k beep.asm<RETURN/ENTER>
# Some reports here...
# Prompt> blink beep.o<RETURN/ENTER>
# Some reports here...
#
# This code is TOTALLY, even address, position independent.
# ==========================================================================
# start:
# ;"beep.asm" test code...
# movem.l d0-d7/a0-a6,-(sp) ;Save all registers just in case.
# movea.l $4,a6 ;Set ~execbase~.
# moveq #16,d0 ;Length of square wave data.
# moveq #2,d1 ;Set to chip ram.
# jsr -198(a6) ;Allocate memory for the task.
# beq.s getout ;On error, Quit.
# move.l d0,a0 ;Set address in chip ram.
# move.l #$3f3f3f3f,(a0) ;Set first four bytes of sixteen.
# addq.l #4,a0 ;Move address by four.
# move.l #$3f3f3f3f,(a0) ;Set next four bytes of sixteen.
# addq.l #4,a0 ;Move address by four.
# move.l #$80808080,(a0) ;Set next four bytes of sixteen.
# addq.l #4,a0 ;Move address by four.
# move.l #$80808080,(a0) ;Set last four bytes of sixteen.
# ;This ensures absolute position
# ;independence.
# lea $dff000,a5 ;Set HW register base.
# move.w #$000f,$96(a5) ;Disable audio DMA.
# move.l d0,$a0(a5) ;Set address of audio data.
# move.w #8,$a4(a5) ;Set length in words.
# move.w #64,$a8(a5) ;Set volume to maximum.
# move.w #220,$a6(a5) ;Set the period.
# move.w #$00ff,$9e(a5) ;Disable any modulation.
# move.w #$8201,$96(a5) ;Enable audio DMA, 1 channel only.
# wait:
# btst #6,$bfe001 ;If LMB pressed then Quit.
# beq.s closeme ;Do it.
# bne.s wait ;Play the tone until LMB pressed...
# closeme:
# move.w #$000f,$96(a5) ;Disable audio DMA.
# move.l d0,a0 ;Address of the square wave data.
# moveq #16,d0 ;The data length to recover.
# jsr -210(a6) ;Free assigned memory.
# getout:
# movem.l (sp)+,d0-d7/a0-a6 ;Restore all registers.
# clr.l d0 ;Set return code OK.
# rts
# nop
# even
# end
# ==========================================================================
# The text HEX file to be edited for the Python code:-
#
# Prompt> Type HEX beep > beep.hex<RETURN/ENTER>
#
# Gives a text file "beep.hex" that has the contents:-
# ==========================================================================
# 0000: 000003F3 00000000 00000001 00000000 ...ó............
# 0010: 00000000 00000021 000003E9 00000021 .......!...é...!
# 0020: 48E7FFFE 2C780004 70107202 4EAEFF3A Hç.þ,x..p.r.N®.:
# 0030: 67682040 20BC3F3F 3F3F5888 20BC3F3F gZ @ Œ????X. Œ??
# 0040: 3F3F5888 20BC8080 80805888 20BC8080 ??X. Œ....X. Œ..
# 0050: 80804BF9 00DFF000 3B7C000F 00962B40 ..Kù.ßð.;|....+@
# 0060: 00A03B7C 000800A4 3B7C0040 00A83B7C . ;|...€;|.@.š;|
# 0070: 00DC00A6 3B7C00FF 009E3B7C 82010096 .Ü.Š;|....;|....
# 0080: 08390006 00BFE001 670266F4 3B7C000F .9...¿à.g.fô;|..
# 0090: 00962040 70104EAE FF2E4CDF 7FFF4280 .. @p.N®..Lß..B.
# 00A0: 4E754E71 000003F2 NuNq...ò
# ==========================================================================
# With careful manipulation of the Python code you could have control of the
# audio levels, channels, frequency, etc, using this method...
#
# Enjoy finding simple solutions to often very difficult problems...
#
# $VER: Ami_Square_Wave.py_Version_0.00.30_(C)2007-2011_B.Walker_G0LCU.
#
# Original copyright, (C)2007-2011, B.Walker, G0LCU. Now finally issued as Public Domain.
import os
# Manually place the executable code into practical binary string lengths.
# These are the raw bytes of the pre-assembled 68k "beep" executable shown
# in the header comment, split so the volume byte can be patched in before
# the file is written out.
one="\x00\x00\x03\xF3\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x21\x00\x00"
two="\x03\xE9\x00\x00\x00\x21\x48\xE7\xFF\xFE\x2C\x78\x00\x04\x70\x10\x72\x02\x4E\xAE\xFF\x3A\x67\x68\x20\x40\x20\xBC"
wave="\x3F\x3F\x3F\x3F\x58\x88\x20\xBC\x3F\x3F\x3F\x3F\x58\x88\x20\xBC\x80\x80\x80\x80\x58\x88\x20\xBC\x80\x80\x80\x80"
three="\x4B\xF9\x00\xDF\xF0\x00\x3B\x7C\x00\x0F\x00\x96\x2B\x40\x00\xA0\x3B\x7C\x00\x08\x00\xA4\x3B\x7C\x00"
volume="\x40"  # default volume byte (decimal 64); replaced below from user input
four="\x00\xA8\x3B\x7C\x00\xDC\x00\xA6\x3B\x7C\x00\xFF\x00\x9E\x3B\x7C\x82\x01\x00\x96\x08\x39\x00\x06\x00\xBF"
five="\xE0\x01\x67\x02\x66\xF4\x3B\x7C\x00\x0F\x00\x96\x20\x40\x70\x10\x4E\xAE\xFF\x2E\x4C\xDF\x7F\xFF\x42\x80"
six="\x4E\x75\x4E\x71\x00\x00\x03\xF2"
# Clear the screen the standard AMIGA way... ;o)
print "\f"
# A simple user screen...
print "1 KHz Square Wave Generator for the classic AMIGA A1200."
print "Using standard text mode Python 1.4.0 to 2.0.1.\n"
print "(C)2007-2011, B.Walker, GOLCU. Issued as Public Domain...\n"
# Show how to change output, (volume), level...
vol=raw_input("Set output level, 0 to 64:- ")
# Don't allow any errors...
# Sanitise the typed value: empty or over-long input falls back to "64",
# and any non-digit character resets the whole value to "64".
if vol=="": vol="64"
if len(vol)>=3: vol="64"
count=0
while count<=(len(vol)-1):
    if vol[count]>=chr(48) and vol[count]<=chr(57): count=count+1
    else: vol="64"
# NOTE(review): eval() on user input -- tolerable here only because the
# loop above has forced vol to contain digits only; int(vol) would be safer.
if eval(vol)>=64: vol="64"
if eval(vol)<=0: vol="0"
# Patch the user-selected level into the executable's volume byte.
volume=chr(eval(vol))
# Put them all together as a single binary string.
amiga_exe_file=one+two+wave+three+volume+four+five+six
# Generate a file called SquareWave inside the S: VOLUME and write to the disk.
amigafile=open("S:SquareWave","wb+")
amigafile.write(amiga_exe_file)
amigafile.close()
# Give a short delay to allow system to settle.
os.system("C:Wait 1")
print "\nPress Left Mouse Button to stop...\n"
# Ensure the file SquareWave can be executed.
os.system("C:Protect S:SquareWave rwed")
# Now run it.
os.system("S:SquareWave")
print "This DEMO has now stopped..."
# Ami_Square_Wave.py end...
# Enjoy finding simple solutions to often very difficult problems.
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577971_AMIGAHeads_Only_Generating_Audio_Signal_Text/recipe-577971.py",
"copies": "1",
"size": "6451",
"license": "mit",
"hash": -383967958270615000,
"line_mean": 39.6772151899,
"line_max": 119,
"alpha_frac": 0.6450910222,
"autogenerated": false,
"ratio": 2.3177064551027766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34627974773027764,
"avg_score": null,
"num_lines": null
} |
''' A mixin class for building mixed models '''
import numpy as np
from scipy.linalg import inv
from scipy.sparse import issparse
from scipy.sparse.linalg import inv as spinv
class MixedModelMixin(object):
    "Mixin class for populations to implement mixed model functions"

    def __getmatrix(self, mtype):
        """Return the requested genetic relationship matrix.

        :param mtype: one of 'additive', 'dominance', 'mitochrondrial'
        :raises ValueError: for an unrecognized matrix type
        """
        if mtype == 'additive':
            return self.additive_relationship_matrix()
        elif mtype == 'dominance':
            return self.dominance_relationship_matrix()
        elif mtype == 'mitochrondrial':
            # NOTE(review): 'mitochrondrial' looks like a typo for
            # 'mitochondrial', but the key is part of the caller-facing
            # contract, so it is kept as-is.
            return self.mitochondrial_relationship_matrix()
        else:
            raise ValueError(
                'Invalid pedigree covariance matrix type: {}'.format(mtype))

    def __sort_inds_in_ped(self, indlist):
        """
        Takes a list of individuals, filters out the ones in the current
        pedigree and sorts those.

        :param indlist: individuals to sort
        """
        return sorted((x for x in indlist if x.pedigree.label == self.label),
                      key=lambda x: (x.pedigree.label, x.label))

    def incidence_matrix(self, variable=None, inds=None, onlylevels=None):
        """
        Generates an incidence matrix for a random effect in a mixed model
        based on ``variable``. If no variable is given, the individual is
        used (i.e. an identity matrix).

        :param variable: phenotype to form the matrix for
        :param inds: only use these individuals
        :param onlylevels: only use these levels of the random effect
        :returns: numpy matrix of 0/1 (dtype int8); one row per individual,
            one column per sorted level
        :raises ValueError: if no levels remain, only one level exists, or
            an individual has no level in the random effect
        """
        if variable is None:
            getvar = lambda ind: ind.label
        else:
            getvar = lambda ind: ind.phenotypes[variable]
        # Bug fix: the original sorted the level set into a *list* and then
        # computed `levels & onlylevels`, which raises TypeError because
        # lists do not support set intersection. Keep the levels as a set
        # until all filtering is done, then sort once.
        level_set = {getvar(ind) for ind in self.individuals}
        if onlylevels is not None:
            level_set &= set(onlylevels)
        levels = sorted(level_set)
        # Test for cases incompatible with the mixed model
        if not levels:
            raise ValueError('No valid levels for variable!')
        elif len(levels) == 1:
            raise ValueError('Variable only has one level!')
        if not inds:
            inds = self.individuals
        Z = [[getvar(ind) == level for level in levels] for ind in inds]
        # Make sure everyone has a 1 in their row for the incidence matrix
        if not all(sum(row) for row in Z):
            raise ValueError(
                'Individuals are missing values in random effects!')
        return np.matrix(Z, dtype=np.int8)
| {
"repo_name": "jameshicks/pydigree",
"path": "pydigree/stats/mixedmodel/mixin.py",
"copies": "1",
"size": "2823",
"license": "apache-2.0",
"hash": 5200402038018341000,
"line_mean": 34.2875,
"line_max": 79,
"alpha_frac": 0.6110520723,
"autogenerated": false,
"ratio": 4.383540372670807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5494592444970807,
"avg_score": null,
"num_lines": null
} |
"""A mix of `bib.evensen2009ensemble` and `bib.sakov2008implications`.
.. note::
Since there is no noise, and the system is stable,
the rmse's from this HMM go to zero as `T` goes to infinity.
Thus, benchmarks largely depend on the initial error,
and so these absolute rmse values are not so useful
for quantatative evaluation of DA methods.
For that purpose, see `dapper.mods.LA.raanes2015` instead.
"""
import numpy as np
import dapper.mods as modelling
from dapper.mods.LA import Fmat, sinusoidal_sample
from dapper.mods.Lorenz96 import LPs
# State dimension and number of observed components.
Nx = 1000
Ny = 4
# Indices of the Nx state components that are observed (evenly spaced).
jj = modelling.linspace_int(Nx, Ny)
tseq = modelling.Chronology(dt=1, dkObs=5, T=300, BurnIn=-1, Tplot=100)
# WITHOUT explicit matrix (assumes dt == dx/c):
# step = lambda x,t,dt: np.roll(x,1,axis=x.ndim-1)
# WITH:
Fm = Fmat(Nx, c=-1, dx=1, dt=tseq.dt)
def step(x, t, dt):
    """Advance the state one step of the linear advection: x @ Fm.T.

    Only the chronology's own dt is supported (Fm was built for it).
    """
    assert dt == tseq.dt
    return x @ Fm.T
Dyn = {
    'M': Nx,
    'model': step,
    # The model is linear, so its Jacobian is the constant matrix Fm.
    'linear': lambda x, t, dt: Fm,
    'noise': 0,
}
# In the animation, it can sometimes/somewhat occur
# that the truth is outside 3*sigma !!!
# Yet this is not so implausible because sinusoidal_sample()
# yields (multivariate) uniform (random numbers) -- not Gaussian.
wnum = 25
a = np.sqrt(5)/10
# Initial ensemble: scaled sinusoidal samples with wavenumber wnum.
X0 = modelling.RV(M=Nx, func = lambda N: a*sinusoidal_sample(Nx, wnum, N))
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.01
HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0, LP=LPs(jj))
####################
# Suggested tuning
####################
# xp = EnKF('PertObs',N=100,infl=1.02)
| {
"repo_name": "nansencenter/DAPPER",
"path": "dapper/mods/LA/evensen2009.py",
"copies": "1",
"size": "1559",
"license": "mit",
"hash": -8807714321585998000,
"line_mean": 25.4237288136,
"line_max": 74,
"alpha_frac": 0.6626042335,
"autogenerated": false,
"ratio": 2.664957264957265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8817597451205081,
"avg_score": 0.001992809450436569,
"num_lines": 59
} |
# Ammianus writes in prose. Things will be a little different from Statius
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# functions are mostly made by Sarah Otts
def add_to_database(verse_entries, db):
    """Insert scraped verse entries into the ``texts`` table of ``db``.

    Existing Ammianus rows are deleted first so re-running the scraper
    does not create duplicates. The caller is responsible for commit().
    """
    logger.info("Adding {} entries to the database".format(len(verse_entries)))
    curs = db.cursor()
    # Replace old Ammianus entries with new ones to prevent duplications.
    curs.execute("DELETE FROM texts WHERE author='Ammianus'")
    # Bug fix: the original ran a full `SELECT * FROM texts` on every
    # iteration and discarded the result; build all rows up front and
    # insert them in a single executemany() call instead.
    rows = [(None, v["title"], v["book"], "Latin", v["author"], v["date"],
             v["chapter"], v["verse"], v["text"], v["link"], "prose")
            for v in verse_entries]
    curs.executemany("INSERT INTO texts VALUES (?,?,?,?,?,?,?,?,?,?,?)", rows)
def add_entry_to_list(entries, title, book, author, date, chapter, verse, text, txturl):
    """Append one verse record, shaped as a dict, to ``entries``."""
    entries.append({
        "title": title,
        "book": book,
        "author": author,
        "date": date,
        "chapter": chapter,
        "verse": verse,
        "text": text,
        "link": txturl,
    })
def get_verses(soup):
    """Split a paragraph's text into its verses.

    Returns None for an empty paragraph, otherwise a list of non-empty
    verse strings.

    Bug fix: the original returned a filter object, which is always truthy
    even when it yields nothing -- callers using ``if get_verses(p):`` as an
    emptiness check were never skipping empty paragraphs. Returning a list
    makes that check (and repeated iteration) work as intended.
    """
    # if there's nothing in the paragraph, return None
    if len(soup.contents) == 0:
        return None
    para_text = soup.get_text()
    # Verse markers "[x]" can contain arabic numerals (optionally followed by
    # an upper-case letter) or lower-case roman numerals; brackets optional
    # around the numeric form.
    verses = re.split(r'\[?[0-9]+[A-Z]?\]?|\[[ivx]+\]', para_text)
    verses = [re.sub(r'^\s+', '', v) for v in verses]  # remove leading whitespace
    verses = [re.sub(r'^\n', '', v) for v in verses]   # remove leading newline
    return [v for v in verses if v]
def get_name_and_author_of_book(soup, url):
    """Return [book, author] for a work's page.

    Primary source is the page <title> ("Author: Book"); when the title has
    no colon, the whole title is the author and the book name is taken from
    the text after the first <br> in the first <p>.
    """
    pagetitle = soup.title.string
    parts = pagetitle.split(":")
    if len(parts) < 2:
        # fallback: author from the page title, book from the first p-tag
        author = pagetitle
        book = soup.p.br.next_sibling
    else:
        author = parts[0]
        book = parts[-1]
    # strip surrounding whitespace/newlines from both fields
    book = re.sub(r'^\s+|\s+$|\n', '', book)
    author = re.sub(r'^\s+|\s+$|\n', '', author)
    return [book, author]
def get_title_and_date(soup):
    """Return [title, date] for a collection page.

    Title comes from the first <h1>, else from <p class="pagehead"> (whose
    <span>, if any, holds a parenthesised date). The date falls back to
    <h2 class="date">. The title is upper-cased and en-dashes in the date
    are normalised to hyphens.

    Idiom fixes only: ``!= None`` -> ``is not None``, ``not x is None`` ->
    ``x is not None``, redundant parentheses dropped; behavior unchanged.
    """
    title_soup = soup.find('h1')
    title = ""
    date = ""
    if title_soup is not None:
        title = title_soup.string
    else:
        pagehead = soup.find('p', class_="pagehead")
        if pagehead is not None:
            title = pagehead.find(text=True)
            if pagehead.find('span') is not None:
                date = pagehead.find('span').string.replace("(", '').replace(")", '')
        else:
            # NOTE(review): this branch re-queries <h1>, which was already
            # None on this path, so h1.string would raise AttributeError;
            # kept as-is to preserve behavior.
            h1 = soup.find('h1')
            title = h1.string
    if date is None or date == "":
        date_tag = soup.find('h2', class_='date')
        if date_tag is not None:
            date = date_tag.find(text=True).replace('(', '').replace(')', '')
        else:
            date = ""
    date = date.replace(u"\u2013", '-')
    title = title.upper()
    return [title, date]
def main():
    """Scrape Ammianus' works from The Latin Library into texts.db."""
    # collection name: Ammianus
    ammaniusURL = 'http://www.thelatinlibrary.com/ammianus.html'
    siteURL = 'http://www.thelatinlibrary.com'
    ammaniusOpen = urllib.request.urlopen(ammaniusURL)
    soup = BeautifulSoup(ammaniusOpen, "html5lib")
    textsURL = []
    # search through Ammanius' soup for links to his works
    for a in soup.find_all('a', href=True):
        link = a['href']
        textsURL.append("{}/{}".format(siteURL, a['href']))
    # remove some unnecessary urls
    # NOTE(review): assumes index.html and classics.html occur in pairs; if
    # only index.html were present, the second remove() would raise ValueError.
    while ("http://www.thelatinlibrary.com/index.html" in textsURL):
        textsURL.remove("http://www.thelatinlibrary.com/index.html")
        textsURL.remove("http://www.thelatinlibrary.com/classics.html")
    logger.info("\n".join(textsURL))
    # extract data
    # get titles of Ammanius' collection
    title_dict_amm, date_dict_amm = get_title_and_date(soup)
    verses = []
    for work in textsURL:
        workURL = urllib.request.urlopen(work)
        soup = BeautifulSoup(workURL, "html5lib")
        book, author = get_name_and_author_of_book(soup, workURL)
        date = date_dict_amm
        # go through text to find chapters
        # (the final <p> is dropped -- presumably a footer/navigation
        # paragraph; confirm against the site's markup)
        para = soup.findAll('p')[:-1]
        chapter = "1" #Note that chapters aren't integers.
        verse = 0
        text = ""
        for p in para:
            # make sure it's not a paragraph without the main text
            try:
                if p['class'][0].lower() in ['border', 'pagehead', 'shortborder',
                                             'smallboarder', 'margin', 'internal_navigation']: #not in the main text
                    continue
            except:
                # NOTE(review): bare except deliberately ignores paragraphs
                # without a class attribute (KeyError from p['class']).
                pass
            chap_found = False  # NOTE(review): never read afterwards
            # in other books, chapters are bold or italicized
            potential_chap = p.find('b')
            # some Ammianus texts have italic chapters
            if potential_chap is None:
                potential_chap = p.find('i')
            if potential_chap is not None:
                chapter = potential_chap.find(text=True)
                # Include italicized part in chap name
                italic = potential_chap.i
                if italic is not None:
                    chapter += italic.string
                chapter = chapter.replace("\n", "")
                chapter = chapter.replace(u'\xa0', '')
                chapnum = {'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X',
                           'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI'}
                if chapter in chapnum:
                    chapter = chapter.upper() #Roman numerals need to be uppercase
                else:
                    chapter = chapter.title()
                verse = 0
                continue
            # go through text to find verses
            if (get_verses(p)):
                for i, t in enumerate(get_verses(p)):
                    verse += 1
                    text = t
                    #The following line works in Python 2, but not here.
                    # text = unicode.encode(text, errors="ignore")
                    # add the entry
                    add_entry_to_list(verses, title_dict_amm, book, author, date, chapter,
                                      verse, text, work)
    with sqlite3.connect('texts.db') as db:
        # open cursor
        curs = db.cursor()
        # create the database if it doesn't already exist
        curs.execute(
            'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
            ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
            ' link TEXT, documentType TEXT)')
        db.commit()
        # put it all in the db
        add_to_database(verses,db)
        db.commit()
    logger.info("Process finished")


if __name__ == '__main__':
    main()
| {
"repo_name": "oudalab/phyllo",
"path": "phyllo/extractors/ammianusDB.py",
"copies": "1",
"size": "7041",
"license": "apache-2.0",
"hash": 8362915272768862000,
"line_mean": 35.1076923077,
"line_max": 119,
"alpha_frac": 0.5544666951,
"autogenerated": false,
"ratio": 3.729343220338983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4783809915438983,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.