| INSTRUCTION | RESPONSE |
|---|---|
Convert Result to dict. | def asdict(self, rawkey=False):
r"""Convert Result to dict.
Parameters:
rawkey(bool):
* True: dict key is Descriptor instance
* False: dict key is str
Returns:
dict
"""
if rawkey:
return dict(self.items())
... |
Access descriptor value by descriptor name or instance. | def name(self):
r"""Access descriptor value by descriptor name or instance.
>>> from mordred import Calculator, descriptors
>>> from rdkit import Chem
>>> result = Calculator(descriptors)(Chem.MolFromSmiles("C1CCCCC1"))
>>> result.name["C2SP3"]
6
"""
if ... |
Decorator to log function calls. | def log_calls(func):
'''Decorator to log function calls.'''
def wrapper(*args, **kargs):
callStr = "%s(%s)" % (func.__name__, ", ".join([repr(p) for p in args] + ["%s=%s" % (k, repr(v)) for (k, v) in list(kargs.items())]))
debug(">> %s", callStr)
ret = func(*args, **kargs)
debug("<< %s: %s", callStr... |
Decorator to synchronize function. | def synchronized(func):
'''Decorator to synchronize function.'''
func.__lock__ = threading.Lock()
def synced_func(*args, **kargs):
with func.__lock__:
return func(*args, **kargs)
return synced_func |
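A hypothetical usage sketch for the `synchronized` decorator above (the decorator is repeated here so the snippet runs standalone): the lock attached to the function object serializes all calls across threads.

```python
# Usage sketch for the synchronized decorator above (repeated here so the
# snippet runs standalone); the increment workload is hypothetical.
import threading

def synchronized(func):
    func.__lock__ = threading.Lock()
    def synced_func(*args, **kargs):
        with func.__lock__:
            return func(*args, **kargs)
    return synced_func

counter = 0

@synchronized
def increment():
    global counter
    counter += 1  # serialized by func.__lock__, so no lost updates

threads = [threading.Thread(target=increment) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert counter == 100
```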
Show the current progress message on stderr. This function remembers the previous message so that, next time, it can clear the previous message before showing the next one. | def progress(msg, *args):
    '''Show current progress message to stderr.
    This function will remember the previous message so that next time,
    it will clear the previous message before showing the next one.
'''
# Don't show any progress if the output is directed to a file.
if not (sys.stdout.isatty() and sys.s... |
Program message output. | def message(msg, *args):
'''Program message output.'''
clear_progress()
text = (msg % args)
sys.stdout.write(text + '\n') |
Utility function to handle runtime failures gracefully. Show concise information if possible, then terminate the program. | def fail(message, exc_info=None, status=1, stacktrace=False):
'''Utility function to handle runtime failures gracefully.
Show concise information if possible, then terminate program.
'''
text = message
if exc_info:
text += str(exc_info)
error(text)
if stacktrace:
error(traceback.format_exc())
... |
Get a temp filename for atomic download. | def tempfile_get(target):
'''Get a temp filename for atomic download.'''
fn = '%s-%s.tmp' % (target, ''.join(random.Random().sample("0123456789abcdefghijklmnopqrstuvwxyz", 15)))
TEMP_FILES.add(fn)
return fn |
Atomically rename and clean tempfile | def tempfile_set(tempfile, target):
'''Atomically rename and clean tempfile'''
if target:
os.rename(tempfile, target)
else:
os.unlink(tempfile)
    if tempfile in TEMP_FILES:
TEMP_FILES.remove(tempfile) |
Clean up temp files | def clean_tempfiles():
'''Clean up temp files'''
for fn in TEMP_FILES:
if os.path.exists(fn):
os.unlink(fn) |
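The three temp-file helpers above combine into an atomic-download pattern: write to a randomized `.tmp` name, then rename into place only on success. A minimal sketch, assuming `tempfile_get`/`tempfile_set` above are in scope; `fetch()` is a hypothetical stand-in for the real transfer code.

```python
# Atomic-download pattern built from tempfile_get/tempfile_set above.
# fetch() is a hypothetical placeholder for the real download logic.
def fetch(url, path):
    with open(path, 'wb') as f:
        f.write(b'...payload...')

def download_atomically(url, target):
    tmp = tempfile_get(target)     # randomized '<target>-xxxxx.tmp' name
    try:
        fetch(url, tmp)            # write only into the temp file
        tempfile_set(tmp, target)  # success: atomic os.rename() into place
    except Exception:
        tempfile_set(tmp, None)    # failure: unlink the partial temp file
        raise
```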
Return a tuple of the logger methods: (debug, info, warn, error) | def get_loggers(self):
    '''Return a tuple of the logger methods: (debug, info, warn, error)'''
return self.log.debug, self.log.info, self.log.warn, self.log.error |
Get the fixed part of the path without wildcard | def get_fixed_path(self):
'''Get the fixed part of the path without wildcard'''
pi = self.path.split(PATH_SEP)
fi = []
for p in pi:
if '*' in p or '?' in p:
break
fi.append(p)
return PATH_SEP.join(fi) |
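For illustration, a standalone sketch of the same scan (assuming `PATH_SEP = '/'`): the loop stops at the first path component containing a wildcard, so only the literal prefix is returned.

```python
# Standalone sketch of the get_fixed_path scan, assuming PATH_SEP = '/'.
PATH_SEP = '/'

def get_fixed_path(path):
    fi = []
    for p in path.split(PATH_SEP):
        if '*' in p or '?' in p:   # stop at the first wildcard component
            break
        fi.append(p)
    return PATH_SEP.join(fi)

print(get_fixed_path('bucket/logs/2021-*/app?/part'))  # -> 'bucket/logs'
```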
Given an API name, list all legal parameters using the boto3 service model. | def get_legal_params(self, method):
    '''Given an API name, list all legal parameters using the boto3 service model.'''
if method not in self.client.meta.method_to_api_mapping:
# Injected methods. Ignore.
return []
api = self.client.meta.method_to_api_mapping[method]
shape = self.client.meta.servic... |
Combine existing parameters with extra options supplied from command line options. Carefully merge special types of parameters if needed. | def merge_opt_params(self, method, kargs):
    '''Combine existing parameters with extra options supplied from command line
    options. Carefully merge special types of parameters if needed.
'''
for key in self.legal_params[method]:
if not hasattr(self.opt, key) or getattr(self.opt, key) is None:
... |
Add the whole list of API parameters into optparse. | def add_options(parser):
'''Add the whole list of API parameters into optparse.'''
for param, param_type, param_doc in BotoClient.EXTRA_CLIENT_PARAMS:
parser.add_option('--API-' + param, help=param_doc, type=param_type, dest=param) |
Override original join() with a timeout and handle keyboard interrupt. | def join(self):
'''Override original join() with a timeout and handle keyboard interrupt.'''
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait(1000)
# Child thread has exceptions, fail main thread too.
if self.exc_info:
fail('[T... |
Terminate all threads by deleting the queue and forcing the child threads to quit. | def terminate(self, exc_info=None):
'''Terminate all threads by deleting the queue and forcing the child threads
to quit.
'''
if exc_info:
self.exc_info = exc_info
try:
while self.get_nowait():
self.task_done()
except Queue.Empty:
pass |
Utility function to add a single task into the task queue | def add_task(self, func_name, *args, **kargs):
    '''Utility function to add a single task into the task queue'''
self.tasks.put((func_name, 0, args, kargs)) |
Utility function to wait for all tasks to complete | def join(self):
    '''Utility function to wait for all tasks to complete'''
self.tasks.join()
# Force each thread to break loop.
for worker in self.workers:
self.tasks.put(None)
    # Wait for all threads to terminate.
for worker in self.workers:
worker.join()
worker.s3 = None |
Increase the processed task counter and show a progress message | def processed(self):
    '''Increase the processed task counter and show a progress message'''
self.processed_tasks += 1
qsize = self.tasks.qsize()
if qsize > 0:
progress('[%d task(s) completed, %d remaining, %d thread(s)]', self.processed_tasks, qsize, len(self.workers))
else:
progress('[%d t... |
Retrieve S3 access keys from the environment, or None if not present. | def s3_keys_from_env():
'''Retrieve S3 access keys from the environment, or None if not present.'''
env = os.environ
if S3_ACCESS_KEY_NAME in env and S3_SECRET_KEY_NAME in env:
keys = (env[S3_ACCESS_KEY_NAME], env[S3_SECRET_KEY_NAME])
debug("read S3 keys from environment")
return keys
... |
Retrieve S3 access keys from the command line, or None if not present. | def s3_keys_from_cmdline(opt):
'''Retrieve S3 access keys from the command line, or None if not present.'''
    if opt.access_key is not None and opt.secret_key is not None:
keys = (opt.access_key, opt.secret_key)
debug("read S3 keys from commandline")
return keys
else:
return None |
Retrieve S3 access key settings from s3cmd's config file, if present; otherwise return None. | def s3_keys_from_s3cfg(opt):
'''Retrieve S3 access key settings from s3cmd's config file, if present; otherwise return None.'''
try:
        if opt.s3cfg is not None:
s3cfg_path = "%s" % opt.s3cfg
else:
s3cfg_path = "%s/.s3cfg" % os.environ["HOME"]
if not os.path.exists(s3cfg_path):
... |
Initialize s3 access keys from command line options, environment variables, or the s3cfg config file. | def init_s3_keys(opt):
    '''Initialize s3 access keys from command line options, environment variables, or the s3cfg config file.'''
S3Handler.S3_KEYS = S3Handler.s3_keys_from_cmdline(opt) or S3Handler.s3_keys_from_env() \
or S3Handler.s3_keys_from_s3cfg(opt) |
Connect to S3 storage | def connect(self):
'''Connect to S3 storage'''
try:
if S3Handler.S3_KEYS:
self.s3 = BotoClient(self.opt, S3Handler.S3_KEYS[0], S3Handler.S3_KEYS[1])
else:
self.s3 = BotoClient(self.opt)
except Exception as e:
raise RetryFailure('Unable to connect to s3: %s' % e) |
List all buckets | def list_buckets(self):
'''List all buckets'''
result = []
for bucket in self.s3.list_buckets().get('Buckets') or []:
result.append({
'name': S3URL.combine('s3', bucket['Name'], ''),
'is_dir': True,
'size': 0,
'last_modified': bucket['CreationDate']
})
... |
Walk through a S3 directory. This function initiates a walk with a basedir. It also supports multiple wildcards. | def s3walk(self, basedir, show_dir=None):
    '''Walk through a S3 directory. This function initiates a walk with a basedir.
It also supports multiple wildcards.
'''
# Provide the default value from command line if no override.
if not show_dir:
show_dir = self.opt.show_dir
# trailing slash ... |
Walk through local directories from root basedir | def local_walk(self, basedir):
'''Walk through local directories from root basedir'''
result = []
for root, dirs, files in os.walk(basedir):
for f in files:
result.append(os.path.join(root, f))
return result |
Unix style basename. This function will return 'bar' for '/foo/bar/' instead of an empty string. It is used to normalize the input trailing slash. | def get_basename(self, path):
    '''Unix style basename.
    This function will return 'bar' for '/foo/bar/' instead of an empty string.
    It is used to normalize the input trailing slash.
'''
if path[-1] == PATH_SEP:
path = path[0:-1]
return os.path.basename(path) |
Expand the wildcards for an S3 path. This emulates the shell expansion for wildcards if the input is a local path. | def source_expand(self, source):
    '''Expand the wildcards for an S3 path. This emulates the shell expansion
    for wildcards if the input is a local path.
'''
result = []
if not isinstance(source, list):
source = [source]
for src in source:
# XXX Hacky: We need to disable recursive wh... |
Upload a single file or a directory by adding a task into the queue | def put_single_file(self, pool, source, target):
    '''Upload a single file or a directory by adding a task into the queue'''
if os.path.isdir(source):
if self.opt.recursive:
for f in (f for f in self.local_walk(source) if not os.path.isdir(f)):
target_url = S3URL(target)
# deal with ... |
Upload files to S3. This function can handle multiple file uploads if source is a list. It also works in recursive mode, which copies all files and keeps the directory structure under the given source directory. | def put_files(self, source, target):
    '''Upload files to S3.
    This function can handle multiple file uploads if source is a list.
    It also works in recursive mode, which copies all files and keeps the
    directory structure under the given source directory.
'''
pool = ThreadPool(ThreadUtil, self... |
Use the create_bucket API to create a new bucket | def create_bucket(self, source):
'''Use the create_bucket API to create a new bucket'''
s3url = S3URL(source)
message('Creating %s', source)
if not self.opt.dry_run:
resp = self.s3.create_bucket(Bucket=s3url.bucket)
if resp['ResponseMetadata']["HTTPStatusCode"] == 200:
message('Done... |
Get privileges from metadata of the source in s3 and apply them to target | def update_privilege(self, obj, target):
'''Get privileges from metadata of the source in s3, and apply them to target'''
if 'privilege' in obj['Metadata']:
os.chmod(target, int(obj['Metadata']['privilege'], 8)) |
Print out a series of files | def print_files(self, source):
'''Print out a series of files'''
sources = self.source_expand(source)
for source in sources:
s3url = S3URL(source)
response = self.s3.get_object(Bucket=s3url.bucket, Key=s3url.path)
message('%s', response['Body'].read()) |
Download a single file or a directory by adding a task into the queue | def get_single_file(self, pool, source, target):
    '''Download a single file or a directory by adding a task into the queue'''
if source[-1] == PATH_SEP:
if self.opt.recursive:
basepath = S3URL(source).path
for f in (f for f in self.s3walk(source) if not f['is_dir']):
pool.download(f['... |
Download files. This function can handle multiple files if the source S3 URL has wildcard characters. It also handles recursive mode by downloading all files and keeping the directory structure. | def get_files(self, source, target):
    '''Download files.
    This function can handle multiple files if the source S3 URL has wildcard
    characters. It also handles recursive mode by downloading all files and
    keeping the directory structure.
'''
pool = ThreadPool(ThreadUtil, self.opt)
source = self.... |
Remove remote files that are not present in the local source. (Obsolete) Now used only by the old sync command. | def delete_removed_files(self, source, target):
    '''Remove remote files that are not present in the local source.
    (Obsolete) Now used only by the old sync command.
'''
message("Deleting files found in %s and not in %s", source, target)
if os.path.isdir(source):
unecessary = []
basepath =... |
Copy a single file or a directory by adding a task into the queue | def cp_single_file(self, pool, source, target, delete_source):
    '''Copy a single file or a directory by adding a task into the queue'''
if source[-1] == PATH_SEP:
if self.opt.recursive:
basepath = S3URL(source).path
for f in (f for f in self.s3walk(source) if not f['is_dir']):
pool.co... |
Copy files. This function can handle multiple files if the source S3 URL has wildcard characters. It also handles recursive mode by copying all files and keeping the directory structure. | def cp_files(self, source, target, delete_source=False):
    '''Copy files.
    This function can handle multiple files if the source S3 URL has wildcard
    characters. It also handles recursive mode by copying all files and
    keeping the directory structure.
'''
pool = ThreadPool(ThreadUtil, self.opt)
... |
Delete files on S3 | def del_files(self, source):
'''Delete files on S3'''
src_files = []
for obj in self.s3walk(source):
if not obj['is_dir']: # ignore directories
src_files.append(obj['name'])
pool = ThreadPool(ThreadUtil, self.opt)
pool.batch_delete(src_files)
pool.join() |
Generic version of directory walk. Return a file list without the base path for comparison. | def relative_dir_walk(self, dir):
    '''Generic version of directory walk. Return a file list without the base path
    for comparison.
'''
result = []
if S3URL.is_valid(dir):
basepath = S3URL(dir).path
for f in (f for f in self.s3walk(dir) if not f['is_dir']):
result.append(os.path.relpa... |
Sync directory to directory. | def dsync_files(self, source, target):
'''Sync directory to directory.'''
src_s3_url = S3URL.is_valid(source)
dst_s3_url = S3URL.is_valid(target)
source_list = self.relative_dir_walk(source)
if len(source_list) == 0 or '.' in source_list:
            raise Failure('Sync command needs to sync directory to ... |
Sync files to S3. Does implement deletions if syncing TO s3. Currently identical to get/put -r -f --sync-check, with the exception of deletions. | def sync_files(self, source, target):
    '''Sync files to S3. Does implement deletions if syncing TO s3.
    Currently identical to get/put -r -f --sync-check, with the exception of deletions.
'''
src_s3_url = S3URL.is_valid(source)
dst_s3_url = S3URL.is_valid(target)
if src_s3_url and not dst_s3_url:
... |
Get the size component of the given s3url. If it is a directory, combine the sizes of all the files under that directory. Subdirectories will not be counted unless the --recursive option is set. | def size(self, source):
'''Get the size component of the given s3url. If it is a
directory, combine the sizes of all the files under
that directory. Subdirectories will not be counted unless
--recursive option is set.
'''
result = []
for src in self.source_expand(source):
size... |
Calculate MD5 hash code for a local file | def file_hash(self, filename, block_size=2**20):
'''Calculate MD5 hash code for a local file'''
m = hashlib.md5()
with open(filename, 'rb') as f:
while True:
data = f.read(block_size)
if not data:
break
m.update(data)
return m.hexdigest() |
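A standalone sketch of the same block-wise MD5 pattern: reading in fixed-size chunks keeps memory flat regardless of file size. The `iter()`-with-sentinel form is an equivalent idiom, not the author's exact loop.

```python
# Block-wise MD5, equivalent to file_hash above; the iter()/sentinel loop
# is an idiomatic variant, not the original code.
import hashlib

def md5_of(filename, block_size=2**20):
    m = hashlib.md5()
    with open(filename, 'rb') as f:
        # iter() with a b'' sentinel stops when read() returns no data.
        for block in iter(lambda: f.read(block_size), b''):
            m.update(block)
    return m.hexdigest()
```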
Get or calculate MD5 value of the local file. | def get_md5(self):
'''Get or calculate MD5 value of the local file.'''
if self.md5 is None:
self.md5 = self.file_hash(self.filename)
return self.md5 |
Ensure all directories are created for a given target file. | def mkdirs(self, target):
'''Ensure all directories are created for a given target file.'''
path = os.path.dirname(target)
if path and path != PATH_SEP and not os.path.isdir(path):
        # Multi-threading means there will be interleaved execution
# between the check and creation of the directory.
... |
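The truncated body presumably resolves the check-then-create race noted in the comment; a common sketch (not necessarily the original code) tolerates the `EEXIST` error raised when another thread creates the directory first.

```python
# Common pattern for the race noted above -- a hedged sketch, not the
# original body: tolerate EEXIST when another thread wins the race.
import errno
import os

def mkdirs(target):
    path = os.path.dirname(target)
    if path and not os.path.isdir(path):
        try:
            os.makedirs(path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
```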
Check MD5 for a local file and a remote file. Return True if they have the same md5 hash otherwise False. | def sync_check(self, md5cache, remoteKey):
'''Check MD5 for a local file and a remote file.
Return True if they have the same md5 hash, otherwise False.
'''
if not remoteKey:
return False
if not os.path.exists(md5cache.filename):
return False
localmd5 = md5cache.get_md5()
# c... |
Partially match a path and a filter_path with wildcards. This function will return True if this path partially matches a filter path. This is used for walking through directories with multi-level wildcards. | def partial_match(self, path, filter_path):
    '''Partially match a path and a filter_path with wildcards.
    This function will return True if this path partially matches a filter path.
    This is used for walking through directories with multi-level wildcards.
'''
if not path or not filter_path:
... |
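The body above is truncated; a minimal sketch of a component-wise partial matcher under the same contract (not necessarily the original implementation), comparing path levels with `fnmatch`.

```python
# Hedged sketch of a partial wildcard matcher; NOT the original body.
# Only the levels both paths share need to match for a partial match.
import fnmatch

PATH_SEP = '/'

def partial_match(path, filter_path):
    if not path or not filter_path:
        return True
    for p, f in zip(path.split(PATH_SEP), filter_path.split(PATH_SEP)):
        if not fnmatch.fnmatch(p, f):
            return False
    return True

assert partial_match('bucket/logs', 'bucket/l*/2021-*')     # prefix still matches
assert not partial_match('bucket/tmp', 'bucket/l*/2021-*')  # diverges at level 2
```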
Thread worker for s3walk. Recursively walk into all subdirectories if they still match the filter path partially. | def s3walk(self, s3url, s3dir, filter_path, result):
'''Thread worker for s3walk.
Recursively walk into all subdirectories if they still match the filter
path partially.
'''
paginator = self.s3.get_paginator('list_objects')
filter_path_level = filter_path.count(PATH_SEP)
for page in ... |
Check each file item against the given conditions. | def conditional(self, result, obj):
    '''Check each file item against the given conditions.'''
fileonly = (self.opt.last_modified_before is not None) or (self.opt.last_modified_after is not None)
if obj['is_dir']:
if not fileonly:
result.append(obj)
return
if (self.opt.last_modified_before i... |
Get file splits for upload/download/copy operation. | def get_file_splits(self, id, source, target, fsize, splitsize):
'''Get file splits for upload/download/copy operation.'''
pos = 0
part = 1 # S3 part id starts from 1
mpi = ThreadUtil.MultipartItem(id)
splits = []
while pos < fsize:
chunk = min(splitsize, fsize - pos)
assert(chunk >... |
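The split loop above is truncated; a self-contained sketch of the same chunking arithmetic, producing `(part, pos, chunk)` triples that cover `fsize` bytes exactly (names assumed from the visible code).

```python
# Self-contained sketch of the split arithmetic in get_file_splits above.
def file_splits(fsize, splitsize):
    pos = 0
    part = 1  # S3 part ids start from 1
    splits = []
    while pos < fsize:
        chunk = min(splitsize, fsize - pos)  # last part may be short
        assert chunk > 0
        splits.append((part, pos, chunk))
        pos += chunk
        part += 1
    return splits

# A 25 MiB file with 10 MiB splits -> parts of 10, 10, and 5 MiB.
print(file_splits(25 * 2**20, 10 * 2**20))
```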
Get privileges of a local file | def get_file_privilege(self, source):
'''Get privileges of a local file'''
try:
return str(oct(os.stat(source).st_mode)[-3:])
except Exception as e:
raise Failure('Could not get stat for %s, error_message = %s', source, e) |
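A quick illustration of the permission extraction above: `os.stat().st_mode` includes file-type bits, and the last three octal digits are the Unix permission triplet that gets stored as metadata.

```python
# Illustration of the st_mode slicing used by get_file_privilege above.
import os

mode = os.stat(__file__).st_mode
print(oct(mode))            # e.g. '0o100644' -- file-type bits + permissions
print(str(oct(mode))[-3:])  # e.g. '644' -- the permission triplet
```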
Get the s3 object with the S3 URL. Return None if it does not exist. | def lookup(self, s3url):
    '''Get the s3 object with the S3 URL. Return None if it does not exist.'''
try:
return self.s3.head_object(Bucket=s3url.bucket, Key=s3url.path)
except BotoClient.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 404:
return None
else:
... |
Read local file chunk | def read_file_chunk(self, source, pos, chunk):
'''Read local file chunk'''
        if chunk == 0:
return StringIO()
data = None
with open(source, 'rb') as f:
f.seek(pos)
data = f.read(chunk)
if not data:
raise Failure('Unable to read data from source: %s' % source)
return StringI... |
Thread worker for upload operation. | def upload(self, source, target, mpi=None, pos=0, chunk=0, part=0):
'''Thread worker for upload operation.'''
s3url = S3URL(target)
obj = self.lookup(s3url)
# Initialization: Set up multithreaded uploads.
if not mpi:
fsize = os.path.getsize(source)
md5cache = LocalMD5Cache(source)
... |
Verify the file size of the downloaded file. | def _verify_file_size(self, obj, downloaded_file):
'''Verify the file size of the downloaded file.'''
file_size = os.path.getsize(downloaded_file)
if int(obj['ContentLength']) != file_size:
raise RetryFailure('Downloaded file size inconsistent: %s' % (repr(obj))) |
Write local file chunk | def write_file_chunk(self, target, pos, chunk, body):
'''Write local file chunk'''
fd = os.open(target, os.O_CREAT | os.O_WRONLY)
try:
os.lseek(fd, pos, os.SEEK_SET)
data = body.read(chunk)
num_bytes_written = os.write(fd, data)
        if num_bytes_written != len(data):
raise Retry... |
Thread worker for download operation. | def download(self, source, target, mpi=None, pos=0, chunk=0, part=0):
'''Thread worker for download operation.'''
s3url = S3URL(source)
obj = self.lookup(s3url)
if obj is None:
            raise Failure('The obj "%s" does not exist.' % (s3url.path,))
# Initialization: Set up multithreaded downloads.
... |
Copy a single file from source to target using the boto S3 library. | def copy(self, source, target, mpi=None, pos=0, chunk=0, part=0, delete_source=False):
    '''Copy a single file from source to target using the boto S3 library.'''
if self.opt.dry_run:
message('%s => %s' % (source, target))
return
source_url = S3URL(source)
target_url = S3URL(target)
if not ... |
Thread worker for delete operation. | def delete(self, source):
    '''Thread worker for delete operation.'''
s3url = S3URL(source)
message('Delete %s', source)
if not self.opt.dry_run:
self.s3.delete_object(Bucket=s3url.bucket, Key=s3url.path) |
Delete a list of files in batches of batch_delete_size (default=1000). | def batch_delete(self, sources):
    '''Delete a list of files in batches of batch_delete_size (default=1000).'''
    assert isinstance(sources, list)
if len(sources) == 0:
return
elif len(sources) == 1:
self.delete(sources[0])
elif len(sources) > self.opt.batch_delete_size:
for i in range(0, ... |
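The truncated branch above splits the list into slices of `batch_delete_size` and recurses; a hedged sketch of that batching pattern (the commented `delete_objects` call shape is boto3's, but its use here is an assumption).

```python
# Hedged sketch of the batching pattern suggested by batch_delete above.
def batch(items, size=1000):
    for i in range(0, len(items), size):
        yield items[i:i + size]

print(list(batch(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]

# Assumed per-batch call, using boto3's delete_objects shape:
# for chunk in batch(src_files, opt.batch_delete_size):
#     s3.delete_objects(Bucket=bucket,
#                       Delete={'Objects': [{'Key': k} for k in chunk]})
```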
Main entry to handle commands. Dispatch to individual command handler. | def run(self, args):
'''Main entry to handle commands. Dispatch to individual command handler.'''
if len(args) == 0:
raise InvalidArgument('No command provided')
cmd = args[0]
if cmd + '_handler' in CommandHandler.__dict__:
CommandHandler.__dict__[cmd + '_handler'](self, args)
else:
... |
Validate input parameters with given format. This function also checks for wildcards for recursive mode. | def validate(self, format, args):
'''Validate input parameters with given format.
This function also checks for wildcards for recursive mode.
'''
fmtMap = {
'cmd': 'Command',
's3': 's3 path',
'local': 'local path'
}
fmts = format.split('|')
if len(fmts) != len(args):
... |
Pretty print the result of s3walk. Here we calculate the maximum width of each column and align them. | def pretty_print(self, objlist):
'''Pretty print the result of s3walk. Here we calculate the maximum width
of each column and align them.
'''
def normalize_time(timestamp):
'''Normalize the timestamp format for pretty print.'''
if timestamp is None:
return ' ' * 16
return ... |
Handler for ls command | def ls_handler(self, args):
'''Handler for ls command'''
if len(args) == 1:
self.pretty_print(self.s3handler().list_buckets())
return
self.validate('cmd|s3', args)
self.pretty_print(self.s3handler().s3walk(args[1])) |
Handler for mb command | def mb_handler(self, args):
'''Handler for mb command'''
if len(args) == 1:
raise InvalidArgument('No s3 bucketname provided')
self.validate('cmd|s3', args)
self.s3handler().create_bucket(args[1]) |
Handler for put command | def put_handler(self, args):
'''Handler for put command'''
# Special check for shell expansion
if len(args) < 3:
raise InvalidArgument('Invalid number of parameters')
self.validate('|'.join(['cmd'] + ['local'] * (len(args) - 2) + ['s3']), args)
source = args[1:-1] # shell expansion
targe... |
Handler for get command | def get_handler(self, args):
'''Handler for get command'''
# Special case when we don't have target directory.
if len(args) == 2:
args += ['.']
self.validate('cmd|s3|local', args)
source = args[1]
target = args[2]
self.s3handler().get_files(source, target) |
Handler for cat command | def cat_handler(self, args):
'''Handler for cat command'''
self.validate('cmd|s3', args)
source = args[1]
self.s3handler().print_files(source) |
Handler for dsync command. | def dsync_handler(self, args):
'''Handler for dsync command.'''
self.opt.recursive = True
self.opt.sync_check = True
self.opt.force = True
self.validate('cmd|s3,local|s3,local', args)
source = args[1]
target = args[2]
self.s3handler().dsync_files(source, target) |
Handler for sync command. XXX Here we emulate the sync command with get/put -r -f --sync-check, so it doesn't provide a delete operation. | def sync_handler(self, args):
    '''Handler for sync command.
    XXX Here we emulate the sync command with get/put -r -f --sync-check, so
    it doesn't provide a delete operation.
'''
self.opt.recursive = True
self.opt.sync_check = True
self.opt.force = True
self.validate('cmd|s3,local|s3,lo... |
Handler for cp command | def cp_handler(self, args):
'''Handler for cp command'''
self.validate('cmd|s3|s3', args)
source = args[1]
target = args[2]
self.s3handler().cp_files(source, target) |
Handler for mv command | def mv_handler(self, args):
'''Handler for mv command'''
self.validate('cmd|s3|s3', args)
source = args[1]
target = args[2]
self.s3handler().cp_files(source, target, delete_source=True) |
Handler for del command | def del_handler(self, args):
'''Handler for del command'''
self.validate('cmd|s3', args)
source = args[1]
self.s3handler().del_files(source) |
Handler for size command | def du_handler(self, args):
'''Handler for size command'''
for src, size in self.s3handler().size(args[1:]):
message('%s\t%s' % (size, src)) |
Handler for the total_size command | def _totalsize_handler(self, args):
    '''Handler for the total_size command'''
total_size = 0
for src, size in self.s3handler().size(args[1:]):
total_size += size
message(str(total_size)) |
Search for date information in the string | def match_date(self, value):
'''Search for date information in the string'''
m = self.REGEX_DATE.search(value)
date = datetime.datetime.utcnow().date()
if m:
date = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
value = self.REGEX_DATE.sub('', value)
return (date, value... |
Search for time information in the string | def match_time(self, value):
'''Search for time information in the string'''
m = self.REGEX_TIME.search(value)
time = datetime.datetime.utcnow().time()
if m:
time = datetime.time(int(m.group(1)), int(m.group(2)))
value = self.REGEX_TIME.sub('', value)
return (time, value) |
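For illustration, a hypothetical `REGEX_TIME` consistent with the two captured groups used above (hour, then minute); the real pattern in the source may differ.

```python
# Hypothetical REGEX_TIME matching the m.group(1)/m.group(2) usage above;
# the actual pattern in the source may differ.
import datetime
import re

REGEX_TIME = re.compile(r'(\d{1,2}):(\d{2})')

def match_time(value):
    m = REGEX_TIME.search(value)
    time = datetime.datetime.utcnow().time()
    if m:
        time = datetime.time(int(m.group(1)), int(m.group(2)))
        value = REGEX_TIME.sub('', value)
    return (time, value)

print(match_time('14:30 two days ago'))  # (datetime.time(14, 30), ' two days ago')
```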
Search for timedelta information in the string | def match_delta(self, value):
'''Search for timedelta information in the string'''
m = self.REGEX_DELTA.search(value)
delta = datetime.timedelta(days=0)
if m:
d = int(m.group(1))
if m.group(3) == 'ago' or m.group(3) == 'before':
d = -d
if m.group(2) == 'minute':
delta ... |
Take a JSON string as a dictionary parameter | def check_dict(self, opt, value):
    '''Take a JSON string as a dictionary parameter'''
try:
return json.loads(value)
        except ValueError:
raise optparse.OptionValueError("Option %s: invalid dict value: %r" % (opt, value)) |
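`check_dict` has the signature of an optparse type checker (`self` plays the role of the option object); a hedged sketch of wiring such a checker into a custom `Option` class via optparse's standard `TYPE_CHECKER` extension point. The option name below is made up.

```python
# Hedged sketch: plugging a check_dict-style checker into optparse via
# the standard TYPE_CHECKER extension point. '--API-Config' is hypothetical.
import json
import optparse

def check_dict(option, opt, value):
    try:
        return json.loads(value)
    except ValueError:
        raise optparse.OptionValueError(
            "Option %s: invalid dict value: %r" % (opt, value))

class DictOption(optparse.Option):
    TYPES = optparse.Option.TYPES + ('dict',)
    TYPE_CHECKER = dict(optparse.Option.TYPE_CHECKER, dict=check_dict)

parser = optparse.OptionParser(option_class=DictOption)
parser.add_option('--API-Config', type='dict', dest='config')
opts, _ = parser.parse_args(['--API-Config', '{"a": 1}'])
print(opts.config)  # {'a': 1}
```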
Discover gateways using multicast | def discover_gateways(self):
"""Discover gateways using multicast"""
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_socket.settimeout(5.0)
if self._interface != 'any':
_socket.bind((self._interface, 0))
for gateway in self._gateways_config:
... |
Start listening. | def listen(self):
"""Start listening."""
_LOGGER.info('Creating Multicast Socket')
self._mcastsocket = self._create_mcast_socket()
self._listening = True
thread = Thread(target=self._listen_to_msg, args=())
self._threads.append(thread)
thread.daemon = True
... |
Stop listening. | def stop_listen(self):
"""Stop listening."""
self._listening = False
if self._mcastsocket is not None:
            _LOGGER.info('Closing multicast socket')
self._mcastsocket.close()
self._mcastsocket = None
for thread in self._threads:
thread.join() |
Send data to the gateway to turn a device on or off | def write_to_hub(self, sid, **kwargs):
    """Send data to the gateway to turn a device on or off"""
if self.key is None:
_LOGGER.error('Gateway Key is not provided. Can not send commands to the gateway.')
return False
data = {}
for key in kwargs:
data[key] = kwa... |
Get data from gateway | def get_from_hub(self, sid):
"""Get data from gateway"""
cmd = '{ "cmd":"read","sid":"' + sid + '"}'
resp = self._send_cmd(cmd, "read_ack") if int(self.proto[0:1]) == 1 else self._send_cmd(cmd, "read_rsp")
_LOGGER.debug("read_ack << %s", resp)
return self.push_data(resp) |
Push data broadcasted from gateway to device | def push_data(self, data):
"""Push data broadcasted from gateway to device"""
if not _validate_data(data):
return False
jdata = json.loads(data['data']) if int(self.proto[0:1]) == 1 else _list2map(data['params'])
if jdata is None:
return False
sid = data['... |
Get key using token from gateway | def _get_key(self):
"""Get key using token from gateway"""
init_vector = bytes(bytearray.fromhex('17996d093d28ddb3ba695a2e6f58562e'))
encryptor = Cipher(algorithms.AES(self.key.encode()), modes.CBC(init_vector),
backend=default_backend()).encryptor()
ciphertext... |
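The encryption above is truncated; a hedged sketch of the complete AES-CBC derivation implied by the visible code (encrypt the gateway token with the configured key under the fixed IV, then hex-encode). The function name and the final hex step are assumptions.

```python
# Hedged sketch of the AES-CBC derivation implied by _get_key above;
# derive_gateway_key() and the hexlify step are assumptions. Assumes a
# 16-character key and token (one AES block, so no padding is needed).
import binascii
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def derive_gateway_key(key, token):
    init_vector = bytes(bytearray.fromhex('17996d093d28ddb3ba695a2e6f58562e'))
    encryptor = Cipher(algorithms.AES(key.encode()), modes.CBC(init_vector),
                       backend=default_backend()).encryptor()
    ciphertext = encryptor.update(token.encode()) + encryptor.finalize()
    return binascii.hexlify(ciphertext).decode()
```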
Train your awesome model. | def train(hparams, *args):
"""Train your awesome model.
:param hparams: The arguments to run the model with.
"""
# Initialize experiments and track all the hyperparameters
exp = Experiment(
name=hparams.test_tube_exp_name,
# Location to save the metrics.
save_dir=hparams.log... |
Called by RQ when there is a failure in a worker. | def exception_handler(job, *exc_info):
"""
Called by RQ when there is a failure in a worker.
NOTE: Make sure that in your RQ worker process, rollbar.init() has been called with
handler='blocking'. The default handler, 'thread', does not work from inside an RQ worker.
"""
# Report data about the... |
Patches the pyramid_debugtoolbar ( if installed ) to display a link to the related rollbar item. | def patch_debugtoolbar(settings):
"""
Patches the pyramid_debugtoolbar (if installed) to display a link to the related rollbar item.
"""
try:
from pyramid_debugtoolbar import tbtools
except ImportError:
return
rollbar_web_base = settings.get('rollbar.web_base', DEFAULT_WEB_BASE)... |
Pyramid entry point | def includeme(config):
"""
Pyramid entry point
"""
settings = config.registry.settings
config.add_tween('rollbar.contrib.pyramid.rollbar_tween_factory', over=EXCVIEW)
# run patch_debugtoolbar, unless they disabled it
if asbool(settings.get('rollbar.patch_debugtoolbar', True)):
patc... |
If there s no log configuration set up a default handler. | def _ensure_log_handler(self):
"""
If there's no log configuration, set up a default handler.
"""
if log.handlers:
return
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(m... |
Get the current request object. Implementation varies on library support. Modified below when we know which framework is being used. | def get_request():
"""
Get the current request object. Implementation varies on
library support. Modified below when we know which framework
is being used.
"""
# TODO(cory): add in a generic _get_locals_request() which
# will iterate up through the call stack and look for a variable
# t... |
Saves configuration variables in this module s SETTINGS. | def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw):
"""
Saves configuration variables in this module's SETTINGS.
access_token: project access token. Get this from the Rollbar UI:
- click "Settings" in the top nav
- click "Projects"... |
Decorator for making error handling on AWS Lambda easier | def lambda_function(f):
"""
Decorator for making error handling on AWS Lambda easier
"""
@functools.wraps(f)
def wrapper(event, context):
global _CURRENT_LAMBDA_CONTEXT
_CURRENT_LAMBDA_CONTEXT = context
try:
result = f(event, context)
return wait(lambd... |
Reports an exception to Rollbar, using exc_info (from calling sys.exc_info()) | def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw):
"""
Reports an exception to Rollbar, using exc_info (from calling sys.exc_info())
exc_info: optional, should be the result of calling sys.exc_info(). If omitted, sys.exc_info() will be called here.
r... |
Reports an arbitrary string message to Rollbar. | def report_message(message, level='error', request=None, extra_data=None, payload_data=None):
"""
Reports an arbitrary string message to Rollbar.
message: the string body of the message
level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug'
request: the request object fo... |