code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def getArraysByName(elem, name):
    """Return a list of ligolw Array elements named `name` under `elem`."""
    # Strip any array-type suffix so comparisons use the bare name.
    name = StripArrayName(name)
    return elem.getElements(lambda e: (e.tagName == ligolw.Array.tagName) and (e.Name == name))
def image_export(cls, source_path, dest_url, **kwargs):
    """Export an image to a remote host (via scp) or to the local file system.

    `dest_url` is parsed for its path component; when kwargs['remote_host']
    is set the file is scp'd there, otherwise it is copied locally.
    Raises SDKImageOperationError (rs=21 remote, rs=22 local) on failure.
    """
    dest_path = urlparse.urlparse(dest_url).path
    if kwargs['remote_host']:
        # Remote copy: scp -r to <remote_host>:<dest_path>.
        target_path = ':'.join([kwargs['remote_host'], dest_path])
        command = ' '.join(['/usr/bin/scp',
                            "-P", CONF.zvm.remotehost_sshd_port,
                            "-o StrictHostKeyChecking=no",
                            '-r ', source_path, target_path])
        (rc, output) = zvmutils.execute(command)
        if rc:
            msg = ("Error happened when copying image file to remote "
                   "host with reason: %s" % output)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=21, msg=output)
    else:
        LOG.debug("Remote_host not specified, will copy to local server")
        try:
            shutil.copyfile(source_path, dest_path)
        except Exception as err:
            msg = ("Export image from %(src)s to local file system"
                   " %(dest)s failed: %(err)s" %
                   {'src': source_path,
                    'dest': dest_path,
                    'err': six.text_type(err)})
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=22,
                                                   err=six.text_type(err))
def _syntax_style_changed(self):
    """Refresh the highlighter to match the current syntax style."""
    if self._highlighter is None:
        # Highlighter not constructed yet; nothing to refresh.
        return
    if self.syntax_style:
        self._highlighter._style = create_style_class(self.syntax_style)
        self._highlighter._clear_caches()
    else:
        # No explicit style: fall back to the widget's style sheet.
        self._highlighter.set_style_sheet(self.style_sheet)
def remove_all_files_of_img_id(img_id):
    """Delete the main file and all variant files belonging to `img_id`."""
    files = get_files_by_img_id(img_id, check_hash=False)
    if files:
        os.remove(media_path(files['main']))
        for fn in files['variants'].values():
            os.remove(media_path(fn))
def _log_python_version(self):
self.log.info(
u'Python Version: {}.{}.{}'.format(
sys.version_info.major, sys.version_info.minor, sys.version_info.micro
)
) | Log the current Python version. |
def configure():
    """Load logging configuration from our own defaults.

    Installs a detailed handler in debug mode, a brief one otherwise, and
    sets the root level from the 0-5 verbosity setting.
    """
    # Map CLI verbosity (0..5) onto stdlib logging levels.
    log_levels = {
        5: logging.NOTSET,
        4: logging.DEBUG,
        3: logging.INFO,
        2: logging.WARNING,
        1: logging.ERROR,
        0: logging.CRITICAL
    }
    logging.captureWarnings(True)
    root_logger = logging.getLogger()
    if settings.CFG["debug"]:
        # Debug: include logger name, file:line and level in each record.
        details_format = logging.Formatter(
            '%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s')
        details_hdl = logging.StreamHandler()
        details_hdl.setFormatter(details_format)
        root_logger.addHandler(details_hdl)
    else:
        brief_format = logging.Formatter('%(message)s')
        console_hdl = logging.StreamHandler()
        console_hdl.setFormatter(brief_format)
        root_logger.addHandler(console_hdl)
    root_logger.setLevel(log_levels[int(settings.CFG["verbosity"])])
    configure_plumbum_log()
    configure_migrate_log()
    configure_parse_log()
def validate(tool_class, model_class):
    """Do basic ObjectTool option validation.

    Raises ImproperlyConfigured when 'name' or 'label' is missing, and
    NotImplementedError when the 'view' method is missing.
    """
    for attribute in ('name', 'label'):
        if not hasattr(tool_class, attribute):
            raise ImproperlyConfigured(
                "No '%s' attribute found for tool %s." % (
                    attribute, tool_class.__name__))
    if not hasattr(tool_class, 'view'):
        raise NotImplementedError("No 'view' method found for tool %s." % (
            tool_class.__name__
        ))
def transfer_owner(self, new_owner):
    """Hand ownership to `new_owner` (stripped and lower-cased).

    Only the current owner or an admin may transfer; blank names are refused.
    """
    allowed = self.owner or self.admin
    if not allowed:
        raise RuntimeError("You need more street creed to do this")
    candidate = new_owner.strip().lower()
    if not candidate:
        raise ValueError("Empty strings cannot be new owners")
    self.__set_config_value("owner", candidate)
def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total, to stderr.

    Fix: dict.iteritems() does not exist in Python 3 (this file otherwise
    uses Python 3 features such as f-strings); use items().
    """
    for category, count in self.errors_by_category.items():
        sys.stderr.write('Category \'%s\' errors found: %d\n' %
                         (category, count))
    sys.stderr.write('Total errors found: %d\n' % self.error_count)
def process_boolean(self, tag):
    """Assign the next free bit address to a Boolean tag and advance the register."""
    register = self.normal_register
    tag.set_address(register.current_bit_address)
    register.move_to_next_bit_address()
def _render_string(self, template, *context, **kwargs):
    """Render the given template string using the given context."""
    template = self._to_unicode_hard(template)
    # Defer actual rendering to the engine chosen by _render_final.
    render_func = lambda engine, stack: engine.render(template, stack)
    return self._render_final(render_func, *context, **kwargs)
def freeze(dest_dir, opt):
    """Freeze Secretfile secrets: stage, archive, and encrypt them into `dest_dir`."""
    tmp_dir = ensure_tmpdir()
    dest_prefix = "%s/dest" % tmp_dir
    ensure_dir(dest_dir)
    ensure_dir(dest_prefix)
    config = get_secretfile(opt)
    Context.load(config, opt) \
        .freeze(dest_prefix)
    zip_filename = freeze_archive(tmp_dir, dest_prefix)
    ice_file = freeze_encrypt(dest_dir, zip_filename, config, opt)
    # Remove the scratch directory once the encrypted file exists.
    shutil.rmtree(tmp_dir)
    LOG.debug("Generated file is %s", ice_file)
def _maybe_append_chunk(chunk_info, line_index, column, contents, chunks):
    """Append a chunk built from (start, end) `chunk_info` to `chunks` when it is set."""
    if chunk_info:
        chunks.append(_chunk_from_ranges(contents,
                                         chunk_info[0],
                                         chunk_info[1],
                                         line_index,
                                         column))
def pop_scope(self):
    """Pop the current scope, keeping only child keys that also exist in the parent.

    Child values win for keys present in both scopes.
    """
    child_scope = self.stack.current.current.copy()
    self.stack.current.pop()
    parent_scope = self.stack.current.current.copy()
    self.stack.current.current = {
        key: child_scope[key] for key in child_scope if key in parent_scope
    }
def _consume_errback(failure):
    """Handle errors during consumer registration.

    Logs the failure, records a distinct exit code per failure class, and
    stops the reactor.
    """
    global _exit_code
    if failure.check(exceptions.BadDeclaration):
        _log.error(
            "Unable to declare the %s object on the AMQP broker. The "
            "broker responded with %s. Check permissions for your user.",
            failure.value.obj_type,
            failure.value.reason,
        )
        _exit_code = 10
    elif failure.check(exceptions.PermissionException):
        _exit_code = 15
        _log.error(
            "The consumer could not proceed because of a permissions problem: %s",
            str(failure.value),
        )
    elif failure.check(exceptions.ConnectionException):
        _exit_code = 14
        _log.error(failure.value.reason)
    else:
        # Unknown failure class: exit code 11 signals an unexpected bug.
        _exit_code = 11
        _log.exception(
            "An unexpected error (%r) occurred while registering the "
            "consumer, please report this bug.",
            failure.value,
        )
    try:
        reactor.stop()
    except error.ReactorNotRunning:
        pass
def createDatabase(self, name, **dbArgs) :
    """Create an ArangoDB database `name` and return its Database wrapper.

    Use dbArgs for arguments other than name; for a full list of arguments
    please have a look at ArangoDB's documentation.
    Raises CreationError when the server does not answer 201 without error.
    """
    dbArgs['name'] = name
    payload = json.dumps(dbArgs, default=str)
    url = self.URL + "/database"
    r = self.session.post(url, data = payload)
    data = r.json()
    if r.status_code == 201 and not data["error"] :
        db = Database(self, name)
        self.databases[name] = db
        return self.databases[name]
    else :
        raise CreationError(data["errorMessage"], r.content)
def store_value(self, name, value):
    """Persist `value` under `name` in this settings group via the spine command bus."""
    self.spine.send_command("storeSetting", self.group, name, value)
def _examine_val(self, k, val):
    """Resolve `val`: zero-argument callables are invoked once and memoized by id;
    anything else passes through unchanged. Should only be called internally.
    """
    if not isinstance(val, (types.FunctionType, partial)): return val
    vid = id(val)
    if vid in self._memoized:
        return self._memoized[vid]
    elif [] != getargspec_py27like(val)[0]:
        # Callable takes positional args: not a lazy thunk, return as-is.
        return val
    else:
        val = val()
        # Bypass attribute protection to record the memoized result.
        object.__setattr__(self, '_memoized', self._memoized.set(vid, val))
        return val
def consume(self, msg):
    """Forward a message from the bus to every IRC client whose filters accept it."""
    log.debug("Got message %r" % msg)
    topic, body = msg.get('topic'), msg.get('body')
    for client in self.irc_clients:
        if not client.factory.filters or (
            client.factory.filters and
            self.apply_filters(client.factory.filters, topic, body)
        ):
            raw_msg = self.prettify(
                topic=topic,
                msg=body,
                pretty=client.factory.pretty,
                terse=client.factory.terse,
                short=client.factory.short,
            )
            # NOTE(review): the getattr default is the *string* 'notice',
            # which is not callable if irc_method names a missing attribute —
            # presumably client.notice was intended; confirm.
            send = getattr(client, self.hub.config['irc_method'], 'notice')
            send(client.factory.channel, raw_msg.encode('utf-8'))
            # Warn to the channel every 20 backlogged messages.
            backlog = self.incoming.qsize()
            if backlog and (backlog % 20) == 0:
                warning = "* backlogged by %i messages" % backlog
                log.warning(warning)
                send(client.factory.channel, warning.encode('utf-8'))
def sort(self, cmp=None, key=None, reverse=False):
    """Sort the table; defaults the key to KeyValue when key columns are configured."""
    if not key and self._keys:
        key = self.KeyValue
    super(CliTable, self).sort(cmp=cmp, key=key, reverse=reverse)
def import_class(class_path):
    """Import and return the class named by dotted `class_path` ('pkg.module.ClassName')."""
    module_name, _, class_name = class_path.rpartition(".")
    module = import_module(module_name)
    return getattr(module, class_name)
def extract_payload(self, request, verify=True, *args, **kwargs):
    """Extract and return the payload from `request`, verifying unless verify=False."""
    return self._verify(
        request, return_payload=True, verify=verify, *args, **kwargs
    )
def rpyhttp(value):
    """Convert a no-namespace pyuri ('prefix_<b64uri>_rest...') back to a standard URI.

    Values already starting with 'http' pass through; anything that fails
    to split or decode is returned unchanged.
    """
    if value.startswith("http"):
        return value
    segments = value.split("_")
    try:
        uri = base64.b64decode(segments[1]).decode()
        return uri + "_".join(segments[2:])
    except (IndexError, UnicodeDecodeError, binascii.Error):
        return value
def getBoneHierarchy(self, action, unIndexArayCount):
    """Fill an array with each bone's parent index for the skeleton of `action`.

    Returns (result, pParentIndices) from the underlying OpenVR call.
    """
    fn = self.function_table.getBoneHierarchy
    pParentIndices = BoneIndex_t()
    result = fn(action, byref(pParentIndices), unIndexArayCount)
    return result, pParentIndices
def to_detach(b:Tensors, cpu:bool=True):
    "Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
    if is_listy(b): return [to_detach(o, cpu) for o in b]
    # Non-tensors pass through unchanged.
    if not isinstance(b,Tensor): return b
    b = b.detach()
    return b.cpu() if cpu else b
def graphql_queries_to_json(*queries):
    """Pack GraphQL query objects into a JSON object keyed q0, q1, ..."""
    payload = {"q{}".format(index): query.value
               for index, query in enumerate(queries)}
    return json.dumps(payload)
def __put_slice_in_slim(slim, dataim, sh, i):
a, b = np.unravel_index(int(i), sh)
st0 = int(dataim.shape[0] * a)
st1 = int(dataim.shape[1] * b)
sp0 = int(st0 + dataim.shape[0])
sp1 = int(st1 + dataim.shape[1])
slim[
st0:sp0,
st1:sp1
] = dataim
return slim | put one small slice as a tile in a big image |
def executeSQL(self, sql, args=()):
    """Execute an UPDATE or INSERT statement and return the last inserted row id.

    Statements run inside a transaction are also recorded for later replay.
    """
    sql = self._execSQL(sql, args)
    result = self.cursor.lastRowID()
    if self.executedThisTransaction is not None:
        self.executedThisTransaction.append((result, sql, args))
    return result
def _len_ea_entry(self):
    """Return the size of the entry: fixed header + ASCII-encoded name + value length."""
    return EaEntry._REPR.size + len(self.name.encode("ascii")) + self.value_len
def execute_command(command=None):
    """Execute `command` (an argv list, no shell) and return ((stdout, stderr), returncode).

    Fix: Popen.communicate() returns (stdout, stderr) — the second element
    was misleadingly named `stdin`. communicate() also reaps the process,
    so the redundant wait() call was dropped.
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    return (stdout, stderr), process.returncode
def reset_script(self):
    """Clear any partially received script and return the bridge to the idle state."""
    self.remote_bridge.status = BRIDGE_STATUS.IDLE
    self.remote_bridge.error = 0
    self.remote_bridge.parsed_script = None
    self._device.script = bytearray()
    # RPC-style success payload.
    return [0]
def render_in_browser(self, **kwargs):
    """Render the graph and open it in the default browser (requires lxml)."""
    try:
        from lxml.html import open_in_browser
    except ImportError:
        raise ImportError('You must install lxml to use render in browser')
    # Prefer https for external resources unless the caller overrides it.
    kwargs.setdefault('force_uri_protocol', 'https')
    open_in_browser(self.render_tree(**kwargs), encoding='utf-8')
def add(assetclass: int, symbol: str):
    """Add stock `symbol` (normalized to uppercase) to the asset class with id `assetclass`."""
    assert isinstance(symbol, str)
    assert isinstance(assetclass, int)
    symbol = symbol.upper()
    app = AppAggregate()
    new_item = app.add_stock_to_class(assetclass, symbol)
    print(f"Record added: {new_item}.")
async def queue_declare(self):
    """Declare this consumer's queue on the channel; override to change declaration."""
    await self.channel.queue_declare(
        self.queue,
        durable=self.durable,
        exclusive=self.exclusive,
        no_wait=self.no_wait
    )
def remaining_time(self):
    """Estimate the time remaining, extrapolating elapsed time over `self.progress` percent."""
    elapsed_time = (datetime.datetime.now() - self.start_time).total_seconds()
    if self.progress == 0:
        # NOTE(review): mutates self.progress to dodge division by zero —
        # a local guard may have been intended; confirm callers expect this.
        self.progress = 1
    estimated_total_time = 100. / self.progress * elapsed_time
    return datetime.timedelta(seconds = max(estimated_total_time - elapsed_time, 0))
def prebuild_arch(self, arch):
    """Create the per-architecture build directory if it does not exist yet."""
    path = self.get_build_dir(arch.arch)
    if not exists(path):
        info("creating {}".format(path))
        shprint(sh.mkdir, '-p', path)
def remove_zone(self, zone_id):
    """Remove the zone whose 'id' equals `zone_id` from the form's zone list."""
    zone_map = self.my_osid_object_form._my_map
    zone_map['zones'] = [zone for zone in zone_map['zones']
                         if zone['id'] != zone_id]
def request(self, method, params):
    """Send a JSON-RPC request over the transport and wait for its reply.

    A random id correlates request and response; a receiver elsewhere sets
    self._buffer[id]['data'] and fires the Event.

    Fix: only the 'data' key was deleted, so the per-request buffer entry
    (and its Event) accumulated forever — drop the whole entry instead.
    """
    identifier = random.randint(1, 1000)
    self._transport.write(jsonrpc_request(method, identifier, params))
    self._buffer[identifier] = {'flag': asyncio.Event()}
    yield from self._buffer[identifier]['flag'].wait()
    result = self._buffer[identifier]['data']
    del self._buffer[identifier]
    return result
def _generate_configs_from_default(self, overrides=None):
    """Return a copy of DEFAULT_CONFIG with `overrides` applied on top."""
    config = DEFAULT_CONFIG.copy()
    config.update(overrides or {})
    return config
def decode(self, byteStream):
    """Decode an RTP packet: split the byte stream into fixed-size header and payload."""
    self.header = bytearray(byteStream[:HEADER_SIZE])
    self.payload = byteStream[HEADER_SIZE:]
def rename_tabs_after_change(self, given_name):
    """Rename the current client's tab (and related clients) after a name change.

    The new name is rejected — and the tab name reset — when renaming is
    disallowed, the name contains '/', or another client already uses it.
    """
    client = self.get_current_client()
    repeated = any(
        id(other) != id(client) and other.given_name == given_name
        for other in self.get_clients()
    )
    renamable = client.allow_rename and u'/' not in given_name and not repeated
    self.rename_client_tab(client, given_name if renamable else None)
    if renamable:
        for related in self.get_related_clients(client):
            self.rename_client_tab(related, given_name)
def getSharedReports(self):
    """Fetch all shared (banner) reports and wrap them as PingdomSharedReport instances."""
    response = self.request('GET',
                            'reports.shared').json()['shared']['banners']
    reports = [PingdomSharedReport(self, x) for x in response]
    return reports
def from_xsc(cls, path):
    """Return a 3x3 u.Quantity of box vectors (in angstroms) read from an XSC file."""
    def parse(path):
        # Line 2 holds the column names (after a leading marker);
        # line 3 holds the most recent values.
        with open(path) as f:
            lines = f.readlines()
            NamedXsc = namedtuple('NamedXsc', lines[1].split()[1:])
            return NamedXsc(*map(float, lines[2].split()))
    xsc = parse(path)
    return u.Quantity([[xsc.a_x, xsc.a_y, xsc.a_z],
                       [xsc.b_x, xsc.b_y, xsc.b_z],
                       [xsc.c_x, xsc.c_y, xsc.c_z]], unit=u.angstroms)
def _get_result_paths(self,data):
    """Return a dict mapping 'Tree' to the ResultPath of the generated tree file."""
    result = {}
    # The output tree shares the input basename with a '.tree' extension.
    result['Tree'] = ResultPath(Path=splitext(self._input_filename)[0] + \
        '.tree')
    return result
def _domain_event_block_job_cb(conn, domain, disk, job_type, status, opaque):
    """libvirt block-job event handler: forward the event onto the Salt event bus."""
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
        'disk': disk,
        'type': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_TYPE_', job_type),
        'status': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_', status)
    })
def jsonload(model, fp):
    """Load a Versa model dumped into JSON form, either raw or canonical.

    Canonical dumps wrap each link as [sid, [s, p, o, a]]; raw dumps are
    bare [s, p, o, a] lists.
    """
    dumped_list = json.load(fp)
    for link in dumped_list:
        if len(link) == 2:
            sid, (s, p, o, a) = link
        elif len(link) == 4:
            (s, p, o, a) = link
        tt = a.get('@target-type')
        if tt == '@iri-ref':
            # Rehydrate the target as an IRI object.
            o = I(o)
            a.pop('@target-type', None)
        else:
            # NOTE(review): links without '@target-type' == '@iri-ref' are
            # skipped entirely — confirm non-IRI targets really should not
            # be added to the model.
            continue
        model.add(s, p, o, a)
    return
def send_request(self, http_request):
    """Build a socket and raw request from `http_request`, send it, then read the response."""
    self.request_object = http_request
    self.build_socket()
    self.build_request()
    try:
        self.sock.send(self.request)
    except socket.error as err:
        raise errors.TestError(
            'We were unable to send the request to the socket',
            {
                'msg': err,
                'function': 'http.HttpUA.send_request'
            })
    finally:
        # NOTE(review): get_response() runs even when send failed and
        # TestError is propagating — confirm this is intentional.
        self.get_response()
def sortByIndex(self, index):
    """Sort the index table by column `index`, mirroring the level table's sort order."""
    self.table_level.horizontalHeader().setSortIndicatorShown(True)
    sort_order = self.table_level.horizontalHeader().sortIndicatorOrder()
    self.table_index.model().sort(index, sort_order)
    self._sort_update()
def end_step(self, lineno, timestamp=None, result_code=None):
    """Fill in the current step's summary and mark the step as finished."""
    self.state = self.STATES['step_finished']
    step_errors = self.sub_parser.get_artifact()
    step_error_count = len(step_errors)
    if step_error_count > settings.PARSER_MAX_STEP_ERROR_LINES:
        # Cap stored errors and flag the truncation in the artifact.
        step_errors = step_errors[:settings.PARSER_MAX_STEP_ERROR_LINES]
        self.artifact["errors_truncated"] = True
    self.current_step.update({
        "finished": timestamp,
        "finished_linenumber": lineno,
        # Unknown result codes map to "unknown".
        "result": self.RESULT_DICT.get(result_code, "unknown"),
        "errors": step_errors
    })
    self.sub_parser.clear()
def _info(self, args, **extra_args):
    """Print Freight Forwarder environment and version info for the user.

    Raises TypeError if `args` is not an argparse.Namespace.

    Fix: the original did `raise logger.error(Exception(...))` — but
    logger.error() returns None, and `raise None` is itself a TypeError
    with a confusing message. Log the problem, then raise an explicit
    TypeError (callers still observe a TypeError, as before).
    """
    if not isinstance(args, argparse.Namespace):
        message = "args should be an instance of argparse.Namespace"
        logger.error(message)
        raise TypeError(message)
    logger.info("Freight Forwarder: {0}".format(VERSION))
    logger.info("docker-py: {0}".format(docker_py_version))
    logger.info("Docker Api: {0}".format(DOCKER_API_VERSION))
    logger.info("{0} version: {1}".format(platform.python_implementation(), platform.python_version()))
def get(cls, fpath=None, create_missing=True):
    """Retrieve the `Config` in `fpath`, creating a default one when missing and allowed."""
    fpath = _expand_path(fpath or cls.DEFAULT_CONFIG_PATH)
    if not fpath.exists() and create_missing: cls.create(fpath)
    # NOTE(review): assert is stripped under -O; consider raising instead.
    assert fpath.exists(), f'Could not find config at: {fpath}. Please create'
    with open(fpath, 'r') as yaml_file: return yaml.safe_load(yaml_file)
def __prefix_key(self, key):
if self.prefix is None:
return key
if key.startswith(self.prefix + "-"):
return key
return "{0}-{1}".format(self.prefix, key) | This will add the prefix to the key if one exists on the store |
def _call_watcher(self_, watcher, event):
    """Invoke `watcher` for `event`, honoring trigger, batching, and onlychanged semantics."""
    if self_.self_or_cls.param._TRIGGER:
        # Explicit trigger: always deliver, even if the value is unchanged.
        pass
    elif watcher.onlychanged and (not self_._changed(event)):
        return
    if self_.self_or_cls.param._BATCH_WATCH:
        # Batched mode: queue the event; watchers run when the batch ends.
        self_._events.append(event)
        if watcher not in self_._watchers:
            self_._watchers.append(watcher)
    elif watcher.mode == 'args':
        with batch_watch(self_.self_or_cls, run=False):
            watcher.fn(self_._update_event_type(watcher, event, self_.self_or_cls.param._TRIGGER))
    else:
        # 'kwargs' mode: pass the new value keyed by the parameter name.
        with batch_watch(self_.self_or_cls, run=False):
            event = self_._update_event_type(watcher, event, self_.self_or_cls.param._TRIGGER)
            watcher.fn(**{event.name: event.new})
def _handle_github(self):
    """Prompt the user for how to handle a crash and dispatch to _process_<choice>()."""
    value = click.prompt(
        _BUG + click.style(
            '1. Open an issue by typing "open";\n',
            fg='green',
        ) + click.style(
            '2. Print human-readable information by typing '
            '"print";\n',
            fg='yellow',
        ) + click.style(
            '3. See the full traceback without submitting details '
            '(default: "ignore").\n\n',
            fg='red',
        ) + 'Please select an action by typing its name',
        type=click.Choice([
            'open',
            'print',
            'ignore',
        ], ),
        default='ignore',
    )
    # Dispatch to _process_open / _process_print / _process_ignore.
    getattr(self, '_process_' + value)()
def usage(ecode, msg=''):
    """Print the module docstring (and optional `msg`) to stderr, then exit with `ecode`.

    Fix: converted Python 2 `print >>` statements to Python 3 print()
    calls — the original is a syntax error on Python 3.
    """
    print(__doc__, file=sys.stderr)
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(ecode)
def _extract_file(self, tgz, tarinfo, dst_path, buffer_size=10<<20):
    """Extract `tarinfo` from open tarfile `tgz` and stream it to `dst_path`.

    Copies in `buffer_size` chunks, logging progress per chunk.
    Fix: the source/destination handles leaked if a read or write raised;
    close them in a finally block.
    """
    src = tgz.extractfile(tarinfo)
    dst = tf_v1.gfile.GFile(dst_path, "wb")
    try:
        while True:
            buf = src.read(buffer_size)
            if not buf:
                break
            dst.write(buf)
            self._log_progress(len(buf))
    finally:
        dst.close()
        src.close()
def description(self):
    """Return an HTML description: the release body rendered from markdown,
    falling back to the repository description, then a placeholder."""
    body = self.release.get('body')
    if body:
        return markdown(body)
    repo_description = self.repository.get('description')
    if repo_description:
        return repo_description
    return 'No description provided.'
def walk(self, top, topdown=True, onerror=None, **kwargs):
    """Walk an HDFS tree rooted at `top`, yielding (path, dirnames, filenames).

    Mirrors os.walk: `topdown` controls yield order; a listing error is
    passed to `onerror` (if given) and prunes that subtree.
    """
    try:
        listing = self.list_status(top, **kwargs)
    except HdfsException as e:
        if onerror is not None:
            onerror(e)
        return
    dirnames, filenames = [], []
    for f in listing:
        if f.type == 'DIRECTORY':
            dirnames.append(f.pathSuffix)
        elif f.type == 'FILE':
            filenames.append(f.pathSuffix)
        else:
            raise AssertionError("Unexpected type {}".format(f.type))
    if topdown:
        yield top, dirnames, filenames
    for name in dirnames:
        new_path = posixpath.join(top, name)
        for x in self.walk(new_path, topdown, onerror, **kwargs):
            yield x
    if not topdown:
        yield top, dirnames, filenames
def extract_geo(self):
    """Collect geo-related EXIF fields (lat/lon, altitude, dop) into a dict,
    omitting any value that is missing."""
    altitude = self.extract_altitude()
    dop = self.extract_dop()
    lon, lat = self.extract_lon_lat()
    geo = {}
    if lon is not None and lat is not None:
        geo['latitude'] = lat
        geo['longitude'] = lon
    if altitude is not None:
        geo['altitude'] = altitude
    if dop is not None:
        geo['dop'] = dop
    return geo
def to_reminders(self, ical, label=None, priority=None, tags=None,
                 tail=None, sep=" ", postdate=None, posttime=None):
    """Return concatenated Remind commands for all events of an iCalendar
    (empty string when it has no vevent_list)."""
    if not hasattr(ical, 'vevent_list'):
        return ''
    reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,
                                postdate, posttime)
                 for vevent in ical.vevent_list]
    return ''.join(reminders)
def connect(self, mode='default_reset'):
    """Try connecting repeatedly until successful, or raise FatalError.

    Alternates plain attempts with the ESP32 rev-0 delay workaround, for
    up to seven rounds.
    """
    print('Connecting...', end='')
    sys.stdout.flush()
    last_error = None
    try:
        for _ in range(7):
            last_error = self._connect_attempt(mode=mode, esp32r0_delay=False)
            if last_error is None:
                return
            # Retry with the silicon-rev-0 timing workaround.
            last_error = self._connect_attempt(mode=mode, esp32r0_delay=True)
            if last_error is None:
                return
    finally:
        # Terminate the progress line whether or not we connected.
        print('')
    raise FatalError('Failed to connect to %s: %s' % (self.CHIP_NAME, last_error))
def _flds_append(flds, addthese, dont_add):
for fld in addthese:
if fld not in flds and fld not in dont_add:
flds.append(fld) | Retain order of fields as we add them once to the list. |
def auth(username, password):
    """Authenticate `username`/`password` against a MySQL user table.

    Returns True iff the configured auth SQL matches exactly one row;
    False on missing config or connection failure.

    Fix: the connection (and its cursor) were never closed — close them
    in a finally block.

    SECURITY NOTE: the credentials are interpolated into the SQL with
    str.format(), which is vulnerable to SQL injection. The auth_sql
    config should be rewritten to use parameterized queries, e.g.
    cur.execute(sql, (username, password)).
    """
    _info = __get_connection_info()
    if _info is None:
        return False
    try:
        conn = MySQLdb.connect(_info['hostname'],
                               _info['username'],
                               _info['password'],
                               _info['database'])
    except OperationalError as e:
        log.error(e)
        return False
    try:
        cur = conn.cursor()
        # WARNING: format() interpolation — injection risk (see docstring).
        cur.execute(_info['auth_sql'].format(username, password))
        return cur.rowcount == 1
    finally:
        conn.close()
def visit_assignname(self, node, parent, node_name=None):
    """Build an AssignName node for `node`, record the assignment, and return it."""
    newnode = nodes.AssignName(
        node_name,
        getattr(node, "lineno", None),
        getattr(node, "col_offset", None),
        parent,
    )
    self._save_assignment(newnode)
    return newnode
def process(self):
    """Loop over the underlying queue of events forever, filtering then dispatching each."""
    assert self.queue is not None
    while True:
        event = self.queue.get()
        if self.pre_process_event(event):
            self.invoke_handlers(event)
        self.queue.task_done()
def _run_command(self, packet):
    """Execute a run-command packet from the client.

    When no client state exists yet, a temporary app is created targeting
    the packet's pane, and the connection is closed after the command runs.
    """
    create_temp_cli = self.client_states is None
    if create_temp_cli:
        # No attached client: build a temporary app for the given pane.
        pane_id = int(packet['pane_id'])
        self._create_app()
        with set_app(self.client_state.app):
            self.pymux.arrangement.set_active_window_from_pane_id(pane_id)
    with set_app(self.client_state.app):
        try:
            self.pymux.handle_command(packet['data'])
        finally:
            self._close_connection()
def search(self, keyword, children=None, arg=None):
    """Return receiver's substatements whose keyword matches `keyword`
    (and whose arg matches `arg`, when given).

    Searches `children` when provided, otherwise self.substmts.
    """
    pool = self.substmts if children is None else children
    matches = []
    for stmt in pool:
        if stmt.keyword != keyword:
            continue
        if arg is not None and stmt.arg != arg:
            continue
        matches.append(stmt)
    return matches
def fetch_all(self, R, depth=1, **kwargs):
    """Request multiple objects from the API; re-raise the fetch error if one occurred."""
    d, e = self._fetcher.fetch_all(R, depth, kwargs)
    if e: raise e
    return d
def enb64_app(parser, cmd, args):
    """CLI entry point: base64-encode the given value (or stdin when omitted)."""
    parser.add_argument('value', help='the value to base64 encode, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    return enb64(pwnypack.main.binary_value_or_stdin(args.value))
def open(self):
    """Open an existing database file and load fields/records/indices into memory.

    Fix: the file handle leaked if any cPickle.load() raised — use a
    context manager so it is always closed.
    """
    # Protocol 0 pickles are text; newer protocols are binary.
    file_mode = 'r' if self.protocol == 0 else 'rb'
    with open(self.name, file_mode) as _in:
        self.fields = cPickle.load(_in)
        self.next_id = cPickle.load(_in)
        self.records = cPickle.load(_in)
        self.indices = cPickle.load(_in)
    # Rebuild the per-field index accessors.
    for f in self.indices.keys():
        setattr(self, '_' + f, Index(self, f))
    self.mode = "open"
    return self
def init_sentry(self,):
    """Initialize sentry.io error logging for this session (no-op when disabled)."""
    if not self.use_sentry:
        return
    sentry_config = self.keychain.get_service("sentry")
    # Tag every event with repo/branch/commit and the CLI version.
    tags = {
        "repo": self.repo_name,
        "branch": self.repo_branch,
        "commit": self.repo_commit,
        "cci version": cumulusci.__version__,
    }
    tags.update(self.config.get("sentry_tags", {}))
    env = self.config.get("sentry_environment", "CumulusCI CLI")
    self.sentry = raven.Client(
        dsn=sentry_config.dsn,
        environment=env,
        tags=tags,
        processors=("raven.processors.SanitizePasswordsProcessor",),
    )
def roll(rest, nick):
    "Roll a die, default = 100."
    sides = int(rest.strip()) if rest else 100
    result = random.randint(1, sides)
    return "%s rolls %s" % (nick, result)
def launch_ssh(self, name, port):
    """Replace this child process with the configured ssh command for host `name`."""
    if options.user:
        name = '%s@%s' % (options.user, name)
    evaluated = options.ssh % {'host': name, 'port': port}
    # If the template had no placeholders, append the host explicitly.
    if evaluated == options.ssh:
        evaluated = '%s %s' % (evaluated, name)
    os.execlp('/bin/sh', 'sh', '-c', evaluated)
def _set_token(self):
    """Set self.token: the CERBERUS_TOKEN environment variable wins,
    otherwise user-auth or AWS-auth is used depending on configuration.

    Fix: the bare `except: pass` swallowed every exception (including
    KeyboardInterrupt and real errors from logging); only the expected
    KeyError for the missing environment variable is now suppressed.
    """
    try:
        self.token = os.environ['CERBERUS_TOKEN']
    except KeyError:
        pass
    else:
        if self.verbose:
            print("Overriding Cerberus token with environment variable.", file=sys.stderr)
        logger.info("Overriding Cerberus token with environment variable.")
        return
    if self.username:
        ua = UserAuth(self.cerberus_url, self.username, self.password)
        self.token = ua.get_token()
    else:
        awsa = AWSAuth(self.cerberus_url, region=self.region, aws_session=self.aws_session, verbose=self.verbose)
        self.token = awsa.get_token()
def metadata_sorter(x, y):
    """Comparator sorting metadata keys by priority.

    Keys in METADATA_SORTER_FIRST come first (in that order), then normal
    keys alphabetically, then underscore-prefixed keys last (compared
    without the underscore).

    Fix: the builtin cmp() was removed in Python 3; use a local helper.
    """
    def _cmp(a, b):
        # Python 2 cmp() equivalent: -1, 0 or 1.
        return (a > b) - (a < b)
    if x == y:
        return 0
    if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST:
        return -1 if METADATA_SORTER_FIRST.index(x) < METADATA_SORTER_FIRST.index(y) else 1
    elif x in METADATA_SORTER_FIRST:
        return -1
    elif y in METADATA_SORTER_FIRST:
        return 1
    else:
        if x.startswith('_') and y.startswith('_'):
            return _cmp(x[1:], y[1:])
        elif x.startswith('_'):
            return 1
        elif y.startswith('_'):
            return -1
        else:
            return _cmp(x, y)
async def get_source_list(self, scheme: str = "") -> List[Source]:
    """Return available sources for playback, optionally filtered by URI scheme."""
    res = await self.services["avContent"]["getSourceList"](scheme=scheme)
    return [Source.make(**x) for x in res]
def information(filename):
    """Return the exif data (first result dict) for `filename`."""
    check_if_this_file_exist(filename)
    # Normalize to an absolute path before querying.
    filename = os.path.abspath(filename)
    result = get_json(filename)
    result = result[0]
    return result
def _send_flow(self, active):
    """Send an AMQP channel.flow control frame and register the flow-ok callback."""
    args = Writer()
    args.write_bit(active)
    # Class 20 (channel), method 20 (flow).
    self.send_frame(MethodFrame(self.channel_id, 20, 20, args))
    self.channel.add_synchronous_cb(self._recv_flow_ok)
def iter_transport_opts(opts):
    """Yield (transport, opts) pairs for all master-configured transports.

    Each override dict in opts['transport_opts'] yields a copy of `opts`
    with the overrides applied and 'transport' set; the primary
    opts['transport'] is yielded last (unmodified) unless already covered.

    Fix: dropped the unnecessary six.iteritems() — the file otherwise
    targets Python 3, where dict.items() is the idiom.
    """
    transports = set()
    for transport, opts_overrides in opts.get('transport_opts', {}).items():
        t_opts = dict(opts)
        t_opts.update(opts_overrides)
        t_opts['transport'] = transport
        transports.add(transport)
        yield transport, t_opts
    if opts['transport'] not in transports:
        yield opts['transport'], opts
def grep_file(query, item):
    """Return 'filename: line' strings for every line of file `item` matching regex `query`.

    Fix: the file handle was opened without ever being closed — use a
    context manager.
    """
    with open(item) as handle:
        return ['%s: %s' % (item, line) for line in handle
                if re.search(query, line)]
def _create_info_struct(file, mode, samplerate, channels,
                        format, subtype, endian):
    """Check arguments and create the libsndfile SF_INFO struct.

    For write modes (and RAW reads) samplerate/channels/format are
    required; for existing files they must all be None.
    """
    original_format = format
    if format is None:
        format = _get_format_from_filename(file, mode)
        assert isinstance(format, (_unicode, str))
    else:
        _check_format(format)
    info = _ffi.new("SF_INFO*")
    if 'r' not in mode or format.upper() == 'RAW':
        if samplerate is None:
            raise TypeError("samplerate must be specified")
        info.samplerate = samplerate
        if channels is None:
            raise TypeError("channels must be specified")
        info.channels = channels
        info.format = _format_int(format, subtype, endian)
    else:
        # Existing (non-RAW) files carry their own metadata.
        if any(arg is not None for arg in (
                samplerate, channels, original_format, subtype, endian)):
            raise TypeError("Not allowed for existing files (except 'RAW'): "
                            "samplerate, channels, format, subtype, endian")
    return info
def event_table(events):
    """Format a table of upcoming events for display."""
    table = formatting.Table([
        "Id",
        "Start Date",
        "End Date",
        "Subject",
        "Status",
        "Acknowledged",
        "Updates",
        "Impacted Resources"
    ], title="Upcoming Events")
    # Left-align the free-text columns.
    table.align['Subject'] = 'l'
    table.align['Impacted Resources'] = 'l'
    for event in events:
        table.add_row([
            event.get('id'),
            utils.clean_time(event.get('startDate')),
            utils.clean_time(event.get('endDate')),
            utils.clean_splitlines(event.get('subject')),
            utils.lookup(event, 'statusCode', 'name'),
            event.get('acknowledgedFlag'),
            event.get('updateCount'),
            event.get('impactedResourceCount')
        ])
    return table
def alter_targets(self):
    """Return any corresponding targets in a variant directory (none for derived nodes)."""
    if self.is_derived():
        return [], None
    return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
def insert_tree(self, items, node, headers):
    """Recursively grow the FP tree by inserting `items` below `node`.

    Existing children get their count bumped; new children are linked
    onto the per-item header chains used during mining.
    """
    first = items[0]
    child = node.get_child(first)
    if child is not None:
        child.count += 1
    else:
        child = node.add_child(first)
        # Thread the new node onto the end of the header link chain.
        if headers[first] is None:
            headers[first] = child
        else:
            current = headers[first]
            while current.link is not None:
                current = current.link
            current.link = child
    remaining_items = items[1:]
    if len(remaining_items) > 0:
        self.insert_tree(remaining_items, child, headers)
def _parse_qualimap_coverage(table):
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mean":
out["Coverage (Mean)"] = val
return out | Parse summary qualimap coverage metrics. |
def gatk_indel_realignment_cl(runner, align_bam, ref_file, intervals,
                              tmp_dir, region=None, deep_coverage=False,
                              known_vrns=None):
    """Prepare input arguments for GATK IndelRealigner and build the command line."""
    if not known_vrns:
        known_vrns = {}
    params = ["-T", "IndelRealigner",
              "-I", align_bam,
              "-R", ref_file,
              "-targetIntervals", intervals,
              ]
    if region:
        params += ["-L", region]
    if known_vrns.get("train_indels"):
        params += ["--knownAlleles", known_vrns["train_indels"]]
    if deep_coverage:
        # High-depth regions need relaxed read/consensus limits.
        params += ["--maxReadsInMemory", "300000",
                   "--maxReadsForRealignment", str(int(5e5)),
                   "--maxReadsForConsensuses", "500",
                   "--maxConsensuses", "100"]
    return runner.cl_gatk(params, tmp_dir)
def initauth(self):
    """Initialise authentication headers, fetching an OAuth token if needed (internal use)."""
    headers = {'User-agent': 'CLAMClientAPI-' + clam.common.data.VERSION}
    if self.oauth:
        if not self.oauth_access_token:
            r = requests.get(self.url,headers=headers, verify=self.verify)
            if r.status_code == 404:
                raise clam.common.data.NotFound("Authorization provider not found")
            elif r.status_code == 403:
                raise clam.common.data.PermissionDenied("Authorization provider denies access")
            elif not (r.status_code >= 200 and r.status_code <= 299):
                raise Exception("An error occured, return code " + str(r.status_code))
            data = self._parse(r.text)
            # _parse returns True when manual user interaction is required.
            if data is True:
                raise Exception("No access token provided, but Authorization Provider requires manual user input. Unable to authenticate automatically. Obtain an access token from " + r.geturl())
            else:
                self.oauth_access_token = data.oauth_access_token
        headers['Authorization'] = 'Bearer ' + self.oauth_access_token
    return headers
def remove_namespaces(self, rpc_reply):
    """Remove xmlns attributes from an RPC reply via the XSLT transform; return the new root."""
    self.__xslt=self.__transform_reply
    self.__parser = etree.XMLParser(remove_blank_text=True)
    self.__xslt_doc = etree.parse(io.BytesIO(self.__xslt), self.__parser)
    self.__transform = etree.XSLT(self.__xslt_doc)
    # Round-trip the reply through the namespace-stripping stylesheet.
    self.__root = etree.fromstring(str(self.__transform(etree.parse(StringIO(str(rpc_reply))))))
    return self.__root
def init_from_datastore(self):
    """Initialize classification-batch data by reading all entities from the datastore."""
    self._data = {}
    client = self._datastore_client
    for entity in client.query_fetch(kind=KIND_CLASSIFICATION_BATCH):
        # The batch id is the final component of the entity key path.
        class_batch_id = entity.key.flat_path[-1]
        # NOTE(review): writes go through self.data while the reset used
        # self._data — presumably `data` is a property over `_data`; confirm.
        self.data[class_batch_id] = dict(entity)
def RNN_step(weights, gates):
    """Create a step model for an RNN, given weights and gates functions.

    The returned model maps ((cell_state, hidden_state), inputs) to
    (((new_cell, new_hidden), new_hidden), backprop_callback), composing the
    `weights` model (pre-activations) with the `gates` model (state update).
    """
    def rnn_step_fwd(prevstate_inputs, drop=0.0):
        # Unpack the recurrent state packed together with this step's inputs.
        prevstate, inputs = prevstate_inputs
        cell_tm1, hidden_tm1 = prevstate
        # Pre-activations from current inputs and previous hidden state.
        acts, bp_acts = weights.begin_update((inputs, hidden_tm1), drop=drop)
        # Gate the pre-activations against the previous cell state.
        (cells, hiddens), bp_gates = gates.begin_update((acts, cell_tm1), drop=drop)
        def rnn_step_bwd(d_state_d_hiddens, sgd=None):
            # NOTE(review): tuple targets are assigned left-to-right, so the
            # inner d_hiddens is immediately overwritten by the outer one —
            # the state-path hidden gradient is discarded. Confirm whether
            # the two gradients should be summed instead.
            (d_cells, d_hiddens), d_hiddens = d_state_d_hiddens
            d_acts, d_cell_tm1 = bp_gates((d_cells, d_hiddens), sgd=sgd)
            d_inputs, d_hidden_tm1 = bp_acts(d_acts, sgd=sgd)
            return (d_cell_tm1, d_hidden_tm1), d_inputs
        return ((cells, hiddens), hiddens), rnn_step_bwd
    model = wrap(rnn_step_fwd, weights, gates)
    # Mirror the weight model's dimensions and keep sub-model references.
    model.nO = weights.nO
    model.nI = weights.nI
    model.weights = weights
    model.gates = gates
    return model
def getCurrentFadeColor(self, bBackground):
    """Get current fade color value by delegating to the native function table."""
    return self.function_table.getCurrentFadeColor(bBackground)
def _flush(self):
self._buffer.sort()
self._replace_batch()
self._buffer = []
self._compress() | Purges the buffer and commits all pending values into the estimator. |
def error(self, i: "int | None" = None) -> str:
    """Returns an error message head, optionally prefixed with an index.

    Args:
        i: optional index to prepend to the head.

    Returns:
        The formatted "[error]" head string.
    """
    # ANNOTATION FIX: default was None while annotated plain `int`;
    # implicit Optional is deprecated (PEP 484), so the annotation now
    # states the union explicitly (as a string, for pre-3.10 compatibility).
    head = "[" + colors.red("error") + "]"
    if i is not None:
        head = str(i) + " " + head
    return head
def _buildExecutor(self):
    """Creates and returns an ExecutorInfo-shaped object representing our executor implementation."""
    executor = addict.Dict()
    executor.name = "toil"
    # Command is the resolved toil mesos executor entry point.
    executor.command.value = resolveEntryPoint('_toil_mesos_executor')
    # Make the executor id unique per scheduler process.
    executor.executor_id.value = "toil-%i" % os.getpid()
    executor.source = pwd.getpwuid(os.getuid()).pw_name
    return executor
def register(self, cls, instance):
    """Register the given instance as implementation for a class interface.

    Args:
        cls: the interface class; must be a DropletInterface subclass.
        instance: the object implementing ``cls``; its ``name`` must be
            unique among registered instances.

    Raises:
        TypeError: if ``cls`` is not a DropletInterface subclass, or
            ``instance`` is not an instance of ``cls``.
        ValueError: if another instance is already registered under the
            same name.
    """
    if not issubclass(cls, DropletInterface):
        # MESSAGE FIX: previously said "NAZInterface", which does not match
        # the DropletInterface class actually being checked.
        raise TypeError('Given class is not a DropletInterface subclass: %s'
                        % cls)
    if not isinstance(instance, cls):
        raise TypeError('Given instance does not implement the class: %s'
                        % instance)
    if instance.name in self.INSTANCES_BY_NAME:
        # Re-registering the same instance under the same name is a no-op;
        # only a *different* instance with a clashing name is an error.
        if self.INSTANCES_BY_NAME[instance.name] != instance:
            raise ValueError('Given name is registered '
                             'by other instance: %s' % instance.name)
    self.INSTANCES_BY_INTERFACE[cls].add(instance)
    self.INSTANCES_BY_NAME[instance.name] = instance
def _limit_call_handler(self):
with self.limit_lock:
if self.limit_per_min <= 0:
return
now = time.time()
self.limits = [l for l in self.limits if l > now]
self.limits.append(now + 60)
if len(self.limits) >= self.limit_per_min:
time.sleep(self.limits[0] - now) | Ensure we don't exceed the N requests a minute limit by leveraging a thread lock |
def _notify_remove_at(self, index, length=1):
slice_ = self._slice_at(index, length)
self._notify_remove(slice_) | Notify about an RemoveChange at a caertain index and length. |
def digest_packets(packets, hasher):
    """Compute digest on specified packets, according to '_to_hash' field.

    Args:
        packets: iterable of dicts, each carrying a '_to_hash' bytes field.
        hasher: a hashlib-style object supporting update() and digest().

    Returns:
        The digest bytes over the concatenated '_to_hash' data.
    """
    # Feed the hasher incrementally instead of buffering every packet's
    # bytes in an intermediate BytesIO — same digest, no extra copy.
    for packet in packets:
        hasher.update(packet['_to_hash'])
    return hasher.digest()
def re_run_file(self):
    """Re-run last script, using the execution parameters saved from the
    previous run (either in the current IPython client or in an external
    system terminal)."""
    if self.get_option('save_all_before_run'):
        self.save_all()
    if self.__last_ec_exec is None:
        # Nothing has been executed yet in this session.
        return
    # Unpack the execution parameters recorded by the last run request;
    # the order must match how __last_ec_exec was stored.
    (fname, wdir, args, interact, debug,
     python, python_args, current, systerm,
     post_mortem, clear_namespace) = self.__last_ec_exec
    if not systerm:
        # Re-run inside the current IPython console client.
        self.run_in_current_ipyclient.emit(fname, wdir, args,
                                           debug, post_mortem,
                                           current, clear_namespace)
    else:
        # Re-run in an external system terminal.
        self.main.open_external_console(fname, wdir, args, interact,
                                        debug, python, python_args,
                                        systerm, post_mortem)
def step(self):
    """Prompt the environment to change.

    Finds the most recently created State, flips its contents between
    0 and 1, and records the change as a Mutation transformation.
    """
    latest_state = max(self.infos(type=State),
                       key=attrgetter('creation_time'))
    flipped = 1 - float(latest_state.contents)
    info_out = State(origin=self, contents=flipped)
    transformations.Mutation(info_in=latest_state, info_out=info_out)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.