code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def update(target, path, value):
    """Update the item at a dotted ``path`` inside ``target`` with ``value``.

    ``target`` may be an arbitrary nesting of dicts, lists and plain
    objects; missing intermediate containers are created as dicts.
    List indices are grown via ``listpad`` (defined elsewhere in this
    module) so assignment past the current end works.

    Args:
        target: Root container (dict, list or object) modified in place.
        path: Dot-separated key/index/attribute path, e.g. ``"a.0.b"``.
        value: Value to store at the final path element.
    """
    names = path.split(".")
    last_index = len(names) - 1
    node = target
    for index, name in enumerate(names):
        last = index == last_index
        if isinstance(node, dict):
            if last:
                node[name] = value
                return
            # Only create the intermediate dict when the key is absent.
            if name not in node:
                node[name] = {}
            node = node[name]
        elif isinstance(node, list):
            name = int(name)
            listpad(node, name + 1)  # grow the list so index `name` exists
            if last:
                node[name] = value
                return
            node[name] = {}
            node = node[name]
        else:
            # Fallback: treat the node as a plain object with attributes.
            # NOTE(review): unlike the dict branch, an existing attribute
            # is always overwritten with {} on the way down — confirm
            # this clobbering is intended.
            if last:
                setattr(node, name, value)
            else:
                setattr(node, name, {})
                node = getattr(node, name)
def pull(self, *, index=None):
    """Pull (remove and return) the item at ``index`` from the chain.

    Removes the item from the internal list and, when the item carries a
    ``name`` attribute, also drops it from the name->item lookup dict so
    the two stay in sync.

    NOTE(review): ``index`` defaults to None and ``list.pop(None)``
    raises TypeError — either callers always pass an index or ``__list``
    is a custom sequence accepting None; confirm.
    """
    item = self.__list.pop(index)
    # Keep the name index consistent with the list contents.
    name = getattr(item, 'name', None)
    if name is not None:
        del self.__dict[name]
    return item
def destroy_volume(agent, role):
    """Delete the persistent volumes reserved for ``role`` on an agent.

    Collects every reserved 'disk' resource carrying persistence
    metadata and posts a destroy-volumes request to the Mesos master.

    Args:
        agent: Agent dict as returned by the master state endpoint.
        role: Reservation role whose volumes should be destroyed.

    Returns:
        True when there is nothing to delete or the master accepted the
        request (2xx); False otherwise.
    """
    volumes = []
    agent_id = agent['id']
    reserved_resources_full = agent.get('reserved_resources_full', None)
    if not reserved_resources_full:
        return True  # nothing reserved on this agent
    reserved_resources = reserved_resources_full.get(role, None)
    if not reserved_resources:
        return True  # nothing reserved for this role
    for reserved_resource in reserved_resources:
        name = reserved_resource.get('name', None)
        disk = reserved_resource.get('disk', None)
        # Only disk resources with persistence metadata are volumes.
        if name == 'disk' and disk is not None and 'persistence' in disk:
            volumes.append(reserved_resource)
    req_url = urljoin(master_url(), 'destroy-volumes')
    data = {
        'slaveId': agent_id,
        'volumes': json.dumps(volumes)
    }
    success = False
    try:
        response = http.post(req_url, data=data)
        success = 200 <= response.status_code < 300
        if response.status_code == 409:
            # NOTE(review): 409 (conflict) only prints an empty line —
            # looks like a truncated diagnostic; confirm intent.
            print()
    except DCOSHTTPException as e:
        # Fixed typo in the error message ("Unabled" -> "Unable").
        print("HTTP {}: Unable to delete volume based on: {}".format(
            e.response.status_code,
            e.response.text))
    return success
def rolling_percentileofscore(series, window, min_periods=None):
    """Compute the rolling percentile-of-score for ``series``.

    For each window the last value is scored against the remaining
    window values via ``scipy.stats.percentileofscore``. NaNs are
    dropped before rolling and the result is reindexed back onto the
    original index (dropped positions become NaN again).

    NOTE(review): ``pd.rolling_apply`` only exists in legacy pandas
    (removed in 0.23); modern code would use
    ``Series.rolling(...).apply`` — confirm the pinned pandas version.
    """
    import scipy.stats as stats

    def _percentile(arr):
        # Score the newest observation against the rest of the window.
        score = arr[-1]
        vals = arr[:-1]
        return stats.percentileofscore(vals, score)

    notnull = series.dropna()
    # Default min_periods to a full window when not given.
    min_periods = min_periods or window
    if notnull.empty:
        return pd.Series(np.nan, index=series.index)
    else:
        return pd.rolling_apply(notnull, window, _percentile, min_periods=min_periods).reindex(series.index)
def batch_per(hyps: Sequence[Sequence[T]],
              refs: Sequence[Sequence[T]]) -> float:
    """Calculate the macro-averaged phoneme error rate of a batch.

    Zero tokens (padding) are filtered from both hypothesis and
    reference before computing per-utterance edit distance. ``hyps`` and
    ``refs`` are assumed parallel (same length, aligned utterances).

    Args:
        hyps: Batch of hypothesis token sequences.
        refs: Batch of reference token sequences, parallel to ``hyps``.

    Returns:
        Mean of ``edit_distance(ref, hyp) / len(ref)`` over the batch.

    Raises:
        ZeroDivisionError: if the batch is empty or a reference is empty
            after filtering padding.
    """
    macro_per = 0.0
    # Iterate hyp/ref pairs in lockstep instead of indexing by position.
    for hyp_seq, ref_seq in zip(hyps, refs):
        ref = [phn_i for phn_i in ref_seq if phn_i != 0]
        hyp = [phn_i for phn_i in hyp_seq if phn_i != 0]
        macro_per += distance.edit_distance(ref, hyp) / len(ref)
    return macro_per / len(hyps)
def convert_weights_and_inputs(node, **kwargs):
name, _, _ = get_inputs(node, kwargs)
if kwargs["is_input"] is False:
weights = kwargs["weights"]
initializer = kwargs["initializer"]
np_arr = weights[name]
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
dims = np.shape(np_arr)
tensor_node = onnx.helper.make_tensor_value_info(name, data_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=name,
data_type=data_type,
dims=dims,
vals=np_arr.flatten().tolist(),
raw=False,
)
)
return [tensor_node]
else:
tval_node = onnx.helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
return [tval_node] | Helper function to convert weights and inputs. |
def dump(self, fname):
url = '%s/v1/calc/%d/datastore' % (self.server, self.calc_id)
resp = self.sess.get(url, stream=True)
down = 0
with open(fname, 'wb') as f:
logging.info('Saving %s', fname)
for chunk in resp.iter_content(CHUNKSIZE):
f.write(chunk)
down += len(chunk)
println('Downloaded {:,} bytes'.format(down))
print() | Dump the remote datastore on a local path. |
def get(self, id, no_summary=False):
resp = self.client.accounts.get(id)
if no_summary:
return self.display(resp)
results = []
client = LunrClient(self.get_admin(), debug=self.debug)
volumes = client.volumes.list(account_id=resp['id'])
for volume in volumes:
if volume['status'] == 'DELETED':
continue
results.append(volume)
self.display(resp, ['name', 'status', 'last_modified', 'created_at'])
if results:
return self.display(response(results, 200),
['id', 'status', 'size'])
else:
print("-- This account has no active volumes --")
print("\nThis is a summary, use --no-summary "
"to see the entire response") | List details for a specific tenant id |
def preview(klass, account, **kwargs):
params = {}
params.update(kwargs)
if 'media_ids' in params and isinstance(params['media_ids'], list):
params['media_ids'] = ','.join(map(str, params['media_ids']))
resource = klass.TWEET_ID_PREVIEW if params.get('id') else klass.TWEET_PREVIEW
resource = resource.format(account_id=account.id, id=params.get('id'))
response = Request(account.client, 'get', resource, params=params).perform()
return response.body['data'] | Returns an HTML preview of a tweet, either new or existing. |
def complete_rules(rules, cmd):
    """Run completion over one rule or a list of rules, merging results."""
    rule_list = rules if isinstance(rules, list) else [rules]
    completions = []
    for rule in rule_list:
        completions.extend(complete_rule(rule, cmd))
    return completions
def _create_initstate_and_embeddings(self):
self._init_state = self.cell.zero_state(self.batch_size, tf.float32)
embedding = tf.get_variable(
"embedding", [self.vocab_size, self.num_hidden])
inputs = tf.nn.embedding_lookup(embedding, self.input_data)
self.inputs = tf.nn.dropout(inputs, self.dropout) | Create the initial state for the cell and the data embeddings. |
def list_id(self):
    """Get or lazily resolve/create the id of the configured list.

    Resolution order: cached ``_list_id`` -> lookup by ``list_name``
    among existing lists -> create a new list via the API.

    NOTE(review): the lookup loop does not break on first match, so with
    duplicate names the LAST match wins — confirm intended.
    """
    list_id = getattr(self, '_list_id', None)
    if list_id is None:
        # Try to find an existing list with the configured name.
        for l in self.api.lists.all()['lists']:
            if l['name'] == self.list_name:
                self._list_id = l['id']
        # Still unset after the scan: create the list and cache its id.
        if not getattr(self, '_list_id', None):
            self._list_id = self.api.lists.create(
                label=self.list_label, name=self.list_name,
                method='POST')['list_id']
    return self._list_id
def prompt(default=None):
    """Open an editor on a temp file and return the edited text.

    Writes ``default`` (if given) into a named temporary file, forks,
    exec's the editor in the child, waits in the parent, then reads the
    file back and returns its stripped contents.

    NOTE(review): the editor is hard-coded to nano; $EDITOR/$VISUAL are
    not consulted — confirm that is intentional.
    """
    editor = 'nano'
    with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
        if default:
            tmpfile.write(default)
            tmpfile.flush()  # make the default text visible to the editor
        child_pid = os.fork()
        is_child = child_pid == 0
        if is_child:
            # Child: replace this process image with the editor.
            os.execvp(editor, [editor, tmpfile.name])
        else:
            # Parent: block until the editor exits, then re-read the file.
            os.waitpid(child_pid, 0)
            tmpfile.seek(0)
            return tmpfile.read().strip()
def generate(env):
global PDFTeXAction
if PDFTeXAction is None:
PDFTeXAction = SCons.Action.Action('$PDFTEXCOM', '$PDFTEXCOMSTR')
global PDFLaTeXAction
if PDFLaTeXAction is None:
PDFLaTeXAction = SCons.Action.Action("$PDFLATEXCOM", "$PDFLATEXCOMSTR")
global PDFTeXLaTeXAction
if PDFTeXLaTeXAction is None:
PDFTeXLaTeXAction = SCons.Action.Action(PDFTeXLaTeXFunction,
strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.tex', PDFTeXLaTeXAction)
bld.add_emitter('.tex', SCons.Tool.tex.tex_pdf_emitter)
pdf.generate2(env)
SCons.Tool.tex.generate_common(env) | Add Builders and construction variables for pdftex to an Environment. |
def _validate_sections(cls, sections):
    """Validate that every section has a ``name`` and names are unique.

    Args:
        sections: Iterable of Section-like objects.

    Raises:
        ConfigurationError: if an item lacks a ``name`` attribute or a
            name appears more than once.
    """
    seen = set()  # set membership is O(1) vs O(n) for the old list scan
    for section in sections:
        if not hasattr(section, 'name'):
            raise ConfigurationError('`sections` attribute requires a list of Section')
        name = section.name
        if name in seen:
            raise ConfigurationError('`%s` section name must be unique' % name)
        seen.add(name)
def parse_modeString(s):
    """Parse a display mode string like ``'1024 x 768 @ 60'``.

    Returns:
        Tuple ``(width, height, refresh)``; parts absent from the input
        come back as None.
    """
    width = height = refresh = None
    if '@' in s:
        s, refresh_part = s.split('@', 1)
        refresh = int(refresh_part)
    if 'x' in s:
        parts = s.split('x', 1)
        width, height = [int(part) if part.strip() else None for part in parts]
    elif s.strip():
        width = int(s)
    return (width, height, refresh)
def xrdb(xrdb_files=None):
xrdb_files = xrdb_files or \
[os.path.join(CACHE_DIR, "colors.Xresources")]
if shutil.which("xrdb") and OS != "Darwin":
for file in xrdb_files:
subprocess.run(["xrdb", "-merge", "-quiet", file]) | Merge the colors into the X db so new terminals use them. |
def _get_pathcost_func(
name: str
) -> Callable[[int, int, int, int, Any], float]:
return ffi.cast(
"TCOD_path_func_t", ffi.addressof(lib, name)
) | Return a properly cast PathCostArray callback. |
def SheetList(name, src, **kwargs):
'Creates a Sheet from a list of homogenous dicts or namedtuples.'
if not src:
status('no content in ' + name)
return
if isinstance(src[0], dict):
return ListOfDictSheet(name, source=src, **kwargs)
elif isinstance(src[0], tuple):
if getattr(src[0], '_fields', None):
return ListOfNamedTupleSheet(name, source=src, **kwargs)
return ListOfPyobjSheet(name, source=src, **kwargs) | Creates a Sheet from a list of homogenous dicts or namedtuples. |
def find_xenon_grpc_jar():
    """Locate the Xenon-GRPC fat jar under the Python prefix.

    Searches ``<prefix>/lib`` and ``<prefix>/local/lib`` for
    ``xenon-grpc-<version>-all.jar``.

    Returns:
        Path to the jar as a string, or None when not found.
    """
    prefix = Path(sys.prefix)
    locations = [
        prefix / 'lib',
        prefix / 'local' / 'lib',
    ]
    jar_name = 'xenon-grpc-{}-all.jar'.format(xenon_grpc_version)
    for location in locations:
        jar_file = location / jar_name
        # Return the first candidate that actually exists on disk.
        if jar_file.exists():
            return str(jar_file)
    return None
def accept( self ):
if ( not self.uiNameTXT.text() ):
QMessageBox.information(self,
'Invalid Name',
'You need to supply a name for your layout.')
return
prof = self.profile()
if ( not prof ):
prof = XViewProfile()
prof.setName(nativestring(self.uiNameTXT.text()))
prof.setVersion(self.uiVersionSPN.value())
prof.setDescription(nativestring(self.uiDescriptionTXT.toPlainText()))
prof.setIcon(self.uiIconBTN.filepath())
super(XViewProfileDialog, self).accept() | Saves the data to the profile before closing. |
def main():
    """Echo an interactive-interpreter session for stdin input.

    Reads source lines from stdin, feeds them to an
    InteractiveInterpreter (with turicreate pre-imported), and prints
    continuation lines prefixed with '...' like a real REPL.
    """
    source_lines = (line.rstrip() for line in sys.stdin)
    console = InteractiveInterpreter()
    console.runsource('import turicreate')
    source = ''
    # Initialize so the except handler below never sees an unbound name
    # when stdin is empty.
    more = False
    try:
        while True:
            # Python 3: use next() builtin, not the removed .next() method.
            source = next(source_lines)
            more = console.runsource(source)
            while more:
                next_line = next(source_lines)
                print('...', next_line)
                source += '\n' + next_line
                more = console.runsource(source)
    except StopIteration:
        # Input exhausted mid-statement: flush the pending source.
        if more:
            print('... ')
            console.runsource(source + '\n')
def best_fts_version():
    """Discover the most advanced SQLite FTS version this build supports."""
    connection = sqlite3.connect(":memory:")
    # Probe from newest to oldest; the first CREATE that works wins.
    for candidate in ("FTS5", "FTS4", "FTS3"):
        create_sql = "CREATE VIRTUAL TABLE v USING {} (t TEXT);".format(candidate)
        try:
            connection.execute(create_sql)
        except sqlite3.OperationalError:
            continue
        return candidate
    return None
def disconnect(self):
self._state_mutex.acquire()
self._state = mqtt_cs_disconnecting
self._state_mutex.release()
self._backoffCore.stopStableConnectionTimer()
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
return self._send_disconnect() | Disconnect a connected client from the broker. |
def frequency_measurement(shell_ctx):
    """Count on how many of the last DAYS_AGO days the program was used."""
    freq = update_frequency(shell_ctx)
    today = datetime.datetime.utcnow()
    recent_days = (today - datetime.timedelta(days=offset)
                   for offset in range(0, DAYS_AGO))
    # A day counts when its usage frequency is positive.
    return sum(1 for day in recent_days if freq.get(day_format(day), 0) > 0)
def check_running(self, role, number):
    """Check that exactly ``number`` instances in ``role`` are running.

    Args:
        role: Role name to query.
        number: Expected count of running instances.

    Returns:
        The list of running instances when the count matches, otherwise
        False (after printing a diagnostic).
    """
    instances = self.get_instances_in_role(role, "running")
    if len(instances) != number:
        # Python 3 print function (was a Python 2 print statement).
        print("Expected %s instances in role %s, but was %s %s" %
              (number, role, len(instances), instances))
        return False
    else:
        return instances
def Wait(self):
time.sleep(self.sleep_time - int(self.sleep_time))
for _ in range(int(self.sleep_time)):
time.sleep(1)
self.sleep_time = min(self.poll_max,
max(self.poll_min, self.sleep_time) * self.poll_slew) | Wait until the next action is needed. |
def on_config_inited(app, config):
extension_paths = config["uqbar_book_extensions"] or [
"uqbar.book.extensions.GraphExtension"
]
app.uqbar_book_extensions = []
for extension_path in extension_paths:
module_name, _, class_name = extension_path.rpartition(".")
module = importlib.import_module(module_name)
extension_class = getattr(module, class_name)
extension_class.setup_sphinx(app)
app.uqbar_book_extensions.append(extension_class) | Hooks into Sphinx's ``config-inited`` event. |
def __expr_str(cls, expr, level):
ident = ' ' * level * 4
if isinstance(expr, tuple):
return '{}{}'.format(ident, str(expr))
if expr.etype[0] in ['pvar', 'constant']:
return '{}Expression(etype={}, args={})'.format(ident, expr.etype, expr.args)
if not isinstance(expr, Expression):
return '{}{}'.format(ident, str(expr))
args = list(cls.__expr_str(arg, level + 1) for arg in expr.args)
args = '\n'.join(args)
return '{}Expression(etype={}, args=\n{})'.format(ident, expr.etype, args) | Returns string representing the expression. |
def Nstars(self):
    """Dictionary mapping system index -> number of stars, lazily cached.

    Built once from the model nodes and stored on ``self._Nstars``.
    """
    if self._Nstars is None:
        from collections import Counter
        # Count model nodes per system index in a single pass.
        self._Nstars = dict(Counter(n.index for n in self.get_model_nodes()))
    return self._Nstars
def read_dir(directory):
    """Concatenate the text of every file in ``directory``, space-separated."""
    pieces = []
    for filename in dir_list(directory):
        pieces.append(read_file(directory + '/' + filename))
        pieces.append(' ')
    return ''.join(pieces)
def gather_command_line_options(filter_disabled=None):
if filter_disabled is None:
filter_disabled = not SETTINGS.COMMAND_LINE.SHOW_DISABLED_OPTIONS
options = [opt for opt in get_inheritors(CommandLineOption)
if not filter_disabled or opt._enabled]
return sorted(options, key=lambda opt: opt.__name__) | Get a sorted list of all CommandLineOption subclasses. |
def gev_expval(xi, mu=0, sigma=1):
return mu - (sigma / xi) + (sigma / xi) * flib.gamfun(1 - xi) | Expected value of generalized extreme value distribution. |
def _log_message(self, level, freerun_entry, msg):
self.logger.log(level, msg)
assert isinstance(freerun_entry, FreerunProcessEntry)
event_log = freerun_entry.event_log
if len(event_log) > MAX_NUMBER_OF_EVENTS:
del event_log[-1]
event_log.insert(0, msg)
self.freerun_process_dao.update(freerun_entry) | method performs logging into log file and the freerun_entry |
def buildMessage(headers, parts):
    """Build a multipart/alternative message from headers and MIME parts.

    Args:
        headers: Mapping of header name -> value; From/To/Cc/Bcc are
            encoded with the address-specific helpers, everything else
            with the generic header encoder.
        parts: Mapping of MIME subtype (e.g. 'plain', 'html') -> body
            text.

    Returns:
        The assembled MIMEMultipart message.
    """
    message = multipart.MIMEMultipart('alternative')
    for name, value in headers.items():
        name = name.title()
        # BUG FIX: headers were previously assigned onto the `multipart`
        # MODULE object instead of the message being built.
        if name == "From":
            message[name] = _encodeAddress(value)
        elif name in ["To", "Cc", "Bcc"]:
            message[name] = _encodeAddresses(value)
        else:
            message[name] = _encodeHeader(value)
    for partType, part in parts.items():
        # NOTE(review): passing utf-8-encoded bytes to MIMEText is a
        # Python 2 idiom; on Python 3 pass the str directly — confirm
        # the target interpreter.
        mimeText = text.MIMEText(part.encode("utf-8"), partType, "UTF-8")
        # BUG FIX: attach the MIME object itself; attaching
        # mimeText.encode() put a string where a Message is expected.
        message.attach(mimeText)
    return message
def delete_rule(self, rule_id):
    """Remove the rule with ``rule_id`` from the rule table, if present."""
    if rule_id in self.rules:
        del self.rules[rule_id]
        self.rule_cnt -= 1
    else:
        LOG.error("No Rule id present for deleting %s", rule_id)
def normalize_cpp_function(self, function, line):
for ref in ('const', 'const&', '&&', '&'):
if function.endswith(ref):
function = function[:-len(ref)].strip()
if '::operator' not in function:
function = drop_prefix_and_return_type(function)
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=('name omitted', 'IPC::ParamTraits')
)
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement='',
exceptions=('anonymous namespace', 'operator')
)
if 'clone .cold' in function:
function = collapse(
function,
open_string='[',
close_string=']',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
function = self.fixup_space.sub('', function)
function = self.fixup_comma.sub(', ', function)
return function | Normalizes a single cpp frame with a function |
def bandwidth_factor(nbr_data_pts, deriv_order=0):
    """Scale factor for one-dimensional plug-in bandwidth selection.

    Supports the density itself (``deriv_order`` 0) and its second
    derivative (``deriv_order`` 2); anything else is unimplemented.
    """
    if deriv_order == 0:
        return (3.0 * nbr_data_pts / 4) ** (-1.0 / 5)
    elif deriv_order == 2:
        return (7.0 * nbr_data_pts / 4) ** (-1.0 / 9)
    else:
        message = 'Not implemented for derivative of order {}'.format(deriv_order)
        raise ValueError(message)
def name_to_system_object(self, value):
if not self.system:
raise SystemNotReady
if isinstance(value, (str, Object)):
rv = self.system.name_to_system_object(value)
return rv if rv else value
else:
return value | Return object for given name registered in System namespace. |
def positive_report(binary_report, sha256hash, project, patch_file):
    """Log details when a scan report shows a positive (infected) match.

    Args:
        binary_report: Scan result dict with 'permalink' and 'scan_date'.
        sha256hash: Hash of the scanned file (unused here; kept for the
            call signature).
        project: Project name (unused here; kept for the call signature).
        patch_file: The file the report refers to.
    """
    report_url = binary_report['permalink']
    scan_date = binary_report['scan_date']
    logger.error("Virus Found!")
    # Fixed log-message typos ("a infected" -> "an infected",
    # "avaliable" -> "available"); dropped the unused `failure` local.
    logger.info('File scan date for %s shows an infected status on: %s', patch_file, scan_date)
    logger.info('Full report available here: %s', report_url)
def _flatten(l):
    """Flatten one level of nesting; Separator items pass through as-is."""
    flattened = []
    for element in l:
        if isinstance(element, whaaaaat.Separator):
            flattened.append(element)
        else:
            flattened.extend(element)
    return flattened
def load_model_from_link(name, **overrides):
path = get_data_path() / name / "__init__.py"
try:
cls = import_file(name, path)
except AttributeError:
raise IOError(Errors.E051.format(name=name))
return cls.load(**overrides) | Load a model from a shortcut link, or directory in spaCy data path. |
def handle_presence(self, old_present):
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present) | Fire presence events if enabled |
def _set_query_data_fast_2(self, page):
self.data['pageid'] = page.get('pageid')
redirects = page.get('redirects')
if redirects:
self.data['redirects'] = redirects
terms = page.get('terms')
if terms:
if terms.get('alias'):
self.data['aliases'] = terms['alias']
if terms.get('description'):
self.data['description'] = next(iter(terms['description']),
None)
if terms.get('label'):
self.data['label'] = next(iter(terms['label']), None)
title = page.get('title')
self.data['title'] = title
if not self.params.get('title'):
self.params['title'] = title
watchers = page.get('watchers')
if watchers:
self.data['watchers'] = watchers
self._set_query_image(page) | set less expensive action=query response data PART 2 |
def circles_pycairo(width, height, color):
cairo_color = color / rgb(255, 255, 255)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context(surface)
ctx.new_path()
ctx.set_source_rgb(cairo_color.red, cairo_color.green, cairo_color.blue)
ctx.arc(width / 2, height / 2, width / 2, 0, 2 * pi)
ctx.fill()
surface.write_to_png('circles.png') | Implementation of circle border with PyCairo. |
def plan_to_assignment(plan):
    """Convert a reassignment plan to the cluster-topology assignment format.

    Maps each (topic, partition) pair to its replica list.
    """
    return {
        (partition_info['topic'], partition_info['partition']): partition_info['replicas']
        for partition_info in plan['partitions']
    }
def index(self, *args, **kwargs):
    """Index documents within the current session; commit if autosession."""
    self.check_session()
    outcome = self.session.index(*args, **kwargs)
    if self.autosession:
        self.commit()
    return outcome
def extras_to_string(extras):
    """Turn a list of extras into a bracketed, comma-joined string."""
    if isinstance(extras, six.string_types):
        # Already formatted (e.g. "[security]") — pass through unchanged.
        if extras.startswith("["):
            return extras
        extras = [extras]
    if not extras:
        return ""
    joined = ",".join(sorted(set(extras)))
    return "[{0}]".format(joined)
def handle_lease(queue_name):
    """Lease tasks from ``queue_name`` and return them as JSON.

    Form params: owner (defaults to the remote address), count
    (default 1) and timeout seconds (default 60).

    Returns:
        A flask JSON response with the leased tasks (possibly empty),
        or a jsonified error when the work queue raises.
    """
    owner = request.form.get('owner', request.remote_addr, type=str)
    try:
        task_list = work_queue.lease(
            queue_name,
            owner,
            request.form.get('count', 1, type=int),
            request.form.get('timeout', 60, type=int))
    # Fixed Python-2-only "except Error, e" syntax.
    except work_queue.Error as e:
        return utils.jsonify_error(e)
    if not task_list:
        return flask.jsonify(tasks=[])
    db.session.commit()
    task_ids = [t['task_id'] for t in task_list]
    logging.debug('Task leased: queue=%r, task_ids=%r, owner=%r',
                  queue_name, task_ids, owner)
    return flask.jsonify(tasks=task_list)
def generic(self, input_string, **kwargs):
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
return input_string.format(**kwargs_copy) | return a generic filename for a given dataset and component |
def send_wp_requests(self, wps=None):
if wps is None:
wps = self.missing_wps_to_request()
tnow = time.time()
for seq in wps:
self.wp_requested[seq] = tnow
self.master.waypoint_request_send(seq) | send some more WP requests |
def checkrange(bch, fieldname):
    """Validate that ``bch[fieldname]`` lies within its declared range.

    The range dict from ``bch.getrange(fieldname)`` may define
    'maximum' / 'minimum' (inclusive) and 'maximum<' / 'minimum>'
    (exclusive) bounds; a None bound is disabled.

    Returns:
        The field value when every declared bound is satisfied.

    Raises:
        RangeError: when any declared bound is violated.
    """
    fieldvalue = bch[fieldname]
    therange = bch.getrange(fieldname)
    # Compare against None by identity ("is not None"), per PEP 8.
    if therange['maximum'] is not None:
        if fieldvalue > therange['maximum']:
            astr = "Value %s is not less or equal to the 'maximum' of %s"
            astr = astr % (fieldvalue, therange['maximum'])
            raise RangeError(astr)
    if therange['minimum'] is not None:
        if fieldvalue < therange['minimum']:
            astr = "Value %s is not greater or equal to the 'minimum' of %s"
            astr = astr % (fieldvalue, therange['minimum'])
            raise RangeError(astr)
    if therange['maximum<'] is not None:
        if fieldvalue >= therange['maximum<']:
            astr = "Value %s is not less than the 'maximum<' of %s"
            astr = astr % (fieldvalue, therange['maximum<'])
            raise RangeError(astr)
    if therange['minimum>'] is not None:
        if fieldvalue <= therange['minimum>']:
            astr = "Value %s is not greater than the 'minimum>' of %s"
            astr = astr % (fieldvalue, therange['minimum>'])
            raise RangeError(astr)
    return fieldvalue
def __get_lowpoints(node, dfs_data):
    """Calculate the two lowpoints (lowest DFS-ordered targets) for a node."""
    order_of = dfs_data['ordering_lookup']
    candidates = sorted(T(node, dfs_data), key=order_of.__getitem__)
    # The two smallest entries in the ordering are the lowpoints.
    return candidates[0], candidates[1]
def extract_sort(self, params):
    """Extract sort fields from ``params`` and build the sort query.

    Accepts a single string or a list under the 'sort' key; a leading
    '-' means descending. Only fields known to ``self.adapter.sorts``
    are kept. The result is stored on ``self.sorts``.

    NOTE(review): uses Python 2's ``basestring``; on Python 3 this
    raises NameError — confirm the target interpreter.
    """
    sorts = params.pop('sort', [])
    # Normalize a bare string to a one-element list.
    sorts = [sorts] if isinstance(sorts, basestring) else sorts
    sorts = [(s[1:], 'desc')
             if s.startswith('-') else (s, 'asc')
             for s in sorts]
    self.sorts = [
        {self.adapter.sorts[s]: d}
        for s, d in sorts if s in self.adapter.sorts
    ]
def cli(env, group_id, name, description):
mgr = SoftLayer.NetworkManager(env.client)
data = {}
if name:
data['name'] = name
if description:
data['description'] = description
if not mgr.edit_securitygroup(group_id, **data):
raise exceptions.CLIAbort("Failed to edit security group") | Edit details of a security group. |
def remove_files(self, common_name, delete_dir=False):
record = self.remove_record(common_name)
if delete_dir:
delete_dirs = []
if 'files' in record:
key_containing_dir = os.path.dirname(record['files']['key'])
delete_dirs.append(key_containing_dir)
cert_containing_dir = os.path.dirname(record['files']['cert'])
if key_containing_dir != cert_containing_dir:
delete_dirs.append(cert_containing_dir)
for d in delete_dirs:
shutil.rmtree(d)
return record | Delete files and record associated with this common name |
def delete_model(self, model):
if SessionActivity.is_current(sid_s=model.sid_s):
flash('You could not remove your current session', 'error')
return
delete_session(sid_s=model.sid_s)
db.session.commit() | Delete a specific session. |
def zone_calc(surf, cortex, src):
    """Assign each cortical vertex to its nearest source node.

    Uses exact geodesic distance along the cortical surface; each
    vertex receives the 1-based index of its closest source node, then
    the labels are mapped back onto the full surface.

    Args:
        surf: Surface (vertices, triangles) pair.
        cortex: Indices of cortical vertices within the surface.
        src: Source nodes, one entry per zone seed.

    Returns:
        Zone labels recorted onto the full surface.
    """
    cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)
    # BUG FIX: the body referenced an undefined global `source_nodes`
    # while the `src` parameter went unused — use `src`.
    dist_vals = np.zeros((len(src), len(cortex_vertices)))
    for x in range(len(src)):
        translated_source_nodes = translate_src(src[x], cortex)
        dist_vals[x, :] = gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices=translated_source_nodes)
    # Nearest source per vertex (argmin via argsort row 0), 1-based.
    data = np.argsort(dist_vals, axis=0)[0, :] + 1
    zone = recort(data, surf, cortex)
    del data
    return zone
def _run(self):
while self._is_running:
yield from self._task()
self._run_complete.set_result(True) | The actor's main work loop |
def click(self, jquery=False):
if jquery:
e = JQuery(self)
e.click()
else:
super(Clickable, self).click() | Click by WebElement, if not, JQuery click |
def trainable_params(m:nn.Module)->ParamList:
    "Return list of trainable params in `m`."
    # NOTE(review): despite the docstring/annotation, this returns a lazy
    # `filter` object (single-pass iterable), not a list — confirm
    # callers only iterate it once.
    res = filter(lambda p: p.requires_grad, m.parameters())
    return res
def unregister_editorstack(self, editorstack):
    """Remove ``editorstack`` unless it is the last remaining one.

    Returns:
        True when the stack was removed, False when it was the last one
        and therefore kept.
    """
    self.remove_last_focus_editorstack(editorstack)
    if len(self.editorstacks) <= 1:
        return False
    position = self.editorstacks.index(editorstack)
    del self.editorstacks[position]
    return True
def editColormap(self):
self.editor = pg.ImageView()
self.editor.ui.roiBtn.setVisible(False)
self.editor.ui.menuBtn.setVisible(False)
self.editor.setImage(self.imageArray)
if self.imgArgs['state'] is not None:
self.editor.getHistogramWidget().item.gradient.restoreState(self.imgArgs['state'])
self.editor.getHistogramWidget().item.setLevels(*self.imgArgs['levels'])
self.editor.closeEvent = self._editor_close
self.editor.setWindowModality(QtCore.Qt.ApplicationModal)
self.editor.show() | Prompts the user with a dialog to change colormap |
def get(self, id, **options):
if not self._item_path:
raise AttributeError('get is not available for %s' % self._item_name)
target = self._item_path % id
json_data = self._redmine.get(target, **options)
data = self._redmine.unwrap_json(self._item_type, json_data)
data['_source_path'] = target
return self._objectify(data=data) | Get a single item with the given ID |
def to_csv_path(graph: BELGraph, path: str, sep: Optional[str] = None) -> None:
with open(path, 'w') as file:
to_csv(graph, file, sep=sep) | Write the graph as a tab-separated edge list to a file at the given path. |
def _ensure_started(self):
    """Run all registered startup handlers if the API is not yet started.

    Coroutine handlers are gathered and run to completion on the event
    loop first; the remaining plain callables run synchronously.

    NOTE(review): nothing here sets ``self.started`` to True — confirm
    that happens elsewhere (e.g. a property), otherwise handlers re-run
    on every call.
    """
    if not self.started:
        # Split out coroutine handlers: they must be awaited on a loop.
        async_handlers = [startup_handler for startup_handler in self.startup_handlers if
                          introspect.is_coroutine(startup_handler)]
        if async_handlers:
            loop = asyncio.get_event_loop()
            loop.run_until_complete(asyncio.gather(*[handler(self) for handler in async_handlers], loop=loop))
        for startup_handler in self.startup_handlers:
            if not startup_handler in async_handlers:
                startup_handler(self)
def closeEditor(self, editor, hint):
configItemDelegate = self.itemDelegate()
configItemDelegate.finalizeEditor(editor)
super(ConfigTreeView, self).closeEditor(editor, hint) | Finalizes, closes and releases the given editor. |
def WriteBlobToFile(self, request):
    """Append the blob in ``request`` to a temp file; return its path.

    Offset 0 truncates/creates the file ("w+b"); any other offset opens
    the existing file ("r+b") and requires it to already be exactly
    ``request.offset`` bytes long, enforcing in-order chunk delivery.

    Raises:
        IOError: when the existing file size does not match the offset.
    """
    if request.offset == 0:
        mode = "w+b"
    else:
        mode = "r+b"
    temp_file = tempfiles.CreateGRRTempFile(
        filename=request.write_path, mode=mode)
    with temp_file:
        path = temp_file.name
        # Seek to EOF, then verify we are exactly at the expected offset.
        temp_file.seek(0, 2)
        if temp_file.tell() != request.offset:
            raise IOError("Chunks out of order Error.")
        temp_file.write(request.executable.data)
    return path
def serialized(func):
    """Decorator: join the wrapped generator's output into one string.

    The wrapped function yields lines; a yielded tuple is space-joined
    into a single line, and all lines are newline-joined.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        lines = []
        for parts in func(*args, **kwargs):
            if isinstance(parts, tuple):
                lines.append(' '.join(parts))
            else:
                lines.append(parts)
        return '\n'.join(lines)
    return wrapper
def decyear2dt(t):
    """Convert a decimal year (e.g. 2001.5) to a datetime."""
    year = int(t)
    fraction = t - year
    year_start = datetime(year, 1, 1)
    # Length of this calendar year in seconds (handles leap years).
    year_seconds = (year_start.replace(year=year_start.year + 1) - year_start).total_seconds()
    return year_start + timedelta(seconds=year_seconds * fraction)
def bchar(posh, posv, border_style):
    """Look up the border character for one box-border piece position."""
    key = '{}{}'.format(posv, posh).lower()
    style_map = BORDER_STYLES[border_style]
    return style_map[key]
def create_ssh_key(kwargs=None, call=None):
if call == 'action':
raise SaltCloudSystemExit(
'The create_ssh_key function must be called with '
'-f or --function'
)
conn = get_conn()
ssh_key = _get_ssh_key(kwargs)
data = conn.create_ssh_key(ssh_key=ssh_key)
return {'SshKey': data} | Create an ssh key |
def error(self, message):
    """Report ``message`` on stderr, show usage help, then exit(2)."""
    formatted = 'error: %s\n\n' % message
    sys.stderr.write(formatted)
    self.print_help()
    sys.exit(2)
def history_add(self, value):
    """Append ``value`` to the history, evicting the oldest entry when full."""
    unlimited = self._history_max_size is None
    if unlimited or self.history_len() < self._history_max_size:
        self._history.append(value)
    else:
        # Full: drop the oldest entry, keep the newest max_size values.
        self._history = self._history[1:] + [value]
def call_task_fn(self):
if not self.fn:
return self.log_finished()
future = asyncio.Future()
future.add_done_callback(lambda x: self.log_finished())
if inspect.iscoroutinefunction(self.fn):
f = asyncio.ensure_future(self.fn())
f.add_done_callback(lambda x: self.bind_end(x.result(), future))
else:
self.bind_end(self.fn(), future)
return future | Call the function attached to the task. |
def state_push(self):
"Save the state of the output functions, to be restored with state_pop."
for of in self.output_fns:
if hasattr(of,'state_push'):
of.state_push()
super(PatternGenerator, self).state_push() | Save the state of the output functions, to be restored with state_pop. |
def load_frame(url, skiprows):
return pd.read_csv(
url,
skiprows=skiprows,
skipinitialspace=True,
na_values=["Bank holiday", "Not available"],
parse_dates=["Date"],
index_col="Date",
).dropna(how='all') \
.tz_localize('UTC') \
.rename(columns=COLUMN_NAMES) | Load a DataFrame of data from a Bank of Canada site. |
def _set_format_scope(self, fmt):
self._prev_fmt = QtGui.QTextCharFormat(fmt)
self._prev_fmt_closed = False | Opens the format scope. |
def analysis(self):
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [word[ANALYSIS] for word in self.words] | The list of analysis of ``words`` layer elements. |
def current_kv_names(self):
return current_kv_names(self.sci, self.username, self.appname, request=self._request) | Return set of string names of current available Splunk KV collections |
def load_waypoints(self, filename):
self.wploader.target_system = self.target_system
self.wploader.target_component = self.target_component
try:
self.wploader.load(filename)
except Exception as msg:
print("Unable to load %s - %s" % (filename, msg))
return
print("Loaded %u waypoints from %s" % (self.wploader.count(), filename))
self.send_all_waypoints() | load waypoints from a file |
def _get_username_from_userinfo(self, user_info):
    """Derive a prefixed username from the JWT ``sub`` and ``email`` claims."""
    subject = user_info['sub']
    email = user_info['email']
    # Identity-provider markers in `sub`, checked in priority order
    # (Mozilla-LDAP first, since its subject also contains other words).
    prefixes = (
        ("Mozilla-LDAP", "mozilla-ldap"),
        ("email", "email"),
        ("github", "github"),
        ("google", "google"),
        ("oauth2", "oauth2"),
    )
    for marker, prefix in prefixes:
        if marker in subject:
            return "{}/{}".format(prefix, email)
    raise AuthenticationFailed("Unrecognized identity")
def track_statistic(self, name, description='', max_rows=None):
if name in self._tables:
raise TableConflictError(name)
if max_rows is None:
max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE
self.register_table(name, self.uuid, 'Statistic', description)
self._tables[name] = Statistic(name, self, max_rows=max_rows) | Create a Statistic object in the Tracker. |
def _get_default_value(self, value):
if isinstance(value, QueryExpression):
return value
if isinstance(value, bool):
return "'%s'" % int(value)
return "'%s'" % value | Format a value so that it can be used in "default" clauses. |
def df_metrics_to_num(self, df, query_object):
    """Coerce object-dtype metric columns of ``df`` to numeric in place.

    Values that cannot be parsed become NaN (errors='coerce').
    """
    metric_names = list(query_object.metrics)
    for column, dtype in df.dtypes.items():
        if dtype.type == np.object_ and column in metric_names:
            df[column] = pd.to_numeric(df[column], errors='coerce')
def prepare_place_layer(self):
if os.path.exists(self.input_place.text()):
self.place_layer = QgsVectorLayer(
self.input_place.text(),
tr('Nearby Cities'),
'ogr'
)
if self.place_layer.isValid():
LOGGER.debug('Get field information')
self.name_field.setLayer(self.place_layer)
self.population_field.setLayer(self.place_layer)
else:
LOGGER.debug('failed to set name field') | Action when input place layer name is changed. |
def find_databases(databases):
    """Define ribosomal proteins and location of curated databases.

    Every curated file follows the pattern ``rp<NAME>_JGI_MDM.filtered.faa``,
    so the path map is derived from the protein list instead of being
    spelled out entry by entry.

    :param databases: directory holding the curated .faa files.
    :returns: (proteins, protein_databases) — the ordered protein list
        and a name -> full-path mapping.
    """
    proteins = ['L15', 'L18', 'L6', 'S8', 'L5', 'L24', 'L14',
                'S17', 'L16', 'S3', 'L22', 'S19', 'L2', 'L4', 'L3', 'S10']
    protein_databases = {
        name: '%s/rp%s_JGI_MDM.filtered.faa' % (databases, name)
        for name in proteins
    }
    return proteins, protein_databases
def _check_cors_origin(origin, allowed_origins):
if isinstance(allowed_origins, list):
if origin in allowed_origins:
return origin
elif allowed_origins == '*':
return allowed_origins
elif allowed_origins == origin:
return allowed_origins | Check if an origin match cors allowed origins |
def similar_email(anon, obj, field, val):
    """Generate a random email address using the same domain.

    Internal betterworks.com addresses are returned unchanged; any
    other address keeps its domain but gets a faker-generated local
    part.
    """
    # Leave company-internal addresses untouched.
    if 'betterworks.com' in val:
        return val
    domain = val.split('@')[-1]
    local_part = anon.faker.user_name(field=field)
    return local_part + '@' + domain
def save(self, collection):
    """Save an asset collection to the service.

    :param collection: the AssetCollection to validate and persist.
    :raises TypeError: if *collection* is not an AssetCollection.
    """
    # Validate explicitly: `assert` is stripped when Python runs with -O,
    # which would silently skip this type check.
    if not isinstance(collection, predix.data.asset.AssetCollection):
        raise TypeError("Expected AssetCollection")
    collection.validate()
    self.put_collection(collection.uri, collection.__dict__)
def _handle_read_chunk(self):
    """Drain currently-available bytes from the socket into the buffer.

    Reads 4096-byte pieces until ``self.read_buffer`` would exceed
    ``MAX_BUFFER_SIZE``, the socket would block, or EOF.  Carriage
    returns in the newly read data are normalized to newlines before
    it is appended to ``self.read_buffer``.

    :returns: the newly read (normalized) bytes; may be ``b''``.
    """
    new_data = b''
    buffer_length = len(self.read_buffer)
    try:
        while buffer_length < self.MAX_BUFFER_SIZE:
            try:
                piece = self.recv(4096)
            except OSError as e:
                if e.errno == errno.EAGAIN:
                    # Non-blocking socket: no more data right now.
                    break
                elif e.errno == errno.EIO and new_data:
                    # EIO after partial data: keep what was read instead
                    # of failing — presumably the peer end (e.g. a pty)
                    # went away mid-read; TODO confirm against callers.
                    break
                else:
                    raise
            if not piece:
                # Zero-length read: peer closed the connection.
                break
            new_data += piece
            buffer_length += len(piece)
    finally:
        # Runs even when recv raised above: whatever was collected is
        # still normalized and folded into the shared buffer.
        new_data = new_data.replace(b'\r', b'\n')
        self.read_buffer += new_data
    return new_data
def computeAccuracyEnding(predictions, truths, iterations,
                          resets=None, randoms=None, num=None,
                          sequenceCounter=None):
  """Compute accuracy on the sequence ending.

  Only iterations immediately preceding a reset or a random element
  (i.e. sequence endings) contribute; when neither `resets` nor
  `randoms` is given, every iteration with a known truth contributes.

  :param predictions: per-iteration containers of predicted values.
  :param truths: per-iteration ground-truth values (None = unknown).
  :param iterations: per-iteration iteration numbers.
  :param resets: optional per-iteration reset flags.
  :param randoms: optional per-iteration random-element flags.
  :param num: if given, ignore iterations with index > num.
  :param sequenceCounter: per-iteration sequence ids (required for any
      iteration that contributes).
  :returns: (accuracy, numIteration, numSequences) parallel lists.
  """
  accuracy = []
  numIteration = []
  numSequences = []
  # `range` replaces the py2-only `xrange`; iteration behavior is the same.
  for i in range(len(predictions) - 1):
    if num is not None and i > num:
      continue
    if truths[i] is None:
      continue
    if resets is not None or randoms is not None:
      # Bug fix: guard each flag list separately.  The original
      # evaluated `resets[i+1] or randoms[i+1]`, which raised
      # TypeError whenever only one of the two lists was supplied.
      ended = bool(resets is not None and resets[i + 1]) or \
              bool(randoms is not None and randoms[i + 1])
      if not ended:
        continue
    # truths[i] is never None here (filtered above), so the original
    # `truths[i] is None or ...` disjunct was dead and is dropped.
    correct = truths[i] in predictions[i]
    accuracy.append(correct)
    numSequences.append(sequenceCounter[i])
    numIteration.append(iterations[i])
  return (accuracy, numIteration, numSequences)
def copy_job_order(job, job_order_object):
    """Create copy of job object for provenance.

    For each declared tool input, deep-copies the matching value from
    *job_order_object*, falling back to the input's "default" when the
    value is absent.  Objects without a ``tool`` attribute are returned
    unchanged.
    """
    if not hasattr(job, "tool"):
        # Not a tool-bearing job; nothing to customise.
        return job_order_object
    customised_job = {}
    debug_enabled = _logger.isEnabledFor(logging.DEBUG)
    for index, input_decl in enumerate(job.tool["inputs"]):
        with SourceLine(job.tool["inputs"], index, WorkflowException,
                        debug_enabled):
            input_id = shortname(input_decl["id"])
            if input_id in job_order_object:
                customised_job[input_id] = copy.deepcopy(
                    job_order_object[input_id])
            elif "default" in input_decl:
                customised_job[input_id] = copy.deepcopy(
                    input_decl["default"])
            # Inputs with neither a supplied value nor a default are
            # simply omitted (the original's explicit `pass`).
    return customised_job
def write_log(title, message=''):
    """Write formatted log message to stderr.

    The title is centred in a 60-column dash-padded rule, followed by
    a newline and the message (no trailing newline is appended).
    """
    header = title.center(40).center(60, '-')
    sys.stderr.write(header + '\n' + message)
def update_batches(self, X_batch, L, Min):
    """Updates the batches internally and pre-computes the
    hammer-function terms (r_x0, s_x0) for the new batch.

    When *X_batch* is None only the stored batch is cleared and no
    pre-computation happens.
    """
    self.X_batch = X_batch
    if X_batch is None:
        return
    self.r_x0, self.s_x0 = self._hammer_function_precompute(
        X_batch, L, Min, self.model)
def remove(self, method: Method):
    """Removes a `method` from the table by identity.

    Every entry that *is* the given object is dropped; equality
    (``==``) is deliberately not consulted.
    """
    self._table = list(
        entry for entry in self._table if entry is not method
    )
def get(self, **kwargs):
    """Find correct filterchain based on generic variables.

    For each chain, tries every ``get_<key>`` accessor named by the
    keyword arguments; the first chain whose accessor returns the
    requested value is returned, or None when nothing matches.
    """
    for candidate in self.chains:
        for attr, expected in kwargs.items():
            accessor = getattr(candidate, "get_" + attr, None)
            if accessor is not None and accessor() == expected:
                return candidate
    return None
def _is_non_public_numeric_address(host):
try:
a = ipaddress.ip_address(six.text_type(host))
except ValueError:
return False
if a.is_loopback or a.is_multicast or a.is_private or a.is_reserved \
or a.is_unspecified:
return True
return False | returns True if 'host' is not public |
def strip_empty_values(obj):
    """Recursively strips empty values.

    Dicts and list/tuple/set containers are cleaned depth-first, with
    members that reduce to None removed; a container that ends up
    empty collapses to None itself.  Scalars survive when truthy, or
    when they equal False or 0; all other falsy scalars become None.
    """
    if isinstance(obj, dict):
        cleaned = {
            key: stripped
            for key, stripped in (
                (key, strip_empty_values(val)) for key, val in obj.items()
            )
            if stripped is not None
        }
        return cleaned or None
    if isinstance(obj, (list, tuple, set)):
        kept = [
            stripped
            for stripped in (strip_empty_values(val) for val in obj)
            if stripped is not None
        ]
        # Rebuild with the original container type; empty -> None.
        return type(obj)(kept) or None
    if obj or obj is False or obj == 0:
        return obj
    return None
def execute(options):
    """execute the tool with given options.

    Reads the docopt-style *options* mapping, builds credentials and a
    SyncCommand, then runs it.  When none of the --images/--listings/
    --inapp flags is set, all three sub-tasks are enabled.
    """
    package_name = options['<package>']
    source_directory = options['<output_dir>']
    upstream = options['upload'] is True
    sub_tasks = {
        'images': options['--images'],
        'listings': options['--listings'],
        'inapp': options['--inapp'],
    }
    all_disabled = {'images': False, 'listings': False, 'inapp': False}
    if sub_tasks == all_disabled:
        # No explicit selection means "sync everything".
        sub_tasks = {'images': True, 'listings': True, 'inapp': True}
    credentials = create_credentials(
        credentials_file=options['--credentials'],
        service_email=options['--service-email'],
        service_key=options['--key'])
    command = SyncCommand(
        package_name, source_directory, upstream, credentials, **sub_tasks)
    command.execute()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.