| code (string, length 51–2.34k) | docstring (string, length 11–171) |
|---|---|
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
return op(self.get_values(), skipna=skipna, **kwds) | perform a reduction operation |
def update_base_image(path: str):
    """Pull the latest version of the base image named in a Dockerfile.

    Reads the file at *path*, takes the image name from the first
    ``FROM`` instruction, pulls it with ``docker pull`` and returns the
    image name.  Returns ``None`` when no ``FROM`` instruction is found.
    """
    import subprocess  # local import keeps this fix self-contained
    with open(path, 'r') as file_handle:
        contents = file_handle.read()
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # on modern Python.
    regex = re.compile(r'from\s+(?P<source>[^\s]+)', re.IGNORECASE)
    matches = regex.findall(contents)
    if not matches:
        return None
    match = matches[0]
    # Argument-list subprocess call instead of os.system(): the image
    # name comes from an untrusted file and must never reach a shell.
    subprocess.run(['docker', 'pull', match], check=False)
    return match
def clean(self, *args, **kwargs):
if not self.pk:
node = self.node
layer = Layer.objects.get(pk=node.layer_id)
if layer.participation_settings.rating_allowed is not True:
raise ValidationError("Rating not allowed for this layer")
if node.participation_settings.rating_allowed is not True:
raise ValidationError("Rating not allowed for this node") | Check if rating can be inserted for parent node or parent layer |
def add_pane(self, pane, vsplit=False):
assert isinstance(pane, Pane)
assert isinstance(vsplit, bool)
split_cls = VSplit if vsplit else HSplit
if self.active_pane is None:
self.root.append(pane)
else:
parent = self._get_parent(self.active_pane)
same_direction = isinstance(parent, split_cls)
index = parent.index(self.active_pane)
if same_direction:
parent.insert(index + 1, pane)
else:
new_split = split_cls([self.active_pane, pane])
parent[index] = new_split
parent.weights[new_split] = parent.weights[self.active_pane]
self.active_pane = pane
self.zoom = False | Add another pane to this Window. |
def apply_patch(self):
patch = self.patches.get(self.storage.__class__.__name__)
if patch:
patch.apply(self) | apply adjustment patch for storage |
def _get_route_info(self, request):
resolve_match = resolve(request.path)
app_name = resolve_match.app_name
namespace = resolve_match.namespace
url_name = resolve_match.url_name
view_name = resolve_match.view_name
return {
"app_name": app_name or None,
"namespace": namespace or None,
"url_name": url_name or None,
"view_name": view_name or None,
} | Return information about the current URL. |
def unregister(self, collector):
with self._lock:
for name in self._collector_to_names[collector]:
del self._names_to_collectors[name]
del self._collector_to_names[collector] | Remove a collector from the registry. |
def angSepVincenty(ra1, dec1, ra2, dec2):
ra1_rad = np.radians(ra1)
dec1_rad = np.radians(dec1)
ra2_rad = np.radians(ra2)
dec2_rad = np.radians(dec2)
sin_dec1, cos_dec1 = np.sin(dec1_rad), np.cos(dec1_rad)
sin_dec2, cos_dec2 = np.sin(dec2_rad), np.cos(dec2_rad)
delta_ra = ra2_rad - ra1_rad
cos_delta_ra, sin_delta_ra = np.cos(delta_ra), np.sin(delta_ra)
diffpos = np.arctan2(np.sqrt((cos_dec2 * sin_delta_ra) ** 2 +
(cos_dec1 * sin_dec2 -
sin_dec1 * cos_dec2 * cos_delta_ra) ** 2),
sin_dec1 * sin_dec2 + cos_dec1 * cos_dec2 * cos_delta_ra)
return np.degrees(diffpos) | Vincenty formula for distances on a sphere |
def commit_account_vesting(self, block_height):
log.debug("Commit all database state before vesting")
self.db.commit()
if block_height in self.vesting:
traceback.print_stack()
log.fatal("Tried to vest tokens twice at {}".format(block_height))
os.abort()
cur = self.db.cursor()
namedb_query_execute(cur, 'BEGIN', ())
res = namedb_accounts_vest(cur, block_height)
namedb_query_execute(cur, 'END', ())
self.vesting[block_height] = True
return True | vest any tokens at this block height |
def width_aware_slice(self, index):
if wcswidth(self.s) == -1:
raise ValueError('bad values for width aware slicing')
index = normalize_slice(self.width, index)
counter = 0
parts = []
for chunk in self.chunks:
if index.start < counter + chunk.width and index.stop > counter:
start = max(0, index.start - counter)
end = min(index.stop - counter, chunk.width)
if end - start == chunk.width:
parts.append(chunk)
else:
s_part = width_aware_slice(chunk.s, max(0, index.start - counter), index.stop - counter)
parts.append(Chunk(s_part, chunk.atts))
counter += chunk.width
if index.stop < counter:
break
return FmtStr(*parts) if parts else fmtstr('') | Slice based on the number of columns it would take to display the substring. |
def repr_node(self, dist, level=1):
    """Render only the subgraph rooted at *dist*, one line per node."""
    output = [self._repr_dist(dist)]
    for other, label in self.adjacency_list[dist]:
        # Fresh name: the original rebound 'dist', shadowing the parameter.
        line = self._repr_dist(other)
        if label is not None:
            line = '%s [%s]' % (line, label)
        output.append(' ' * level + str(line))
        subtree = self.repr_node(other, level + 1)
        output.extend(subtree.split('\n')[1:])
    return '\n'.join(output)
def check_geo(geo):
geo = copy.copy(geo)
def fix_item(item):
if isinstance(item, six.binary_type):
return item.decode()
return item
def fix_list(lst):
return [fix_item(i) for i in lst]
if isinstance(geo.reduce, six.binary_type):
geo.reduce = geo.reduce.decode()
for key in geo.kwargs.keys():
if geo.kwargs[key] is not None:
if isinstance(geo.kwargs[key], (list, np.ndarray)):
geo.kwargs[key] = fix_list(geo.kwargs[key])
elif isinstance(geo.kwargs[key], six.binary_type):
geo.kwargs[key] = fix_item(geo.kwargs[key])
return geo | Checks a geo and makes sure the text fields are not binary |
def format_jid_instance_ext(jid, job):
ret = format_job_instance(job)
ret.update({
'JID': jid,
'StartTime': jid_to_time(jid)})
return ret | Format the jid correctly with jid included |
def print_subprocess_output(subp):
if subp:
if subp.errorcode != 0:
print('<error errorcode="%s">' % str(subp.errorcode))
print(subp.stderr)
print("</error>")
print_tag('stdout', '\n%s\n' % subp.stdout)
else:
print_tag('success', '\n%s\n' % subp.stdout)
print_tag('warnings', '\n%s\n' % subp.stderr) | Prints the stdout and stderr output. |
def read_tree_from_json(srcpath):
with open(srcpath) as infile:
json_tree = json.load(infile)
if json_tree is None:
raise ValueError('Could not find ricecooker json tree')
return json_tree | Load ricecooker json tree data from json file at `srcpath`. |
def send_json_message(address, message, **kwargs):
    """Shortcut for sending *message* to *address* via hxdispatcher.

    All keyword arguments are merged into the payload; ``subject_id``
    defaults to *address* when absent or falsy.
    """
    data = {
        'message': message,
    }
    data.update(kwargs)
    # Apply the default AFTER merging kwargs: the previous order set the
    # default first and then let a falsy kwargs['subject_id'] (e.g. None)
    # silently overwrite it.
    if not data.get('subject_id'):
        data['subject_id'] = address
    hxdispatcher.send(address, data)
def __buttonEvent(event):
global boxRoot, __widgetTexts, __replyButtonText
__replyButtonText = __widgetTexts[event.widget]
boxRoot.quit() | Handle an event that is generated by a person clicking a button. |
def from_quad_tree(cls, quad_tree):
assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consists of the digits 0, 1, 2 and 3.'
zoom = len(str(quad_tree))
offset = int(math.pow(2, zoom)) - 1
google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0)
for bits in zip(*(reversed(divmod(digit, 2))
for digit in (int(c) for c in str(quad_tree))))]
return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom) | Creates a tile from a Microsoft QuadTree |
def any_has_focus(self):
f = (self.hasFocus() or self.parent.hasFocus() or
self.tips.hasFocus() or self.canvas.hasFocus())
return f | Returns if tour or any of its components has focus. |
def create_roteiro(self):
return Roteiro(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | Get an instance of roteiro services facade. |
def save_swagger_spec(self, filepath=None):
    """Save a copy of ``origin_spec`` to a local file in JSON format.

    When *filepath* is ``None`` or ``True``, the path is derived from
    ``self.file_spec`` formatted with ``self.server``.
    """
    if filepath is True or filepath is None:
        filepath = self.file_spec.format(server=self.server)
    # Context manager guarantees the handle is flushed and closed; the
    # old inline open() inside json.dump() leaked the file object.
    with open(filepath, 'w+') as handle:
        json.dump(self.origin_spec, handle, indent=3)
def _settings_changed(self, *args, **kwargs):
setting, value = kwargs['setting'], kwargs['value']
if setting == self.name:
self._reload(value) | Handle changes to core settings. |
def _set_slots_to_null(self, cls):
if hasattr(cls, "__slots__"):
for s in cls.__slots__:
self.__setattr__(s, Null)
for b in cls.__bases__:
self._set_slots_to_null(b) | WHY ARE SLOTS NOT ACCESIBLE UNTIL WE ASSIGN TO THEM? |
def _set_state(self, state):
if state != self._association_state:
self.__log_debug('- %s -> %s', self._association_state, state)
self._association_state = state
if state == self.State.ESTABLISHED:
self.__state = 'connected'
for channel in list(self._data_channels.values()):
if channel.negotiated and channel.readyState != 'open':
channel._setReadyState('open')
asyncio.ensure_future(self._data_channel_flush())
elif state == self.State.CLOSED:
self._t1_cancel()
self._t2_cancel()
self._t3_cancel()
self.__state = 'closed'
for stream_id in list(self._data_channels.keys()):
self._data_channel_closed(stream_id)
self.remove_all_listeners() | Transition the SCTP association to a new state. |
def cli(env, ipv6, test):
mgr = SoftLayer.NetworkManager(env.client)
version = 4
if ipv6:
version = 6
if not (test or env.skip_confirmations):
if not formatting.confirm("This action will incur charges on your "
"account. Continue?"):
raise exceptions.CLIAbort('Cancelling order.')
result = mgr.add_global_ip(version=version, test_order=test)
table = formatting.Table(['item', 'cost'])
table.align['Item'] = 'r'
table.align['cost'] = 'r'
total = 0.0
for price in result['orderDetails']['prices']:
total += float(price.get('recurringFee', 0.0))
rate = "%.2f" % float(price['recurringFee'])
table.add_row([price['item']['description'], rate])
table.add_row(['Total monthly cost', "%.2f" % total])
env.fout(table) | Creates a global IP. |
def timeout(self, value):
if value == TIMEOUT_SESSION:
self._config.timeout = None
self._backend_client.expires = None
else:
self._config.timeout = value
self._calculate_expires() | Sets a custom timeout value for this session |
def free(self):
if self._ptr is None:
return
Gauged.array_free(self.ptr)
FloatArray.ALLOCATIONS -= 1
self._ptr = None | Free the underlying C array |
def refreshTitles(self):
for index in range(self.count()):
widget = self.widget(index)
self.setTabText(index, widget.windowTitle()) | Refreshes the titles for each view within this tab panel. |
def _determine_nTrackIterations(self,nTrackIterations):
if not nTrackIterations is None:
self.nTrackIterations= nTrackIterations
return None
if numpy.fabs(self.misalignment(quantity=False)) < 1./180.*numpy.pi:
self.nTrackIterations= 0
elif numpy.fabs(self.misalignment(quantity=False)) >= 1./180.*numpy.pi \
and numpy.fabs(self.misalignment(quantity=False)) < 3./180.*numpy.pi:
self.nTrackIterations= 1
elif numpy.fabs(self.misalignment(quantity=False)) >= 3./180.*numpy.pi:
self.nTrackIterations= 2
return None | Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now |
def read_hdf5_dict(h5f, names=None, group=None, **kwargs):
if group:
h5g = h5f[group]
else:
h5g = h5f
if names is None:
names = [key for key in h5g if _is_timeseries_dataset(h5g[key])]
out = kwargs.pop('dict_type', TimeSeriesDict)()
kwargs.setdefault('array_type', out.EntryClass)
for name in names:
out[name] = read_hdf5_timeseries(h5g[name], **kwargs)
return out | Read a `TimeSeriesDict` from HDF5 |
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
    """With fixed precision s, maximize the mean m.

    Fixed-point iteration: each step renormalises the concentration
    vector while holding its sum (the precision s) constant.  Returns
    the converged vector, or raises when *maxiter* iterations pass
    without the update norm dropping below *tol*.
    """
    N, K = D.shape  # unpacking doubles as a 2-D shape check
    s = a0.sum()
    for _ in range(maxiter):  # range(): xrange is Python 2 only
        m = a0 / s
        a1 = _ipsi(logp + (m * (psi(a0) - logp)).sum())
        a1 = a1 / a1.sum() * s
        if norm(a1 - a0) < tol:
            return a1
        a0 = a1
    raise Exception('Failed to converge after {} iterations, s is {}'
                    .format(maxiter, s))
def _sha256_sign(self, method, url, headers, body):
d = ''
sign_headers = method.upper() + '|' + url + '|'
for key, value in sorted(headers.items()):
if key.startswith('X-Mcash-'):
sign_headers += d + key.upper() + '=' + value
d = '&'
rsa_signature = base64.b64encode(
self.signer.sign(SHA256.new(sign_headers)))
return 'RSA-SHA256 ' + rsa_signature | Sign the request with SHA256. |
def errReceived(self, data):
lines = data.splitlines()
for line in lines:
log_error("*** {name} stderr *** {line}",
name=self.name,
line=self.errFilter(line)) | Connected process wrote to stderr |
def add_user(self, recipient_email):
self.import_key(emailid=recipient_email)
emailid_list = self.list_user_emails()
self.y = self.decrypt()
emailid_list.append(recipient_email)
self.encrypt(emailid_list=emailid_list) | Add user to encryption |
def parse_at_element(
self,
element,
state
):
if self._attribute:
parsed_value = self._parse_attribute(element, self._attribute, state)
else:
parsed_value = self._parser_func(element.text, state)
return _hooks_apply_after_parse(self._hooks, state, parsed_value) | Parse the primitive value at the XML element. |
def cache_data(self):
    """Cache basic derived data; currently generates and bounds the slug.

    ``slug_name`` is derived from ``name`` when unset and truncated to
    the 255-character column limit.
    """
    if not self.slug_name:
        self.slug_name = slugify(self.name).strip()
    if len(self.slug_name) > 255:
        # Keep exactly 255 characters: the previous slice [0:254] kept
        # only 254, dropping one more character than the limit required.
        self.slug_name = self.slug_name[:255]
def _get_rsa_key(self):
url = 'https://steamcommunity.com/mobilelogin/getrsakey/'
values = {
'username': self._username,
'donotcache' : self._get_donotcachetime(),
}
req = self.post(url, data=values)
data = req.json()
if not data['success']:
raise SteamWebError('Failed to get RSA key', data)
mod = int(str(data['publickey_mod']), 16)
exp = int(str(data['publickey_exp']), 16)
rsa = RSA.construct((mod, exp))
self.rsa_cipher = PKCS1_v1_5.new(rsa)
self.rsa_timestamp = data['timestamp'] | get steam RSA key, build and return cipher |
def _start_index(self, start=None):
if start is None:
return 0
start_stage = translate_stage_name(start)
internal_names = [translate_stage_name(s.name) for s in self._stages]
try:
return internal_names.index(start_stage)
except ValueError:
raise UnknownPipelineStageError(start, self) | Seek to the first stage to run. |
def send(self, data):
if self.readyState != 'open':
raise InvalidStateError
if not isinstance(data, (str, bytes)):
raise ValueError('Cannot send unsupported data type: %s' % type(data))
self.transport._data_channel_send(self, data) | Send `data` across the data channel to the remote peer. |
def _require_bucket(self, bucket_name):
if not self.exists(bucket_name) and not self.claim_bucket(bucket_name):
raise OFSException("Invalid bucket: %s" % bucket_name)
return self._get_bucket(bucket_name) | Also try to create the bucket. |
def _linux_stp(br, state):
brctl = _tool_path('brctl')
return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state),
python_shell=False) | Internal, sets STP state |
def _get_filename(self):
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname | Return a unique file name. |
def raise_error(self, message, *params, **key_params):
    """Raise a ParseError with the XML node path prepended to *message*.

    The node stack is rendered root-first as a breadcrumb such as
    ``<a name="n">.<b id="i">`` so the error pinpoints the element
    being parsed.
    """
    def describe(node):
        # Render one node as <tag name="..." id="...">, attrs optional.
        text = '<{0}'.format(node.tag)
        if 'name' in node.lattrib:
            text += ' name="{0}"'.format(node.lattrib['name'])
        if 'id' in node.lattrib:
            text += ' id="{0}"'.format(node.lattrib['id'])
        return text + '>'

    # Work on a reversed copy.  The original reversed the stack in place
    # and put the restoring reverse() AFTER the raise statement, so it
    # never executed and the stack was left permanently mutated.
    stack = list(reversed(self.xml_node_stack))
    s = 'Parser error in '
    if len(stack) > 1:
        s += describe(stack[0])
    for node in stack[1:]:
        s += '.' + describe(node)
    s += ':\n ' + message
    raise ParseError(s, *params, **key_params)
def from_record(cls, record, crs):
if 'type' not in record:
raise TypeError("The data isn't a valid record.")
return cls(to_shape(record), crs) | Load vector from record. |
def parent_org_sdo_ids(self):
return [sdo.get_owner()._narrow(SDOPackage.SDO).get_sdo_id() \
for sdo in self._obj.get_organizations() if sdo] | The SDO IDs of the compositions this RTC belongs to. |
def add_child(self, child):
if not isinstance(child, DependencyNode):
raise TypeError('"child" must be a DependencyNode')
self._children.append(child) | Add a child node |
def sink_pubsub(client, to_delete):
topic = _sink_pubsub_setup(client)
to_delete.append(topic)
SINK_NAME = "robots-pubsub-%d" % (_millis(),)
FILTER = "logName:apache-access AND textPayload:robot"
UPDATED_FILTER = "textPayload:robot"
DESTINATION = "pubsub.googleapis.com/%s" % (topic.full_name,)
sink = client.sink(SINK_NAME, filter_=FILTER, destination=DESTINATION)
assert not sink.exists()
sink.create()
assert sink.exists()
to_delete.insert(0, sink)
for sink in client.list_sinks():
do_something_with(sink)
existing_sink = client.sink(SINK_NAME)
existing_sink.reload()
assert existing_sink.filter_ == FILTER
assert existing_sink.destination == DESTINATION
existing_sink.filter_ = UPDATED_FILTER
existing_sink.update()
existing_sink.reload()
assert existing_sink.filter_ == UPDATED_FILTER
sink.delete()
to_delete.pop(0) | Sink log entries to pubsub. |
def _get_internal_field_by_name(self, name):
field = self._all_fields.get(name, self._all_fields.get('%s.%s' % (self._full_name, name)))
if field is not None:
return field
for field_name in self._all_fields:
if field_name.endswith('.%s' % name):
return self._all_fields[field_name] | Gets the field by name, or None if not found. |
def parse_assessor_content(experiment_config):
if experiment_config.get('assessor'):
if experiment_config['assessor'].get('builtinAssessorName'):
experiment_config['assessor']['className'] = experiment_config['assessor']['builtinAssessorName']
else:
validate_customized_file(experiment_config, 'assessor') | Validate whether assessor in experiment_config is valid |
def read_kwfile(fname):
    """Parse a keyword file into a dict (syntax as of r452, commissioning).

    Each matching line looks like ``<path>:: <key>=<value>``; parsing
    stops at the first non-matching line.  The basename of the path is
    stored under the ``name`` key, each key is lower-cased.
    """
    d = {}
    # 'with' guarantees the handle closes even if parsing raises; the
    # old bare open()/close() leaked the handle on any exception.
    with open(fname) as f:
        for line in f:
            try:
                kvpair = re.findall(r"(.*):: (.*)=(.*)$", line)[0]
                d['name'] = os.path.basename(kvpair[0])
                key, val = kvpair[1:]
                d[key.lower()] = val
            except (ValueError, IndexError):
                # A non-matching line ends the keyword section.
                break
    return d
def id(self, opts_id):
old_id = self._id
self._id = opts_id
if old_id is not None:
cleanup_custom_options(old_id)
if opts_id is not None and opts_id != old_id:
if opts_id not in Store._weakrefs:
Store._weakrefs[opts_id] = []
ref = weakref.ref(self, partial(cleanup_custom_options, opts_id))
Store._weakrefs[opts_id].append(ref) | Handles tracking and cleanup of custom ids. |
def reformat_cmd(self, text):
text = text.replace('az', '')
if text and SELECT_SYMBOL['scope'] == text[0:2]:
text = text.replace(SELECT_SYMBOL['scope'], "")
if self.shell_ctx.default_command:
text = self.shell_ctx.default_command + ' ' + text
return text | reformat the text to be stripped of noise |
def _find_sock():
if socket.has_ipv6:
try:
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
except socket.gaierror:
pass
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM) | Create a UDP socket |
def list_organizations(self):
try:
res = self._send_request('GET', self._org_url, '', 'organizations')
if res and res.status_code in self._resp_ok:
return res.json()
except dexc.DfaClientRequestFailed:
LOG.error("Failed to send request to DCNM.") | Return list of organizations from DCNM. |
def create_table(self, names=None):
scan_shape = (1,)
for src in self._srcs:
scan_shape = max(scan_shape, src['dloglike_scan'].shape)
tab = create_source_table(scan_shape)
for s in self._srcs:
if names is not None and s.name not in names:
continue
s.add_to_table(tab)
return tab | Create an astropy Table object with the contents of the ROI model. |
def __isValidZIP(self, suffix):
if suffix and isinstance(suffix, string_types):
if suffix.endswith(".zip"):
return True
return False | Determine if the suffix is `.zip` format |
def height(cls, path):
    """Get the locally-stored block height.

    Returns ``(file size // BLOCK_HEADER_SIZE) - 1`` for the header file
    at *path*, or ``None`` when the file does not exist.
    """
    if not os.path.exists(path):
        return None
    sb = os.stat(path)
    # Floor division: on Python 3 '/' returns a float, which would make
    # the height a value like 99.0 and break integer comparisons.
    return (sb.st_size // BLOCK_HEADER_SIZE) - 1
def cli_resp_formatter(cls, resp):
if not resp.value:
return ''
if resp.status == STATUS_OK:
if type(resp.value) in (str, bool, int, float, six.text_type):
return str(resp.value)
ret = ''
val = resp.value
if not isinstance(val, list):
val = [val]
for line in val:
for k, v in line.items():
if isinstance(v, dict):
ret += cls.cli_resp_line_template.format(
k, '\n' + pprint.pformat(v)
)
else:
ret += cls.cli_resp_line_template.format(k, v)
return ret
else:
return "Error: {0}".format(resp.value) | Override this method to provide custom formatting of cli response. |
def _send_commit_request(self, retry_delay=None, attempt=None):
if self._commit_call and not self._commit_call.active():
self._commit_call = None
if self._commit_req is not None:
raise OperationInProgress(self._commit_req)
if retry_delay is None:
retry_delay = self.retry_init_delay
if attempt is None:
attempt = 1
commit_offset = self._last_processed_offset
commit_request = OffsetCommitRequest(
self.topic, self.partition, commit_offset,
TIMESTAMP_INVALID, self.commit_metadata)
log.debug("Committing off=%d grp=%s tpc=%s part=%s req=%r",
self._last_processed_offset, self.consumer_group,
self.topic, self.partition, commit_request)
self._commit_req = d = self.client.send_offset_commit_request(
self.consumer_group, [commit_request])
d.addBoth(self._clear_commit_req)
d.addCallbacks(
self._update_committed_offset, self._handle_commit_error,
callbackArgs=(commit_offset,),
errbackArgs=(retry_delay, attempt)) | Send a commit request with our last_processed_offset |
def compiler_preprocessor_verbose(compiler, extraflags):
    """Capture the compiler preprocessor stage in verbose mode.

    Runs ``<compiler> -E <extraflags> - -v`` with an empty stdin and
    returns the decoded stderr output as a list of lines.
    """
    cmd = [compiler, '-E'] + list(extraflags) + ['-', '-v']
    with open(os.devnull, 'r') as devnull:
        p = Popen(cmd, stdin=devnull, stdout=PIPE, stderr=PIPE)
        # communicate() drains both pipes concurrently; the previous
        # wait()-then-read pattern can deadlock once the child fills the
        # stdout pipe buffer.
        _, stderr = p.communicate()
    return stderr.decode('utf-8').splitlines()
def _cross_validation_for_one_voxel(clf, vid, num_folds, subject_data, labels):
skf = model_selection.StratifiedKFold(n_splits=num_folds,
shuffle=False)
scores = model_selection.cross_val_score(clf, subject_data,
y=labels,
cv=skf, n_jobs=1)
logger.debug(
'cross validation for voxel %d is done' %
vid
)
return (vid, scores.mean()) | Score classifier on data using cross validation. |
def _key(self, username, frozen=False):
if frozen:
return self.frozen + username
return self.prefix + username | Translate a username into a key for Redis. |
def enable_all_cpu(self):
for cpu in self.__get_ranges("offline"):
fpath = path.join("cpu%i"%cpu,"online")
self.__write_cpu_file(fpath, b"1") | Enable all offline cpus |
def remove_product_version_from_build_configuration(id=None, name=None, product_version_id=None):
data = remove_product_version_from_build_configuration_raw(id, name, product_version_id)
if data:
return utils.format_json_list(data) | Remove a ProductVersion from association with a BuildConfiguration |
def merge_dictionaries(a, b):
    """Merge two dictionaries; duplicate keys take the value from *b*."""
    merged = dict(a)
    merged.update(b)
    return merged
def clean_whitespace(statement):
    """Collapse runs of whitespace in the statement text to single spaces."""
    import re
    # Normalise newlines/carriage returns/tabs to spaces, trim the ends,
    # then squeeze repeated spaces down to one.
    text = statement.text
    for ws in ('\n', '\r', '\t'):
        text = text.replace(ws, ' ')
    statement.text = re.sub(' +', ' ', text.strip())
    return statement
def cmd_loadfile(args):
if len(args) != 1:
fileargs = " ".join(args)
else:
fileargs = args[0]
if not os.path.exists(fileargs):
print("Error loading file ", fileargs);
return
if os.name == 'nt':
fileargs = fileargs.replace("\\", "/")
loadfile(fileargs.strip('"')) | callback from menu to load a log file |
def comment_sync(self, comment):
self.host.update(key="comment", value=comment)
self.host.emit("commented", comment=comment) | Update comments to host and notify subscribers |
def branches(config, **kwargs):
with alembic_lock(
config.registry["sqlalchemy.engine"], config.alembic_config()
) as alembic_config:
alembic.command.branches(alembic_config, **kwargs) | Show current branch points. |
def center_of_mass(self):
weights = [s.species.weight for s in self]
center_of_mass = np.average(self.frac_coords,
weights=weights, axis=0)
return center_of_mass | Calculates the center of mass of the slab |
def _build_query_url(self, page=None, verbose=False):
    """Build the full URL (endpoint + query string) used to fetch the list.

    Filters, ``sort``, ``sort_by``, ``per_page`` and the optional *page*
    are encoded into the query string in that order.
    """
    query = []
    if len(self.filters) > 0:
        query.append(urlencode(self.filters))
    # Encode the simple scalar params in one pass instead of four
    # copy-pasted if-blocks.
    for param, value in ((u"sort", self.sort),
                         (u"sort_by", self.sort_by),
                         (u"per_page", self.per_page),
                         (u"page", page)):
        if value:
            query.append(u"%s=%s" % (param, value))
    query_string = u"?%s" % (u"&".join(query))
    url = u"%s%s" % (self.get_list_endpoint()['href'], query_string)
    url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, url)
    msg = "_build_query_url: url:%s" % url
    log.debug(msg)
    if verbose:
        # print() as a function: the original bare 'print msg' statement
        # is a SyntaxError on Python 3 and broke the whole module.
        print(msg)
    return url
def delete_local_operator(self, onnx_name):
if onnx_name not in self.onnx_operator_names or onnx_name not in self.operators:
raise RuntimeError('The operator to be removed not found')
self.onnx_operator_names.discard(onnx_name)
del self.operators[onnx_name] | Remove the operator whose onnx_name is the input onnx_name |
def _iter_walk(
self,
fs,
path,
namespaces=None,
):
if self.search == "breadth":
return self._walk_breadth(fs, path, namespaces=namespaces)
else:
return self._walk_depth(fs, path, namespaces=namespaces) | Get the walk generator. |
def agent_error(e: requests.HTTPError, fatal=True):
try:
data = e.response.json()
details = data['detail']
except JSONDecodeError:
details = e.response.text or str(e.response)
lines = ('[AGENT] {}'.format(line) for line in details.splitlines())
msg = '\n' + '\n'.join(lines)
if fatal:
fatal_error(msg)
else:
error(msg) | Prints an agent error and exits |
def provide_data(self):
return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.data] | The name and shape of data provided by this iterator |
def reduce_json(data):
    """Sum the int-coerced values of a JSON object.

    The previous reduce() version skipped the int() coercion when the
    object held exactly one value (returning it unconverted, e.g. "5")
    and raised TypeError on an empty object; sum() coerces every value
    uniformly and returns 0 for the empty case.
    """
    return sum(int(v) for v in data.values())
def clear_content(self, content):
content = _unicode(content)
return self.wrapper_match.sub("", content) | Clear the injected content from the content buffer, and return the results |
def load_cash_balances(self):
from gnucash_portfolio.accounts import AccountsAggregate, AccountAggregate
cfg = self.__get_config()
cash_root_name = cfg.get(ConfigKeys.cash_root)
gc_db = self.config.get(ConfigKeys.gnucash_book_path)
with open_book(gc_db, open_if_lock=True) as book:
svc = AccountsAggregate(book)
root_account = svc.get_by_fullname(cash_root_name)
acct_svc = AccountAggregate(book, root_account)
cash_balances = acct_svc.load_cash_balances_with_children(cash_root_name)
self.__store_cash_balances_per_currency(cash_balances) | Loads cash balances from GnuCash book and recalculates into the default currency |
def validate_available_choice(enum, to_value):
    """Validate that *to_value* is defined as a value in *enum*.

    ``None`` passes (treated as "unset").  Other values are coerced with
    int(); a failed coercion or a value missing from ``enum.choices()``
    raises InvalidStatusOperationError.
    """
    if to_value is None:
        return
    # isinstance(), not a type() identity comparison.
    if not isinstance(to_value, int):
        try:
            to_value = int(to_value)
        except ValueError:
            message = _(six.text_type("'{value}' cannot be converted to int"))
            raise InvalidStatusOperationError(message.format(value=to_value))
    # Membership test on the dict directly; materialising
    # list(dict(...).keys()) was a pointless extra copy.
    if to_value not in dict(enum.choices()):
        message = _(six.text_type('Select a valid choice. {value} is not one of the available choices.'))
        raise InvalidStatusOperationError(message.format(value=to_value))
def _get_user_info(self, access_token):
info_response = self._call('GET', self.info_url, params={'access_token': access_token})
user_info = info_response.get('info')
return user_info | Return Clef user info. |
def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides,
padding, dimension_numbers):
lhs_perm, rhs_perm, out_perm = self._conv_general_permutations(
dimension_numbers)
lhs_trans = onp.take(lhs_shape, lhs_perm)
rhs_trans = onp.take(rhs_shape, rhs_perm)
out_trans = self._conv_shape_tuple(
lhs_trans, rhs_trans, window_strides, padding)
return tuple(onp.take(out_trans, onp.argsort(out_perm))) | Generalized computation of conv shape. |
def status_message(self):
msg = None
if self.last_ddns_response in response_messages.keys():
return response_messages.get(self.last_ddns_response)
if 'good' in self.last_ddns_response:
ip = re.search(r'(\d{1,3}\.?){4}', self.last_ddns_response).group()
msg = "SUCCESS: DNS hostname IP (%s) successfully updated." % ip
elif 'nochg' in self.last_ddns_response:
ip = re.search(r'(\d{1,3}\.?){4}', self.last_ddns_response).group()
msg = "SUCCESS: IP address (%s) is up to date, nothing was changed. " \
"Additional 'nochg' updates may be considered abusive." % ip
else:
msg = "ERROR: Ooops! Something went wrong !!!"
return msg | Return friendly response from API based on response code. |
def format(self, value):
    """Convert a dictionary of processed values to a dict of raw values.

    *value* may be an Arguments instance (iterated directly) or a plain
    mapping, whose item pairs are taken via .items().
    """
    if not isinstance(value, Arguments):
        # .items(): dict.iteritems() was removed in Python 3.
        value = value.items()
    return dict((k, self.fields[k].format(v)) for k, v in value)
def close(self):
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None | Closes connection with the q service. |
def schema_from_context(context):
item_class = context.get('class')
return (
serializer_mapping[item_class] if item_class else BaseSchema,
context.get('many', False)
) | Determine which schema to use. |
def pretty_dict_string(d, indent=0):
    """Render nested dictionaries as an indented key=value listing."""
    rendered = ''
    for key in sorted(d):
        value = d[key]
        prefix = ' ' * indent + str(key)
        if isinstance(value, dict):
            rendered += prefix + '\n' + pretty_dict_string(value, indent + 1)
        else:
            rendered += prefix + '=' + str(value) + '\n'
    return rendered
def ping(self):
    """Return True if the server successfully echoed our ping token back."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    token = ''.join(random.choice(alphabet) for _ in range(32))
    response = self.doQuery('ping?data=' + token)
    if response.status_code != 200:
        return False
    return response.json()['data'] == token
def _disbatch_runner(self, chunk):
full_return = chunk.pop('full_return', False)
pub_data = self.saltclients['runner'](chunk)
tag = pub_data['tag'] + '/ret'
try:
event = yield self.application.event_listener.get_event(self, tag=tag)
ret = event if full_return else event['data']['return']
raise tornado.gen.Return(ret)
except TimeoutException:
raise tornado.gen.Return('Timeout waiting for runner to execute') | Disbatch runner client commands |
def update_db():
logger = get_logger(PROCESS_SCHEDULER)
managed_process_dao = ManagedProcessDao(logger)
managed_process_dao.clear()
for process_name, process_entry in context.process_context.items():
if not isinstance(process_entry, ManagedProcessEntry):
continue
managed_process_dao.update(process_entry)
logger.info('Updated DB with process entry {0} from the context.'.format(process_entry.key)) | writes to managed_process table records from the context.process_context |
def add_item(self, item):
item.parent = self
self.items.append(item) | Add a new script or phrase to the folder. |
def delete_downloads():
shutil.rmtree(vtki.EXAMPLES_PATH)
os.makedirs(vtki.EXAMPLES_PATH)
return True | Delete all downloaded examples to free space or update the files |
def increment(cls, v):
if not isinstance(v, ObjectNumber):
v = ObjectNumber.parse(v)
return v.rev(v.revision+1) | Increment the version number of an object number of object number string |
def parse_route(cls, template):
regex = ''
last_pos = 0
for match in cls.ROUTES_RE.finditer(template):
regex += re.escape(template[last_pos:match.start()])
var_name = match.group(1)
expr = match.group(2) or '[^/]+'
expr = '(?P<%s>%s)' % (var_name, expr)
regex += expr
last_pos = match.end()
regex += re.escape(template[last_pos:])
regex = '^%s$' % regex
return re.compile(regex) | Parse a route definition, and return the compiled regex that matches it. |
def write_config(ip, mac, single, double, long, touch):
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac) | Write the current configuration of a myStrom button. |
def _save_fastq_space(items):
to_cleanup = {}
for data in (utils.to_single_data(x) for x in items):
for fname in data.get("files", []):
if os.path.realpath(fname).startswith(dd.get_work_dir(data)):
to_cleanup[fname] = data["config"]
for fname, config in to_cleanup.items():
utils.save_diskspace(fname, "Cleanup prep files after alignment finished", config) | Potentially save fastq space prior to merging, since alignments done. |
def _construct_regex(cls, fmt):
return re.compile(fmt.format(**vars(cls)), flags=re.U) | Given a format string, construct the regex with class attributes. |
def rollsingle(self, func, window=20, name=None, fallback=False,
align='right', **kwargs):
rname = 'roll_{0}'.format(func)
if fallback:
rfunc = getattr(lib.fallback, rname)
else:
rfunc = getattr(lib, rname, None)
if not rfunc:
rfunc = getattr(lib.fallback, rname)
data = np.array([list(rfunc(serie, window)) for serie in self.series()])
name = name or self.makename(func, window=window)
dates = asarray(self.dates())
desc = settings.desc
if (align == 'right' and not desc) or desc:
dates = dates[window-1:]
else:
dates = dates[:-window+1]
return self.clone(dates, data.transpose(), name=name) | Efficient rolling window calculation for min, max type functions |
def device_initialize(self):
existing_device_initialize(self)
self.type = 'Other'
self.id = platform.node()
self.os_version = platform.version()
self.locale = locale.getdefaultlocale()[0] | The device initializer used to assign special properties to all device context objects |
def _exec_requested_job(self):
self._timer.stop()
self._job(*self._args, **self._kwargs) | Execute the requested job after the timer has timeout. |
def minimum(attrs, inputs, proto_obj):
if len(inputs) > 1:
mxnet_op = symbol.minimum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.minimum(mxnet_op, op_input)
else:
mxnet_op = symbol.minimum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs | Elementwise minimum of arrays. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.