code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def match_subselectors(self, el, selectors):
    """Return True only when *el* matches every selector in *selectors*."""
    # Deliberately evaluates every selector (no short-circuit), mirroring
    # the original control flow in case match_selectors has side effects.
    outcomes = [self.match_selectors(el, sel) for sel in selectors]
    return all(outcomes)  # | Match selectors. |
def connectionLost(self, reason=protocol.connectionDone):
    """Fail all pending requests and settle the stop deferred.

    If a stop was requested, the stop deferred is fired with this protocol
    on a clean close (ConnectionDone) or with the failure otherwise; with
    no stop pending, an unexpected loss is re-raised.
    """
    self.connected = 0
    # No outstanding request can complete any more: errback them all.
    for pending in self._queue.values():
        pending.errback(reason)
    self._queue.clear()
    if self._stopped:
        result = self if reason.check(error.ConnectionDone) else reason
        self._stopped.callback(result)
        self._stopped = None
    else:
        reason.raiseException() | Check whether termination was intended and invoke the deferred.
If the connection terminated unexpectedly, reraise the failure.
@type reason: L{twisted.python.failure.Failure} |
def angSepVincenty(ra1, dec1, ra2, dec2):
    """Angular separation in degrees between two sky points (Vincenty).

    All inputs are in degrees; numpy broadcasting is supported.
    """
    lam1, phi1, lam2, phi2 = (np.radians(v) for v in (ra1, dec1, ra2, dec2))
    dlam = lam2 - lam1
    # Numerator / denominator of the Vincenty arctan2 form.
    num = np.hypot(np.cos(phi2) * np.sin(dlam),
                   np.cos(phi1) * np.sin(phi2) -
                   np.sin(phi1) * np.cos(phi2) * np.cos(dlam))
    den = np.sin(phi1) * np.sin(phi2) + np.cos(phi1) * np.cos(phi2) * np.cos(dlam)
    return np.degrees(np.arctan2(num, den))  # | Vincenty formula for distances on a sphere |
def rosen_nesterov(self, x, rho=100):
    """Nesterov's smooth Chebyshev-Rosenbrock function at point *x*."""
    head = 0.25 * (x[0] - 1) ** 2
    chain = rho * sum((x[1:] - 2 * x[:-1] ** 2 + 1) ** 2)
    return head + chain  # | needs exponential number of steps in a non-increasing f-sequence.
x_0 = (-1,1,...,1)
See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function" |
def set_input(self, p_name, value):
    """Bind a Step input (given by its preferred name) to *value*.

    Raises:
        ValueError: if *p_name* does not map to a known input of this Step.
    """
    internal = self.python_names.get(p_name)
    if p_name is None or internal not in self.get_input_names():
        raise ValueError('Invalid input "{}"'.format(p_name))
    self.step_inputs[internal] = value  # | Set a Step's input variable to a certain value.
The value comes either from a workflow input or output of a previous
step.
Args:
name (str): the name of the Step input
value (str): the name of the output variable that provides the
value for this input.
Raises:
ValueError: The name provided is not a valid input name for this
Step. |
def do_file_show(client, args):
    """Stream every URI in ``args.uris`` to stdout; always returns True."""
    for src_uri in args.uris:
        client.download_file(src_uri, sys.stdout.buffer)
    return True | Output file contents to stdout |
def normalize_time(timestamp):
    """Convert a timezone-aware datetime to a naive UTC datetime.

    Naive inputs (no utcoffset) are returned unchanged.
    """
    delta = timestamp.utcoffset()
    if delta is not None:
        return timestamp.replace(tzinfo=None) - delta
    return timestamp  # | Normalize time in arbitrary timezone to UTC naive object. |
def find_network_by_label(self, label):
    """Return the single network whose label equals *label*.

    Raises NetworkNotFound when nothing matches and NetworkLabelNotUnique
    when several networks share the label. Note: fetches every network
    and filters client-side, which is inefficient.
    """
    matches = [net for net in self.list() if net.label == label]
    if len(matches) == 1:
        return matches[0]
    if not matches:
        raise exc.NetworkNotFound("No network with the label '%s' exists" %
                label)
    raise exc.NetworkLabelNotUnique("There were %s matches for the label "
            "'%s'." % (len(matches), label))
    # | This is inefficient; it gets all the networks and then filters on
the client side to find the matching name. |
def _decode_data(self, data, charset):
    """Decode raw request data with *charset*; raise BadRequest on failure."""
    try:
        return smart_unicode(data, charset)
    except UnicodeDecodeError:
        raise errors.BadRequest('wrong charset') | Decode string data.
:returns: unicode string |
def split_denovos(denovo_path, temp_dir):
    """Split de novo variants into one file per gene (genes with >1 missense).

    Args:
        denovo_path: tab-separated input file; first column is the gene name.
        temp_dir: directory receiving the per-gene ``tmp.N.txt`` files.

    Returns:
        Number of genes written.
    """
    with open(denovo_path, "r") as handle:
        lines = handle.readlines()
    header = lines.pop(0)
    counts = count_missense_per_gene(lines)
    # Only genes with more than one missense variant are split out.
    counts = dict((k, v) for k, v in counts.items() if v > 1)
    genes = set()
    output = None
    try:
        # Sorting groups lines by gene, so one output file is open at a time.
        for line in sorted(lines):
            gene = line.split("\t")[0]
            if gene not in genes and gene in counts:
                genes.add(gene)
                if output is not None:
                    output.close()  # fix: close the previous gene's file
                path = os.path.join(temp_dir, "tmp.{}.txt".format(len(genes)))
                output = open(path, "w")
                output.write(header)
            if gene in counts:
                output.write(line)
    finally:
        if output is not None:
            output.close()  # fix: the last file was previously leaked
    return len(genes)  # | split de novos from an input file into files, one for each gene |
def get_cf_distribution_class():
    """Return the troposphere CloudFront Distribution class.

    NOTE(review): for troposphere 2.4.0 this mutates the class-level
    ``props`` mapping (a shared, global side effect) before returning it.
    """
    if LooseVersion(troposphere.__version__) == LooseVersion('2.4.0'):
        cf_dist = cloudfront.Distribution
        cf_dist.props['DistributionConfig'] = (DistributionConfig, True)
        return cf_dist
    return cloudfront.Distribution | Return the correct troposphere CF distribution class. |
def reflection_matrix(point, normal):
    """Return a 4x4 homogeneous matrix that mirrors across the plane
    through `point` with normal `normal` (normal is unitized first)."""
    normal = unit_vector(normal[:3])
    M = np.identity(4)
    # Householder block: I - 2*n*n^T reflects directions across the plane.
    M[:3, :3] -= 2.0 * np.outer(normal, normal)
    # Translation column shifts the plane so it passes through `point`.
    M[:3, 3] = (2.0 * np.dot(point[:3], normal)) * normal
    return M | Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = np.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = np.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> np.allclose(2, np.trace(R))
True
>>> np.allclose(v0, np.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
>>> v2[:3] -= v1
>>> np.allclose(v2, np.dot(R, v3))
True |
def _get_grain(name, proxy=None):
    """Look up *name* in the cached grains.

    Returns None implicitly when the cache call failed or had no output.
    """
    grains = _retrieve_grains_cache(proxy=proxy)
    if grains.get('result', False) and grains.get('out', {}):
        return grains.get('out').get(name) | Retrieves the grain value from the cached dictionary. |
def tokens(self):
    """Lazily build and cache the TokenList for this account."""
    if self._tokens is None:
        self._tokens = TokenList(self._version, account_sid=self._solution['sid'], )
    return self._tokens | Access the tokens
:returns: twilio.rest.api.v2010.account.token.TokenList
:rtype: twilio.rest.api.v2010.account.token.TokenList |
def autoencoder_residual():
    """Hparams for the residual autoencoder, derived from the
    autoregressive autoencoder defaults."""
    hparams = autoencoder_autoregressive()
    hparams.optimizer = "Adafactor"
    hparams.clip_grad_norm = 1.0
    hparams.learning_rate_constant = 0.5
    hparams.learning_rate_warmup_steps = 500
    hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
    hparams.num_hidden_layers = 5
    hparams.hidden_size = 64
    hparams.max_hidden_size = 1024
    hparams.add_hparam("num_residual_layers", 2)
    hparams.add_hparam("residual_kernel_height", 3)
    hparams.add_hparam("residual_kernel_width", 3)
    hparams.add_hparam("residual_filter_multiplier", 2.0)
    hparams.add_hparam("residual_dropout", 0.2)
    # Stored as int because hparams do not round-trip booleans reliably.
    hparams.add_hparam("residual_use_separable_conv", int(True))
    hparams.add_hparam("kl_beta", 1.0)
    return hparams | Residual autoencoder model. |
def remote(ctx):
    """Print the github repo path for the configured agile repo."""
    with command():
        m = RepoManager(ctx.obj['agile'])
        click.echo(m.github_repo().repo_path) | Display repo github path |
def create_notification_plan(self, label=None, name=None,
        critical_state=None, ok_state=None, warning_state=None):
    """Create a notification plan run when a monitoring check alarms;
    delegates to the notification plan manager."""
    return self._notification_plan_manager.create(label=label, name=name,
            critical_state=critical_state, ok_state=ok_state,
            warning_state=warning_state) | Creates a notification plan to be executed when a monitoring check
triggers an alarm. |
def put(
    self, item: _T, timeout: Union[float, datetime.timedelta] = None
) -> "Future[None]":
    """Put *item* into the queue, returning a Future.

    On a full queue the item is parked with the putters and the Future
    resolves (or times out) later; otherwise it resolves immediately.
    """
    future = Future()
    try:
        self.put_nowait(item)
    except QueueFull:
        self._putters.append((item, future))
        _set_timeout(future, timeout)
    else:
        future.set_result(None)
    return future | Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. |
def vertex_normals(vertices, faces):
    """Per-vertex unit normals of a triangular mesh.

    Each face's unit normal is scattered to its three vertices and the
    result is re-normalized. NOTE(review): NumPy fancy-index ``+=`` does
    not accumulate duplicate indices within one column of ``faces`` —
    behavior preserved from the original.
    """
    def _unit_rows(arr):
        # In-place row-wise normalization.
        lengths = np.sqrt((arr * arr).sum(axis=1))
        arr /= lengths[:, np.newaxis]
    corners = vertices[faces]
    face_norms = np.cross(corners[:, 1] - corners[:, 0],
                          corners[:, 2] - corners[:, 0])
    _unit_rows(face_norms)
    vert_norms = np.zeros(vertices.shape, dtype=vertices.dtype)
    for k in (0, 1, 2):
        vert_norms[faces[:, k]] += face_norms
    _unit_rows(vert_norms)
    return vert_norms  # | Calculates the normals of a triangular mesh |
def export(self, outFormat="shp", outFolder=None):
    """Export and download this item in the requested format.

    NOTE(review): retries recursively while the response carries 'status'
    — there is no retry cap, so a stuck job recurses indefinitely. Also
    ``self.time.sleep`` presumes a ``time`` attribute on self — confirm.
    """
    export_formats = {'shp':".zip", 'kml':'.kml', 'geojson':".geojson",'csv': '.csv'}
    url = "%s/%s%s" % (self._url, self._itemId, export_formats[outFormat])
    results = self._get(url=url,
                        securityHandler=self._securityHandler,
                        out_folder=outFolder)
    if 'status' in results:
        self.time.sleep(7)
        results = self.export(outFormat=outFormat, outFolder=outFolder)
    return results | exports a dataset t
def color_prompt(self):
    """Build the psiTurk shell prompt string and store it on self.prompt."""
    # Map server status to (label, color) used in the prompt.
    status_markers = {
        'yes': ('on', 'green'),
        'no': ('off', 'red'),
        'maybe': ('unknown', 'yellow'),
        'blocked': ('blocked', 'red'),
    }
    server_string = ''
    status = self.server.is_server_running()
    if status in status_markers:
        text, color = status_markers[status]
        server_string = colorize(text, color)
    parts = [
        '[' + colorize('psiTurk', 'bold'),
        ' server:' + server_string,
        ' mode:' + colorize('cabin', 'bold'),
        ']$ ',
    ]
    self.prompt = ''.join(parts)  # | Construct psiTurk shell prompt |
def include_theme_files(self, fragment):
    """Render the configured theme's css resources into *fragment*.

    No-op when no theme is configured or it lacks a 'package' entry.
    """
    theme = self.get_theme()
    if not theme or 'package' not in theme:
        return
    theme_package, theme_files = theme.get('package', None), theme.get('locations', [])
    resource_loader = ResourceLoader(theme_package)
    for theme_file in theme_files:
        fragment.add_css(resource_loader.load_unicode(theme_file)) | Gets theme configuration and renders theme css into fragment |
def _setup_logging(args):
log_conf = getattr(args, 'logging', None)
if log_conf:
logging.config.fileConfig(log_conf)
else:
logging.basicConfig() | Set up logging for the script, based on the configuration
specified by the 'logging' attribute of the command line
arguments.
:param args: A Namespace object containing a 'logging' attribute
specifying the name of a logging configuration file
to use. If not present or not given, a basic logging
configuration will be set. |
def add_thesis(
    self,
    defense_date=None,
    degree_type=None,
    institution=None,
    date=None
):
    """Add thesis info to the record.

    :param defense_date: defense date (yyyy-mm-dd)
    :param degree_type: degree type; stored lower-cased
    :param institution: author's affiliation for the thesis
    :param date: publication date (yyyy-mm-dd)
    """
    # Kept from the original; the key is unconditionally overwritten below.
    self.record.setdefault('thesis_info', {})
    thesis_item = {}
    # Explicit checks replace the fragile locals()[key] lookup.
    if defense_date is not None:
        thesis_item['defense_date'] = defense_date
    if date is not None:
        thesis_item['date'] = date
    if degree_type is not None:
        thesis_item['degree_type'] = degree_type.lower()
    if institution is not None:
        thesis_item['institutions'] = [{'name': institution}]
    self.record['thesis_info'] = thesis_item  # | Add thesis info.
:param defense_date: defense date for the current thesis
:type defense_date: string. A formatted date is required (yyyy-mm-dd)
:param degree_type: degree type for the current thesis
:type degree_type: string
:param institution: author's affiliation for the current thesis
:type institution: string
:param date: publication date for the current thesis
:type date: string. A formatted date is required (yyyy-mm-dd) |
def partition(self):
    """Return the sole partition, or None when there are none.

    Raises:
        ValueError: when more than one partition exists.
    """
    if not self.partitions:
        return None
    if len(self.partitions) != 1:
        raise ValueError(
            "Can't use this method when there is more than one partition")
    (only,) = self.partitions.values()
    return only  # | Convenience function for accessing the first partition in the
partitions list, when there is only one. |
def resolution(self):
    """Meters per pixel at this tile's latitude and zoom level."""
    geo_coords = self.to_geographic()
    # Resolution shrinks with cos(latitude) and halves per zoom level.
    resolution = abs(_initialresolution * math.cos(geo_coords.lat * _pi_180) / (2**self.zoom))
    return resolution | Get the tile resolution at the current position.
The scale in WG84 depends on
* the zoom level (obviously)
* the latitude
* the tile size
References:
* http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Resolution_and_Scale
* http://gis.stackexchange.com/questions/7430/what-ratio-scales-do-google-maps-zoom-levels-correspond-to
Returns:
float: meters per pixel |
def remove_too_short(utterances: List[Utterance],
                     _winlen=25, winstep=10) -> List[Utterance]:
    """Drop utterances whose frame count is below their character count.

    Such utterances break CTC training (fewer frames than tokens); char
    tokenization is assumed to minimize false negatives.
    """
    def keep(utt: Utterance) -> bool:
        frames = duration(utt) / winstep
        return frames >= len(utt.text)
    return [utt for utt in utterances if keep(utt)]
    # | Removes utterances that will probably have issues with CTC because of
the number of frames being less than the number of tokens in the
transcription. Assuming char tokenization to minimize false negatives. |
def _netstat_route_sunos():
    """Return netstat routing information for SunOS.

    Returns a list of dicts, one per route, for both inet and inet6.
    """
    def _routes(family):
        # Parse `netstat -f <family> -rn` output (header stripped by tail).
        cmd = 'netstat -f {0} -rn | tail +5'.format(family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        entries = []
        for line in out.splitlines():
            comps = line.split()
            entries.append({
                'addr_family': family,
                'destination': comps[0],
                'gateway': comps[1],
                'netmask': '',
                'flags': comps[2],
                # Interface column is absent on some route types.
                'interface': comps[5] if len(comps) >= 6 else ''})
        return entries
    # De-duplicated: the original repeated this loop verbatim per family.
    return _routes('inet') + _routes('inet6')  # | Return netstat routing information for SunOS |
def _push_render(self):
    """Push the current bokeh plot state to the notebook and record the time."""
    bokeh.io.push_notebook(handle=self.handle)
    self.last_update = time.time() | Render the plot with bokeh.io and push to notebook. |
def print_raw_data(raw_data, start_index=0, limit=200, flavor='fei4b', index_offset=0, select=None, tdc_trig_dist=False, trigger_data_mode=0):
    """Print selected FEI4 raw-data records for debugging (Python 2).

    Each word is shown as decimal plus four binary bytes; `select` limits
    which record types are printed; `limit` caps the printed word count.
    Returns the number of words printed.
    """
    if not select:
        select = ['DH', 'TW', "AR", "VR", "SR", "DR", 'TDC', 'UNKNOWN FE WORD', 'UNKNOWN WORD']
    total_words = 0
    for index in range(start_index, raw_data.shape[0]):
        dw = FEI4Record(raw_data[index], chip_flavor=flavor, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
        if dw in select:
            print index + index_offset, '{0:12d} {1:08b} {2:08b} {3:08b} {4:08b}'.format(raw_data[index], (raw_data[index] & 0xFF000000) >> 24, (raw_data[index] & 0x00FF0000) >> 16, (raw_data[index] & 0x0000FF00) >> 8, (raw_data[index] & 0x000000FF) >> 0), dw
            total_words += 1
            if limit and total_words >= limit:
                break
    return total_words | Printing FEI4 raw data array for debugging. |
def _calculateGlyph(self, targetGlyphObject, instanceLocationObject, glyphMasters):
    """Interpolate one glyph at *instanceLocationObject* from its masters.

    Builds a Mutator from the (location, MathGlyph) master pairs, makes the
    instance, optionally rounds its geometry, and writes the result into
    *targetGlyphObject*.
    """
    sources = None
    items = []
    for item in glyphMasters:
        locationObject = item['location']
        fontObject = item['font']
        glyphName = item['glyphName']
        # Masters missing this glyph are silently skipped.
        if not glyphName in fontObject:
            continue
        glyphObject = MathGlyph(fontObject[glyphName])
        items.append((locationObject, glyphObject))
    bias, m = buildMutator(items, axes=self.axes)
    instanceObject = m.makeInstance(instanceLocationObject)
    if self.roundGeometry:
        try:
            instanceObject = instanceObject.round()
        except AttributeError:
            if self.verbose and self.logger:
                self.logger.info("MathGlyph object missing round() method.")
    try:
        instanceObject.extractGlyph(targetGlyphObject, onlyGeometry=True)
    except TypeError:
        # Fallback for older glyph objects lacking extractGlyph support.
        pPen = targetGlyphObject.getPointPen()
        targetGlyphObject.clear()
        instanceObject.drawPoints(pPen)
        targetGlyphObject.width = instanceObject.width | Build a Mutator object for this glyph.
* name: glyphName
* location: Location object
* glyphMasters: dict with font objects. |
def cos_distance(t1, t2, epsilon=1e-12, name=None):
    """1 - cosine similarity of t1 and t2.

    `epsilon` floors the product of squared norms so the rsqrt gradient
    stays finite near zero-length inputs.
    """
    with tf.name_scope(name, 'cos_distance', [t1, t2]) as scope:
        t1 = tf.convert_to_tensor(t1, name='t1')
        t2 = tf.convert_to_tensor(t2, name='t2')
        x_inv_norm = tf.rsqrt(tf.maximum(length_squared(t1) * length_squared(t2),
                                         epsilon))
        return tf.subtract(1.0, dot_product(t1, t2) * x_inv_norm, name=scope) | Cos distance between t1 and t2 and caps the gradient of the Square Root.
Args:
t1: A tensor
t2: A tensor that can be multiplied by t1.
epsilon: A lower bound value for the distance. The square root is used as
the normalizer.
name: Optional name for this op.
Returns:
The cos distance between t1 and t2. |
def get_DID_subdomain(did, db_path=None, zonefiles_dir=None, atlasdb_path=None, check_pending=False):
    """Resolve a DID to its subdomain record.

    Paths default to the values in the blockstack opts. Returns the
    subdomain record, or None when subdomains are disabled or the lookup
    fails; with check_pending the record's pending flag is populated.
    """
    opts = get_blockstack_opts()
    if not is_subdomains_enabled(opts):
        log.warn("Subdomain support is disabled")
        return None
    if db_path is None:
        db_path = opts['subdomaindb_path']
    if zonefiles_dir is None:
        zonefiles_dir = opts['zonefiles']
    if atlasdb_path is None:
        atlasdb_path = opts['atlasdb_path']
    db = SubdomainDB(db_path, zonefiles_dir)
    try:
        subrec = db.get_DID_subdomain(did)
    except Exception as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)
        log.warn("Failed to load subdomain for {}".format(did))
        return None
    if check_pending:
        subrec.pending = db.subdomain_check_pending(subrec, atlasdb_path)
    return subrec | Static method for resolving a DID to a subdomain
Return the subdomain record on success
Return None on error |
def get_json(session, url, params: dict = None):
    """GET *url* and return the decoded JSON body; raise on HTTP errors."""
    response = session.get(url, params=params)
    if response.status_code < 400:
        return response.json()
    raise parse_error(response)  # | Get JSON from a Forest endpoint. |
def _full_pipeline(self):
options = self._pipeline_options()
full_pipeline = [{'$changeStream': options}]
full_pipeline.extend(self._pipeline)
return full_pipeline | Return the full aggregation pipeline for this ChangeStream. |
def is_complete(self):
    """True when the call string tokenizes without a TokenError,
    i.e. its parentheses are balanced."""
    try:
        # Force full tokenization; TokenError signals unbalanced input.
        [t for t in self.tokens]
        ret = True
        logger.debug('CallString [{}] is complete'.format(self.strip()))
    except tokenize.TokenError:
        logger.debug('CallString [{}] is NOT complete'.format(self.strip()))
        ret = False
    return ret | Return True if this call string is complete, meaning it has a function
name and balanced parens |
def inject_environment_variables(self, d):
    """Recursively expand $VAR references in strings within *d*.

    Mutates dicts in place (and returns them); strings are expanded via
    os.path.expandvars; lists and nested dicts are processed recursively.
    """
    if not d:
        return d
    if isinstance(d, six.string_types):
        return os.path.expandvars(d)
    for k, v in d.items():
        if isinstance(v, six.string_types):
            d[k] = os.path.expandvars(v)
        elif isinstance(v, dict):
            d[k] = self.inject_environment_variables(v)
        elif isinstance(v, list):
            d[k] = [self.inject_environment_variables(e) for e in v]
    return d | Recursively injects environment variables into TOML values
def containers(self) -> list:
    """Collect all containers on the deck, descending into stackables.

    Appending to the list while iterating is intentional: children of
    stackable containers are themselves scanned as they are discovered.
    """
    all_containers: List = list()
    for slot in self:
        all_containers += slot.get_children_list()
    for container in all_containers:
        if getattr(container, 'stackable', False):
            all_containers += container.get_children_list()
    return all_containers | Returns all containers on a deck as a list
async def delete(self):
    """Delete this message through the bot API; returns the API result."""
    return await self.bot.delete_message(self.chat.id, self.message_id) | Delete this message
:return: bool |
def get_frequencies_with_eigenvectors(self, q):
    """Phonon frequencies and eigenvectors at q-point *q*.

    Returns (frequencies, eigenvectors); imaginary modes (negative
    eigenvalues) are reported as negative frequencies.

    Raises:
        RuntimeError: if the dynamical matrix has not been built.
    """
    self._set_dynamical_matrix()
    if self._dynamical_matrix is None:
        msg = ("Dynamical matrix has not yet built.")
        raise RuntimeError(msg)
    self._dynamical_matrix.set_dynamical_matrix(q)
    dm = self._dynamical_matrix.get_dynamical_matrix()
    eigvals, eigenvectors = np.linalg.eigh(dm)
    # sign(e)*sqrt(|e|) reproduces the original per-eigenvalue branch
    # (negative e -> -sqrt(-e)); also drops a duplicated `frequencies = []`.
    frequencies = np.sign(eigvals) * np.sqrt(np.abs(eigvals))
    return frequencies * self._factor, eigenvectors  # | Calculate phonon frequencies and eigenvectors at a given q-point
Parameters
----------
q: array_like
A q-vector.
shape=(3,)
Returns
-------
(frequencies, eigenvectors)
frequencies: ndarray
Phonon frequencies
shape=(bands, ), dtype='double', order='C'
eigenvectors: ndarray
Phonon eigenvectors
shape=(bands, bands), dtype='complex', order='C' |
def bulk_upsert(self, docs, namespace, timestamp):
    """Upsert documents one at a time; subclasses may override to batch."""
    for document in docs:
        self.upsert(document, namespace, timestamp)
    # | Upsert each document in a set of documents.
This method may be overridden to upsert many documents at once. |
def title(self):
    """UTF-8 encoded window title: "[cwd-basename] command...>>"."""
    cwd_name = os.path.split(os.path.abspath('.'))[-1]
    text = u'[{}] {}>>'.format(cwd_name, u' '.join(self.command))
    return text.encode('utf8')  # | Returns the UTF-8 encoded title |
def tohexstring(self):
    """Hex rendering of the bitmap, zero-padded to two chars per byte."""
    as_int = int(self.tostring(), 2)
    width = len(self.bitmap) * 2
    return "{0:0x}".format(as_int).zfill(width)  # | Returns a hexadecimal string |
def wrap_count(method):
    """Count the aspect wrappers stacked around *method*."""
    depth = 0
    current = method
    # Each wrapper stores the wrapped callable on __aspects_orig.
    while hasattr(current, '__aspects_orig'):
        depth += 1
        current = current.__aspects_orig
    return depth  # | Returns number of wraps around given method. |
def volume_list(self, search_opts=None):
    """List cinder block volumes, keyed by display name.

    Raises:
        SaltCloudSystemExit: when no cinder endpoint is configured.
    """
    if self.volume_conn is None:
        raise SaltCloudSystemExit('No cinder endpoint available')
    nt_ks = self.volume_conn
    volumes = nt_ks.volumes.list(search_opts=search_opts)
    response = {}
    for volume in volumes:
        response[volume.display_name] = {
            'name': volume.display_name,
            'size': volume.size,
            'id': volume.id,
            'description': volume.display_description,
            'attachments': volume.attachments,
            'status': volume.status
        }
    return response | List all block volumes |
def searchsorted(arr, N, x):
    """Binary search for *x* over the first N elements of sorted *arr*;
    returns the final left bound L.

    NOTE(review): on an exact match the loop stops and returns the current
    left bound, which is not necessarily the index of the match — confirm
    callers only use this with values where exact equality is rare.
    """
    L = 0
    R = N-1
    done = False
    m = (L+R)//2
    while not done:
        if arr[m] < x:
            L = m + 1
        elif arr[m] > x:
            R = m - 1
        elif arr[m] == x:
            done = True
        m = (L+R)//2
        if L>R:
            done = True
    return L | N is length of arr |
def import_name_or_class(name):
    """Import *name* when it is a dotted-path string; pass through otherwise.

    :param name: fully qualified dotted name (or a bare module name), or
        any non-string object, which is returned unchanged.
    :returns: the imported module/attribute, or *name* itself.
    """
    if not isinstance(name, str):
        return name
    # Fix: a dot-free name previously crashed the 2-way unpack of rsplit.
    module_name = name.rsplit('.', 1)[0] if '.' in name else name
    # __import__ returns the top-level package; walk the remaining
    # components as attributes to reach the target object.
    mod = __import__(module_name)
    for comp in name.split('.')[1:]:
        mod = getattr(mod, comp)
    return mod  # | Import an obect as either a fully qualified, dotted name, |
def _lock_fxn(direction, lock_mode, xact):
if direction == "unlock" or lock_mode == LockMode.wait:
try_mode = ""
else:
try_mode = "_try"
if direction == "lock" and xact:
xact_mode = "_xact"
else:
xact_mode = ""
return "pg{}_advisory{}_{}".format(try_mode, xact_mode, direction) | Builds a pg advisory lock function name based on various options.
:direction: one of "lock" or "unlock"
:lock_mode: a member of the LockMode enum
:xact: a boolean, if True the lock will be automatically released at the end
of the transaction and cannot be manually released. |
def _get_instance_repo(self, namespace):
self._validate_namespace(namespace)
if namespace not in self.instances:
self.instances[namespace] = []
return self.instances[namespace] | Returns the instance repository for the specified CIM namespace
within the mock repository. This is the original instance variable,
so any modifications will change the mock repository.
Validates that the namespace exists in the mock repository.
If the instance repository does not contain the namespace yet, it is
added.
Parameters:
namespace(:term:`string`): Namespace name. Must not be `None`.
Returns:
list of CIMInstance: Instance repository.
Raises:
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist. |
def hookScreenshot(self, numTypes):
    """Register this app as the screenshot handler (OpenVR ctypes call).

    Returns the call result and the supported-types out-parameter.
    """
    fn = self.function_table.hookScreenshot
    pSupportedTypes = EVRScreenshotType()
    result = fn(byref(pSupportedTypes), numTypes)
    return result, pSupportedTypes | Called by the running VR application to indicate that it
wishes to be in charge of screenshots. If the
application does not call this, the Compositor will only
support VRScreenshotType_Stereo screenshots that will be
captured without notification to the running app.
Once hooked your application will receive a
VREvent_RequestScreenshot event when the user presses the
buttons to take a screenshot. |
def set_consistent(self, consistent_config):
    """Mark this stream as the start of a consistent region and return
    the placeable stream."""
    # A job control plane is required for consistent-region support.
    self.topology._add_job_control_plane()
    self.oport.operator.consistent(consistent_config)
    return self._make_placeable() | Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11 |
def nltides_gw_phase_diff_isco(f_low, f0, amplitude, n, m1, m2):
    """GW phase shift between f_low and the Schwarzschild ISCO frequency
    due to non-linear tides; broadcasts over array inputs."""
    f0, amplitude, n, m1, m2, input_is_array = ensurearray(
        f0, amplitude, n, m1, m2)
    # Duplicate the scalar f_low across all array elements.
    f_low = numpy.zeros(m1.shape) + f_low
    phi_l = nltides_gw_phase_difference(
        f_low, f0, amplitude, n, m1, m2)
    f_isco = f_schwarzchild_isco(m1+m2)
    phi_i = nltides_gw_phase_difference(
        f_isco, f0, amplitude, n, m1, m2)
    return formatreturn(phi_i - phi_l, input_is_array) | Calculate the gravitational-wave phase shift bwtween
f_low and f_isco due to non-linear tides.
Parameters
----------
f_low: float
Frequency from which to compute phase. If the other
arguments are passed as numpy arrays then the value
of f_low is duplicated for all elements in the array
f0: float or numpy.array
Frequency that NL effects switch on
amplitude: float or numpy.array
Amplitude of effect
n: float or numpy.array
Growth dependence of effect
m1: float or numpy.array
Mass of component 1
m2: float or numpy.array
Mass of component 2
Returns
-------
delta_phi: float or numpy.array
Phase in radians |
def grad_log_q(self,z):
    """Stack the VI score gradients of every approximating distribution
    into one (total params, sims) array."""
    param_count = 0
    grad = np.zeros((np.sum(self.approx_param_no),self.sims))
    for core_param in range(len(self.q)):
        for approx_param in range(self.q[core_param].param_no):
            grad[param_count] = self.q[core_param].vi_score(z[core_param],approx_param)
            param_count += 1
    return grad | The gradients of the approximating distributions |
def update(self, params):
    """Merge *params* into deviceInfo, touching only keys already present.

    Fix: the original used a truthiness test (``dev_info.get(k)``), which
    skipped keys whose current value was falsy (0, '', False); membership
    matches the documented "only if it already exists" intent.
    """
    dev_info = self.json_state.get('deviceInfo')
    dev_info.update({k: params[k] for k in params if k in dev_info})  # | Update the dev_info data from a dictionary.
Only updates if it already exists in the device. |
def prepare(self, inputstring, strip=False, nl_at_eof_check=False, **kwargs):
    """Normalize *inputstring* line endings (and optionally strip) before
    processing; in strict mode, optionally require a trailing newline.

    Raises:
        CoconutStyleError: strict mode, nl_at_eof_check on, and no final newline.
    """
    if self.strict and nl_at_eof_check and inputstring and not inputstring.endswith("\n"):
        end_index = len(inputstring) - 1 if inputstring else 0
        raise self.make_err(CoconutStyleError, "missing new line at end of file", inputstring, end_index)
    original_lines = inputstring.splitlines()
    if self.keep_lines:
        self.original_lines = original_lines
    inputstring = "\n".join(original_lines)
    if strip:
        inputstring = inputstring.strip()
    return inputstring | Prepare a string for processing. |
def delete(self, *names):
    """Delete the namespaced keys from redis; returns the pipeline Future."""
    names = [self.redis_key(n) for n in names]
    with self.pipe as pipe:
        return pipe.delete(*names) | Remove the key from redis
:param names: tuple of strings - The keys to remove from redis.
:return: Future() |
def to_dict(self):
    """Serialize the amount attributes into a plain dict."""
    keys = ("total", "subtotal", "items", "extra_amount")
    return {key: getattr(self, key) for key in keys}
    # | Attribute values to dict |
def setup():
    """Route Mitogen loggers into the Ansible display framework.

    Propagation is disabled so Ansible's own C.DEFAULT_LOG_PATH handlers
    do not double-log; levels follow Ansible verbosity.
    """
    l_mitogen = logging.getLogger('mitogen')
    l_mitogen_io = logging.getLogger('mitogen.io')
    l_ansible_mitogen = logging.getLogger('ansible_mitogen')
    for logger in l_mitogen, l_mitogen_io, l_ansible_mitogen:
        logger.handlers = [Handler(display.vvv)]
        logger.propagate = False
    if display.verbosity > 2:
        l_ansible_mitogen.setLevel(logging.DEBUG)
        l_mitogen.setLevel(logging.DEBUG)
    else:
        l_mitogen.setLevel(logging.ERROR)
        l_ansible_mitogen.setLevel(logging.ERROR)
    if display.verbosity > 3:
        l_mitogen_io.setLevel(logging.DEBUG) | Install handlers for Mitogen loggers to redirect them into the Ansible
display framework. Ansible installs its own logging framework handlers when
C.DEFAULT_LOG_PATH is set, therefore disable propagation for our handlers. |
def _selectView( self ):
    """Mirror the tree widget's selection onto the gantt view and center
    on the current item (if any)."""
    scene = self.uiGanttVIEW.scene()
    # Block signals so programmatic selection does not echo back.
    scene.blockSignals(True)
    scene.clearSelection()
    for item in self.uiGanttTREE.selectedItems():
        item.viewItem().setSelected(True)
    scene.blockSignals(False)
    curr_item = self.uiGanttTREE.currentItem()
    # Fix: currentItem() returns None when the tree has no current item.
    if curr_item is None:
        return
    vitem = curr_item.viewItem()
    if vitem:
        self.uiGanttVIEW.centerOn(vitem)  # | Matches the view selection to the trees selection. |
def append(self, pipeline):
    """Append every stage of *pipeline* to this pipeline; return self."""
    self._pipe.extend(pipeline.pipe)
    return self  # | Append a pipeline to this pipeline.
:param pipeline: Pipeline to append.
:returns: This pipeline. |
def query(key, value=None, service=None, profile=None):
    """Fetch a value from the REST interface described by *profile*.

    NOTE(review): *key* must contain a '?' query-string part — a bare key
    makes ``comps[1]`` raise IndexError; confirm callers always pass one.
    """
    comps = key.split('?')
    key = comps[0]
    key_vars = {}
    for pair in comps[1].split('&'):
        pair_key, pair_val = pair.split('=')
        key_vars[pair_key] = pair_val
    renderer = __opts__.get('renderer', 'jinja|yaml')
    rend = salt.loader.render(__opts__, {})
    blacklist = __opts__.get('renderer_blacklist')
    whitelist = __opts__.get('renderer_whitelist')
    # The profile URL is itself a template, rendered with the query vars.
    url = compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=profile[key]['url'],
        **key_vars
    )
    extras = {}
    for item in profile[key]:
        if item not in ('backend', 'url'):
            extras[item] = profile[key][item]
    result = http.query(
        url,
        decode=True,
        **extras
    )
    return result['dict'] | Get a value from the REST interface |
async def trigger_all(self, *args, **kwargs):
    """Trigger every (non-manager) agent's act concurrently.

    Returns the list of act() return values; args/kwargs are forwarded
    to each agent.
    """
    tasks = []
    for a in self.get_agents(addr=False, include_manager=False):
        task = asyncio.ensure_future(self.trigger_act
                                     (*args, agent=a, **kwargs))
        tasks.append(task)
    rets = await asyncio.gather(*tasks)
    return rets | Trigger all agents in the environment to act asynchronously.
:returns: A list of agents' :meth:`act` return values.
Given arguments and keyword arguments are passed down to each agent's
:meth:`creamas.core.agent.CreativeAgent.act`.
.. note::
By design, the environment's manager agent, i.e. if the environment
has :attr:`manager`, is excluded from acting. |
def _expand_json(self, j):
decompressed_json = copy.copy(j)
decompressed_json.pop('blob', None)
compressed_data = base64.b64decode(j['blob'])
original_json = zlib.decompress(compressed_data).decode('utf-8')
decompressed_json['users'] = json.loads(original_json)
return decompressed_json | Decompress the BLOB portion of the usernotes.
Arguments:
j: the JSON returned from the wiki page (dict)
Returns a Dict with the 'blob' key removed and a 'users' key added |
def serv(args):
    """Serve a rueckenwind application on args.address:args.port.

    Enables tornado autoreload unless --no-debug; sets the terminal title
    when attached to a tty; optional extra config file via args.cfg.
    """
    if not args.no_debug:
        tornado.autoreload.start()
    extra = []
    if sys.stdout.isatty():
        # xterm escape sequence: set the terminal window title.
        sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:])))
    if args.cfg:
        extra.append(os.path.abspath(args.cfg))
    listen = (int(args.port), args.address)
    ioloop = tornado.ioloop.IOLoop.instance()
    setup_app(app=args.MODULE, extra_configs=extra,
              ioloop=ioloop, listen=listen)
    ioloop.start() | Serve a rueckenwind application |
def subdivide(self):
    """Split the surface into four sub-surfaces (lower-left, central,
    lower-right, upper-left), re-parameterized over the unit triangle."""
    # NOTE(review): stray `r` below is a no-op artifact left by docstring
    # extraction (originally the r""" prefix).
    r
    nodes_a, nodes_b, nodes_c, nodes_d = _surface_helpers.subdivide_nodes(
        self._nodes, self._degree
    )
    return (
        Surface(nodes_a, self._degree, _copy=False),
        Surface(nodes_b, self._degree, _copy=False),
        Surface(nodes_c, self._degree, _copy=False),
        Surface(nodes_d, self._degree, _copy=False),
    ) | r"""Split the surface into four sub-surfaces.
Does so by taking the unit triangle (i.e. the domain
of the surface) and splitting it into four sub-triangles
.. image:: ../../images/surface_subdivide1.png
:align: center
Then the surface is re-parameterized via the map to / from the
given sub-triangles and the unit triangle.
For example, when a degree two surface is subdivided:
.. image:: ../../images/surface_subdivide2.png
:align: center
.. doctest:: surface-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-1.0, 0.5, 2.0, 0.25, 2.0, 0.0],
... [ 0.0, 0.5, 0.0, 1.75, 3.0, 4.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> _, sub_surface_b, _, _ = surface.subdivide()
>>> sub_surface_b
<Surface (degree=2, dimension=2)>
>>> sub_surface_b.nodes
array([[ 1.5 , 0.6875, -0.125 , 1.1875, 0.4375, 0.5 ],
[ 2.5 , 2.3125, 1.875 , 1.3125, 1.3125, 0.25 ]])
.. testcleanup:: surface-subdivide
import make_images
make_images.surface_subdivide1()
make_images.surface_subdivide2(surface, sub_surface_b)
Returns:
Tuple[Surface, Surface, Surface, Surface]: The lower left, central,
lower right and upper left sub-surfaces (in that order). |
def ReadAllClientActionRequests(self, client_id):
    """Return copies of all action requests stored for *client_id*.

    Lease info (leased_until/leased_by) is filled from the lease table and
    the TTL is reduced by the number of times a request was leased.
    """
    res = []
    for key, orig_request in iteritems(self.client_action_requests):
        request_client_id, _, _ = key
        if request_client_id != client_id:
            continue
        # Copy so callers cannot mutate the stored request.
        request = orig_request.Copy()
        current_lease = self.client_action_request_leases.get(key)
        request.ttl = db.Database.CLIENT_MESSAGES_TTL
        if current_lease is not None:
            request.leased_until, request.leased_by, leased_count = current_lease
            request.ttl -= leased_count
        else:
            request.leased_until = None
            request.leased_by = None
        res.append(request)
    return res | Reads all client action requests available for a given client_id. |
def search_range(self, value):
    """Set ``_search_range`` (must be a multiple of 16, 0 allowed) and
    rebuild the block matcher."""
    if value % 16 != 0:
        raise InvalidSearchRangeError("Search range must be a multiple of "
                                      "16.")
    self._search_range = value
    self._replace_bm()  # | Set private ``_search_range`` and reset ``_block_matcher``. |
def find(self, target, relation):
    """Yield (deserialized) every object that *target* points to via
    *relation*."""
    query = 'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?'
    for i in self._execute(query, (relation, self.serialize(target))):
        yield self.deserialize(i[0]) | returns back all elements the target has a relation to |
def _water(cls, T, P):
    """Gibbs-energy properties of pure water at (T, P) via IAPWS95.

    Returns a dict of g and its T/P derivatives plus thermal conductivity;
    salinity derivatives (gs, gsp) are zero for pure water.
    """
    water = IAPWS95(P=P, T=T)
    prop = {}
    prop["g"] = water.h-T*water.s
    prop["gt"] = -water.s
    prop["gp"] = 1./water.rho
    prop["gtt"] = -water.cp/T
    prop["gtp"] = water.betas*water.cp/T
    prop["gpp"] = -1e6/(water.rho*water.w)**2-water.betas**2*1e3*water.cp/T
    prop["gs"] = 0
    prop["gsp"] = 0
    prop["thcond"] = water.k
    return prop | Get properties of pure water, Table4 pag 8 |
def Setup():
    """Install-time hook: create the k2/cbv data directory and fetch the
    K2 star catalog."""
    if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'cbv')):
        os.makedirs(os.path.join(EVEREST_DAT, 'k2', 'cbv'))
    GetK2Stars(clobber=False) | Called when the code is installed. Sets up directories and downloads
the K2 catalog. |
def associate_ipv6(self, id_equip, id_ipv6):
    """Associates an IPv6 to a equipament.

    :param id_equip: Identifier of the equipment. Integer value and greater than zero.
    :param id_ipv6: Identifier of the ip. Integer value and greater than zero.
    :return: Dictionary with the following structure:
        {'ip_equipamento': {'id': < id_ip_do_equipamento >}}
    :raise InvalidParameterError: Identifier of the equipment and/or IP is null or invalid.
    """
    # Validate both identifiers up front before building the URL.
    if not is_valid_int_param(id_equip):
        raise InvalidParameterError(
            u'The identifier of equipment is invalid or was not informed.')
    if not is_valid_int_param(id_ipv6):
        raise InvalidParameterError(
            u'The identifier of ip is invalid or was not informed.')
    url = 'ipv6/{0}/equipment/{1}/'.format(id_ipv6, id_equip)
    code, xml = self.submit(None, 'PUT', url)
    return self.response(code, xml)
:param id_equip: Identifier of the equipment. Integer value and greater than zero.
:param id_ipv6: Identifier of the ip. Integer value and greater than zero.
:return: Dictionary with the following structure:
{'ip_equipamento': {'id': < id_ip_do_equipamento >}}
:raise EquipamentoNaoExisteError: Equipment is not registered.
:raise IpNaoExisteError: IP not registered.
:raise IpError: IP is already associated with the equipment.
:raise InvalidParameterError: Identifier of the equipment and/or IP is null or invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
def run_license_checker(config_path):
    """Print a table of installed packages and flag any whose license
    is not on the configured whitelist.

    :param config_path: str
    """
    allowed_licenses = _get_whitelist_licenses(config_path)
    table = PrintTable(ROW_HEADERS)
    restricted = []
    for pkg in _get_packages():
        is_allowed = pkg.license in allowed_licenses
        table.add_row((pkg.name, pkg.version, pkg.license, str(is_allowed)))
        if not is_allowed:
            restricted.append(pkg)
    print(table)
    print('{} RESTRICTED LICENSES DETECTED'.format(len(restricted)))
warnings based off user defined restricted license values.
:param config_path: str
:return: |
def create_milestones(self, project_id, milestones):
    """Create multiple milestones in a single request.

    See the "create" function for a description of the individual
    fields in a milestone.
    """
    request = ET.Element('request')
    for milestone in milestones:
        request.append(self._create_milestone_elem(*milestone))
    return self._request('/projects/%u/milestones/create' % project_id, request)
return self._request(path, req) | With this function you can create multiple milestones in a single
request. See the "create" function for a description of the individual
fields in the milestone. |
def traverse_preorder(self, leaves=True, internal=True):
    """Perform a preorder traversal starting at this ``Node`` object.

    Args:
        ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
        ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
    """
    # A plain list used as a LIFO stack gives the same visit order as the
    # original deque (pop from the right, children pushed in list order).
    stack = [self]
    while stack:
        node = stack.pop()
        node_is_leaf = node.is_leaf()
        if (leaves and node_is_leaf) or (internal and not node_is_leaf):
            yield node
        stack.extend(node.children)
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` |
def get_histograms_in_list(filename: str, list_name: str = None) -> Dict[str, Any]:
    """Get histograms from the file and make them available in a dict.

    Lists are recursively explored, with all lists converted to dictionaries.

    Args:
        filename: Filename of the ROOT file containing the list.
        list_name: Name of the list to retrieve; if ``None``, all top-level
            keys in the file are read instead.
    Returns:
        Dict of hists keyed by name, with nested lists mirrored as dicts.
    Raises:
        ValueError: If the list could not be found in the given file.
    """
    hists: dict = {}
    with RootOpen(filename = filename, mode = "READ") as fIn:
        if list_name is None:
            hist_list = [obj.ReadObj() for obj in fIn.GetListOfKeys()]
        else:
            hist_list = fIn.Get(list_name)
        if not hist_list:
            # Dump the file contents so the error message below is useful.
            fIn.ls()
            fIn.Close()
            raise ValueError(f"Could not find list with name \"{list_name}\". Possible names are listed above.")
        for obj in hist_list:
            _retrieve_object(hists, obj)
    return hists
return hists | Get histograms from the file and make them available in a dict.
Lists are recursively explored, with all lists converted to dictionaries, such that the return
dictionaries which only contains hists and dictionaries of hists (ie there are no ROOT ``TCollection``
derived objects).
Args:
filename: Filename of the ROOT file containing the list.
list_name: Name of the list to retrieve.
Returns:
Contains hists with keys as their names. Lists are recursively added, mirroring
the structure under which the hists were stored.
Raises:
ValueError: If the list could not be found in the given file. |
def remove_existing_links(root_dir):
    """Delete any symlinks present at the root of a directory.

    Parameters
    ----------
    root_dir : `str`
        Directory that might contain symlinks.

    Notes
    -----
    This function is used to remove any symlinks created by `link_directories`.
    Running ``remove_existing_links`` at the beginning of a build ensures that
    builds are isolated: if a package is un-setup it won't re-appear in the
    documentation because its symlink still exists.
    """
    logger = logging.getLogger(__name__)
    for name in os.listdir(root_dir):
        full_name = os.path.join(root_dir, name)
        if os.path.islink(full_name):
            # Lazy %-style args: the message is only formatted when the
            # DEBUG level is actually enabled.
            logger.debug('Deleting existing symlink %s', full_name)
            os.remove(full_name)
Parameters
----------
root_dir : `str`
Directory that might contain symlinks.
Notes
-----
This function is used to remove any symlinks created by `link_directories`.
Running ``remove_existing_links`` at the beginning of a build ensures that
builds are isolated. For example, if a package is un-setup it won't
re-appear in the documentation because its symlink still exists. |
def shutdown(self):
    """Shutdown the resources used by the queue."""
    self.socket.shutdown()
    if not scoop:
        return
    if scoop.DEBUG:
        # Local import keeps _debug out of non-debug runs.
        from scoop import _debug
        _debug.writeWorkerDebug(
            scoop._control.debug_stats,
            scoop._control.QueueLength,
        )
def remove_media(files):
    """Delete each listed file from the media dir."""
    for name in files:
        target = os.path.join(settings.MEDIA_ROOT, name)
        os.remove(target)
def _check_times(self, min_times, max_times, step):
    """Make sure that the arguments are valid.

    :raises: KittyException if not valid
    """
    for arg in (min_times, max_times, step):
        kassert.is_int(arg)
    valid = (min_times >= 0) and (max_times > 0) and (max_times >= min_times) and (step > 0)
    if not valid:
        raise KittyException('one of the checks failed: min_times(%d)>=0, max_times(%d)>0, max_times>=min_times, step > 0' % (min_times, max_times))
:raises: KittyException if not valid |
def browse_dailydeviations(self):
    """Retrieves Daily Deviations."""
    results = self._req('/browse/dailydeviations')['results']
    deviations = []
    for entry in results:
        dev = Deviation()
        dev.from_dict(entry)
        deviations.append(dev)
    return deviations
def timeseries_to_matrix( image, mask=None ):
    """Convert a timeseries image into a matrix.

    ANTsR function: `timeseries2matrix`

    Arguments
    ---------
    image : image whose slices we convert to a matrix.
    mask : ANTsImage (optional)
        binary mask; voxels in the mask are placed in the matrix.

    Returns
    -------
    ndarray with shape (N_IMAGES, N_VOXELS)
    """
    slices = utils.ndimage_to_list( image )
    if mask is None:
        # Default mask: include every voxel (first slice times zero plus one).
        mask = slices[0] * 0 + 1
    return image_list_to_matrix( slices, mask )
ANTsR function: `timeseries2matrix`
Arguments
---------
image : image whose slices we convert to a matrix. E.g. a 3D image of size
x by y by z will convert to a z by x*y sized matrix
mask : ANTsImage (optional)
image containing binary mask. voxels in the mask are placed in the matrix
Returns
-------
ndarray
array with a row for each image
shape = (N_IMAGES, N_VOXELS)
Example
-------
>>> import ants
>>> img = ants.make_image( (10,10,10,5 ) )
>>> mat = ants.timeseries_to_matrix( img ) |
def get_steps_branch_len(self, length):
    """Get how many steps are needed for a given branch length.

    Returns:
        float: The age the tree must achieve to reach the given branch length.
    """
    ratio = length / self.length
    base = min(self.branches[0][0])
    return log(ratio, base)
Returns:
float: The age the tree must achieve to reach the given branch length. |
def remove_lock(self):
    """Remove acquired lock."""
    # Only delete the key if this instance actually holds the lock.
    if not self._lock_acquired:
        return
    self.connection.delete('%s_lock' % self.scheduler_key)
def getFailedJobIDs(self, extraLapse = TYPICAL_LAPSE):
    """Identify failed jobs in the scriptsRun table.

    Returns a pair ``(failedJobTimestamps, nodata)``: timestamps of runs
    considered failed, and names of scripts for which no "lastRun"
    timestamp exists (so the log cannot be indexed by time).
    """
    scriptsRun = self.scriptsRun
    failedJobTimestamps = []
    nodata = []
    # NOTE(review): Python 2 `iteritems` -- this module targets Python 2.
    for name, details in sorted(scriptsRun.iteritems()):
        if details["lastSuccess"] and expectedScripts.get(name):
            # The script has succeeded before and is an expected script:
            # check whether its last success is recent enough.
            if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
                # Last success is too old -- record the last run (or the
                # name, if no run timestamp exists) and skip status checks.
                if details["lastRun"]:
                    failedJobTimestamps.append(details["lastRun"])
                else:
                    nodata.append(name)
                continue
        else:
            # Never succeeded, or not an expected script: treated as failed.
            if details["lastRun"]:
                failedJobTimestamps.append(details["lastRun"])
            else:
                nodata.append(name)
            continue
        # Recent success, but the recorded status flags may still indicate
        # a failure or warning on the most recent run.
        if details["status"] & RETROSPECT_FAIL:
            failedJobTimestamps.append(details["lastRun"])
        elif details["status"] & RETROSPECT_WARNING:
            failedJobTimestamps.append(details["lastRun"])
    return failedJobTimestamps, nodata
If a time stamp for a job can be found, we return this. The time stamp can be used to index the log.
If no time stamp was found, return the name of the script instead. |
def remove_stream(self, ssrc):
    """Remove the stream with the given `ssrc` from the SRTP session.

    :param ssrc: :class:`int`
    """
    # htonl converts the SSRC to network byte order before handing it to
    # libsrtp; _srtp_assert presumably raises on a non-zero srtp error
    # code -- TODO confirm against the _srtp_assert definition.
    _srtp_assert(lib.srtp_remove_stream(self._srtp[0], htonl(ssrc)))
:param ssrc: :class:`int` |
def _ensure_array(self, key, value):
if key not in self._json_dict:
self._json_dict[key] = []
self._size += 2
self._ensure_field(key)
if len(self._json_dict[key]) > 0:
self._size += 2
if isinstance(value, str):
self._size += 2
self._size += len(str(value))
self._json_dict[key].append(value) | Ensure an array field |
def indent(lines, spaces=4):
    """Indent `lines` by `spaces` spaces.

    Parameters
    ----------
    lines : Union[str, List[str]]
        A string or list of strings to indent
    spaces : int
        The number of spaces to indent `lines`

    Returns
    -------
    indented_lines : str
    """
    if isinstance(lines, str):
        # Wrap a bare string so join() below doesn't iterate its characters.
        # (The original assigned the wrapped list to a dead variable and then
        # joined the raw string, producing one character per line.)
        lines = [lines]
    text = '\n'.join(lines)
    return textwrap.indent(text, ' ' * spaces)
return textwrap.indent(text, ' ' * spaces) | Indent `lines` by `spaces` spaces.
Parameters
----------
lines : Union[str, List[str]]
A string or list of strings to indent
spaces : int
The number of spaces to indent `lines`
Returns
-------
indented_lines : str |
def visit_Dict(self, node: ast.Dict) -> Dict[Any, Any]:
    """Visit keys and values and assemble a dictionary with the results."""
    result: Dict[Any, Any] = {}
    # Visit each key before its value, mirroring source order.
    for key_node, value_node in zip(node.keys, node.values):
        result[self.visit(node=key_node)] = self.visit(node=value_node)
    self.recomputed_values[node] = result
    return result
return recomputed_dict | Visit keys and values and assemble a dictionary with the results. |
def find_bucket(self, bucketing_id, parent_id, traffic_allocations):
    """Determine entity based on bucket value and traffic allocations.

    Args:
        bucketing_id: ID to be used for bucketing the user.
        parent_id: ID representing group or experiment.
        traffic_allocations: Traffic allocations representing traffic allotted
            to experiments or variations.

    Returns:
        Entity ID which may represent experiment or variation, or None if the
        bucket value falls outside every allocation range.
    """
    bucket_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)
    bucket_value = self._generate_bucket_value(bucket_key)
    self.config.logger.debug('Assigned bucket %s to user with bucketing ID "%s".' % (
        bucket_value,
        bucketing_id
    ))
    # Allocations are ordered ranges; the first endOfRange above the bucket
    # value wins.
    for allocation in traffic_allocations:
        if bucket_value < allocation.get('endOfRange'):
            return allocation.get('entityId')
    return None
return None | Determine entity based on bucket value and traffic allocations.
Args:
bucketing_id: ID to be used for bucketing the user.
parent_id: ID representing group or experiment.
traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
Returns:
Entity ID which may represent experiment or variation. |
def get_graph_metadata(self, graph):
    """Get the model metadata from a given onnx graph.

    Returns a dict with 'input_tensor_data' and 'output_tensor_data', each a
    list of (name, shape) tuples. Initializer tensors (weights) are excluded
    from the inputs.
    """
    param_names = {tensor.name for tensor in graph.initializer}
    input_data = [
        (graph_input.name,
         tuple(dim.dim_value for dim in graph_input.type.tensor_type.shape.dim))
        for graph_input in graph.input
        if graph_input.name not in param_names
    ]
    output_data = [
        (graph_out.name,
         tuple(dim.dim_value for dim in graph_out.type.tensor_type.shape.dim))
        for graph_out in graph.output
    ]
    return {'input_tensor_data': input_data,
            'output_tensor_data': output_data}
def encode(self, name, as_map_key=False):
    """Returns the name the first time and the key after that."""
    # Cache hit: hand back the previously assigned key.
    if name in self.key_to_value:
        return self.key_to_value[name]
    # First sighting: cache it if it qualifies, otherwise pass through.
    if is_cacheable(name, as_map_key):
        return self.encache(name)
    return name
def commiter_factory(config: dict) -> BaseCommitizen:
    """Return the correct commitizen existing in the registry.

    Exits with NO_COMMITIZEN_FOUND if config["name"] is not registered.
    """
    name: str = config["name"]
    try:
        # Only the registry lookup belongs in the try: the original wrapped
        # the constructor call too, so a KeyError raised while instantiating
        # the committer was misreported as "committer not found".
        committer_class = registry[name]
    except KeyError:
        msg_error = (
            "The commiter has not been found in the system.\n\n"
            f"Try running 'pip install {name}'\n"
        )
        out.error(msg_error)
        raise SystemExit(NO_COMMITIZEN_FOUND)
    return committer_class(config)
def mcscanq(args):
    """%prog mcscanq query.ids blocksfile

    Query multiple synteny blocks to get the closest alignment feature. Mostly
    used for 'highlighting' the lines in the synteny plot, drawn by
    graphics.karyotype and graphics.synteny.
    """
    p = OptionParser(mcscanq.__doc__)
    p.add_option("--color", help="Add color highlight, used in plotting")
    p.add_option("--invert", default=False, action="store_true",
                 help="Invert query and subject [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    qids, blocksfile = args
    b = BlockFile(blocksfile)
    # Use a context manager so the ids file is closed even on error
    # (the original leaked the file handle).
    with open(qids) as fp:
        for gene in fp:
            gene = gene.strip()
            for line in b.query_gene(gene, color=opts.color, invert=opts.invert):
                print(line)
print(line) | %prog mcscanq query.ids blocksfile
Query multiple synteny blocks to get the closest alignment feature. Mostly
used for 'highlighting' the lines in the synteny plot, drawn by
graphics.karyotype and graphics.synteny. |
def score(package_path):
    """Runs pylint on a package and returns a score.

    Lower score is better.

    :param package_path: path of the package to score
    :return: number of score
    """
    python_files = find_files(package_path, '*.py')
    total_counter = Counter()
    for python_file in python_files:
        output = run_pylint(python_file)
        total_counter += parse_pylint_output(output)
    score_value = 0
    # Weight each message category by how many times it occurred.
    # (The original iterated `enumerate(total_counter)`, multiplying each
    # weight by the iteration index instead of the actual count.)
    for stat, count in total_counter.items():
        score_value += SCORING_VALUES[stat] * count
    return score_value / 5
return score_value / 5 | Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score |
def deploy(remote, assets_to_s3):
    """To DEPLOY your application"""
    header("Deploying...")
    if assets_to_s3:
        for mod in get_deploy_assets2s3_list(CWD):
            _assets2s3(mod)
    remote_name = remote if remote else "ALL"
    print("Pushing application's content to remote: %s " % remote_name)
    hosts = get_deploy_hosts_list(CWD, remote if remote else None)
    git_push_to_master(cwd=CWD, hosts=hosts, name=remote_name)
    print("Done!")
def refresh_items(self):
    """Refresh the items of the pattern.

    Destroys the old items and creates and initializes the new items.
    Overridden to NOT insert the children into the parent; the Fragment
    adapter handles that.
    """
    new_items = []
    if self.condition:
        for nodes, key, f_locals in self.pattern_nodes:
            with new_scope(key, f_locals):
                for node in nodes:
                    created = node(None)
                    if isinstance(created, list):
                        new_items.extend(created)
                    else:
                        new_items.append(created)
    # Tear down the previous generation before swapping in the new one.
    for stale in self.items:
        if not stale.is_destroyed:
            stale.destroy()
    self.items = new_items
This method destroys the old items and creates and initializes
the new items.
It is overridden to NOT insert the children to the parent. The Fragment
adapter handles this. |
def main():
    """Run the LockingProtocol."""
    args = parse_input()
    # Force the fixed flags this entry point requires, regardless of what
    # the user passed on the command line.
    args.lock = True
    args.question = []
    args.all = False
    args.timeout = 0
    args.verbose = False
    args.interactive = False
    try:
        assign = assignment.load_assignment(args.config, args)
        msgs = messages.Messages()
        lock.protocol(args, assign).run(msgs)
    except (ex.LoadingException, ex.SerializeException) as e:
        # Configuration/serialization problems are fatal: log, report, exit 1.
        log.warning('Assignment could not instantiate', exc_info=True)
        print('Error: ' + str(e).strip())
        exit(1)
    except (KeyboardInterrupt, EOFError):
        log.info('Quitting...')
    else:
        # Only dump tests when the protocol ran to completion.
        assign.dump_tests()
def iter_languages(self):
    """Iterate over list of languages."""
    # The default locale always comes first.
    locale = self.babel.default_locale
    code = locale.language
    yield (code, locale.get_display_name(code))
    # Then any extra languages from the application config.
    for lang_code, title in current_app.config.get('I18N_LANGUAGES', []):
        yield (lang_code, title)
def to_result(self, iface_name, func_name, resp):
    """Takes a JSON-RPC response and checks for an "error" slot. If it exists,
    a RpcException is raised. If no "error" slot exists, the "result" slot is
    returned.

    If validate_response==True on the Client constructor, the result is
    validated against the expected return type for the function and a
    RpcException raised if it is invalid.

    :Parameters:
      iface_name
        Interface name that was called
      func_name
        Function that was called on the interface
      resp
        Dict formatted as a JSON-RPC response
    """
    # `in` / `.get` replace the Python-2-only dict.has_key() calls.
    if "error" in resp:
        e = resp["error"]
        data = e.get("data")
        raise RpcException(e["code"], e["message"], data)
    result = resp["result"]
    if self.validate_resp:
        self.contract.validate_response(iface_name, func_name, result)
    return result
return result | Takes a JSON-RPC response and checks for an "error" slot. If it exists,
a RpcException is raised. If no "error" slot exists, the "result" slot is
returned.
If validate_response==True on the Client constructor, the result is validated
against the expected return type for the function and a RpcException raised if it is
invalid.
:Parameters:
iface_name
Interface name that was called
func_name
Function that was called on the interface
resp
Dict formatted as a JSON-RPC response |
def _polarDecomposeInterpolationTransformation(matrix1, matrix2, value):
    """Interpolate using the MathTransform method."""
    start = MathTransform(matrix1)
    end = MathTransform(matrix2)
    interpolated = start.interpolate(end, value)
    return tuple(interpolated)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.