| code (string, length 75–104k) | docstring (string, length 1–46.9k) |
|---|---|
def editproject(self, project_id, **kwargs):
"""
Edit an existing project.
:param name: new project name
:param path: custom repository name for new project. By default generated based on name
:param default_branch: the default branch
:param description: short project description
:param issues_enabled:
:param merge_requests_enabled:
:param wiki_enabled:
:param snippets_enabled:
:param public: if true same as setting visibility_level = 20
:param visibility_level:
:return: True if the edit succeeded, False otherwise
"""
data = {"id": project_id}
if kwargs:
data.update(kwargs)
request = requests.put(
'{0}/{1}'.format(self.projects_url, project_id), headers=self.headers,
data=data, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return True
elif request.status_code == 400:
if "Your param's are invalid" in request.text:
print(request.text)
return False
else:
return False
|
Edit an existing project.
:param name: new project name
:param path: custom repository name for new project. By default generated based on name
:param default_branch: the default branch
:param description: short project description
:param issues_enabled:
:param merge_requests_enabled:
:param wiki_enabled:
:param snippets_enabled:
:param public: if true same as setting visibility_level = 20
:param visibility_level:
:return: True if the edit succeeded, False otherwise
|
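A minimal usage sketch for editproject, assuming a hypothetical client instance gl that exposes this method with valid credentials (the class name and constructor arguments are illustrative, not from this snippet):

# Hypothetical client object.
gl = Gitlab('https://gitlab.example.com', token='...')
# Rename project 42 and make it public; returns True on HTTP 200, False otherwise.
ok = gl.editproject(42, name='renamed-project', visibility_level=20)
if not ok:
    print('edit failed')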
def stackexchange_request(self, path, callback, access_token=None,
post_args=None, **kwargs):
"""Make a request to the StackExchange API, passing in the path, a
callback, the access token, optional post arguments and keyword
arguments to be added as values in the request body or URI
"""
url = self._API_URL + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(kwargs)
if all_args:
url += "?" + auth.urllib_parse.urlencode(all_args)
callback = self.async_callback(self._on_stackexchange_request, callback)
http = self._get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST",
body=auth.urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
|
Make a request to the StackExchange API, passing in the path, a
callback, the access token, optional post arguments and keyword
arguments to be added as values in the request body or URI
|
def _compute_term2(self, C, mag, rrup):
"""
This computes the term f2 in equation 32, page 1021
"""
c78_factor = (C['c7'] * np.exp(C['c8'] * mag)) ** 2
R = np.sqrt(rrup ** 2 + c78_factor)
return C['c4'] * np.log(R) + (C['c5'] + C['c6'] * mag) * rrup
|
This computes the term f2 in equation 32, page 1021
|
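A worked sketch of the same formula with made-up coefficients (illustrative only; real values come from the GMPE coefficient table):

import numpy as np

# Illustrative coefficients, not from any published table.
C = {'c4': -1.0, 'c5': -0.005, 'c6': 0.0003, 'c7': 0.3, 'c8': 0.5}
mag, rrup = 6.0, np.array([10.0, 50.0])
c78_factor = (C['c7'] * np.exp(C['c8'] * mag)) ** 2  # magnitude-dependent saturation term
R = np.sqrt(rrup ** 2 + c78_factor)                  # effective distance
f2 = C['c4'] * np.log(R) + (C['c5'] + C['c6'] * mag) * rrup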
def getSrcBlockParents(self, url, block):
"""
List block at src DBS
"""
#blockname = block.replace("#", urllib.quote_plus('#'))
#resturl = "%s/blockparents?block_name=%s" % (url, blockname)
params={'block_name':block}
return cjson.decode(self.callDBSService(url, 'blockparents', params, {}))
|
List the parents of a block at the source DBS.
|
def _converged(self):
"""Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
"""
diff = self.local_prior - self.local_posterior_
max_diff = np.max(np.fabs(diff))
if self.verbose:
_, mse = self._mse_converged()
diff_ratio = np.sum(diff ** 2) / np.sum(self.local_posterior_ ** 2)
logger.info(
'tfa prior posterior max diff %f mse %f diff_ratio %f' %
((max_diff, mse, diff_ratio)))
if max_diff > self.threshold:
return False, max_diff
else:
return True, max_diff
|
Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
|
def killall(self, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
"""
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid))
proc.terminate()
|
Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
|
def total_length(self):
"""Returns the total length of the captions."""
if not self._captions:
return 0
return int(self._captions[-1].end_in_seconds) - int(self._captions[0].start_in_seconds)
|
Returns the total length of the captions.
|
def _render_condition(field, field_type, comparators):
"""Render a single query condition.
Parameters
----------
field : str
The field the condition applies to
field_type : str
The data type of the field.
comparators : array_like
An iterable of logic operators to use.
Returns
-------
str
a condition string.
"""
field_type = field_type.upper()
negated_conditions, normal_conditions = [], []
for comparator in comparators:
condition = comparator.get("condition").upper()
negated = "NOT " if comparator.get("negate") else ""
value = comparator.get("value")
if condition == "IN":
if isinstance(value, (list, tuple, set)):
value = ', '.join(
sorted([_render_condition_value(v, field_type)
for v in value])
)
else:
value = _render_condition_value(value, field_type)
value = "(" + value + ")"
elif condition == "IS NULL" or condition == "IS NOT NULL":
return field + " " + condition
elif condition == "BETWEEN":
if isinstance(value, (tuple, list, set)) and len(value) == 2:
value = ' AND '.join(
sorted([_render_condition_value(v, field_type)
for v in value])
)
elif isinstance(value, (tuple, list, set)) and len(value) != 2:
logger.warning('Invalid BETWEEN value (expected exactly 2 items): %s' % (value,))
else:
value = _render_condition_value(value, field_type)
rendered_sub_condition = "%s%s %s %s" % (
negated, field, condition, value)
if comparator.get("negate"):
negated_conditions.append(rendered_sub_condition)
else:
normal_conditions.append(rendered_sub_condition)
rendered_normal = " AND ".join(normal_conditions)
rendered_negated = " AND ".join(negated_conditions)
if rendered_normal and rendered_negated:
return "((%s) AND (%s))" % (rendered_normal, rendered_negated)
return "(%s)" % (rendered_normal or rendered_negated)
|
Render a single query condition.
Parameters
----------
field : str
The field the condition applies to
field_type : str
The data type of the field.
comparators : array_like
An iterable of logic operators to use.
Returns
-------
str
a condition string.
|
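A sketch of how a comparator list renders, with a stub standing in for the _render_condition_value helper (its real quoting rules are not shown in this snippet):

def _render_condition_value(value, field_type):
    # Stub for illustration: quote strings, stringify everything else.
    return "'{}'".format(value) if field_type == 'STRING' else str(value)

comparators = [
    {'condition': 'in', 'value': ['a', 'b']},
    {'condition': '>', 'value': 'c', 'negate': True},
]
# _render_condition('field', 'string', comparators) would then yield
# "((field IN ('a', 'b')) AND (NOT field > 'c'))"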
def _identify_dict(core):
"""Specification for a dictionary."""
if not core:
return {}, 1, (), int
core = core.copy()
key = sorted(core.keys(), key=chaospy.poly.base.sort_key)[0]
shape = numpy.array(core[key]).shape
dtype = numpy.array(core[key]).dtype
dim = len(key)
return core, dim, shape, dtype
|
Specification for a dictionary.
|
def rotate_about(self, p, theta):
"""
Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
"""
result = self.clone()
result.translate(-p.x, -p.y)
result.rotate(theta)
result.translate(p.x, p.y)
return result
|
Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
|
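As a quick geometric check, rotating (2, 1) by 90 degrees about (1, 1) should land on (1, 2); a plain-tuple sketch of the same translate-rotate-translate sequence:

import math

def rotate_about_xy(x, y, px, py, theta_deg):
    # Translate to the pivot, rotate counter-clockwise, translate back.
    t = math.radians(theta_deg)
    dx, dy = x - px, y - py
    return (px + dx * math.cos(t) - dy * math.sin(t),
            py + dx * math.sin(t) + dy * math.cos(t))

assert [round(v, 9) for v in rotate_about_xy(2, 1, 1, 1, 90)] == [1, 2]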
def delete(self, filename):
"""Delete a file from the repository.
This method will not delete a script from a migrated JSS.
Please remove migrated scripts with jss.Script.delete.
Args:
filename: String filename only (i.e. no path) of file to
delete. Will handle deleting scripts vs. packages
automatically.
"""
folder = "Packages" if is_package(filename) else "Scripts"
path = os.path.join(self.connection["mount_point"], folder, filename)
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
|
Delete a file from the repository.
This method will not delete a script from a migrated JSS.
Please remove migrated scripts with jss.Script.delete.
Args:
filename: String filename only (i.e. no path) of file to
delete. Will handle deleting scripts vs. packages
automatically.
|
def set_granularity(self, level):
"""
Set the values for
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_WINDOW_LENGTH`
and
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_WINDOW_SHIFT`
matching the given granularity level.
Currently supported levels:
* ``1`` (paragraph)
* ``2`` (sentence)
* ``3`` (word)
:param int level: the desired granularity level
"""
if level in self.MFCC_GRANULARITY_MAP.keys():
margin_key, mask_key, length_key, shift_key = self.MFCC_GRANULARITY_MAP[level]
self[self.DTW_MARGIN] = self[margin_key]
self[self.MFCC_MASK_NONSPEECH] = self[mask_key]
self[self.MFCC_WINDOW_LENGTH] = self[length_key]
self[self.MFCC_WINDOW_SHIFT] = self[shift_key]
|
Set the values for
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_WINDOW_LENGTH`
and
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_WINDOW_SHIFT`
matching the given granularity level.
Currently supported levels:
* ``1`` (paragraph)
* ``2`` (sentence)
* ``3`` (word)
:param int level: the desired granularity level
|
def skip_incremental(self, *criteria):
"""Perform an incremental check on a set of criteria.
This can be used to execute a part of a crawler only once per an
interval (which is specified by the ``expire`` setting). If the
operation has already been performed (and should thus be skipped),
this will return ``True``. If the operation needs to be executed,
the returned value will be ``False``.
"""
if not self.incremental:
return False
# this is pure convenience, and will probably backfire at some point.
key = make_key(*criteria)
if key is None:
return False
if self.check_tag(key):
return True
self.set_tag(key, None)
return False
|
Perform an incremental check on a set of criteria.
This can be used to execute a part of a crawler only once per an
interval (which is specified by the ``expire`` setting). If the
operation has already been performed (and should thus be skipped),
this will return ``True``. If the operation needs to be executed,
the returned value will be ``False``.
|
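A sketch of the intended crawl-loop usage; the context object, fetch helper, and emit sink are hypothetical stand-ins:

# Fetch each URL at most once per 'expire' interval when running incrementally.
for url in urls:
    if context.skip_incremental(url):
        continue  # already handled within the current interval
    record = fetch(url)   # hypothetical fetch helper
    context.emit(record)  # hypothetical downstream handler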
def handleError(self, error_code, message):
'''Record the error and set the controller failure state.'''
self._fail = True
self.reason = error_code
self._error = message
|
Record the error and set the controller failure state.
|
def registry_storage(cls):
""" Get registry storage
:return: WTaskRegistryBase
"""
if cls.__registry_storage__ is None:
raise ValueError('__registry_storage__ must be defined')
if isinstance(cls.__registry_storage__, WTaskRegistryBase) is False:
raise TypeError("Property '__registry_storage__' is invalid (must derived from WTaskRegistryBase)")
return cls.__registry_storage__
|
Get registry storage
:return: WTaskRegistryBase
|
def delete(self):
"""Delete this object from the One Codex server."""
check_bind(self)
if self.id is None:
raise ServerError("{} object does not exist yet".format(self.__class__.__name__))
elif not self.__class__._has_schema_method("destroy"):
raise MethodNotSupported("{} do not support deletion.".format(self.__class__.__name__))
try:
self._resource.delete()
except HTTPError as e:
if e.response.status_code == 403:
raise PermissionDenied("") # FIXME: is this right?
else:
raise e
|
Delete this object from the One Codex server.
|
def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=set()):
"""
Return a dict that describes the combination of one or two dimensions, for a plot
:param p_dim:
:param s_dim:
:param dimensions:
:param extant:
:return:
"""
if not dimensions:
dimensions = self.primary_dimensions
key = p_dim.name
if s_dim:
key += '/' + s_dim.name
# Ignore if the key already exists or the primary and secondary dims are the same
if key in extant or p_dim == s_dim:
return
# Don't allow geography to be a secondary dimension. It must either be a primary dimension
# ( to make a map ) or a filter, or a small-multiple
if s_dim and s_dim.valuetype_class.is_geo():
return
extant.add(key)
filtered = {}
for d in dimensions:
if d != p_dim and d != s_dim:
filtered[d.name] = d.pstats.uvalues.keys()
if p_dim.valuetype_class.is_time():
value_type = 'time'
chart_type = 'line'
elif p_dim.valuetype_class.is_geo():
value_type = 'geo'
chart_type = 'map'
else:
value_type = 'general'
chart_type = 'bar'
return dict(
key=key,
p_dim=p_dim.name,
p_dim_type=value_type,
p_label=p_dim.label_or_self.name,
s_dim=s_dim.name if s_dim else None,
s_label=s_dim.label_or_self.name if s_dim else None,
filters=filtered,
chart_type=chart_type
)
|
Return a dict that describes the combination of one or two dimensions, for a plot
:param p_dim:
:param s_dim:
:param dimensions:
:param extant:
:return:
|
def output_format_lock(self, packages, **kwargs):
""" Text to lock file """
self._output_config['type'] = PLAIN
text = ''
tmp_packages = OrderedDict()
columns = self._config.get_columns()
widths = {}
for _pkg in packages.values():
_pkg_name = _pkg.package_name
_params = _pkg.get_params(columns, merged=True, raw=False)
if _pkg_name not in tmp_packages:
tmp_packages[_pkg_name] = _params
comment = 1
for _col in columns:
widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
comment = 0
comment = 1
for _col in columns:
text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
comment = 0
text = '#{}\n'.format(text.strip())
for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
_pkg = tmp_packages[_pkg_name]
line = ''
for _col in columns:
line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col]))))
text += '{}\n'.format(line.strip())
return text
|
Text to lock file
|
def print_obj(arg, frame, format=None, short=False):
"""Return a string representation of an object """
try:
if not frame:
# ?? Should we have set up a dummy globals
# to have persistence?
obj = eval(arg, None, None)
else:
obj = eval(arg, frame.f_globals, frame.f_locals)
pass
except Exception:
return 'No symbol "' + arg + '" in current context.'
# format and print
what = arg
if format:
what = format + ' ' + arg
obj = printf(obj, format)
s = '%s = %s' % (what, obj)
if not short:
s += '\ntype = %s' % type(obj)
if callable(obj):
argspec = print_argspec(obj, arg)
if argspec:
s += ':\n\t'
if inspect.isclass(obj):
s += 'Class constructor information:\n\t'
obj = obj.__init__
elif isinstance(obj, types.InstanceType):
obj = obj.__call__
pass
s+= argspec
pass
# Try to list the members of a class.
# Not sure if this is correct or the
# best way to do.
s = print_dict(s, obj, "object variables")
if hasattr(obj, "__class__"):
s = print_dict(s, obj.__class__, "class variables")
pass
return s
|
Return a string representation of an object
|
def window_flattop(N, mode='symmetric',precision=None):
r"""Flat-top tapering window
Returns symmetric or periodic flat top window.
:param N: window length
:param mode: way the data are normalised. If mode is *symmetric*, then
divide n by N-1. If mode is *periodic*, divide by N,
to be consistent with octave code.
When using windows for filter design, the *symmetric* mode
should be used (default). When using windows for spectral analysis, the *periodic*
mode should be used. The mathematical form of the flat-top window in the symmetric
case is:
.. math:: w(n) = a_0
- a_1 \cos\left(\frac{2\pi n}{N-1}\right)
+ a_2 \cos\left(\frac{4\pi n}{N-1}\right)
- a_3 \cos\left(\frac{6\pi n}{N-1}\right)
+ a_4 \cos\left(\frac{8\pi n}{N-1}\right)
===== =============
coeff value
===== =============
a0 0.21557895
a1 0.41663158
a2 0.277263158
a3 0.083578947
a4 0.006947368
===== =============
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'flattop')
.. seealso:: :func:`create_window`, :class:`Window`
"""
assert mode in ['periodic', 'symmetric']
t = arange(0, N)
# FIXME: N=1 for mode = periodic ?
if mode == 'periodic':
x = 2*pi*t/float(N)
else:
if N == 1:
return ones(1)
x = 2*pi*t/float(N-1)
a0 = 0.21557895
a1 = 0.41663158
a2 = 0.277263158
a3 = 0.083578947
a4 = 0.006947368
if precision == 'octave':
#to compare with octave, same as above but less precise
d = 4.6402
a0 = 1./d
a1 = 1.93/d
a2 = 1.29/d
a3 = 0.388/d
a4 = 0.0322/d
w = a0-a1*cos(x)+a2*cos(2*x)-a3*cos(3*x)+a4*cos(4*x)
return w
|
r"""Flat-top tapering window
Returns symmetric or periodic flat top window.
:param N: window length
:param mode: way the data are normalised. If mode is *symmetric*, then
divide n by N-1. If mode is *periodic*, divide by N,
to be consistent with octave code.
When using windows for filter design, the *symmetric* mode
should be used (default). When using windows for spectral analysis, the *periodic*
mode should be used. The mathematical form of the flat-top window in the symmetric
case is:
.. math:: w(n) = a_0
- a_1 \cos\left(\frac{2\pi n}{N-1}\right)
+ a_2 \cos\left(\frac{4\pi n}{N-1}\right)
- a_3 \cos\left(\frac{6\pi n}{N-1}\right)
+ a_4 \cos\left(\frac{8\pi n}{N-1}\right)
===== =============
coeff value
===== =============
a0 0.21557895
a1 0.41663158
a2 0.277263158
a3 0.083578947
a4 0.006947368
===== =============
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'flattop')
.. seealso:: :func:`create_window`, :class:`Window`
|
def salt_ssh(project, target, module, args=None, kwargs=None):
"""
Execute a `salt-ssh` command
"""
cmd = ['salt-ssh']
cmd.extend(generate_salt_cmd(target, module, args, kwargs))
cmd.append('--state-output=mixed')
cmd.append('--roster-file=%s' % project.roster_path)
cmd.append('--config-dir=%s' % project.salt_ssh_config_dir)
cmd.append('--ignore-host-keys')
cmd.append('--force-color')
cmd = ' '.join(cmd)
logger.debug('salt-ssh cmd: %s', cmd)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0 or err:
raise Exception(err)
return out + err
|
Execute a `salt-ssh` command
|
def build_dated_queryset(self):
"""
Build archive pages for all months in the queryset.
"""
qs = self.get_dated_queryset()
months = self.get_date_list(qs)
for dt in months:
    self.build_month(dt)
|
Build archive pages for all months in the queryset.
|
def get_resampled_coordinates(lons, lats):
"""
Resample polygon line segments and return the coordinates of the new
vertices. This limits distortions when projecting a polygon onto a
spherical surface.
Parameters define longitudes and latitudes of a point collection in the
form of lists or numpy arrays.
:return:
A tuple of two numpy arrays: longitudes and latitudes
of resampled vertices.
"""
num_coords = len(lons)
assert num_coords == len(lats)
lons1 = numpy.array(lons)
lats1 = numpy.array(lats)
lons2 = numpy.concatenate((lons1[1:], lons1[:1]))
lats2 = numpy.concatenate((lats1[1:], lats1[:1]))
distances = geodetic.geodetic_distance(lons1, lats1, lons2, lats2)
resampled_lons = [lons[0]]
resampled_lats = [lats[0]]
for i in range(num_coords):
next_point = (i + 1) % num_coords
lon1, lat1 = lons[i], lats[i]
lon2, lat2 = lons[next_point], lats[next_point]
distance = distances[i]
num_points = int(distance / UPSAMPLING_STEP_KM) + 1
if num_points >= 2:
# We need to increase the resolution of this arc by adding new
# points.
new_lons, new_lats, _ = geodetic.npoints_between(
lon1, lat1, 0, lon2, lat2, 0, num_points)
resampled_lons.extend(new_lons[1:])
resampled_lats.extend(new_lats[1:])
else:
resampled_lons.append(lon2)
resampled_lats.append(lat2)
# NB: we cut off the last point because it repeats the first one
return numpy.array(resampled_lons[:-1]), numpy.array(resampled_lats[:-1])
|
Resample polygon line segments and return the coordinates of the new
vertices. This limits distortions when projecting a polygon onto a
spherical surface.
Parameters define longitudes and latitudes of a point collection in the
form of lists or numpy arrays.
:return:
A tuple of two numpy arrays: longitudes and latitudes
of resampled vertices.
|
def complete_use(self, text, *_):
""" Autocomplete for use """
return [t + " " for t in REGIONS if t.startswith(text)]
|
Autocomplete for use
|
def add_tags(self, md5, tags):
"""Add tags to this sample"""
if not tags: return
tag_set = set(self.get_tags(md5)) if self.get_tags(md5) else set()
if isinstance(tags, str):
tags = [tags]
for tag in tags:
tag_set.add(tag)
self.data_store.store_work_results({'tags': list(tag_set)}, 'tags', md5)
|
Add tags to this sample
|
def get_info(self, component):
""" Get the information about this component """
# Grab it, clean it and ship it
work_results = self._get_work_results('info', component)
return self.data_store.clean_for_serialization(work_results)
|
Get the information about this component
|
def _find_highest_supported_command(self, *segment_classes, **kwargs):
"""Search the BPD for the highest supported version of a segment."""
return_parameter_segment = kwargs.get("return_parameter_segment", False)
parameter_segment_name = "{}I{}S".format(segment_classes[0].TYPE[0], segment_classes[0].TYPE[2:])
version_map = dict((clazz.VERSION, clazz) for clazz in segment_classes)
max_version = self.bpd.find_segment_highest_version(parameter_segment_name, version_map.keys())
if not max_version:
raise FinTSUnsupportedOperation('No supported {} version found. I support {}, bank supports {}.'.format(
parameter_segment_name,
tuple(version_map.keys()),
tuple(v.header.version for v in self.bpd.find_segments(parameter_segment_name))
))
if return_parameter_segment:
return max_version, version_map.get(max_version.header.version)
else:
return version_map.get(max_version.header.version)
|
Search the BPD for the highest supported version of a segment.
|
def is_pointer(type_):
"""returns True, if type represents C++ pointer type, False otherwise"""
return does_match_definition(type_,
cpptypes.pointer_t,
(cpptypes.const_t, cpptypes.volatile_t)) \
or does_match_definition(type_,
cpptypes.pointer_t,
(cpptypes.volatile_t, cpptypes.const_t))
|
returns True, if type represents C++ pointer type, False otherwise
|
def normalize_url(url):
""" Normalize url
"""
if not url:
return url
matched = _windows_path_prefix.match(url)
if matched:
return path2url(url)
p = six.moves.urllib.parse.urlparse(url)
if p.scheme == '':
if p.netloc == '' and p.path != '':
# it should be a file path
url = path2url(os.path.abspath(url))
else:
raise ValueError('url should be a http-url or file path -- ' + url)
return url
|
Normalize url
|
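Expected behaviour, sketched as comments (the exact file URL depends on the current working directory):

normalize_url('http://example.com/a')  # scheme present: returned unchanged
normalize_url('data/file.json')        # bare path: becomes 'file:///.../data/file.json'
normalize_url('//host/share')          # empty scheme but a netloc: raises ValueError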
def command(self):
"""Manually import an OFX into a nYNAB budget"""
print('pynYNAB OFX import')
args = self.parser.parse_args()
verify_common_args(args)
client = clientfromkwargs(**args)
delta = do_ofximport(args.file, client)
client.push(expected_delta=delta)
|
Manually import an OFX into a nYNAB budget
|
def get_manifests(self, repo_name, digest=None):
'''get_manifests calls get_manifest for each of the schema versions,
including v2 and v1. Version 1 includes image layers and metadata,
and version 2 must be parsed for a specific manifest, and the 2nd
call includes the layers. If a digest is not provided
latest is used.
Parameters
==========
repo_name: reference to the <username>/<repository>:<tag> to obtain
digest: a tag or shasum version
'''
if not hasattr(self, 'manifests'):
self.manifests = {}
# Obtain schema version 1 (metadata) and 2, and image config
schemaVersions = ['v1', 'v2', 'config']
for schemaVersion in schemaVersions:
manifest = self._get_manifest(repo_name, digest, schemaVersion)
if manifest is not None:
# If we don't have a config yet, try to get from version 2 manifest
if schemaVersion == "v2" and "config" in manifest:
bot.debug('Attempting to get config as blob in version 2 manifest')
url = self._get_layerLink(repo_name, manifest['config']['digest'])
headers = {'Accept': manifest['config']['mediaType']}
self.manifests['config'] = self._get(url, headers=headers)
self.manifests[schemaVersion] = manifest
return self.manifests
|
get_manifests calls get_manifest for each of the schema versions,
including v2 and v1. Version 1 includes image layers and metadata,
and version 2 must be parsed for a specific manifest, and the 2nd
call includes the layers. If a digest is not provided
latest is used.
Parameters
==========
repo_name: reference to the <username>/<repository>:<tag> to obtain
digest: a tag or shasum version
|
def getShocks(self):
'''
Determine which agents switch from employment to unemployment. All unemployed agents remain
unemployed until death.
Parameters
----------
None
Returns
-------
None
'''
employed = self.eStateNow == 1.0
N = int(np.sum(employed))
newly_unemployed = drawBernoulli(N,p=self.UnempPrb,seed=self.RNG.randint(0,2**31-1))
self.eStateNow[employed] = 1.0 - newly_unemployed
|
Determine which agents switch from employment to unemployment. All unemployed agents remain
unemployed until death.
Parameters
----------
None
Returns
-------
None
|
def jsonrpc_map(self):
""" Map of json-rpc available calls.
:return str:
"""
result = "<h1>JSON-RPC map</h1><pre>{0}</pre>".format("\n\n".join([
"{0}: {1}".format(fname, f.__doc__)
for fname, f in self.dispatcher.items()
]))
return Response(result)
|
Map of json-rpc available calls.
:return str:
|
def view_dot_graph(graph, filename=None, view=False):
"""
View the given DOT source. If view is True, the image is rendered
and viewed by the default application in the system. The file path of
the output is returned. If view is False, a graphviz.Source object is
returned. If view is False and the environment is in a IPython session,
an IPython image object is returned and can be displayed inline in the
notebook.
This function requires the graphviz package.
Args
----
- graph [str]: a DOT source code
- filename [str]: optional. if given and view is True, this specifies
the file path for the rendered output to write to.
- view [bool]: if True, opens the rendered output file.
"""
# Optionally depends on graphviz package
import graphviz as gv
src = gv.Source(graph)
if view:
# Returns the output file path
return src.render(filename, view=view)
else:
# Attempts to show the graph in IPython notebook
try:
__IPYTHON__
except NameError:
return src
else:
import IPython.display as display
format = 'svg'
return display.SVG(data=src.pipe(format))
|
View the given DOT source. If view is True, the image is rendered
and viewed by the default application in the system. The file path of
the output is returned. If view is False, a graphviz.Source object is
returned. If view is False and the environment is in a IPython session,
an IPython image object is returned and can be displayed inline in the
notebook.
This function requires the graphviz package.
Args
----
- graph [str]: a DOT source code
- filename [str]: optional. if given and view is True, this specifies
the file path for the rendered output to write to.
- view [bool]: if True, opens the rendered output file.
|
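A minimal usage sketch (requires the graphviz Python package, plus the Graphviz binaries on PATH for actual rendering):

dot_source = 'digraph G { a -> b; b -> c; }'
src = view_dot_graph(dot_source)  # graphviz.Source, or an SVG object inside IPython
out_path = view_dot_graph(dot_source, filename='g', view=True)  # renders and opens the file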
def wait_for_port(host, port=22, timeout=900, gateway=None):
'''
Wait until a connection to the specified port can be made on a specified
host. This is usually port 22 (for SSH), but in the case of Windows
installations, it might be port 445 (for psexec). It may also be an
alternate port for SSH, depending on the base image.
'''
start = time.time()
# Assign test ports because if a gateway is defined
# we first want to test the gateway before the host.
test_ssh_host = host
test_ssh_port = port
if gateway:
ssh_gateway = gateway['ssh_gateway']
ssh_gateway_port = 22
if ':' in ssh_gateway:
ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
if 'ssh_gateway_port' in gateway:
ssh_gateway_port = gateway['ssh_gateway_port']
test_ssh_host = ssh_gateway
test_ssh_port = ssh_gateway_port
log.debug(
'Attempting connection to host %s on port %s '
'via gateway %s on port %s',
host, port, ssh_gateway, ssh_gateway_port
)
else:
log.debug('Attempting connection to host %s on port %s', host, port)
trycount = 0
while True:
trycount += 1
try:
if socket.inet_pton(socket.AF_INET6, host):
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(5)
sock.connect((test_ssh_host, int(test_ssh_port)))
# Stop any remaining reads/writes on the socket
sock.shutdown(socket.SHUT_RDWR)
# Close it!
sock.close()
break
except socket.error as exc:
log.debug('Caught exception in wait_for_port: %s', exc)
time.sleep(1)
if time.time() - start > timeout:
log.error('Port connection timed out: %s', timeout)
return False
log.debug(
'Retrying connection to %s %s on port %s (try %s)',
'gateway' if gateway else 'host', test_ssh_host, test_ssh_port, trycount
)
if not gateway:
return True
# Let the user know that the gateway is good!
log.debug('Gateway %s on port %s is reachable.', test_ssh_host, test_ssh_port)
# Now we need to test the host via the gateway.
# We will use netcat on the gateway to test the port
ssh_args = []
ssh_args.extend([
# Don't add new hosts to the host key database
'-oStrictHostKeyChecking=no',
# Set hosts key database path to /dev/null, i.e., non-existing
'-oUserKnownHostsFile=/dev/null',
# Don't re-use the SSH connection. Less failures.
'-oControlPath=none'
])
# There should never be both a password and an ssh key passed in, so
if 'ssh_gateway_key' in gateway:
ssh_args.extend([
# tell SSH to skip password authentication
'-oPasswordAuthentication=no',
'-oChallengeResponseAuthentication=no',
# Make sure public key authentication is enabled
'-oPubkeyAuthentication=yes',
# do only use the provided identity file
'-oIdentitiesOnly=yes',
# No Keyboard interaction!
'-oKbdInteractiveAuthentication=no',
# Also, specify the location of the key file
'-i {0}'.format(gateway['ssh_gateway_key'])
])
# Netcat command testing remote port
command = 'nc -z -w5 -q0 {0} {1}'.format(host, port)
# SSH command
pcmd = 'ssh {0} {1}@{2} -p {3} {4}'.format(
' '.join(ssh_args), gateway['ssh_gateway_user'], ssh_gateway,
ssh_gateway_port, pipes.quote('date')
)
cmd = 'ssh {0} {1}@{2} -p {3} {4}'.format(
' '.join(ssh_args), gateway['ssh_gateway_user'], ssh_gateway,
ssh_gateway_port, pipes.quote(command)
)
log.debug('SSH command: \'%s\'', cmd)
kwargs = {'display_ssh_output': False,
'password': gateway.get('ssh_gateway_password', None)}
trycount = 0
usable_gateway = False
gateway_retries = 5
while True:
trycount += 1
# test gateway usage
if not usable_gateway:
pstatus = _exec_ssh_cmd(pcmd, allow_failure=True, **kwargs)
if pstatus == 0:
usable_gateway = True
else:
gateway_retries -= 1
log.error(
'Gateway usage seems to be broken, '
'password error ? Tries left: %s', gateway_retries)
if not gateway_retries:
raise SaltCloudExecutionFailure(
'SSH gateway is reachable but we can not login')
# then try to reach out the target
if usable_gateway:
status = _exec_ssh_cmd(cmd, allow_failure=True, **kwargs)
# Get the exit code of the SSH command.
# If 0 then the port is open.
if status == 0:
return True
time.sleep(1)
if time.time() - start > timeout:
log.error('Port connection timed out: %s', timeout)
return False
log.debug(
'Retrying connection to host %s on port %s '
'via gateway %s on port %s. (try %s)',
host, port, ssh_gateway, ssh_gateway_port, trycount
)
|
Wait until a connection to the specified port can be made on a specified
host. This is usually port 22 (for SSH), but in the case of Windows
installations, it might be port 445 (for psexec). It may also be an
alternate port for SSH, depending on the base image.
|
def apply_rcparams(self):
"""
Set the rcParams
"""
from matplotlib import rcParams
for key, val in self.rcParams.items():
try:
rcParams[key] = val
except Exception as e:
msg = ("""Setting "mpl.rcParams['{}']={}" """
"raised an Exception: {}")
raise PlotnineError(msg.format(key, val, e))
|
Set the rcParams
|
def _compute_static_prob(tri, com):
"""
For an object with the given center of mass, compute
the probability that the given tri would be the first to hit the
ground if the object were dropped with a pose chosen uniformly at random.
Parameters
----------
tri: (3,3) float, the vertices of a triangle
com: (3,) float, the center of mass of the object
Returns
-------
prob: float, the probability in [0,1] for the given triangle
"""
sv = [(v - com) / np.linalg.norm(v - com) for v in tri]
# Use L'Huilier's Formula to compute spherical area
a = np.arccos(min(1, max(-1, np.dot(sv[0], sv[1]))))
b = np.arccos(min(1, max(-1, np.dot(sv[1], sv[2]))))
c = np.arccos(min(1, max(-1, np.dot(sv[2], sv[0]))))
s = (a + b + c) / 2.0
# Prevents weirdness with arctan
try:
return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
(s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
except BaseException:
s = s + 1e-8
return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
(s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
|
For an object with the given center of mass, compute
the probability that the given tri would be the first to hit the
ground if the object were dropped with a pose chosen uniformly at random.
Parameters
----------
tri: (3,3) float, the vertices of a triangle
com: (3,) float, the center of mass of the object
Returns
-------
prob: float, the probability in [0,1] for the given triangle
|
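As a sanity check of the spherical-excess computation: for a regular tetrahedron the four faces subtend equal solid angles from the centroid, so each probability should come out to 0.25 and the four should sum to 1. A sketch:

import numpy as np

# Vertices of a regular tetrahedron centred on the origin.
verts = np.array([[1, 1, 1], [1, -1, -1], [-1, 1, -1], [-1, -1, 1]], dtype=float)
faces = [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]
com = verts.mean(axis=0)  # the origin
probs = [_compute_static_prob(verts[list(f)], com) for f in faces]
assert abs(sum(probs) - 1.0) < 1e-6  # each entry is ~0.25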
def for_category(self, category, context=None):
"""Returns actions list for this category in current application.
Actions are filtered according to :meth:`.Action.available`.
If `context` is None, the current action context is used
(:attr:`context`)
"""
assert self.installed(), "Actions not enabled on this application"
actions = self._state["categories"].get(category, [])
if context is None:
context = self.context
return [a for a in actions if a.available(context)]
|
Returns actions list for this category in current application.
Actions are filtered according to :meth:`.Action.available`.
If `context` is None, the current action context is used
(:attr:`context`)
|
def browse_stations_categories(self):
"""Get the categories from Browse Stations.
Returns:
list: Station categories that can contain subcategories.
"""
response = self._call(
mc_calls.BrowseStationCategories
)
station_categories = response.body.get('root', {}).get('subcategories', [])
return station_categories
|
Get the categories from Browse Stations.
Returns:
list: Station categories that can contain subcategories.
|
def define_options(self, names, parser_options=None):
"""Given a list of option names, this returns a list of dicts
defined in all_options and self.shared_options. These can then
be used to populate the argparser with"""
def copy_option(options, name):
return {k: v for k, v in options[name].items()}
if parser_options is None:
parser_options = {}
options = {}
for name in names:
try:
option = copy_option(parser_options, name)
except KeyError:
option = copy_option(shared_options, name)
try:
options.update({option['clarg']: option})
except TypeError:
options.update({option['clarg'][0]: option})
return options
|
Given a list of option names, this returns a dict of option
definitions taken from ``parser_options`` (falling back to the
module-level ``shared_options``), keyed by command-line argument.
These can then be used to populate the argparser with.
|
def print_smart_tasks():
"""Print smart tasks as JSON"""
print("Printing information about smart tasks")
tasks = api(gateway.get_smart_tasks())
if len(tasks) == 0:
exit(bold("No smart tasks defined"))
container = []
for task in tasks:
container.append(api(task).task_control.raw)
print(jsonify(container))
|
Print smart tasks as JSON
|
def pretty_constants(self):
"""
the sequence of tuples (index, pretty type, value) of the constant
pool entries.
"""
for i in range(1, len(self.consts)):
t, v = self.pretty_const(i)
if t:
yield (i, t, v)
|
the sequence of tuples (index, pretty type, value) of the constant
pool entries.
|
def get_objective_requisite_assignment_session(self, *args, **kwargs):
"""Gets the session for managing objective requisites.
return: (osid.learning.ObjectiveRequisiteAssignmentSession) - an
ObjectiveRequisiteAssignmentSession
raise: OperationFailed - unable to complete request
raise: Unimplemented -
supports_objective_requisite_assignment() is false
compliance: optional - This method must be implemented if
supports_objective_requisite_assignment() is true.
"""
if not self.supports_objective_requisite_assignment():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
try:
session = sessions.ObjectiveRequisiteAssignmentSession(runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session
|
Gets the session for managing objective requisites.
return: (osid.learning.ObjectiveRequisiteAssignmentSession) - an
ObjectiveRequisiteAssignmentSession
raise: OperationFailed - unable to complete request
raise: Unimplemented -
supports_objective_requisite_assignment() is false
compliance: optional - This method must be implemented if
supports_objective_requisite_assignment() is true.
|
def check_file_path(self, path):
"""
Ensure file exists at the provided path
:type path: string
:param path: path to the file to check
"""
if not os.path.exists(path):
msg = "File Not Found {}".format(path)
raise OSError(msg)
|
Ensure file exists at the provided path
:type path: string
:param path: path to the file to check
|
def double_prompt_for_plaintext_password():
"""Get the desired password from the user through a double prompt."""
password = 1
password_repeat = 2
while password != password_repeat:
password = getpass.getpass('Enter password: ')
password_repeat = getpass.getpass('Repeat password: ')
if password != password_repeat:
sys.stderr.write('Passwords do not match, try again.\n')
return password
|
Get the desired password from the user through a double prompt.
|
def send_many(self, outputs_array, fee=None, change_addr=None, id=None, endpoint=None):
"""
Args:
outputs_array: (list of dict) the data structure of each element in the array is as follows:
{"asset": <asset>,"value": <value>,"address": <address>}
asset: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7')
value: (int/decimal) transfer amount
address: (str) destination address
fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001.
change_addr: (str, optional) Change address, default is the first standard address in the wallet.
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
"""
params = [outputs_array]
if fee:
params.append(fee)
if fee and change_addr:
params.append(change_addr)
elif not fee and change_addr:
params.append(0)
params.append(change_addr)
return self._call_endpoint(SEND_MANY, params=params, id=id, endpoint=endpoint)
|
Args:
outputs_array: (list of dict) the data structure of each element in the array is as follows:
{"asset": <asset>,"value": <value>,"address": <address>}
asset: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7')
value: (int/decimal) transfer amount
address: (str) destination address
fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001.
change_addr: (str, optional) Change address, default is the first standard address in the wallet.
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
|
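A sketch of the expected outputs_array shape; the destination addresses below are placeholders, while the asset id is the documented NEO identifier:

neo_asset = 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'
outputs_array = [
    {'asset': neo_asset, 'value': 1, 'address': 'AddressPlaceholder1'},  # placeholder
    {'asset': neo_asset, 'value': 3, 'address': 'AddressPlaceholder2'},  # placeholder
]
# client.send_many(outputs_array, fee=0.001)  # hypothetical client instance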
def _validate_currency(self, currency):
"""Check if the given order book is valid.
:param currency: Major currency name in lowercase.
:type currency: str | unicode
:raise InvalidCurrencyError: If an invalid major currency is given.
"""
if currency not in self.major_currencies:
raise InvalidCurrencyError(
'Invalid major currency \'{}\'. Choose from {}.'
.format(currency, tuple(self.major_currencies))
)
|
Check if the given order book is valid.
:param currency: Major currency name in lowercase.
:type currency: str | unicode
:raise InvalidCurrencyError: If an invalid major currency is given.
|
def dumps(value, encoding=None):
"""dumps(object,encoding=None) -> string
This function dumps a python object as a tnetstring.
"""
# This uses a deque to collect output fragments in reverse order,
# then joins them together at the end. It's measurably faster
# than creating all the intermediate strings.
# If you're reading this to get a handle on the tnetstring format,
# consider the _gdumps() function instead; it's a standard top-down
# generator that's simpler to understand but much less efficient.
q = deque()
_rdumpq(q,0,value,encoding)
return "".join(q)
|
dumps(object,encoding=None) -> string
This function dumps a python object as a tnetstring.
|
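For orientation, a few examples of the tnetstring wire format this produces (payload length, a colon, the payload, then a one-byte type tag):

dumps('hello')      # -> '5:hello,'          (',' tags a string)
dumps(42)           # -> '2:42#'             ('#' tags an integer)
dumps([12, 'ab'])   # -> '10:2:12#2:ab,]'    (']' tags a list of nested tnetstrings)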
def cancel(self, CorpNum, ItemCode, MgtKey, Memo=None, UserID=None):
""" 발행취소
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Memo : 처리메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey is None or MgtKey == "":
    raise PopbillException(-99999999, "The management key (MgtKey) was not provided.")
if ItemCode is None or ItemCode == "":
    raise PopbillException(-99999999, "The statement type code (ItemCode) was not provided.")
postData = ''
if Memo is not None and Memo != '':
    postData = self._stringtify({"memo": Memo})
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "CANCEL")
|
Cancel issuance.
args
CorpNum : Popbill member business registration number
ItemCode : statement type code
[121 - transaction statement], [122 - invoice], [123 - quotation],
[124 - purchase order], [125 - deposit slip], [126 - receipt]
MgtKey : partner document management key
Memo : processing memo
UserID : Popbill member user ID
return
Processing result, consisting of code and message.
raise
PopbillException
|
def gcp_fixed_k(V,E,K):
"""gcp_fixed_k -- model for minimizing number of bad edges in coloring a graph
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
- K: number of colors to be used
Returns a model, ready to be solved.
"""
model = Model("gcp - fixed k")
x,z = {},{}
for i in V:
for k in range(K):
x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k))
for (i,j) in E:
z[i,j] = model.addVar(vtype="B", name="z(%s,%s)"%(i,j))
for i in V:
model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)" % i)
for (i,j) in E:
for k in range(K):
model.addCons(x[i,k] + x[j,k] <= 1 + z[i,j], "BadEdge(%s,%s,%s)"%(i,j,k))
model.setObjective(quicksum(z[i,j] for (i,j) in E), "minimize")
model.data = x,z
return model
|
gcp_fixed_k -- model for minimizing number of bad edges in coloring a graph
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
- K: number of colors to be used
Returns a model, ready to be solved.
|
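A usage sketch on a 4-cycle with K=2: the graph is bipartite, so an optimal coloring has zero bad edges (assumes the PySCIPOpt Model/quicksum imported by the surrounding module):

V = [1, 2, 3, 4]
E = [(1, 2), (2, 3), (3, 4), (1, 4)]  # a 4-cycle, 2-colorable
model = gcp_fixed_k(V, E, 2)
model.optimize()
x, z = model.data
print('bad edges:', model.getObjVal())  # expected: 0.0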
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
|
Gets current repository info from git
|
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
source_port_range=None, description=None, destination_address_prefixes=None,
destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
connection_auth=None, **kwargs):
'''
.. versionadded:: 2019.2.0
Ensure a security rule exists.
:param name:
Name of the security rule.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param security_group:
The name of the existing network security group to contain the security rule.
:param resource_group:
The resource group assigned to the network security group.
:param description:
Optional description of the security rule.
:param destination_address_prefix:
The CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterisk '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterisk '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure security rule exists:
azurearm_network.security_rule_present:
- name: nsg1_rule2
- security_group: nsg1
- resource_group: group1
- priority: 101
- protocol: tcp
- access: allow
- direction: inbound
- source_address_prefix: internet
- destination_address_prefix: virtualnetwork
- source_port_range: '*'
- destination_port_ranges:
- '80'
- '443'
- connection_auth: {{ profile }}
- require:
- azurearm_network: Ensure network security group exists
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
# Validate the mutually exclusive singular/plural parameters explicitly,
# without eval()/exec(), which cannot rebind locals reliably in Python 3.
exclusive_params = [
    ('source_port_ranges', source_port_ranges, 'source_port_range', source_port_range),
    ('source_address_prefixes', source_address_prefixes, 'source_address_prefix', source_address_prefix),
    ('destination_port_ranges', destination_port_ranges, 'destination_port_range', destination_port_range),
    ('destination_address_prefixes', destination_address_prefixes,
     'destination_address_prefix', destination_address_prefix),
]
for plural_name, plural_val, singular_name, singular_val in exclusive_params:
    if not plural_val and not singular_val:
        ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural_name, singular_name)
        return ret
    if plural_val and not isinstance(plural_val, list):
        ret['comment'] = 'The {0} parameter must be a list!'.format(plural_name)
        return ret
# The plural parameter, when given, overrides its singular counterpart.
if source_port_ranges:
    source_port_range = None
if source_address_prefixes:
    source_address_prefix = None
if destination_port_ranges:
    destination_port_range = None
if destination_address_prefixes:
    destination_address_prefix = None
rule = __salt__['azurearm_network.security_rule_get'](
name,
security_group,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' not in rule:
# access changes
if access.capitalize() != rule.get('access'):
ret['changes']['access'] = {
'old': rule.get('access'),
'new': access
}
# description changes
if description != rule.get('description'):
ret['changes']['description'] = {
'old': rule.get('description'),
'new': description
}
# direction changes
if direction.capitalize() != rule.get('direction'):
ret['changes']['direction'] = {
'old': rule.get('direction'),
'new': direction
}
# priority changes
if int(priority) != rule.get('priority'):
ret['changes']['priority'] = {
'old': rule.get('priority'),
'new': priority
}
# protocol changes
if protocol.lower() != rule.get('protocol', '').lower():
ret['changes']['protocol'] = {
'old': rule.get('protocol'),
'new': protocol
}
# destination_port_range changes
if destination_port_range != rule.get('destination_port_range'):
ret['changes']['destination_port_range'] = {
'old': rule.get('destination_port_range'),
'new': destination_port_range
}
# source_port_range changes
if source_port_range != rule.get('source_port_range'):
ret['changes']['source_port_range'] = {
'old': rule.get('source_port_range'),
'new': source_port_range
}
# destination_port_ranges changes
if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])):
ret['changes']['destination_port_ranges'] = {
'old': rule.get('destination_port_ranges'),
'new': destination_port_ranges
}
# source_port_ranges changes
if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])):
ret['changes']['source_port_ranges'] = {
'old': rule.get('source_port_ranges'),
'new': source_port_ranges
}
# destination_address_prefix changes
if (destination_address_prefix or '').lower() != rule.get('destination_address_prefix', '').lower():
ret['changes']['destination_address_prefix'] = {
'old': rule.get('destination_address_prefix'),
'new': destination_address_prefix
}
# source_address_prefix changes
if (source_address_prefix or '').lower() != rule.get('source_address_prefix', '').lower():
ret['changes']['source_address_prefix'] = {
'old': rule.get('source_address_prefix'),
'new': source_address_prefix
}
# destination_address_prefixes changes
if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes', [])):
if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes', [])):
ret['changes']['destination_address_prefixes'] = {
'old': rule.get('destination_address_prefixes'),
'new': destination_address_prefixes
}
else:
local_dst_addrs, remote_dst_addrs = (sorted(destination_address_prefixes),
sorted(rule.get('destination_address_prefixes')))
for idx in six_range(0, len(local_dst_addrs)):
if local_dst_addrs[idx].lower() != remote_dst_addrs[idx].lower():
ret['changes']['destination_address_prefixes'] = {
'old': rule.get('destination_address_prefixes'),
'new': destination_address_prefixes
}
break
# source_address_prefixes changes
if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes', [])):
if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes', [])):
ret['changes']['source_address_prefixes'] = {
'old': rule.get('source_address_prefixes'),
'new': source_address_prefixes
}
else:
local_src_addrs, remote_src_addrs = (sorted(source_address_prefixes),
sorted(rule.get('source_address_prefixes')))
for idx in six_range(0, len(local_src_addrs)):
if local_src_addrs[idx].lower() != remote_src_addrs[idx].lower():
ret['changes']['source_address_prefixes'] = {
'old': rule.get('source_address_prefixes'),
'new': source_address_prefixes
}
break
if not ret['changes']:
ret['result'] = True
ret['comment'] = 'Security rule {0} is already present.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Security rule {0} would be updated.'.format(name)
return ret
else:
ret['changes'] = {
'old': {},
'new': {
'name': name,
'access': access,
'description': description,
'direction': direction,
'priority': priority,
'protocol': protocol,
'destination_address_prefix': destination_address_prefix,
'destination_address_prefixes': destination_address_prefixes,
'destination_port_range': destination_port_range,
'destination_port_ranges': destination_port_ranges,
'source_address_prefix': source_address_prefix,
'source_address_prefixes': source_address_prefixes,
'source_port_range': source_port_range,
'source_port_ranges': source_port_ranges,
}
}
if __opts__['test']:
ret['comment'] = 'Security rule {0} would be created.'.format(name)
ret['result'] = None
return ret
rule_kwargs = kwargs.copy()
rule_kwargs.update(connection_auth)
rule = __salt__['azurearm_network.security_rule_create_or_update'](
name=name,
access=access,
description=description,
direction=direction,
priority=priority,
protocol=protocol,
security_group=security_group,
resource_group=resource_group,
destination_address_prefix=destination_address_prefix,
destination_address_prefixes=destination_address_prefixes,
destination_port_range=destination_port_range,
destination_port_ranges=destination_port_ranges,
source_address_prefix=source_address_prefix,
source_address_prefixes=source_address_prefixes,
source_port_range=source_port_range,
source_port_ranges=source_port_ranges,
**rule_kwargs
)
if 'error' not in rule:
ret['result'] = True
ret['comment'] = 'Security rule {0} has been created.'.format(name)
return ret
ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
return ret
|
.. versionadded:: 2019.2.0
Ensure a security rule exists.
:param name:
Name of the security rule.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param security_group:
The name of the existing network security group to contain the security rule.
:param resource_group:
The resource group assigned to the network security group.
:param description:
Optional description of the security rule.
:param destination_address_prefix:
The CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterisk '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterisk '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure security rule exists:
azurearm_network.security_rule_present:
- name: nsg1_rule2
- security_group: nsg1
- resource_group: group1
- priority: 101
- protocol: tcp
- access: allow
- direction: inbound
- source_address_prefix: internet
- destination_address_prefix: virtualnetwork
- source_port_range: '*'
- destination_port_ranges:
- '80'
- '443'
- connection_auth: {{ profile }}
- require:
- azurearm_network: Ensure network security group exists
|
def bake(self):
"""
Bake a ``shell`` command so it's ready to execute. Returns None.
:return: None
"""
command_list = self.command.split(' ')
command, args = command_list[0], command_list[1:]
self._sh_command = getattr(sh, command)
# Reconstruct command with remaining args.
self._sh_command = self._sh_command.bake(
args, _env=self.env, _out=LOG.out, _err=LOG.error)
|
Bake a ``shell`` command so it's ready to execute. Returns None.
:return: None
|
def tagMap(self):
""""Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
ASN.1 tags to ASN.1 objects contained within callee.
"""
if self.tagSet:
return Set.tagMap.fget(self)
else:
return self.componentType.tagMapUnique
|
Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
ASN.1 tags to ASN.1 objects contained within callee.
|
def sync_via_get(self, owner, id, **kwargs):
"""
Sync files (via GET)
Update all files within a dataset that were originally added via URL (e.g. via /datasets endpoints or on data.world). Check out our tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.sync_via_get(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.sync_via_get_with_http_info(owner, id, **kwargs)
else:
(data) = self.sync_via_get_with_http_info(owner, id, **kwargs)
return data
|
Sync files (via GET)
Update all files within a dataset that have originally been added via URL (e.g. via /datasets endpoints or on data.world). Check out our tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.sync_via_get(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
|
def close(self):
"""
Close the sockets
"""
self._socket.close()
if self._async_socket_cache:
self._async_socket_cache.close()
self._async_socket_cache = None
|
Close the sockets
|
def install(self):
"""
install the server
"""
try:
if self.args.server is not None:
server = ServerLists(self.server_type)
DynamicImporter(
'ezhost',
server.name,
args=self.args,
configure=self.configure
)
else:
ServerCommand(self.args)
except Exception as e:
raise e
|
install the server
|
def verify_message(address, message, signature):
"""
Verify message was signed by the address
:param address: signing address
:param message: message to check
:param signature: signature being tested
:return:
"""
bitcoin_message = BitcoinMessage(message)
verified = VerifyMessage(address, bitcoin_message, signature)
return verified
|
Verify message was signed by the address
:param address: signing address
:param message: message to check
:param signature: signature being tested
:return:
|
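A minimal round-trip sketch of how verify_message can be exercised, assuming python-bitcoinlib (the library providing BitcoinMessage and VerifyMessage above); the seed below is a throwaway illustration, not a real key:
import hashlib
from bitcoin.wallet import CBitcoinSecret, P2PKHBitcoinAddress
from bitcoin.signmessage import BitcoinMessage, SignMessage

# Derive a deterministic throwaway key (illustration only; never reuse).
key = CBitcoinSecret.from_secret_bytes(hashlib.sha256(b'example seed').digest())
address = P2PKHBitcoinAddress.from_pubkey(key.pub)
signature = SignMessage(key, BitcoinMessage('hello world'))
assert verify_message(str(address), 'hello world', signature)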
def top(self):
"""
The top-most row index in the vertical span of this cell.
"""
if self.vMerge is None or self.vMerge == ST_Merge.RESTART:
return self._tr_idx
return self._tc_above.top
|
The top-most row index in the vertical span of this cell.
|
def _register_handler(handler, file_formats):
"""Register a handler for some file extensions.
Args:
handler (:obj:`BaseFileHandler`): Handler to be registered.
file_formats (str or list[str]): File formats to be handled by this
handler.
"""
if not isinstance(handler, BaseFileHandler):
raise TypeError(
'handler must be a child of BaseFileHandler, not {}'.format(
type(handler)))
if isinstance(file_formats, str):
file_formats = [file_formats]
if not is_list_of(file_formats, str):
raise TypeError('file_formats must be a str or a list of str')
for ext in file_formats:
file_handlers[ext] = handler
|
Register a handler for some file extensions.
Args:
handler (:obj:`BaseFileHandler`): Handler to be registered.
file_formats (str or list[str]): File formats to be handled by this
handler.
|
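If the module-level names the function assumes (file_handlers, BaseFileHandler, is_list_of) are stubbed as below alongside the function, the registration logic can be exercised standalone; JsonHandler is a hypothetical example handler:
import json

file_handlers = {}

class BaseFileHandler(object):
    """Minimal stand-in for the real abstract handler."""

def is_list_of(seq, expected_type):
    return isinstance(seq, list) and all(isinstance(x, expected_type) for x in seq)

class JsonHandler(BaseFileHandler):
    def load(self, fileobj):
        return json.load(fileobj)

_register_handler(JsonHandler(), ['json', 'jsonl'])
assert type(file_handlers['json']) is JsonHandler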
def astat(args):
"""
%prog astat coverage.log
Create coverage-rho scatter plot.
"""
p = OptionParser(astat.__doc__)
p.add_option("--cutoff", default=1000, type="int",
help="Length cutoff [default: %default]")
p.add_option("--genome", default="",
help="Genome name [default: %default]")
p.add_option("--arrDist", default=False, action="store_true",
help="Use arrDist instead [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
covfile, = args
cutoff = opts.cutoff
genome = opts.genome
plot_arrDist = opts.arrDist
suffix = ".{0}".format(cutoff)
small_covfile = covfile + suffix
update_covfile = need_update(covfile, small_covfile)
if update_covfile:
fw = open(small_covfile, "w")
else:
logging.debug("Found `{0}`, will use this one".format(small_covfile))
covfile = small_covfile
fp = open(covfile)
header = next(fp)
if update_covfile:
fw.write(header)
data = []
msg = "{0} tigs scanned ..."
for row in fp:
tigID, rho, covStat, arrDist = row.split()
tigID = int(tigID)
if tigID % 1000000 == 0:
sys.stderr.write(msg.format(tigID) + "\r")
rho, covStat, arrDist = [float(x) for x in (rho, covStat, arrDist)]
if rho < cutoff:
continue
if update_covfile:
fw.write(row)
data.append((tigID, rho, covStat, arrDist))
print(msg.format(tigID), file=sys.stderr)
from jcvi.graphics.base import plt, savefig
logging.debug("Plotting {0} data points.".format(len(data)))
tigID, rho, covStat, arrDist = zip(*data)
y = arrDist if plot_arrDist else covStat
ytag = "arrDist" if plot_arrDist else "covStat"
fig = plt.figure(1, (7, 7))
ax = fig.add_axes([.12, .1, .8, .8])
ax.plot(rho, y, ".", color="lightslategrey")
xtag = "rho"
info = (genome, xtag, ytag)
title = "{0} {1} vs. {2}".format(*info)
ax.set_title(title)
ax.set_xlabel(xtag)
ax.set_ylabel(ytag)
if plot_arrDist:
ax.set_yscale('log')
imagename = "{0}.png".format(".".join(info))
savefig(imagename, dpi=150)
|
%prog astat coverage.log
Create coverage-rho scatter plot.
|
def fit(sim_mat, D_len, cidx):
"""
The algorithm maximizes energy between clusters, which is its distinguishing feature. The distance matrix contains mostly zeros, which are overlooked due to the search for maximal distances. The algorithm does not try to retain k clusters.
D: numpy array - Symmetric distance matrix
k: int - number of clusters
"""
min_energy = np.inf
for j in range(3):
# select indices in each sample that maximizes its dimension
inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx]) for idy in range(D_len) if idy in sim_mat]
cidx = []
energy = 0 # current energy
for i in np.unique(inds):
indsi = np.where(inds == i)[0] # find indices for every cluster
minind, min_value = 0, 0
for index, idy in enumerate(indsi):
if idy in sim_mat:
# value = sum([sim_mat[idy].get(idx,0) for idx in indsi])
value = 0
for idx in indsi:
value += sim_mat[idy].get(idx, 0)
if value < min_value:
minind, min_value = index, value
energy += min_value
cidx.append(indsi[minind]) # new centers
if energy < min_energy:
min_energy, inds_min, cidx_min = energy, inds, cidx
return inds_min, cidx_min
|
The algorithm maximizes energy between clusters, which is its distinguishing feature. The distance matrix contains mostly zeros, which are overlooked due to the search for maximal distances. The algorithm does not try to retain k clusters.
D: numpy array - Symmetric distance matrix
k: int - number of clusters
|
def set_span_from_ids(self, span_list):
"""
Sets the span for the term from list of ids
@type span_list: []
@param span_list: list of wf ids forming span
"""
this_span = Cspan()
this_span.create_from_ids(span_list)
self.node.append(this_span.get_node())
|
Sets the span for the term from list of ids
@type span_list: []
@param span_list: list of wf ids forming span
|
def apply_status_code(self, status_code):
"""
When a trace entity is generated under the http context,
the status code will affect this entity's fault/error/throttle flags.
Flip these flags based on status code.
"""
self._check_ended()
if not status_code:
return
if status_code >= 500:
self.add_fault_flag()
elif status_code == 429:
self.add_throttle_flag()
self.add_error_flag()
elif status_code >= 400:
self.add_error_flag()
|
When a trace entity is generated under the http context,
the status code will affect this entity's fault/error/throttle flags.
Flip these flags based on status code.
|
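The flag logic can be checked with a bare stand-in entity; calling the function unbound passes the stand-in as self (the real entity's _check_ended() raises if the entity has already ended):
class FakeEntity(object):
    def __init__(self):
        self.fault = self.error = self.throttle = False
    def _check_ended(self):
        pass
    def add_fault_flag(self):
        self.fault = True
    def add_error_flag(self):
        self.error = True
    def add_throttle_flag(self):
        self.throttle = True

e = FakeEntity()
apply_status_code(e, 429)
assert e.throttle and e.error and not e.fault
apply_status_code(e, 503)
assert e.fault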
def get_system_device_models(auth, url):
"""Takes string no input to issue RESTUL call to HP IMC\n
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionaries where each dictionary represents a single device model
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> device_models = get_system_device_models(auth.creds, auth.url)
>>> assert type(device_models) is list
>>> assert 'virtualDeviceName' in device_models[0]
"""
get_system_device_model_url = '/imcrs/plat/res/model?start=0&size=10000&orderBy=id&desc=false&total=false'
f_url = url + get_system_device_model_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
system_device_model = (json.loads(r.text))
return system_device_model['deviceModel']
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_details: An Error has occured"
|
Issues a RESTful call to HP IMC to gather the list of device models\n
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionaries where each dictionary represents a single device model
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> device_models = get_system_device_models(auth.creds, auth.url)
>>> assert type(device_models) is list
>>> assert 'virtualDeviceName' in device_models[0]
|
def parse_frequency(variant, info_key):
"""Parse any frequency from the info dict
Args:
variant(cyvcf2.Variant)
info_key(str)
Returns:
frequency(float): or None if frequency does not exist
"""
raw_annotation = variant.INFO.get(info_key)
raw_annotation = None if raw_annotation == '.' else raw_annotation
frequency = float(raw_annotation) if raw_annotation else None
return frequency
|
Parse any frequency from the info dict
Args:
variant(cyvcf2.Variant)
info_key(str)
Returns:
frequency(float): or None if frequency does not exist
|
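A quick check with a hypothetical stand-in for cyvcf2.Variant; anything exposing an INFO mapping that supports .get() behaves the same way here:
class FakeVariant(object):
    def __init__(self, info):
        self.INFO = info

assert parse_frequency(FakeVariant({'AF': '0.01'}), 'AF') == 0.01
assert parse_frequency(FakeVariant({'AF': '.'}), 'AF') is None
assert parse_frequency(FakeVariant({}), 'AF') is None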
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
|
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
|
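The two escaping steps in isolation, showing why the emitted line stays a single valid string literal in the generated code:
import re

value = 'He said "hi"\nBye'
value = re.sub('"', r'\\"', value)   # escape double quotes
value = re.sub('\n', r'\\n', value)  # escape the newline into a literal \n
print('__result += "' + value + '";')
# __result += "He said \"hi\"\nBye";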
def get_context_loop_positions(context):
"""
Return the paginated current position within a loop,
and the non-paginated position.
"""
try:
loop_counter = context['forloop']['counter']
except KeyError:
return 0, 0
try:
page = context['page_obj']
except KeyError:
return loop_counter, loop_counter
total_loop_counter = ((page.number - 1) * page.paginator.per_page +
loop_counter)
return total_loop_counter, loop_counter
|
Return the paginated current position within a loop,
and the non-paginated position.
|
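Exercising the helper with a minimal fake context: on page 3 of a paginator showing 10 items per page, loop counter 4 is overall position 24:
class FakePaginator(object):
    per_page = 10

class FakePage(object):
    number = 3
    paginator = FakePaginator()

context = {'forloop': {'counter': 4}, 'page_obj': FakePage()}
assert get_context_loop_positions(context) == (24, 4)
assert get_context_loop_positions({}) == (0, 0)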
def pwd_phasebin(phases, mags, binsize=0.002, minbin=9):
'''
This bins the phased mag series using the given binsize.
'''
bins = np.arange(0.0, 1.0, binsize)
binnedphaseinds = npdigitize(phases, bins)
binnedphases, binnedmags = [], []
for x in npunique(binnedphaseinds):
thisbin_inds = binnedphaseinds == x
thisbin_phases = phases[thisbin_inds]
thisbin_mags = mags[thisbin_inds]
if thisbin_phases.size > minbin:  # number of points in this bin, not the full array size
binnedphases.append(npmedian(thisbin_phases))
binnedmags.append(npmedian(thisbin_mags))
return np.array(binnedphases), np.array(binnedmags)
|
This bins the phased mag series using the given binsize.
|
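The same binning written against plain numpy names on synthetic data (the original assumes digitize/unique/median imported as npdigitize etc., with the bin-count check as fixed above):
import numpy as np

rng = np.random.default_rng(0)
phases = rng.uniform(0.0, 1.0, 5000)
mags = np.sin(2.0 * np.pi * phases) + rng.normal(0.0, 0.1, phases.size)

bins = np.arange(0.0, 1.0, 0.002)
inds = np.digitize(phases, bins)
binned = [(np.median(phases[inds == x]), np.median(mags[inds == x]))
          for x in np.unique(inds) if (inds == x).sum() > 9]
binphases, binmags = map(np.array, zip(*binned))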
def sql(line, cell=None):
""" Create a SQL module with one or more queries. Use %sql --help for more details.
The supported syntax is:
%%sql [--module <modulename>]
[<optional Python code for default argument values>]
[<optional named queries>]
[<optional unnamed query>]
At least one query should be present. Named queries should start with:
DEFINE QUERY <name>
on a line by itself.
Args:
line: the optional arguments following '%%sql'.
cell: the contents of the cell; Python code for arguments followed by SQL queries.
"""
if cell is None:
_sql_parser.print_help()
else:
return handle_magic_line(line, cell, _sql_parser)
|
Create a SQL module with one or more queries. Use %sql --help for more details.
The supported syntax is:
%%sql [--module <modulename>]
[<optional Python code for default argument values>]
[<optional named queries>]
[<optional unnamed query>]
At least one query should be present. Named queries should start with:
DEFINE QUERY <name>
on a line by itself.
Args:
line: the optional arguments following '%%sql'.
cell: the contents of the cell; Python code for arguments followed by SQL queries.
|
def bind_arguments(func, args, kwargs):
"""Bind the arguments provided into a dict. When passed a function,
a tuple of arguments and a dict of keyword arguments `bind_arguments`
returns a dict of names as the function would see it. This can be useful
to implement a cache decorator that uses the function arguments to build
the cache key based on the values of the arguments.
:param func: the function the arguments should be bound for.
:param args: tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:return: a :class:`dict` of bound keyword arguments.
"""
(
args,
kwargs,
missing,
extra,
extra_positional,
arg_spec,
vararg_var,
kwarg_var,
) = _parse_signature(func)(args, kwargs)
values = {}
for (name, _has_default, _default), value in zip(arg_spec, args):
values[name] = value
if vararg_var is not None:
values[vararg_var] = tuple(extra_positional)
elif extra_positional:
raise TypeError("too many positional arguments")
if kwarg_var is not None:
multikw = set(extra) & set([x[0] for x in arg_spec])
if multikw:
raise TypeError(
"got multiple values for keyword argument " + repr(next(iter(multikw)))
)
values[kwarg_var] = extra
elif extra:
raise TypeError("got unexpected keyword argument " + repr(next(iter(extra))))
return values
|
Bind the arguments provided into a dict. When passed a function,
a tuple of arguments and a dict of keyword arguments `bind_arguments`
returns a dict of names as the function would see it. This can be useful
to implement a cache decorator that uses the function arguments to build
the cache key based on the values of the arguments.
:param func: the function the arguments should be bound for.
:param args: tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:return: a :class:`dict` of bound keyword arguments.
|
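bind_arguments relies on werkzeug's internal _parse_signature; for illustration only, its visible behavior can be approximated with the standard library (handling of defaults may differ slightly):
import inspect

def bind_with_inspect(func, args, kwargs):
    bound = inspect.signature(func).bind(*args, **kwargs)
    bound.apply_defaults()
    return dict(bound.arguments)

def greet(name, punctuation='!', *rest, **extra):
    pass

print(bind_with_inspect(greet, ('world',), {'lang': 'en'}))
# {'name': 'world', 'punctuation': '!', 'rest': (), 'extra': {'lang': 'en'}}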
def compute_Pi_V(self, CDR3_seq, V_usage_mask):
"""Compute Pi_V.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V)*P(delV|V). This corresponds to V_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(V)*P(delV|V) into the correct form for
a Pi array or V_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V_{x_1} given the 'amino acid'.
Returns
-------
Pi_V : ndarray
(4, 3L) array corresponding to V_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
"""
#Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
#furthermore, the genomic sequence should be pruned to start at the conserved C
Pi_V = np.zeros((4, len(CDR3_seq)*3)) #Holds the aggregate weight for each nt possibility and position
alignment_lengths = []
for V_in in V_usage_mask:
try:
cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
except IndexError:
print('Check provided V usage mask. Contains indices out of allowed range.')
continue
current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
alignment_lengths += [current_alignment_length]
current_Pi_V = np.zeros((4, len(CDR3_seq)*3))
if current_alignment_length > 0:
#For first and last nt in a codon use PVdelV_nt_pos_vec
current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
for pos in range(1, current_alignment_length, 3): #for middle nt use PVdelV_2nd_nt_pos_per_aa_vec
current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos//3]][:, pos]
Pi_V[:, :current_alignment_length] += current_Pi_V[:, :current_alignment_length]
return Pi_V, max(alignment_lengths)
|
Compute Pi_V.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V)*P(delV|V). This corresponds to V_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(V)*P(delV|V) into the correct form for
a Pi array or V_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V_{x_1} given the 'amino acid'.
Returns
-------
Pi_V : ndarray
(4, 3L) array corresponding to V_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
|
def single_column_accuracy_comparison():
"""
Plot accuracy of the ideal observer (with and without locations) as the number
of sensations increases.
"""
pointRange = 1
numTrials = 10
args = []
resultsDir = os.path.dirname(os.path.realpath(__file__))
for t in range(numTrials):
for useLocation in [0, 1]:
args.append(
{"numObjects": 100,
"numLocations": 10,
"numFeatures": 10,
"numColumns": 1,
"trialNum": t,
"pointRange": pointRange,
"numPoints": 10,
"useLocation": useLocation
}
)
print "{} experiments to run, {} workers".format(len(args), cpu_count())
idealResultsFile = os.path.join(resultsDir, "ideal_model_result.pkl")
# Run all experiments and pickle results for later use
pool = Pool(processes=cpu_count())
resultsIdeal = pool.map(run_ideal_classifier, args)
with open(idealResultsFile, "wb") as f:
cPickle.dump(resultsIdeal, f)
# run HTM network
columnRange = [1]
objectRange = [100]
numAmbiguousLocationsRange = [0]
htmResultsFile = os.path.join(resultsDir, "single_column_convergence_results.pkl")
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=[10],
numColumns=columnRange,
numPoints=10,
settlingTime=1,
nTrials=numTrials,
numWorkers=cpu_count(),
ambiguousLocationsRange=numAmbiguousLocationsRange,
resultsName=htmResultsFile)
# Read results from pickle files
with open(idealResultsFile, "rb") as f:
resultsIdeal = cPickle.load(f)
with open(htmResultsFile, "rb") as f:
resultsModel = cPickle.load(f)
# plot accuracy across sensations
accuracyIdeal = 0
accuracyBOF = 0
for r in resultsIdeal:
if r["useLocation"]:
accuracyIdeal += np.array(r['accuracy'])
else:
accuracyBOF += np.array(r['accuracy'])
accuracyIdeal /= len(resultsIdeal) / 2
accuracyBOF /= len(resultsIdeal) / 2
numTouches = len(accuracyIdeal)
accuracyModel = 0
for r in resultsModel:
accuracyModel += np.array(r['classificationPerSensation'])
accuracyModel /= len(resultsModel)
plt.figure()
plt.plot(np.arange(numTouches)+1, accuracyIdeal, '-o', label='Ideal observer (with location')
plt.plot(np.arange(numTouches) + 1, accuracyBOF, '-s', label='Ideal observer (no location)')
plt.plot(np.arange(numTouches)+1, accuracyModel, '-^', label='Sensorimotor network')
plt.xlabel("Number of sensations")
plt.ylabel("Accuracy")
plt.legend()
plt.savefig('plots/ideal_observer_comparison_single_column.pdf')
|
Plot accuracy of the ideal observer (with and without locations) as the number
of sensations increases.
|
def send_with_options(self, *, args=None, kwargs=None, delay=None, **options):
"""Asynchronously send a message to this actor, along with an
arbitrary set of processing options for the broker and
middleware.
Parameters:
args(tuple): Positional arguments that are passed to the actor.
kwargs(dict): Keyword arguments that are passed to the actor.
delay(int): The minimum amount of time, in milliseconds, the
message should be delayed by.
**options(dict): Arbitrary options that are passed to the
broker and any registered middleware.
Returns:
Message: The enqueued message.
"""
message = self.message_with_options(args=args, kwargs=kwargs, **options)
return self.broker.enqueue(message, delay=delay)
|
Asynchronously send a message to this actor, along with an
arbitrary set of processing options for the broker and
middleware.
Parameters:
args(tuple): Positional arguments that are passed to the actor.
kwargs(dict): Keyword arguments that are passed to the actor.
delay(int): The minimum amount of time, in milliseconds, the
message should be delayed by.
**options(dict): Arbitrary options that are passed to the
broker and any registered middleware.
Returns:
Message: The enqueued message.
|
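A typical call pattern for this method in dramatiq, sketched against the in-memory StubBroker; the actor and its arguments are illustrative:
import dramatiq
from dramatiq.brokers.stub import StubBroker

dramatiq.set_broker(StubBroker())

@dramatiq.actor
def add(x, y):
    return x + y

# Enqueue with custom positional args and a 10 second minimum delay.
message = add.send_with_options(args=(1, 2), delay=10000)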
def by_pdb(self, pdb_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
'''Returns a list of all PDB files which contain protein sequences similar to the protein sequences of pdb_id.
Only protein chains are considered in the matching so e.g. some results may have DNA or RNA chains or ligands
while some may not.
'''
self.log('BLASTing {0}'.format(pdb_id), silent, colortext.pcyan)
# Preamble
matrix = matrix or self.matrix
cut_off = cut_off or self.cut_off
sequence_identity_cut_off = sequence_identity_cut_off or self.sequence_identity_cut_off
# Parse PDB file
p = self.bio_cache.get_pdb_object(pdb_id)
chain_ids = sorted(p.seqres_sequences.keys())
assert(chain_ids)
# Run BLAST over all chains
hits = set(self.blast_by_pdb_chain(pdb_id, chain_ids[0], cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off, take_top_percentile = take_top_percentile, silent = silent))
for chain_id in chain_ids[1:]:
chain_hits = self.blast_by_pdb_chain(pdb_id, chain_id, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off, take_top_percentile = take_top_percentile)
if chain_hits is not None:
# None suggests that the chain was not a protein chain whereas an empty list suggests a protein chain with no hits
hits = hits.intersection(set(chain_hits))
return sorted(hits)
|
Returns a list of all PDB files which contain protein sequences similar to the protein sequences of pdb_id.
Only protein chains are considered in the matching so e.g. some results may have DNA or RNA chains or ligands
while some may not.
|
def _setup_arch(self, arch_mode=None):
"""Set up architecture.
"""
# set up architecture information
self.arch_info = None
if self.binary.architecture == ARCH_X86:
self._setup_x86_arch(arch_mode)
else:
# TODO: add arch to the binary file class.
self._setup_arm_arch(arch_mode)
|
Set up architecture.
|
def create(self, source, destination, gateway_ip, comment=None):
"""
Add a new policy route to the engine.
:param str source: network address with /cidr
:param str destination: network address with /cidr
:param str gateway: IP address, must be on source network
:param str comment: optional comment
"""
self.items.append(dict(
source=source, destination=destination,
gateway_ip=gateway_ip, comment=comment))
|
Add a new policy route to the engine.
:param str source: network address with /cidr
:param str destination: network address with /cidr
:param str gateway: IP address, must be on source network
:param str comment: optional comment
|
def stop_channels(self):
"""Stops all the running channels for this kernel.
"""
if self.shell_channel.is_alive():
self.shell_channel.stop()
if self.sub_channel.is_alive():
self.sub_channel.stop()
if self.stdin_channel.is_alive():
self.stdin_channel.stop()
if self.hb_channel.is_alive():
self.hb_channel.stop()
|
Stops all the running channels for this kernel.
|
def draw(self, milliseconds, surface):
"""Render the bounds of this collision ojbect onto the specified surface."""
super(CollidableObj, self).draw(milliseconds, surface)
|
Render the bounds of this collision object onto the specified surface.
|
def ticker_pitch(ax=None):
'''Set the y-axis of the given axes to MIDI frequencies
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
'''
ax, _ = __get_axes(ax=ax)
ax.yaxis.set_major_formatter(FMT_MIDI_HZ)
|
Set the y-axis of the given axes to MIDI frequencies
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
|
def store_image(cls, http_client, link_hash, src, config):
"""\
Writes an image src http string to disk as a temporary file
and returns the LocallyStoredImage object
that has the info you should need on the image
"""
# check for a cache hit already on disk
image = cls.read_localfile(link_hash, src, config)
if image:
return image
# no cache found; do something else
# parse base64 image
if src.startswith('data:image'):
image = cls.write_localfile_base64(link_hash, src, config)
return image
# download the image
data = http_client.fetch(src)
if data:
image = cls.write_localfile(data, link_hash, src, config)
if image:
return image
return None
|
\
Writes an image src http string to disk as a temporary file
and returns the LocallyStoredImage object
that has the info you should need on the image
|
def send(self, template, email, _vars=None, options=None, schedule_time=None, limit=None):
"""
Remotely send an email template to a single email address.
http://docs.sailthru.com/api/send
@param template: template string
@param email: Email value
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param limit: optional dictionary to name, time, and handle conflicts of limits
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
"""
_vars = _vars or {}
options = options or {}
data = {'template': template,
'email': email,
'vars': _vars,
'options': options.copy()}
if limit:
data['limit'] = limit.copy()
if schedule_time is not None:
data['schedule_time'] = schedule_time
return self.api_post('send', data)
|
Remotely send an email template to a single email address.
http://docs.sailthru.com/api/send
@param template: template string
@param email: Email value
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param limit: optional dictionary to name, time, and handle conflicts of limits
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
|
def heal(self, channel=0, method="linear", fill_value=np.nan, verbose=True):
"""
Remove nans from channel using interpolation.
Parameters
----------
channel : int or str (optional)
Channel to heal. Default is 0.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
fill_value : number-like (optional)
The value written to pixels that cannot be filled by interpolation.
Default is nan.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
.. note:: Healing may take several minutes for large datasets.
Interpolation time goes as nearest, linear, then cubic.
"""
warnings.warn("heal", category=wt_exceptions.EntireDatasetInMemoryWarning)
timer = wt_kit.Timer(verbose=False)
with timer:
# channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
values = self.channels[channel_index][:]
points = [axis[:] for axis in self._axes]
xi = tuple(np.meshgrid(*points, indexing="ij"))
# 'undo' gridding
arr = np.zeros((len(self._axes) + 1, values.size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = values.flatten()
# remove nans
arr = arr[:, ~np.isnan(arr).any(axis=0)]
# grid data wants tuples
tup = tuple([arr[i] for i in range(len(arr) - 1)])
# grid data
out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)
self.channels[channel_index][:] = out
# print
if verbose:
print(
"channel {0} healed in {1} seconds".format(
channel.name, np.around(timer.interval, decimals=3)
)
)
|
Remove nans from channel using interpolation.
Parameters
----------
channel : int or str (optional)
Channel to heal. Default is 0.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
fill_value : number-like (optional)
The value written to pixels that cannot be filled by interpolation.
Default is nan.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
.. note:: Healing may take several minutes for large datasets.
Interpolation time goes as nearest, linear, then cubic.
|
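The core of the healing step in isolation: scipy's griddata interpolates over the non-nan points of a gridded channel and fills the hole:
import numpy as np
from scipy.interpolate import griddata

x, y = np.meshgrid(np.arange(5.0), np.arange(5.0), indexing='ij')
values = x + y
values[2, 2] = np.nan  # a hole to heal

pts = np.column_stack([x.ravel(), y.ravel(), values.ravel()])
pts = pts[~np.isnan(pts).any(axis=1)]  # drop the nan row
filled = griddata(pts[:, :2], pts[:, 2], (x, y), method='linear', fill_value=np.nan)
assert np.isclose(filled[2, 2], 4.0)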
def session_commit(self, session):
"""Send session_commit signal in sqlalchemy ``before_commit``.
This marks the success of session so the session may enter commit
state.
"""
# this may happen when there's nothing to commit
if not hasattr(session, 'meepo_unique_id'):
self.logger.debug("skipped - session_commit")
return
# normal session pub
self.logger.debug("%s - session_commit" % session.meepo_unique_id)
self._session_pub(session)
signal("session_commit").send(session)
self._session_del(session)
|
Send session_commit signal in sqlalchemy ``before_commit``.
This marks the success of session so the session may enter commit
state.
|
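Receiving the signal emitted above, using blinker (the library behind signal() here); the string sender is a stand-in for a real session object:
from blinker import signal

def on_commit(sender):
    print('session committed:', sender)

signal('session_commit').connect(on_commit)
signal('session_commit').send('fake-session')  # prints: session committed: fake-session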
def cli(env, identifier, postinstall, key, image):
"""Reload operating system on a virtual server."""
vsi = SoftLayer.VSManager(env.client)
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
keys = []
if key:
for single_key in key:
resolver = SoftLayer.SshKeyManager(env.client).resolve_ids
key_id = helpers.resolve_id(resolver, single_key, 'SshKey')
keys.append(key_id)
if not (env.skip_confirmations or formatting.no_going_back(vs_id)):
raise exceptions.CLIAbort('Aborted')
vsi.reload_instance(vs_id,
post_uri=postinstall,
ssh_keys=keys,
image_id=image)
|
Reload operating system on a virtual server.
|
def send_email(self, **kwargs):
"""
Sends an email using Mandrill's API. Returns a
Requests :class:`Response` object.
At a minimum kwargs must contain the keys to, from_email, and text.
Everything passed as kwargs except for the keywords 'key', 'async',
and 'ip_pool' will be sent as key-value pairs in the message object.
Reference https://mandrillapp.com/api/docs/messages.JSON.html#method=send
for all the available options.
"""
endpoint = self.messages_endpoint
data = {
'async': kwargs.pop('async', False),
'ip_pool': kwargs.pop('ip_pool', ''),
'key': kwargs.pop('key', self.api_key),
}
if not data.get('key', None):
raise ValueError('No Mandrill API key has been configured')
# Sending a template through Mandrill requires a couple extra args
# and a different endpoint.
if kwargs.get('template_name', None):
data['template_name'] = kwargs.pop('template_name')
data['template_content'] = kwargs.pop('template_content', [])
endpoint = self.templates_endpoint
data['message'] = kwargs
if self.app:
data['message'].setdefault(
'from_email',
self.app.config.get('MANDRILL_DEFAULT_FROM', None)
)
if endpoint != self.templates_endpoint and not data['message'].get('from_email', None):
raise ValueError(
'No from email was specified and no default was configured')
response = requests.post(endpoint,
data=json.dumps(data),
headers={'Content-Type': 'application/json'})
response.raise_for_status()
return response
|
Sends an email using Mandrill's API. Returns a
Requests :class:`Response` object.
At a minimum kwargs must contain the keys to, from_email, and text.
Everything passed as kwargs except for the keywords 'key', 'async',
and 'ip_pool' will be sent as key-value pairs in the message object.
Reference https://mandrillapp.com/api/docs/messages.JSON.html#method=send
for all the available options.
|
def _canBeExpanded( self, headVerbRoot, headVerbWID, suitableNomAdvExpansions, expansionVerbs, widToToken ):
''' Determines whether the context is clear/unambiguous enough for expanding the verb chain:
1) There is exactly one 'nom/adv' candidate and exactly one Vinf candidate;
2) The nom/adv does not belong to any larger phrase (checked via the _isLikelyNotPhrase() method);
If the conditions are met, returns the verb to be added from the expansionVerbs list;
otherwise returns None;
'''
if len(suitableNomAdvExpansions)==1 and expansionVerbs:
# Check whether there is exactly one verb suitable as an expansion (if there
# are more, the context is ambiguous and it is hard to decide whether
# expanding is worthwhile)
suitableExpansionVerbs = \
[expVerb for expVerb in expansionVerbs if expVerb[2] == suitableNomAdvExpansions[0][2]]
if len( suitableExpansionVerbs ) == 1:
# Check that the nom/adv does not belong to any larger phrase (i.e. is not the
# head word of a phrase);
nomAdvWID = suitableNomAdvExpansions[0][0]
if self._isLikelyNotPhrase( headVerbRoot, headVerbWID, nomAdvWID, widToToken ):
return suitableExpansionVerbs[0]
return None
|
Determines whether the context is clear/unambiguous enough for expanding the verb chain:
1) There is exactly one 'nom/adv' candidate and exactly one Vinf candidate;
2) The nom/adv does not belong to any larger phrase (checked via the _isLikelyNotPhrase() method);
If the conditions are met, returns the verb to be added from the expansionVerbs list;
otherwise returns None;
|
def mmols(self):
""" Dict of filepaths for all mmol files associated with code.
Notes
-----
Downloads mmol files if not already present.
Returns
-------
mmols_dict : dict, or None.
Keys : int
mmol number
Values : str
Filepath for the corresponding mmol file.
"""
mmols_dict = {}
mmol_dir = os.path.join(self.parent_dir, 'structures')
if not os.path.exists(mmol_dir):
os.makedirs(mmol_dir)
mmol_file_names = ['{0}_{1}.mmol'.format(self.code, i) for i in range(1, self.number_of_mmols + 1)]
mmol_files = [os.path.join(mmol_dir, x) for x in mmol_file_names]
for i, mmol_file in enumerate(mmol_files):
mmols_dict[i + 1] = mmol_file
# If file does not exist yet, download the mmol and write to mmol_file.
if not os.path.exists(mmol_file):
get_mmol(self.code, mmol_number=i + 1, outfile=mmol_file)
return mmols_dict
|
Dict of filepaths for all mmol files associated with code.
Notes
-----
Downloads mmol files if not already present.
Returns
-------
mmols_dict : dict, or None.
Keys : int
mmol number
Values : str
Filepath for the corresponding mmol file.
|
def constant_image_value(image, crs='EPSG:32613', scale=1):
"""Extract the output value from a calculation done with constant images"""
return getinfo(ee.Image(image).reduceRegion(
reducer=ee.Reducer.first(), scale=scale,
geometry=ee.Geometry.Rectangle([0, 0, 10, 10], crs, False)))
|
Extract the output value from a calculation done with constant images
|
def get_auth_stdin(refresh_token_filename, manual_login=False):
"""Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails.
"""
refresh_token_cache = RefreshTokenCache(refresh_token_filename)
return get_auth(
CredentialsPrompt(), refresh_token_cache, manual_login=manual_login
)
|
Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails.
|
def _set_uplink_switch(self, v, load=False):
"""
Setter method for uplink_switch, mapped from YANG variable /uplink_switch (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_uplink_switch is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_uplink_switch() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=uplink_switch.uplink_switch, is_container='container', presence=False, yang_name="uplink-switch", rest_name="uplink-switch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable Protected ports capability', u'callpoint': u'global-uplink-switch-cfg-cp', u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """uplink_switch must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=uplink_switch.uplink_switch, is_container='container', presence=False, yang_name="uplink-switch", rest_name="uplink-switch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable Protected ports capability', u'callpoint': u'global-uplink-switch-cfg-cp', u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__uplink_switch = t
if hasattr(self, '_set'):
self._set()
|
Setter method for uplink_switch, mapped from YANG variable /uplink_switch (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_uplink_switch is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_uplink_switch() directly.
|
def remove(self, name, func):
''' Remove a callback from a hook. '''
was_empty = self._empty()
if name in self.hooks and func in self.hooks[name]:
self.hooks[name].remove(func)
if self.app and not was_empty and self._empty(): self.app.reset()
|
Remove a callback from a hook.
|
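A minimal stand-in for the registry this method lives on, showing that app.reset() fires only on the transition back to an empty hook table:
class FakeApp(object):
    def __init__(self):
        self.resets = 0
    def reset(self):
        self.resets += 1

class FakeHooks(object):
    def __init__(self, app):
        self.hooks, self.app = {}, app
    def _empty(self):
        return not any(self.hooks.values())

app = FakeApp()
h = FakeHooks(app)
cb = lambda: None
h.hooks['before_request'] = [cb]
remove(h, 'before_request', cb)  # called unbound, with the stand-in as self
assert app.resets == 1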
def get_slice_location(dcmdata, teil=None):
""" get location of the slice
:param dcmdata: dicom data structure
:param teil: filename. Used when slice location doesn't exist
:return:
"""
slice_location = None
if hasattr(dcmdata, 'SliceLocation'):
# print(dcmdata.SliceLocation)
# print(type(dcmdata.SliceLocation))
try:
slice_location = float(dcmdata.SliceLocation)
except Exception as exc:
logger.info("It is not possible to use SliceLocation")
logger.debug(traceback.format_exc())
if slice_location is None and hasattr(dcmdata, "SliceThickness") and teil is not None:
logger.debug(
"Estimating SliceLocation wiht image number and SliceThickness"
)
# from builtins import map
i = list(map(int, re.findall(r'\d+', teil)))
i = i[-1]
try:
slice_location = float(i * float(dcmdata.SliceThickness))
except ValueError as e:
print(type(dcmdata.SliceThickness))
print(dcmdata.SliceThickness)
logger.debug(traceback.format_exc())
logger.debug("SliceThickness problem")
if slice_location is None and hasattr(dcmdata, "ImagePositionPatient") and hasattr(dcmdata,
"ImageOrientationPatient"):
if dcmdata.ImageOrientationPatient == [1, 0, 0, 0, 1, 0]:
slice_location = dcmdata.ImagePositionPatient[2]
else:
logger.warning("Unknown ImageOrientationPatient")
if slice_location is None:
logger.warning("Problem with slice location")
return slice_location
|
get location of the slice
:param dcmdata: dicom data structure
:param teil: filename. Used when slice location doesn't exist
:return:
|
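Probing the main path and the SliceThickness fallback with hypothetical stand-ins for a pydicom dataset (the module-level logger and re imports are assumed to be in scope):
from types import SimpleNamespace

with_location = SimpleNamespace(SliceLocation='12.5')
assert get_slice_location(with_location) == 12.5

# No SliceLocation: estimate from image number (7) * SliceThickness (2.0).
by_thickness = SimpleNamespace(SliceThickness=2.0)
assert get_slice_location(by_thickness, teil='slice_0007.dcm') == 14.0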
def set_helper(self, helper):
"""
.. todo::
Document this.
"""
# We don't want to "unset" in this method.
if helper is None:
return
# Get the right kind of helper if given a DockerHelper
if isinstance(helper, DockerHelper):
helper = helper._helper_for_model(self.__model_type__)
# We already have this one.
if helper is self._helper:
return
if self._helper is None:
self._helper = helper
else:
raise RuntimeError('Cannot replace existing helper.')
|
.. todo::
Document this.
|
def copy(self, parent=None):
"""Copies an existing structure and all of it's children"""
new = Structure(None, parent=parent)
new.key = self.key
new.type_ = self.type_
new.val_guaranteed = self.val_guaranteed
new.key_guaranteed = self.key_guaranteed
for child in self.children:
new.children.append(child.copy(new))
return new
|
Copies an existing structure and all of its children
|
def resolve_sound(self, sound):
"""Function tries to identify a sound in the data.
Notes
-----
The function tries to resolve sounds by taking a sound with less complex
features in order to yield the next approximate sound class, if the
transcription data are sound classes.
"""
sound = sound if isinstance(sound, Sound) else self.system[sound]
if sound.name in self.data:
return '//'.join([x['grapheme'] for x in self.data[sound.name]])
raise KeyError(":td:resolve_sound: No sound could be found.")
|
Function tries to identify a sound in the data.
Notes
-----
The function tries to resolve sounds by taking a sound with less complex
features in order to yield the next approximate sound class, if the
transcription data are sound classes.
|
def data_interp(self, i, currenttime):
"""
Method to streamline request for data from cache,
Uses linear interpolation between timesteps to
get u,v,w,temp,salt
"""
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to release cache file so I can read from it...")
timer.sleep(2)
pass
if self.need_data(i+1):
# Acquire lock for asking for data
self.data_request_lock.acquire()
self.has_data_request_lock.value = os.getpid()
try:
# Do I still need data?
if self.need_data(i+1):
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
# Open netcdf file on disk from commondataset
self.dataset.opennc()
# Get the indices for the current particle location
indices = self.dataset.get_indices('u', timeinds=[np.asarray([i-1])], point=self.part.location )
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
# Override the time
# get the current time index data
self.point_get.value = [indices[0] + 1, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to update cache with the CURRENT time index")
timer.sleep(2)
pass
# Do we still need to get the next timestep?
if self.need_data(i+1):
# get the next time index data
self.point_get.value = [indices[0] + 2, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to update cache with the NEXT time index")
timer.sleep(2)
pass
except StandardError:
logger.warn("Particle failed to request data correctly")
raise
finally:
# Release lock for asking for data
self.has_data_request_lock.value = -1
self.data_request_lock.release()
if self.caching is True:
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
try:
# Open the Cache netCDF file on disk
self.dataset.opennc()
# Grab data at time index closest to particle location
u = [np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i+1])], point=self.part.location )))]
v = [np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i+1])], point=self.part.location )))]
# if there is vertical velocity in the dataset, get it
if 'w' in self.dataset.nc.variables:
w = [np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i+1])], point=self.part.location )))]
else:
w = [0.0, 0.0]
# If there is salt and temp in the dataset, get it
if self.temp_name is not None and self.salt_name is not None:
temp = [np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i+1])], point=self.part.location )))]
salt = [np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i+1])], point=self.part.location )))]
# Check for nans that occur in the ocean (happens because
# of model and coastline resolution mismatches)
if np.isnan(u).any() or np.isnan(v).any() or np.isnan(w).any():
# Take the mean of the closest 4 points
# If this includes nan which it will, result is nan
uarray1 = self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location, num=2)
varray1 = self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location, num=2)
uarray2 = self.dataset.get_values('u', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
varray2 = self.dataset.get_values('v', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
if 'w' in self.dataset.nc.variables:
warray1 = self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location, num=2)
warray2 = self.dataset.get_values('w', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
w = [warray1.mean(), warray2.mean()]
else:
w = [0.0, 0.0]
if self.temp_name is not None and self.salt_name is not None:
temparray1 = self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location, num=2)
saltarray1 = self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location, num=2)
temparray2 = self.dataset.get_values('temp', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
saltarray2 = self.dataset.get_values('salt', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
temp = [temparray1.mean(), temparray2.mean()]
salt = [saltarray1.mean(), saltarray2.mean()]
u = [uarray1.mean(), uarray2.mean()]
v = [varray1.mean(), varray2.mean()]
# Linear interp of data between timesteps
currenttime = date2num(currenttime)
timevar = self.timevar.datenum
u = self.linterp(timevar[i:i+2], u, currenttime)
v = self.linterp(timevar[i:i+2], v, currenttime)
w = self.linterp(timevar[i:i+2], w, currenttime)
if self.temp_name is not None and self.salt_name is not None:
temp = self.linterp(timevar[i:i+2], temp, currenttime)
salt = self.linterp(timevar[i:i+2], salt, currenttime)
if self.temp_name is None:
temp = np.nan
if self.salt_name is None:
salt = np.nan
except StandardError:
logger.error("Error in data_interp method on ForceParticle")
raise
finally:
# If caching is False, we don't have to close the dataset. We can stay in read-only mode.
if self.caching is True:
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
return u, v, w, temp, salt
|
Method to streamline request for data from cache,
Uses linear interpolation between timesteps to
get u,v,w,temp,salt
|
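The time interpolation at the heart of this method, in isolation; linterp is assumed to behave like np.interp between the two bracketing timesteps:
import numpy as np

timevar = np.array([0.0, 3600.0])  # bracketing timesteps (s)
u = np.array([0.2, 0.4])           # u velocity at those timesteps
currenttime = 900.0
print(np.interp(currenttime, timevar, u))  # 0.25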
def expose_event(self, widget, event):
"""When an area of the window is exposed, we just copy out of the
server-side, off-screen surface to that area.
"""
x, y, width, height = event.area
self.logger.debug("surface is %s" % self.surface)
if self.surface is not None:
win = widget.get_window()
cr = win.cairo_create()
# set clip area for exposed region
cr.rectangle(x, y, width, height)
cr.clip()
# Paint from off-screen surface
cr.set_source_surface(self.surface, 0, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
return False
|
When an area of the window is exposed, we just copy out of the
server-side, off-screen surface to that area.
|
def linestyle(i,a=5,b=3):
'''
provide one out of 25 unique combinations of style, color and mark
use in combination with markevery=a+mod(i,b) to add spaced points,
here a would be the base spacing that would depend on the data
density, modulated with the number of lines to be plotted (b)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),linestyle(7)[0], markevery=linestyle(7)[1])
(c) 2014 FH
'''
lines=['-','--','-.',':']
points=['v','^','<','>','1','2','3','4','s','p','*','h','H','+','x','D','d','o']
colors=['b','g','r','c','m','k']
ls_string = colors[sc.mod(i,6)]+lines[sc.mod(i,4)]+points[sc.mod(i,18)]
mark_i = a+sc.mod(i,b)
return ls_string,int(mark_i)
|
provide one out of 25 unique combinations of style, color and mark
use in combination with markevery=a+mod(i,b) to add spaced points,
here a would be the base spacing that would depend on the data
density, modulated with the number of lines to be plotted (b)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),linestyle(7)[0], markevery=linestyle(7)[1])
(c) 2014 FH
|
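The same cycling written with the % operator (the original pulls in scipy only for sc.mod):
lines = ['-', '--', '-.', ':']
points = ['v', '^', '<', '>', '1', '2', '3', '4',
          's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd', 'o']
colors = ['b', 'g', 'r', 'c', 'm', 'k']

for i in range(5):
    print(colors[i % 6] + lines[i % 4] + points[i % 18], 5 + i % 3)
# b-v 5, g--^ 6, r-.< 7, c:> 5, m-1 6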
def condor_submit(cmd):
"""
Submits cmd to HTCondor queue
Parameters
----------
cmd: string
Command to be submitted
Returns
-------
int
returncode value from calling the submission command.
"""
returncode = subprocess.call('condor_status', shell=True)
if returncode != 0:
# CalledProcessError requires the return code and the command, not just a message.
raise CalledProcessError(returncode, 'condor_status')
sub_cmd = 'condor_qsub -shell n -b y -r y -N ' \
+ cmd.split()[0] + ' -m n'
log.info('Calling: ' + sub_cmd)
return subprocess.call(sub_cmd + ' ' + cmd, shell=True)
|
Submits cmd to HTCondor queue
Parameters
----------
cmd: string
Command to be submitted
Returns
-------
int
returncode value from calling the submission command.
|
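What the assembled submission line looks like for a hypothetical command:
cmd = 'run_analysis --run 42'  # hypothetical job command
sub_cmd = 'condor_qsub -shell n -b y -r y -N ' + cmd.split()[0] + ' -m n'
print(sub_cmd + ' ' + cmd)
# condor_qsub -shell n -b y -r y -N run_analysis -m n run_analysis --run 42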