code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def get_request_setting(self, service_id, version_number, name):
    """Fetch a single Request Settings object for the given service version."""
    url = "/service/%s/version/%d/request_settings/%s" % (
        service_id, version_number, name)
    return FastlyRequestSetting(self, self._fetch(url))
|
Gets the specified Request Settings object.
|
def _Operation(self,operation):
    """Execute the named operation task against one or more servers.

    Returns a clc.v2.Requests object.  A failure caused by the server(s)
    already being in the requested state is not raised as an error at this
    level; the Requests object is left to surface it.

    >>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').PowerOn().WaitUntilComplete()
    0
    """
    try:
        response = clc.v2.API.Call(
            'POST',
            'operations/%s/servers/%s' % (self.alias,operation),
            '["%s"]' % self.id,
            session=self.session)
    except clc.APIFailedResponse as e:
        # Most likely a queue add error presented as a 400.
        # Let the Requests object parse and report it.
        response = e.response_json
    return clc.v2.Requests(response, alias=self.alias, session=self.session)
|
Execute specified operations task against one or more servers.
Returns a clc.v2.Requests object. If error due to server(s) already being in
the requested state this is not raised as an error at this level.
>>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').PowerOn().WaitUntilComplete()
0
|
def update_subports(self, port):
    """Set port attributes for trunk subports.

    For baremetal deployments only: during the bind_port event, bind every
    subport of the parent port's trunk to the same host and then mark the
    trunk ACTIVE.
    """
    trunk_details = port.get('trunk_details')
    host_id = port.get(bc.dns.DNSNAME)
    elevated = bc.get_context().elevated()
    plugin = bc.get_plugin()
    for subport in trunk_details['sub_ports']:
        plugin.update_port(
            elevated, subport['port_id'],
            {'port': {bc.portbindings.HOST_ID: host_id,
                      'device_owner': bc.trunk_consts.TRUNK_SUBPORT_OWNER}})
    # All subports are bound -- flip the trunk to ACTIVE status.
    trunk_obj = bc.trunk_objects.Trunk.get_object(
        elevated, id=trunk_details['trunk_id'])
    trunk_obj.update(status=bc.trunk_consts.ACTIVE_STATUS)
|
Set port attributes for trunk subports.
For baremetal deployments only, set the neutron port attributes
during the bind_port event.
|
def is_secret_known(
        end_state: NettingChannelEndState,
        secrethash: SecretHash,
) -> bool:
    """Return True when the lock for `secrethash` has a revealed secret."""
    # The secret may have been learned off-chain or registered on-chain.
    known_offchain = secrethash in end_state.secrethashes_to_unlockedlocks
    known_onchain = secrethash in end_state.secrethashes_to_onchain_unlockedlocks
    return known_offchain or known_onchain
|
True if the `secrethash` is for a lock with a known secret.
|
def createEditor(self, parent, option, index):
    """Return the widget used to edit the item at ``index``.

    Args:
        parent (QWidget): parent widget.
        option (QStyleOptionViewItem): controls how the editor widget appears.
        index (QModelIndex): model data index.
    """
    spinbox = QtGui.QDoubleSpinBox(parent)
    try:
        spinbox.setMinimum(self.minimum)
        spinbox.setMaximum(self.maximum)
        spinbox.setSingleStep(self.singleStep)
        spinbox.setDecimals(self.decimals)
    except TypeError:
        # One of the configured limits is missing/invalid -- leave the
        # spinbox with its built-in default values.
        pass
    return spinbox
|
Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
|
def _build_predict(self, Xnew, full_cov=False):
    """
    Xnew is a data matrix, the points at which we want to predict.

    This method computes

        p(F* | Y)

    where F* are points on the GP at Xnew, Y are noisy observations at X.
    """
    # Center the observations by subtracting the mean function at X.
    y = self.Y - self.mean_function(self.X)
    # Cross-covariance between training inputs X and test inputs Xnew.
    Kmn = self.kern.K(self.X, Xnew)
    # Training covariance plus observation noise on the diagonal: K(X,X) + sigma^2 * I.
    Kmm_sigma = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance
    # Test covariance: full matrix or only its diagonal, depending on full_cov.
    Knn = self.kern.K(Xnew) if full_cov else self.kern.Kdiag(Xnew)
    f_mean, f_var = base_conditional(Kmn, Kmm_sigma, Knn, y, full_cov=full_cov, white=False)  # N x P, N x P or P x N x N
    # Add the mean function back to the de-meaned predictive mean.
    return f_mean + self.mean_function(Xnew), f_var
|
Xnew is a data matrix, the points at which we want to predict.
This method computes
p(F* | Y)
where F* are points on the GP at Xnew, Y are noisy observations at X.
|
def _collect_capacity_curves(data, direction="charge"):
"""Create a list of pandas.DataFrames, one for each charge step.
The DataFrames are named by its cycle number.
Input: CellpyData
Returns: list of pandas.DataFrames
minimum voltage value,
maximum voltage value"""
minimum_v_value = np.Inf
maximum_v_value = -np.Inf
charge_list = []
cycles = data.get_cycle_numbers()
for cycle in cycles:
try:
if direction == "charge":
q, v = data.get_ccap(cycle)
else:
q, v = data.get_dcap(cycle)
except NullData as e:
logging.warning(e)
break
else:
d = pd.DataFrame({"q": q, "v": v})
# d.name = f"{cycle}"
d.name = cycle
charge_list.append(d)
v_min = v.min()
v_max = v.max()
if v_min < minimum_v_value:
minimum_v_value = v_min
if v_max > maximum_v_value:
maximum_v_value = v_max
return charge_list, cycles, minimum_v_value, maximum_v_value
|
Create a list of pandas.DataFrames, one for each charge step.
The DataFrames are named by its cycle number.
Input: CellpyData
Returns: list of pandas.DataFrames
minimum voltage value,
maximum voltage value
|
def load_sub_plugins_from_str(cls, plugins_str):
    """
    Load plugin classes from a colon-separated list of plugin names.

    Returns dict with plugin name as key and class as value.
    """
    if not plugins_str:
        return {}
    return {
        plugin_name: load_plugin(plugin_name, MONITOR_DEFAULT_PLUGIN_MODULE)
        for plugin_name in plugins_str.split(":")
    }
|
Load plugin classes based on a colon-separated list of plugin names.
Returns dict with plugin name as key and class as value.
|
def _get_content_length(self,msg):
'''从消息中解析Content-length'''
m = re.search(r'[Cc]ontent-length:\s?(?P<len>\d+)',msg,re.S)
return (m and int(m.group('len'))) or 0
|
从消息中解析Content-length
|
def create_branches(self, branches):
    """
    Create branches from a TreeBuffer or dict mapping names to type names

    Parameters
    ----------
    branches : TreeBuffer or dict
    """
    # Normalize a plain dict into a TreeBuffer before delegating.
    buf = branches if isinstance(branches, TreeBuffer) else TreeBuffer(branches)
    self.set_buffer(buf, create_branches=True)
|
Create branches from a TreeBuffer or dict mapping names to type names
Parameters
----------
branches : TreeBuffer or dict
|
def read_config(cls, configparser):
    """Read configuration file options.

    Looks up a comma-separated "prefixes" option in the section named after
    the class; returns a dict with the lowercased, stripped prefix names.
    """
    section, option = cls.__name__, "prefixes"
    names = []
    if configparser.has_option(section, option):
        raw = configparser.get(section, option)
        names = [part.strip().lower() for part in raw.split(",")]
    return {option: names}
|
Read configuration file options.
|
def get_group(value):
    """ group = display-name ":" [group-list] ";" [CFWS]

    Parse an address group from the front of *value* and return a tuple of
    (Group token, remaining unparsed text).  Raises HeaderParseError when
    the required ':' or ';' delimiters are malformed.
    """
    group = Group()
    # A group must open with a display-name followed by ':'.
    token, value = get_display_name(value)
    if not value or value[0] != ':':
        raise errors.HeaderParseError("expected ':' at end of group "
                                      "display name but found '{}'".format(value))
    group.append(token)
    group.append(ValueTerminal(':', 'group-display-name-terminator'))
    value = value[1:]
    # Empty group ("name:;"): no group-list between ':' and ';'.
    if value and value[0] == ';':
        group.append(ValueTerminal(';', 'group-terminator'))
        return group, value[1:]
    token, value = get_group_list(value)
    group.append(token)
    if not value:
        # BUG FIX: the original used `if` here and then unconditionally
        # evaluated value[0], raising IndexError on a header truncated after
        # the group-list.  Record the defect and skip the ';' check instead
        # (matches CPython's email._header_value_parser.get_group).
        group.defects.append(errors.InvalidHeaderDefect(
            "end of header in group"))
    elif value[0] != ';':
        raise errors.HeaderParseError(
            "expected ';' at end of group but found {}".format(value))
    group.append(ValueTerminal(';', 'group-terminator'))
    value = value[1:]
    # Trailing comments / folding whitespace are allowed after ';'.
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        group.append(token)
    return group, value
|
group = display-name ":" [group-list] ";" [CFWS]
|
def get_client(service, service_type='client', **conn_args):
    """
    User function to get the correct client.

    Based on the GOOGLE_CLIENT_MAP dictionary, the return will be a cloud or general
    client that can interact with the desired service.

    :param service: GCP service to connect to. E.g. 'gce', 'iam'
    :type service: ``str``

    :param conn_args: Dictionary of connection arguments. 'project' is required.
                      'user_agent' can be specified and will be set in the client
                      returned.
    :type conn_args: ``dict``

    :return: client_details, client
    :rtype: ``tuple`` of ``dict``, ``object``
    """
    client_details = choose_client(service)
    user_agent = get_user_agent(**conn_args)
    if client_details:
        if client_details['client_type'] == 'cloud':
            client = get_gcp_client(
                mod_name=client_details['module_name'],
                pkg_name=conn_args.get('pkg_name', 'google.cloud'),
                key_file=conn_args.get('key_file', None),
                project=conn_args['project'], user_agent=user_agent)
        else:
            client = get_google_client(
                mod_name=client_details['module_name'],
                key_file=conn_args.get('key_file', None),
                user_agent=user_agent, api_version=conn_args.get('api_version', 'v1'))
    else:
        # No client known for this service; try the standard API.
        # (The original wrapped this call in ``try: ... except Exception as
        # e: raise e``, which does nothing except truncate the traceback --
        # let any error propagate naturally.)
        client = get_google_client(
            mod_name=service, key_file=conn_args.get('key_file', None),
            user_agent=user_agent, api_version=conn_args.get('api_version', 'v1'))
    return client_details, client
|
User function to get the correct client.
Based on the GOOGLE_CLIENT_MAP dictionary, the return will be a cloud or general
client that can interact with the desired service.
:param service: GCP service to connect to. E.g. 'gce', 'iam'
:type service: ``str``
:param conn_args: Dictionary of connection arguments. 'project' is required.
'user_agent' can be specified and will be set in the client
returned.
:type conn_args: ``dict``
:return: client_details, client
:rtype: ``tuple`` of ``dict``, ``object``
|
def _rm_units_from_var_name_single(var):
    """
    NOTE: USE THIS FOR SINGLE CELLS ONLY

    When parsing sheets, all variable names must be exact matches when
    cross-referencing the metadata and data sections.  However, sometimes
    people put "age (years BP)" in one section and "age" in the other, which
    causes problems.  This regex matches variableName cells and removes the
    "(years BP)" units where applicable.

    :param str var: Variable name
    :return str: Variable name
    """
    # Use the regex to match the cell.
    m = re.match(re_var_w_units, var)
    # The regex should match every variableName cell, but guard anyway.
    if m:
        # m.group(1): variableName
        # m.group(2): units in parenthesis (may not exist) -- discarded.
        try:
            var = m.group(1).strip().lower()
        except Exception:
            # Malformed cell somehow; return the original name as a fallback.
            # (Removed a commented-out duplicate of the line above.)
            pass
    return var
|
NOTE: USE THIS FOR SINGLE CELLS ONLY
When parsing sheets, all variable names must be exact matches when cross-referencing the metadata and data sections
However, sometimes people like to put "age (years BP)" in one section, and "age" in the other. This causes problems.
We're using this regex to match all variableName cells and remove the "(years BP)" where applicable.
:param str var: Variable name
:return str: Variable name
|
def cost_matrix(self, set_a, set_b, time_a, time_b):
    """
    Calculates the costs (distances) between the items in set a and set b at the specified times.

    Args:
        set_a: List of STObjects
        set_b: List of STObjects
        time_a: time at which objects in set_a are evaluated
        time_b: time at which objects in set_b are evaluated
    Returns:
        A numpy array with shape [len(set_a), len(set_b)] containing the cost matrix between the items in set a
        and the items in set b.
    """
    costs = np.zeros((len(set_a), len(set_b)))
    for row, obj_a in enumerate(set_a):
        for col, obj_b in enumerate(set_b):
            costs[row, col] = self.total_cost_function(obj_a, obj_b,
                                                       time_a, time_b)
    return costs
|
Calculates the costs (distances) between the items in set a and set b at the specified times.
Args:
set_a: List of STObjects
set_b: List of STObjects
time_a: time at which objects in set_a are evaluated
time_b: time at which objects in set_b are evaluated
Returns:
A numpy array with shape [len(set_a), len(set_b)] containing the cost matrix between the items in set a
and the items in set b.
|
def serialize(v, known_modules=None):
    '''Get a text representation of an object.

    :param v: Object to serialize.
    :param known_modules: Optional list of module names consulted when
        resolving the object's type name.
    :return: Tuple of (serialized text, type name).
    '''
    # A mutable default argument ([]) is shared across calls; use the
    # None-sentinel idiom instead.
    if known_modules is None:
        known_modules = []
    tname = name(v, known_modules=known_modules)
    func = serializer(tname)
    return func(v), tname
|
Get a text representation of an object.
|
def _compile_docker_commands(app_name, assembled_specs, port_spec):
    """Compile the shell command list run when the docker container starts.

    The commands install any libs the app uses, run the `once` commands
    (only on the container's first launch) and then the `always` commands.
    """
    app_spec = assembled_specs['apps'][app_name]
    commands = ['set -e']
    commands.extend(_lib_install_commands_for_app(app_name, assembled_specs))
    if app_spec['mount']:
        code_path = container_code_path(app_spec)
        commands.append("cd {}".format(code_path))
        commands.append("export PATH=$PATH:{}".format(code_path))
    commands.extend(_copy_assets_commands_for_app(app_spec, assembled_specs))
    commands.extend(_get_once_commands(app_spec, port_spec))
    commands.extend(_get_always_commands(app_spec))
    return commands
|
This is used to compile the command that will be run when the docker container starts
up. This command has to install any libs that the app uses, run the `always` command, and
run the `once` command if the container is being launched for the first time
|
def scale_down(self, workers, pods=None):
    """ Remove the pods for the requested list of workers

    When scale_down is called by the _adapt async loop, the workers are
    assumed to have been cleanly closed first and in-memory data has been
    migrated to the remaining workers.

    Note that when the worker process exits, Kubernetes leaves the pods in
    a 'Succeeded' state that we collect here.

    If some workers have not been closed, we just delete the pods with
    matching ip addresses.

    Parameters
    ----------
    workers: List[str] List of addresses of workers to close
    """
    # Fall back to collecting the existing worker pods ourselves.
    pods = pods or self._cleanup_terminated_pods(self.pods())
    # Workers are addressed as "tcp://<worker ip>:<port>"; reduce them to
    # a set of IPs for matching against pod IPs.
    worker_ips = {urlparse(address).hostname for address in workers}
    doomed = [pod for pod in pods if pod.status.pod_ip in worker_ips]
    if doomed:
        self._delete_pods(doomed)
|
Remove the pods for the requested list of workers
When scale_down is called by the _adapt async loop, the workers are
assumed to have been cleanly closed first and in-memory data has been
migrated to the remaining workers.
Note that when the worker process exits, Kubernetes leaves the pods in
a 'Succeeded' state that we collect here.
If some workers have not been closed, we just delete the pods with
matching ip addresses.
Parameters
----------
workers: List[str] List of addresses of workers to close
|
def ng_call_ctrl_function(self, element, func, params='', return_out=False):
    """
    :Description: Will execute controller function with provided parameters.
    :Warning: This will only work for angular.js 1.x.
    :Warning: Requires angular debugging to be enabled.
    :param element: Element for browser instance to target.
    :param func: Function to execute from angular element controller.
    :type func: string
    :param params: String (naked) args, or list of parameters to pass to target function.
    :type params: string, tuple, list
    :param return_out: Return output of function call otherwise None
    :type return_out: bool
    """
    # Normalize the parameters to a single argument string.
    if isinstance(params, (tuple, list)):
        param_str = self.__serialize_params(params)
    elif isinstance(params, string_types):
        param_str = params
    else:
        raise ValueError('Invalid type specified for function parameters')
    exec_str = 'angular.element(arguments[0]).controller().%s(%s);' % (func, param_str)
    if not return_out:
        self.browser.execute_script(exec_str, element)
        return None
    raw = self.browser.execute_script('return {}'.format(exec_str), element)
    return self.__type2python(raw)
|
:Description: Will execute controller function with provided parameters.
:Warning: This will only work for angular.js 1.x.
:Warning: Requires angular debugging to be enabled.
:param element: Element for browser instance to target.
:param func: Function to execute from angular element controller.
:type func: string
:param params: String (naked) args, or list of parameters to pass to target function.
:type params: string, tuple, list
:param return_out: Return output of function call otherwise None
:type return_out: bool
|
def run_sql_script(conn, scriptname='results_md2grid.sql'):
    """Run a .sql script from the 'sql_scripts' folder next to this module.

    :param conn: Connection/engine object exposing ``execution_options``.
    :param scriptname: File name of the script inside 'sql_scripts'.
    :return: None
    """
    script_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'sql_scripts'))
    # Use a context manager so the file handle is closed promptly instead of
    # leaking until garbage collection (original used bare open().read()).
    with open(os.path.join(script_dir, scriptname)) as script_file:
        script_str = script_file.read()
    conn.execution_options(autocommit=True).execute(script_str)
    return
|
This function runs .sql scripts in the folder 'sql_scripts'
|
def find(self, title):
    """Return the first worksheet with the given title.

    Args:
        title(str): title/name of the worksheet to return
    Returns:
        WorkSheet: contained worksheet object
    Raises:
        KeyError: if the spreadsheet has no worksheet with the given ``title``
    """
    try:
        sheets = self._titles[title]
    except KeyError:
        raise KeyError(title)
    return sheets[0]
|
Return the first worksheet with the given title.
Args:
title(str): title/name of the worksheet to return
Returns:
WorkSheet: contained worksheet object
Raises:
KeyError: if the spreadsheet has no worksheet with the given ``title``
|
def get_app_perms(model_or_app_label):
    """
    Get permission-string list of the specified django application.

    Parameters
    ----------
    model_or_app_label : model class or string
        A model class or app_label string to specify the particular django
        application.

    Returns
    -------
    set
        A set of perms of the specified django application.

    Examples
    --------
    >>> perms1 = get_app_perms('auth')
    >>> perms2 = get_app_perms(Permission)
    >>> perms1 == perms2
    True
    """
    from django.contrib.auth.models import Permission
    if isinstance(model_or_app_label, string_types):
        app_label = model_or_app_label
    else:
        # Assume model_or_app_label is a model class.
        app_label = model_or_app_label._meta.app_label
    queryset = Permission.objects.filter(content_type__app_label=app_label)
    return {'%s.%s' % (app_label, perm.codename)
            for perm in queryset.iterator()}
|
Get permission-string list of the specified django application.
Parameters
----------
model_or_app_label : model class or string
A model class or app_label string to specify the particular django
application.
Returns
-------
set
A set of perms of the specified django application.
Examples
--------
>>> perms1 = get_app_perms('auth')
>>> perms2 = get_app_perms(Permission)
>>> perms1 == perms2
True
|
def _extract_stars(data, catalog, size=(11, 11), use_xy=True):
    """
    Extract cutout images from a single image centered on stars defined
    in the single input catalog.

    Parameters
    ----------
    data : `~astropy.nddata.NDData`
        A `~astropy.nddata.NDData` object containing the 2D image from
        which to extract the stars. If the input ``catalog`` contains
        only the sky coordinates (i.e. not the pixel coordinates) of the
        stars then the `~astropy.nddata.NDData` object must have a valid
        ``wcs`` attribute.

    catalog : `~astropy.table.Table`
        A single catalog of sources to be extracted from the input
        ``data``. The center of each source can be defined either in
        pixel coordinates (in ``x`` and ``y`` columns) or sky
        coordinates (in a ``skycoord`` column containing a
        `~astropy.coordinates.SkyCoord` object). If both are specified,
        then the value of the ``use_xy`` keyword determines which
        coordinates will be used.

    size : int or array_like (int), optional
        The extraction box size along each axis. If ``size`` is a
        scalar then a square box of size ``size`` will be used. If
        ``size`` has two elements, they should be in ``(ny, nx)`` order.
        The size must be greater than or equal to 3 pixel for both axes.

    use_xy : bool, optional
        Whether to use the ``x`` and ``y`` pixel positions when both
        pixel and sky coordinates are present in the input catalog
        table. If `False` then sky coordinates are used instead of
        pixel coordinates (e.g. for linked stars). The default is
        `True`.

    Returns
    -------
    stars : list of `EPSFStar` objects
        A list of `EPSFStar` instances containing the extracted stars.
        Entries are `None` for stars whose cutout does not fully overlap
        the image.
    """
    colnames = catalog.colnames
    # Use sky coordinates when pixel positions are missing or explicitly
    # disabled via use_xy=False.
    if ('x' not in colnames or 'y' not in colnames) or not use_xy:
        xcenters, ycenters = skycoord_to_pixel(catalog['skycoord'], data.wcs,
                                               origin=0, mode='all')
    else:
        # np.float/np.int were deprecated aliases of the builtins and were
        # removed in NumPy 1.24; use the builtin types instead.
        xcenters = catalog['x'].data.astype(float)
        ycenters = catalog['y'].data.astype(float)

    if 'id' in colnames:
        ids = catalog['id']
    else:
        # Default to 1-based sequential IDs.
        ids = np.arange(len(catalog), dtype=int) + 1

    if data.uncertainty is None:
        weights = np.ones_like(data.data)
    else:
        if data.uncertainty.uncertainty_type == 'weights':
            weights = np.asanyarray(data.uncertainty.array, dtype=float)
        else:
            warnings.warn('The data uncertainty attribute has an unsupported '
                          'type. Only uncertainty_type="weights" can be '
                          'used to set weights. Weights will be set to 1.',
                          AstropyUserWarning)
            weights = np.ones_like(data.data)

    # Masked pixels get zero weight so they are ignored downstream.
    if data.mask is not None:
        weights[data.mask] = 0.

    stars = []
    for xcenter, ycenter, obj_id in zip(xcenters, ycenters, ids):
        try:
            large_slc, small_slc = overlap_slices(data.data.shape, size,
                                                  (ycenter, xcenter),
                                                  mode='strict')
            data_cutout = data.data[large_slc]
            weights_cutout = weights[large_slc]
        except (PartialOverlapError, NoOverlapError):
            # Cutout extends beyond (or misses) the image; keep a None
            # placeholder so the output order matches the catalog.
            stars.append(None)
            continue

        origin = (large_slc[1].start, large_slc[0].start)
        cutout_center = (xcenter - origin[0], ycenter - origin[1])
        star = EPSFStar(data_cutout, weights_cutout,
                        cutout_center=cutout_center, origin=origin,
                        wcs_large=data.wcs, id_label=obj_id)
        stars.append(star)
    return stars
|
Extract cutout images from a single image centered on stars defined
in the single input catalog.
Parameters
----------
data : `~astropy.nddata.NDData`
A `~astropy.nddata.NDData` object containing the 2D image from
which to extract the stars. If the input ``catalog`` contains
only the sky coordinates (i.e. not the pixel coordinates) of the
stars then the `~astropy.nddata.NDData` object must have a valid
``wcs`` attribute.
catalogs : `~astropy.table.Table`
A single catalog of sources to be extracted from the input
``data``. The center of each source can be defined either in
pixel coordinates (in ``x`` and ``y`` columns) or sky
coordinates (in a ``skycoord`` column containing a
`~astropy.coordinates.SkyCoord` object). If both are specified,
then the value of the ``use_xy`` keyword determines which
coordinates will be used.
size : int or array_like (int), optional
The extraction box size along each axis. If ``size`` is a
scalar then a square box of size ``size`` will be used. If
``size`` has two elements, they should be in ``(ny, nx)`` order.
The size must be greater than or equal to 3 pixel for both axes.
use_xy : bool, optional
Whether to use the ``x`` and ``y`` pixel positions when both
pixel and sky coordinates are present in the input catalog
table. If `False` then sky coordinates are used instead of
pixel coordinates (e.g. for linked stars). The default is
`True`.
Returns
-------
stars : list of `EPSFStar` objects
A list of `EPSFStar` instances containing the extracted stars.
|
def start(self, *args):
    """
    Create a singularity container instance
    """
    # Build the --bind argument string from any configured volumes.
    bind_spec = (" --bind " + " --bind ".join(self.volumes)) if self.volumes else ""
    self._print("Instantiating container [{0:s}]. Timeout set to {1:d}. The container ID is printed below.".format(self.name, self.time_out))
    utils.xrun("singularity instance.start",
               list(args) + [bind_spec, self.image, self.name])
    self.status = "created"
    return 0
|
Create a singularity container instance
|
def reset(cls):
    """
    Reset the conspect elements to initial state.
    """
    # Clear the user's text input.
    cls.input_el.value = ""
    # Remove any rendered sub-conspect markup.
    cls.subconspect_el.html = ""
    # Hide the error message, if one was shown.
    cls.show_error(False)
|
Reset the conspect elements to initial state.
|
def make_fileitem_peinfo_detectedentrypointsignature_name(entrypoint_name, condition='is', negate=False,
                                                          preserve_case=False):
    """
    Create a node for FileItem/PEInfo/DetectedEntryPointSignature/Name

    :return: A IndicatorItem represented as an Element node
    """
    return ioc_api.make_indicatoritem_node(
        condition,
        'FileItem',
        'FileItem/PEInfo/DetectedEntryPointSignature/Name',
        'string',
        entrypoint_name,
        negate=negate,
        preserve_case=preserve_case)
|
Create a node for FileItem/PEInfo/DetectedEntryPointSignature/Name
:return: A IndicatorItem represented as an Element node
|
def _insert_code(code_to_modify, code_to_insert, before_line):
    """
    Insert piece of code `code_to_insert` to `code_to_modify` right inside the line `before_line` before the
    instruction on this line by modifying original bytecode

    :param code_to_modify: Code to modify
    :param code_to_insert: Code to insert
    :param before_line: Number of line for code insertion
    :return: boolean flag whether insertion was successful, modified code
    """
    # Map of bytecode offset -> source line number for the target code object.
    linestarts = dict(dis.findlinestarts(code_to_modify))
    if not linestarts:
        return False, code_to_modify
    if code_to_modify.co_name == '<module>':
        # There's a peculiarity here: if a breakpoint is added in the first line of a module, we
        # can't replace the code because we require a line event to stop and the line event
        # was already generated, so, fallback to tracing.
        if before_line == min(linestarts.values()):
            return False, code_to_modify
    # The requested line must start an instruction in this code object.
    if before_line not in linestarts.values():
        return False, code_to_modify
    # Find the bytecode offset at which `before_line` begins.
    offset = None
    for off, line_no in linestarts.items():
        if line_no == before_line:
            offset = off
            break
    # Append a jump back to the original instruction after the inserted code.
    code_to_insert_list = add_jump_instruction(offset, code_to_insert)
    try:
        # Merge the inserted code's co_names/co_consts/co_varnames into the
        # original's tuples, remapping the instructions' argument indices.
        code_to_insert_list, new_names = \
            _add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_names',
                                                     dis.hasname)
        code_to_insert_list, new_consts = \
            _add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_consts',
                                                     [opmap['LOAD_CONST']])
        code_to_insert_list, new_vars = \
            _add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_varnames',
                                                     dis.haslocal)
        # Fix up every jump target in the original bytecode that crosses the
        # insertion point, then splice in the new instructions.
        new_bytes, all_inserted_code = _update_label_offsets(code_to_modify.co_code, offset, list(code_to_insert_list))
        # Rebuild the line-number table (co_lnotab) for the shifted offsets.
        new_lnotab = _modify_new_lines(code_to_modify, offset, code_to_insert_list)
        if new_lnotab is None:
            return False, code_to_modify
    except ValueError:
        pydev_log.exception()
        return False, code_to_modify
    # Assemble a brand-new code object; CodeType is immutable, so every
    # field must be supplied explicitly.
    new_code = CodeType(
        code_to_modify.co_argcount,  # integer
        code_to_modify.co_kwonlyargcount,  # integer
        len(new_vars),  # integer
        code_to_modify.co_stacksize,  # integer
        code_to_modify.co_flags,  # integer
        new_bytes,  # bytes
        new_consts,  # tuple
        new_names,  # tuple
        new_vars,  # tuple
        code_to_modify.co_filename,  # string
        code_to_modify.co_name,  # string
        code_to_modify.co_firstlineno,  # integer
        new_lnotab,  # bytes
        code_to_modify.co_freevars,  # tuple
        code_to_modify.co_cellvars  # tuple
    )
    return True, new_code
|
Insert piece of code `code_to_insert` to `code_to_modify` right inside the line `before_line` before the
instruction on this line by modifying original bytecode
:param code_to_modify: Code to modify
:param code_to_insert: Code to insert
:param before_line: Number of line for code insertion
:return: boolean flag whether insertion was successful, modified code
|
def upload_model(self, path: str, meta: dict, force: bool) -> str:
    """
    Put the given file to the remote storage.

    Abstract: concrete backends must override this method.

    :param path: Path to the model file.
    :param meta: Metadata of the model.
    :param force: Overwrite an existing model.
    :return: URL of the uploaded model.
    :raises BackendRequiredError: If supplied bucket is unusable.
    :raises ModelAlreadyExistsError: If model already exists and no forcing.
    """
    raise NotImplementedError
|
Put the given file to the remote storage.
:param path: Path to the model file.
:param meta: Metadata of the model.
:param force: Overwrite an existing model.
:return: URL of the uploaded model.
:raises BackendRequiredError: If supplied bucket is unusable.
:raises ModelAlreadyExistsError: If model already exists and no forcing.
|
def get_blink_cookie(self, name):
    """Return a one-shot ("blink") cookie value, clearing it after read.

    :param name: Cookie name.
    :return: The unescaped cookie value, or None when the cookie is absent.
    """
    value = self.get_cookie(name)
    # PEP 8: compare against None with identity, not `!= None`.
    if value is not None:
        self.clear_cookie(name)
        return escape.url_unescape(value)
    return None
|
Gets a blink cookie value
|
def _decode(s, encoding=None, errors=None):
    """Decode *s* to text, unless it already is text."""
    # Fall back to the module-level defaults when not specified.
    if encoding is None:
        encoding = ENCODING
    if errors is None:
        errors = ENCODING_ERRORS
    if isinstance(s, unicode):
        return s
    return s.decode(encoding, errors)
|
Decodes *s*.
|
def _handle_unknown_method(self, method, remainder, request=None):
    '''
    Routes undefined actions (like RESET) to the appropriate controller.
    '''
    if request is None:
        # Callers should pass `request`; warn about the deprecated signature.
        self._raise_method_deprecation_warning(self._handle_unknown_method)
    # try finding a post_{custom} or {custom} method first
    controller = self._find_controller('post_%s' % method, method)
    if controller:
        return controller, remainder
    # if no controller exists, try routing to a sub-controller; note that
    # since this isn't a safe GET verb, any local exposes are 405'd
    if remainder:
        if self._find_controller(remainder[0]):
            abort(405)
        sub_controller = self._lookup_child(remainder[0])
        if sub_controller:
            return lookup_controller(sub_controller, remainder[1:],
                                     request)
    # Nothing matched: the method is not allowed on this resource.
    abort(405)
|
Routes undefined actions (like RESET) to the appropriate controller.
|
def _override_cfg(container, yamlkeys, value):
"""
Override a hierarchical key in the config, setting it to the value.
Note that yamlkeys should be a non-empty list of strings.
"""
key = yamlkeys[0]
rest = yamlkeys[1:]
if len(rest) == 0:
# no rest means we found the key to update.
container[key] = value
elif key in container:
# still need to find the leaf in the tree, so recurse.
_override_cfg(container[key], rest, value)
else:
# need to create a sub-tree down to the leaf to insert into.
subtree = {}
_override_cfg(subtree, rest, value)
container[key] = subtree
|
Override a hierarchical key in the config, setting it to the value.
Note that yamlkeys should be a non-empty list of strings.
|
def process_data_config_section(config, data_config):
    """ Processes the data configuration section from the configuration
    data dict.

    :param config: The config reference of the object that will hold the
    configuration data from the config_data.
    :param data_config: Data configuration section from a config data dict.
    """
    for connector in data_config.get('connectors', []):
        config.data['connectors'][connector['name']] = get_config_from_package(
            connector['class'])
    for source in data_config.get('sources') or []:
        source_name = source['name']
        config.data['sources'][source_name] = source
        # The name now lives in the key; drop it from the stored dict.
        del source['name']
|
Processes the data configuration section from the configuration
data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param data_config: Data configuration section from a config data dict.
|
def _release_waiter(self) -> None:
"""
Iterates over all waiters till found one that is not finsihed and
belongs to a host that has available connections.
"""
if not self._waiters:
return
# Having the dict keys ordered this avoids to iterate
# at the same order at each call.
queues = list(self._waiters.keys())
random.shuffle(queues)
for key in queues:
if self._available_connections(key) < 1:
continue
waiters = self._waiters[key]
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
return
|
Iterates over all waiters until one is found that is not finished and
belongs to a host that has available connections.
|
def _startJobWithRetries(self, jobID):
    """ Transition the given job to STATUS_RUNNING; the job is expected to
    be in STATUS_NOTSTARTED.

    Factored out of jobStartNext because the test suite
    (e.g., test_client_jobs_dao.py) needs to call it directly.

    :param jobID: primary key of the job row to start
    """
    update_sql = 'UPDATE %s SET status=%%s, ' \
                 ' _eng_cjm_conn_id=%%s, ' \
                 ' start_time=UTC_TIMESTAMP(), ' \
                 ' _eng_last_update_time=UTC_TIMESTAMP() ' \
                 ' WHERE (job_id=%%s AND status=%%s)' \
                 % (self.jobsTableName,)
    params = [self.STATUS_RUNNING, self._connectionID,
              jobID, self.STATUS_NOTSTARTED]
    with ConnectionFactory.get() as conn:
        rows_updated = conn.cursor.execute(update_sql, params)
        if rows_updated != 1:
            # A transient connection failure may have already applied the
            # update; log and carry on rather than raising.
            self._logger.warn('jobStartNext: numRowsUpdated=%r instead of 1; '
                              'likely side-effect of transient connection '
                              'failure', rows_updated)
    return
|
Place the given job in STATUS_RUNNING mode; the job is expected to be
STATUS_NOTSTARTED.
NOTE: this function was factored out of jobStartNext because it's also
needed for testing (e.g., test_client_jobs_dao.py)
|
def _get_seal_key_ntlm1(negotiate_flags, exported_session_key):
    """
    3.4.5.3 SEALKEY

    Derives the sealing (encryption) key for sessions where
    NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY was not negotiated. The
    session key is truncated to a 56-bit key when NTLMSSP_NEGOTIATE_56 was
    negotiated, otherwise weakened to a 40-bit key.

    :param negotiate_flags: The negotiate_flags structure sent by the server
    :param exported_session_key: A 128-bit session key used to derive signing
        and sealing keys
    :return seal_key: Key used to seal messages
    """
    use_56_bit = bool(negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_56)
    if use_56_bit:
        # 7 key bytes + fixed padding byte per MS-NLMP.
        return exported_session_key[:7] + b"\xa0"
    # 40-bit variant: 5 key bytes + fixed 3-byte suffix.
    return exported_session_key[:5] + b"\xe5\x38\xb0"
|
3.4.5.3 SEALKEY
Calculates the seal_key used to seal (encrypt) messages. This for
authentication where NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY has not
been negotiated. Will weaken the keys if NTLMSSP_NEGOTIATE_56 is not
negotiated it will default to the 40-bit key
:param negotiate_flags: The negotiate_flags structure sent by the server
:param exported_session_key: A 128-bit session key used to derive signing
and sealing keys
:return seal_key: Key used to seal messages
|
def request(self, batch, attempt=0):
    """Attempt to upload the batch and retry before raising an error.

    :param batch: iterable of dicts with 'event', 'value' and 'source' keys
    :param attempt: retry counter (internal; callers use the default)
    :raises Exception: re-raises the last upload error once the configured
        number of retries is exhausted
    """
    try:
        q = self.api.new_queue()
        for msg in batch:
            q.add(msg['event'], msg['value'], source=msg['source'])
        q.submit()
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only genuine errors should trigger a retry.
        if attempt > self.retries:
            raise
        self.request(batch, attempt + 1)
|
Attempt to upload the batch and retry before raising an error
|
def _GetFormatErrorLocation(
    self, yaml_definition, last_definition_object):
    """Retrieves a format error location.

    Args:
      yaml_definition (dict[str, object]): current YAML definition.
      last_definition_object (DataTypeDefinition): previous data type
          definition.

    Returns:
      str: format error location.
    """
    name = yaml_definition.get('name', None)
    if name:
        # `name` is truthy here, so the previous `name or '<NAMELESS>'`
        # fallback was dead code and has been removed.
        error_location = 'in: {0:s}'.format(name)
    elif last_definition_object:
        error_location = 'after: {0:s}'.format(last_definition_object.name)
    else:
        error_location = 'at start'
    return error_location
|
Retrieves a format error location.
Args:
yaml_definition (dict[str, object]): current YAML definition.
last_definition_object (DataTypeDefinition): previous data type
definition.
Returns:
str: format error location.
|
def encode(secret: Union[str, bytes], payload: dict = None,
           alg: str = default_alg, header: dict = None) -> str:
    """Build and sign a token from *header* and *payload*.

    :param secret: The secret used to encode the token.
    :type secret: Union[str, bytes]
    :param payload: The payload to be encoded in the token.
    :type payload: dict
    :param alg: The algorithm used to hash the token.
    :type alg: str
    :param header: The header to be encoded in the token.
    :type header: dict
    :return: A new token
    :rtype: str
    """
    key = util.to_bytes(secret)
    # Both segments default to an empty JSON object.
    header_b64 = util.b64_encode(util.to_bytes(json.dumps(header or {})))
    payload_b64 = util.b64_encode(util.to_bytes(json.dumps(payload or {})))
    # The signature covers "header.payload".
    signing_input = util.join(header_b64, payload_b64)
    signature_b64 = util.b64_encode(_hash(key, signing_input, alg))
    return util.from_bytes(util.join(signing_input, signature_b64))
|
:param secret: The secret used to encode the token.
:type secret: Union[str, bytes]
:param payload: The payload to be encoded in the token.
:type payload: dict
:param alg: The algorithm used to hash the token.
:type alg: str
:param header: The header to be encoded in the token.
:type header: dict
:return: A new token
:rtype: str
|
def get_policy(policy_name,
               region=None, key=None, keyid=None, profile=None):
    '''
    Fetch an IAM managed policy, or None if it does not exist.
    CLI Example:
    .. code-block:: bash
        salt myminion boto_iam.get_policy mypolicy
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        arn = _get_policy_arn(policy_name,
                              region=region, key=key, keyid=keyid,
                              profile=profile)
        response = conn.get_policy(arn)
    except boto.exception.BotoServerError:
        # Treat any AWS-side failure as "policy not found".
        return None
    return response.get('get_policy_response', {}).get('get_policy_result', {})
|
Check to see if policy exists.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.instance_profile_exists myiprofile
|
def CollectData(self):
    """Return some current samples. Call StartDataCollection() first.

    Reads packets until a measurement packet (type 0) can be decoded with
    current calibration, returning a list of current samples, or None on
    read timeout. Calibration packets (types 1 and 2) update the zero and
    reference values used to scale subsequent measurements.
    """
    while 1:  # loop until we get data or a timeout
        _bytes = self._ReadPacket()
        if not _bytes:
            return None
        # Valid data packets are type 0x20..0x2F and at least 13 bytes
        # (4-byte header + one 8-byte sample + 1 trailing byte).
        if len(_bytes) < 4 + 8 + 1 or _bytes[0] < 0x20 or _bytes[0] > 0x2F:
            logging.warning("Wanted data, dropped type=0x%02x, len=%d",
                            _bytes[0], len(_bytes))
            continue
        seq, _type, x, y = struct.unpack("BBBB", _bytes[:4])
        # Each sample is four big-endian int16s: main, usb, aux, voltage.
        data = [
            struct.unpack(">hhhh", _bytes[x:x + 8])
            for x in range(4,
                           len(_bytes) - 8, 8)
        ]
        # 4-bit wrapping sequence counter detects dropped packets.
        if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF:
            logging.warning("Data sequence skipped, lost packet?")
        self._last_seq = seq
        if _type == 0:
            # Measurement packet: requires calibration received earlier.
            if not self._coarse_scale or not self._fine_scale:
                logging.warning(
                    "Waiting for calibration, dropped data packet.")
                continue
            out = []
            for main, usb, aux, voltage in data:
                # Bit 0 of the main reading selects coarse vs fine range.
                if main & 1:
                    coarse = ((main & ~1) - self._coarse_zero)
                    out.append(coarse * self._coarse_scale)
                else:
                    out.append((main - self._fine_zero) * self._fine_scale)
            return out
        elif _type == 1:
            # Zero-calibration packet.
            self._fine_zero = data[0][0]
            self._coarse_zero = data[1][0]
        elif _type == 2:
            # Reference-calibration packet.
            self._fine_ref = data[0][0]
            self._coarse_ref = data[1][0]
        else:
            logging.warning("Discarding data packet type=0x%02x", _type)
            continue
        # See http://wiki/Main/MonsoonProtocol for details on these values.
        if self._coarse_ref != self._coarse_zero:
            self._coarse_scale = 2.88 / (
                self._coarse_ref - self._coarse_zero)
        if self._fine_ref != self._fine_zero:
            self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)
|
Return some current samples. Call StartDataCollection() first.
|
def __gzip(filename):
    """ Compress a file returning the new filename (.gz).

    The original file is left in place.

    :param filename: path of the file to compress
    :return: path of the created ``.gz`` file
    """
    zipname = filename + '.gz'
    # Context managers guarantee both handles are closed even if the
    # copy fails partway through (the originals leaked on exception).
    with open(filename, 'rb') as file_pointer:
        with gzip.open(zipname, 'wb') as zip_pointer:
            zip_pointer.writelines(file_pointer)
    return zipname
|
Compress a file returning the new filename (.gz)
|
def clear_lock(self, abspath=True):
    """Remove any conda lock files on the system.

    :param abspath: whether to invoke conda via its absolute path
    :return: parsed result of the ``conda clean --lock`` call
    """
    return self._call_and_parse(['clean', '--lock', '--json'],
                                abspath=abspath)
|
Clean any conda lock in the system.
|
def slice(self, *slice_args, **kwargs):
    """Create a new SampleSet with rows sliced according to standard Python
    slicing syntax.

    Args:
        start (int, optional, default=None):
            Start index for `slice`.
        stop (int):
            Stop index for `slice`.
        step (int, optional, default=None):
            Step value for `slice`.
        sorted_by (str/None, optional, default='energy'):
            Selects the record field used to sort the samples before
            slicing. Note that `sorted_by` determines the sample order in
            the returned SampleSet. ``slice(3)`` therefore returns the
            three lowest-energy samples, ``slice(-3, None)`` the three
            highest, and ``slice(3, -3, 2)`` every second sample in
            between.

    Returns:
        :obj:`.SampleSet`
    """
    # `sorted_by` is pulled out manually so that any other keyword raises
    # the same TypeError python3 itself would produce.
    sorted_by = kwargs.pop('sorted_by', 'energy')
    if kwargs:
        raise TypeError('slice got an unexpected '
                        'keyword argument {!r}'.format(kwargs.popitem()[0]))
    # Follow Python's slice(...) constructor semantics.
    selector = slice(*slice_args) if slice_args else slice(None)
    if sorted_by is None:
        sliced = self.record[selector]
    else:
        order = np.argsort(self.record[sorted_by])
        sliced = self.record[order[selector]]
    return type(self)(sliced, self.variables, copy.deepcopy(self.info),
                      self.vartype)
|
Create a new SampleSet with rows sliced according to standard Python
slicing syntax.
Args:
start (int, optional, default=None):
Start index for `slice`.
stop (int):
Stop index for `slice`.
step (int, optional, default=None):
Step value for `slice`.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
slicing. Note that `sorted_by` determines the sample order in
the returned SampleSet.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.diag(range(1, 11)), dimod.BINARY, energy=range(10))
>>> print(sampleset)
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
3 0 0 0 1 0 0 0 0 0 0 3 1
4 0 0 0 0 1 0 0 0 0 0 4 1
5 0 0 0 0 0 1 0 0 0 0 5 1
6 0 0 0 0 0 0 1 0 0 0 6 1
7 0 0 0 0 0 0 0 1 0 0 7 1
8 0 0 0 0 0 0 0 0 1 0 8 1
9 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 10 rows, 10 samples, 10 variables]
>>> # the first 3 samples by energy == truncate(3)
>>> print(sampleset.slice(3))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # the last 3 samples by energy
>>> print(sampleset.slice(-3, None))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 0 0 0 0 1 0 0 7 1
1 0 0 0 0 0 0 0 0 1 0 8 1
2 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # every second sample in between (skip the top and the bottom 3)
>>> print(sampleset.slice(3, -3, 2))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 1 0 0 0 0 0 0 3 1
1 0 0 0 0 0 1 0 0 0 0 5 1
['BINARY', 2 rows, 2 samples, 10 variables]
|
def call_git_branch():
    """Return the current git branch name as a string, or None.

    Runs ``git rev-parse --abbrev-ref HEAD`` in CURRENT_DIRECTORY; returns
    None when git is unavailable or the command fails.
    """
    try:
        with open(devnull, "w") as fnull:
            arguments = [GIT_COMMAND, 'rev-parse', '--abbrev-ref', 'HEAD']
            return check_output(arguments, cwd=CURRENT_DIRECTORY,
                                stderr=fnull).decode("ascii").strip()
    except (OSError, CalledProcessError):
        return None
|
return the string output of git describe
|
def geom_find_rotsymm(g, atwts, ax, improp, \
                      nmax=_DEF.SYMM_MATCH_NMAX, \
                      tol=_DEF.SYMM_MATCH_TOL):
    """ Identify highest-order symmetry for a geometry on a given axis.
    Regular and improper axes possible.

    Searches downward from `nmax` and returns the first (largest) order
    whose symmetry-match factor does not exceed `tol`, together with that
    factor. Returns ``(nval, nfac)``.

    .. todo:: Complete geom_find_rotsymm docstring
    """
    # Imports
    import numpy as np
    # Vectorize the geometry
    g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
    # Ensure a 3-D axis vector
    ax = make_nd_vec(ax, nd=3, t=np.float64, norm=True)
    # Loop downward either until a good axis is found or nval < 1
    # Should never traverse below n == 1 for regular rotation check;
    # could for improper, though.
    nval = nmax + 1
    nfac = 1.0
    while nfac > tol and nval > 0:
        nval = nval - 1
        try:
            # Rotation angle for an order-nval axis is 2*pi/nval.
            nfac = geom_symm_match(g, atwts, ax, \
                                   2*np.pi/nval, improp)
        except ZeroDivisionError as zde:
            # If it's because nval == zero, ignore. Else re-raise.
            if nval > 0:
                raise zde
        ## end if
        ## end try
    ## loop
    # Should be good to return
    return nval, nfac
|
Identify highest-order symmetry for a geometry on a given axis.
Regular and improper axes possible.
.. todo:: Complete geom_find_rotsymm docstring
|
def mod2pi(ts):
    """Wrap a timeseries of phase variables (radians) into (-pi, pi].

    The identity ``pi - mod(pi - x, 2*pi)`` maps any real value onto the
    half-open interval (-pi, pi] without shifting the phase.
    """
    full_turn = 2 * np.pi
    return np.pi - np.mod(np.pi - ts, full_turn)
|
For a timeseries where all variables represent phases (in radians),
return an equivalent timeseries where all values are in the range (-pi, pi]
|
def get_nearest_points_dirty(self, center_point, radius, unit='km'):
    """
    Approximate search: yield stored points from the geohash cells that
    cover a circle with the given center and radius. Carries the inherent
    geohash inaccuracy (see GEO_HASH_ERRORS).
    :param center_point: center of search circle
    :param radius: radius of search circle
    :param unit: 'km' (default) or 'mi' for the radius
    :return: iterable of GeoPoints from the matching cells
    """
    if unit == 'mi':
        radius = utils.mi_to_km(radius)
    # The 3x3 cell neighborhood only covers the circle while its diameter
    # fits within one grid cell; otherwise tell the caller which coarser
    # precision would work.
    grid_size = GEO_HASH_GRID_SIZE[self.precision]
    if radius > grid_size / 2:
        suggested_precision = 0
        for precision, max_size in GEO_HASH_GRID_SIZE.items():
            if radius > max_size / 2:
                suggested_precision = precision - 1
                break
        raise ValueError(
            'Too large radius, please rebuild GeoHashGrid with '
            'precision={0}'.format(suggested_precision)
        )
    cells = geohash.expand(self.get_point_hash(center_point))
    return chain.from_iterable(self.data.get(cell, []) for cell in cells)
|
return approx list of point from circle with given center and radius
it uses geohash and return with some error (see GEO_HASH_ERRORS)
:param center_point: center of search circle
:param radius: radius of search circle
:return: list of GeoPoints from given area
|
def convert_areaSource(self, node):
    """
    Convert the given node into an area source object.

    :param node: a node with tag areaGeometry
    :returns: a :class:`openquake.hazardlib.source.AreaSource` instance
    :raises ValueError: if neither the node nor the job.ini provide an
        area discretization
    """
    geom = node.areaGeometry
    # NOTE(review): `~node` appears to extract a node's parsed value
    # (nrml node convention) -- confirm against the node API.
    coords = split_coords_2d(~geom.Polygon.exterior.LinearRing.posList)
    polygon = geo.Polygon([geo.Point(*xy) for xy in coords])
    msr = valid.SCALEREL[~node.magScaleRel]()
    # Per-source discretization takes precedence over the job-level value.
    area_discretization = geom.attrib.get(
        'discretization', self.area_source_discretization)
    if area_discretization is None:
        raise ValueError(
            'The source %r has no `discretization` parameter and the job.'
            'ini file has no `area_source_discretization` parameter either'
            % node['id'])
    return source.AreaSource(
        source_id=node['id'],
        name=node['name'],
        tectonic_region_type=node.attrib.get('tectonicRegion'),
        mfd=self.convert_mfdist(node),
        rupture_mesh_spacing=self.rupture_mesh_spacing,
        magnitude_scaling_relationship=msr,
        rupture_aspect_ratio=~node.ruptAspectRatio,
        upper_seismogenic_depth=~geom.upperSeismoDepth,
        lower_seismogenic_depth=~geom.lowerSeismoDepth,
        nodal_plane_distribution=self.convert_npdist(node),
        hypocenter_distribution=self.convert_hpdist(node),
        polygon=polygon,
        area_discretization=area_discretization,
        temporal_occurrence_model=self.get_tom(node))
|
Convert the given node into an area source object.
:param node: a node with tag areaGeometry
:returns: a :class:`openquake.hazardlib.source.AreaSource` instance
|
def make_movie(workdir, pf, dpi=120, fps=1, format="pdf", engine="ffmpeg"):
    """ Stitch the per-frame images in *workdir* into a movie named *pf*.

    Non-png frames are first rasterized to png (GNU parallel +
    ImageMagick), then assembled with ffmpeg (mp4) or gifsicle (gif).
    """
    os.chdir(workdir)
    if format != "png":
        # Rasterize every frame to png before stitching.
        command = "parallel convert -density {}".format(dpi)
        command += " {} {.}.png ::: " + "*.{}".format(format)
        sh(command)
    assert engine in ("ffmpeg", "gifsicle"), \
        "Only ffmpeg or gifsicle is currently supported"
    if engine == "ffmpeg":
        command = "ffmpeg -framerate {} -pattern_type glob -i '*.png' {}.mp4"\
            .format(fps, pf)
    elif engine == "gifsicle":
        command = "convert *.png gif:- |"
        command += " gifsicle --delay {} --loop --optimize=3".format(100 / fps)
        command += " --colors=256 --multifile - > {}.gif".format(pf)
    sh(command)
|
Make the movie using either ffmpeg or gifsicle.
|
def _run_process(self, start_path, stop_path, process_num=0):
    """
    Run every path index in [start_path, stop_path) on one worker.

    :param start_path: first path index (inclusive)
    :param stop_path: last path index (exclusive)
    :param process_num: worker identifier passed to the setup/teardown hooks
    """
    # Per-worker setup hooks.
    self.producer.initialize_worker(process_num)
    self.consumer.initialize_worker(process_num)
    for path_index in range(start_path, stop_path):
        self._run_path(path_index)
    # Per-worker teardown hook.
    self.consumer.finalize_worker(process_num)
|
The function calls _run_path for given set of paths
|
def _get_lowstate(self):
    '''
    Normalize the request body into a lowstate list; None when the body
    is empty.
    '''
    body = self.request.body
    if not body:
        return
    data = self.deserialize(body)
    # Keep a (shallow) snapshot of the payload before normalization.
    self.request_payload = copy(data)
    # Single 'arg' values are wrapped so downstream code always sees a list.
    if data and 'arg' in data and not isinstance(data['arg'], list):
        data['arg'] = [data['arg']]
    return data if isinstance(data, list) else [data]
|
Format the incoming data into a lowstate object
|
def normalize_variables(cls, variables):
    """Make sure version is treated consistently.

    Placeholder/falsy versions ('', False, '_NO_VERSION', None) are
    removed entirely before delegating to the superclass.
    """
    version = variables.get('version', True)
    if version in ('', False, '_NO_VERSION', None):
        del variables['version']
    return super(PackageResource, cls).normalize_variables(variables)
|
Make sure version is treated consistently
|
def check_and_get_data(input_list,**pars):
    """Verify that all specified files are present. If not, retrieve them from MAST.

    Parameters
    ----------
    input_list : list
        List of one or more calibrated fits images that will be used for catalog generation.
        Items may be association files (*_asn.fits), singleton images
        (*_flc/_flt.fits), or bare 9-character ipppssoot rootnames.

    Returns
    -------
    total_input_list: list
        list of full filenames (empty list on any error)
    """
    empty_list = []
    retrieve_list = [] # Actual files retrieved via astroquery and resident on disk
    candidate_list = [] # File names gathered from *_asn.fits file
    ipppssoot_list = [] # ipppssoot names used to avoid duplicate downloads
    total_input_list = [] # Output full filename list of data on disk
    # Loop over the input_list to determine if the item in the input_list is a full association file
    # (*_asn.fits), a full individual image file (aka singleton, *_flt.fits), or a root name specification
    # (association or singleton, ipppssoot).
    for input_item in input_list:
        print('Input item: ', input_item)
        indx = input_item.find('_')
        # Input with a suffix (_xxx.fits)
        if indx != -1:
            lc_input_item = input_item.lower()
            suffix = lc_input_item[indx+1:indx+4]
            print('file: ', lc_input_item)
            # For an association, need to open the table and read the image names as this could
            # be a custom association. The assumption is this file is on local disk when specified
            # in this manner (vs just the ipppssoot of the association).
            # This "if" block just collects the wanted full file names.
            if suffix == 'asn':
                try:
                    asntab = Table.read(input_item, format='fits')
                except FileNotFoundError:
                    log.error('File {} not found.'.format(input_item))
                    return(empty_list)
                for row in asntab:
                    # Skip the association's product rows; only members matter.
                    if row['MEMTYPE'].startswith('PROD'):
                        continue
                    memname = row['MEMNAME'].lower().strip()
                    # Need to check if the MEMNAME is a full filename or an ipppssoot
                    if memname.find('_') != -1:
                        candidate_list.append(memname)
                    else:
                        candidate_list.append(memname + '_flc.fits')
            elif suffix == 'flc' or suffix == 'flt':
                if lc_input_item not in candidate_list:
                    candidate_list.append(lc_input_item)
            else:
                log.error('Inappropriate file suffix: {}. Looking for "asn.fits", "flc.fits", or "flt.fits".'.format(suffix))
                return(empty_list)
        # Input is an ipppssoot (association or singleton), nine characters by definition.
        # This "else" block actually downloads the data specified as ipppssoot.
        elif len(input_item) == 9:
            try:
                if input_item not in ipppssoot_list:
                    # An ipppssoot of an individual file which is part of an association cannot be
                    # retrieved from MAST
                    retrieve_list = aqutils.retrieve_observation(input_item,**pars)
                    # If the retrieved list is not empty, add filename(s) to the total_input_list.
                    # Also, update the ipppssoot_list so we do not try to download the data again. Need
                    # to do this since retrieve_list can be empty because (1) data cannot be acquired (error)
                    # or (2) data is already on disk (ok).
                    if retrieve_list:
                        total_input_list += retrieve_list
                        ipppssoot_list.append(input_item)
                    else:
                        log.error('File {} cannot be retrieved from MAST.'.format(input_item))
                        return(empty_list)
            except Exception:
                # Best-effort diagnostics only; the loop continues with the next item.
                exc_type, exc_value, exc_tb = sys.exc_info()
                traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
    # Only the retrieve_list files via astroquery have been put into the total_input_list thus far.
    # Now check candidate_list to detect or acquire the requested files from MAST via
    # astroquery.
    for file in candidate_list:
        # If the file is found on disk, add it to the total_input_list and continue
        if glob.glob(file):
            total_input_list.append(file)
            continue
        else:
            log.error('File {} cannot be found on the local disk.'.format(file))
            return(empty_list)
    log.info("TOTAL INPUT LIST: {}".format(total_input_list))
    return(total_input_list)
|
Verify that all specified files are present. If not, retrieve them from MAST.
Parameters
----------
input_list : list
List of one or more calibrated fits images that will be used for catalog generation.
Returns
=======
total_input_list: list
list of full filenames
|
def forward_backward(self, x):
    """forward backward implementation

    Runs the combined MLM/NSP forward pass under autograd recording,
    rescales the loss, and backpropagates it.

    :param x: one pre-processed pretraining batch
    :return: the rescaled loss plus the intermediate forward outputs
    """
    with mx.autograd.record():
        (ls, next_sentence_label, classified, masked_id, decoded, \
         masked_weight, ls1, ls2, valid_length) = forward(x, self._model, self._mlm_loss,
                                                          self._nsp_loss, self._vocab_size,
                                                          args.dtype)
        # Rescale so gradient magnitudes stay comparable across batch
        # accumulation settings.
        ls = ls / self._rescale_factor
    if args.dtype == 'float16':
        # NOTE(review): the fp16 path delegates backward to the trainer --
        # presumably for AMP-style loss scaling; confirm.
        self._trainer.backward(ls)
    else:
        ls.backward()
    return ls, next_sentence_label, classified, masked_id, decoded, \
        masked_weight, ls1, ls2, valid_length
|
forward backward implementation
|
def save_itemgetter(self, obj):
    """itemgetter serializer (needed for namedtuple support).

    Recovers the keys an ``operator.itemgetter`` was constructed with by
    probing it with a dummy object whose ``__getitem__`` echoes each key
    back, then pickles it as a reduce of those keys.
    """
    class _KeyEcho:
        def __getitem__(self, key):
            return key
    keys = obj(_KeyEcho())
    if not isinstance(keys, tuple):
        # Single-key itemgetters return the bare key.
        keys = (keys,)
    return self.save_reduce(operator.itemgetter, keys)
|
itemgetter serializer (needed for namedtuple support)
|
def __validate_dates(start_date, end_date):
    """Validate a pair of ``yyyy-mm-dd`` date strings.

    Both arguments must parse as ISO dates, span at most 366 days, and be
    in chronological order; a ValueError is raised otherwise.
    """
    date_format = '%Y-%m-%d'
    try:
        start = datetime.datetime.strptime(start_date, date_format)
        end = datetime.datetime.strptime(end_date, date_format)
    except ValueError:
        raise ValueError("Incorrect data format, should be yyyy-mm-dd")
    span_days = (end - start).days
    if span_days > 366:
        raise ValueError("The difference between start and end date " +
                         "should be less than or equal to 366 days.")
    if span_days < 0:
        raise ValueError("End date cannot be before start date.")
|
Validate if a date string.
Validate if a string is a date on yyyy-mm-dd format and it the
period between them is less than a year.
|
def decrypt_with_ad(self, ad: bytes, ciphertext: bytes) -> bytes:
    """
    Return DECRYPT(k, n++, ad, ciphertext) when a key is set; otherwise the
    ciphertext is returned unchanged. If DECRYPT raises an authentication
    failure the nonce is left untouched and the error propagates.
    :param ad: bytes sequence
    :param ciphertext: bytes sequence
    :return: plaintext bytes sequence
    """
    if self.n == MAX_NONCE:
        raise NoiseMaxNonceError('Nonce has depleted!')
    if not self.has_key():
        return ciphertext
    # Decrypt first -- the nonce is only advanced on success.
    plaintext = self.cipher.decrypt(self.k, self.n, ad, ciphertext)
    self.n += 1
    return plaintext
|
If k is non-empty returns DECRYPT(k, n++, ad, ciphertext). Otherwise returns ciphertext. If an authentication
failure occurs in DECRYPT() then n is not incremented and an error is signaled to the caller.
:param ad: bytes sequence
:param ciphertext: bytes sequence
:return: plaintext bytes sequence
|
def validate_input(self):
    """Raise ValueError if the gate's vertical bounds are not increasing."""
    lower, upper = self.vert[0], self.vert[1]
    if upper <= lower:
        raise ValueError(u'{} must be larger than {}'.format(upper, lower))
|
Raise appropriate exception if gate was defined incorrectly.
|
def run(self):
    """Push stats to Graphite forever, sleeping ``self.period`` between
    pushes."""
    self.graphite.start()
    while True:
        log.debug('Graphite pusher is sleeping for %d seconds', self.period)
        time.sleep(self.period)
        log.debug('Pushing stats to Graphite')
        try:
            self.push()
            log.debug('Done pushing stats to Graphite')
        except:  # log absolutely everything, then re-raise
            log.exception('Exception while pushing stats to Graphite')
            raise
|
Loop forever, pushing out stats.
|
def streams(self):
    """Returns a :class:`StreamingQueryManager` that allows managing all the
    :class:`StreamingQuery` StreamingQueries active on `this` context.
    .. note:: Evolving.
    """
    # Imported lazily to avoid a hard dependency at module import time.
    from pyspark.sql.streaming import StreamingQueryManager
    jsqm = self._ssql_ctx.streams()
    return StreamingQueryManager(jsqm)
|
Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
|
def _init_map(self):
    """stub

    Initializes the record map via the text- and files-answer mixins, then
    the rest of the MRO. NOTE(review): the call order (text, files, super)
    is preserved from the original -- presumed significant.
    """
    TextAnswerFormRecord._init_map(self)
    FilesAnswerFormRecord._init_map(self)
    super(AnswerTextAndFilesMixin, self)._init_map()
|
stub
|
def broadcast(self, data_dict):
    '''
    If a visualizer is connected, queue *data_dict* and flush the queue;
    otherwise the message is dropped.
    '''
    if not self.vis_socket:
        return
    self.queued_messages.append(data_dict)
    self.send_all_updates()
|
Send to the visualizer (if there is one) or enqueue for later
|
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for cls. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    candidate = country.upper()
    # Exact ISO3 code.
    if len(candidate) == 3 and candidate in countriesdata['countries']:
        return candidate
    # ISO2 -> ISO3 mapping.
    if len(candidate) == 2:
        iso3 = countriesdata['iso2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    # Exact country-name match, then known abbreviation expansions.
    names2iso3 = countriesdata['countrynames2iso3']
    iso3 = names2iso3.get(candidate)
    if iso3 is not None:
        return iso3
    for expanded in cls.expand_countryname_abbrevs(candidate):
        iso3 = names2iso3.get(expanded)
        if iso3 is not None:
            return iso3
    if exception is not None:
        raise exception
    return None
|
Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
|
def get_locations():
    """
    Pull the account's locations.

    :return: decoded JSON payload of the locations endpoint, or False when
        the auth token has expired (HTTP 401).
    """
    arequest = requests.get(LOCATIONS_URL, headers=HEADERS)
    # Compare the status code as an int; no need to round-trip through str.
    if arequest.status_code == 401:
        _LOGGER.error("Token expired.")
        return False
    return arequest.json()
|
Pull the accounts locations.
|
def main():
    """
    Project's main method which will parse the command line arguments, run a
    scan using the TagCubeClient and exit.

    Exit codes: 1 when no credentials are configured, 2 when the configured
    credentials or parameters are invalid, otherwise whatever the scan run
    returns.

    NOTE(review): Python 2 only (`except ..., e` syntax and print
    statements).
    """
    cmd_args = TagCubeCLI.parse_args()
    try:
        tagcube_cli = TagCubeCLI.from_cmd_args(cmd_args)
    except ValueError, ve:
        # We get here when there are no credentials configured
        print '%s' % ve
        sys.exit(1)
    try:
        sys.exit(tagcube_cli.run())
    except ValueError, ve:
        # We get here when the configured credentials had some issue (invalid)
        # or there was some error (such as invalid profile name) with the params
        print '%s' % ve
        sys.exit(2)
|
Project's main method which will parse the command line arguments, run a
scan using the TagCubeClient and exit.
|
def new(self, data):
    """Create a fresh stock_block of the same concrete type from *data*.

    A shallow copy of this instance is taken and re-initialized with the
    supplied data, leaving the original object untouched.

    :param data: payload forwarded to ``__init__``
    :return: the newly initialized copy
    """
    clone = copy(self)
    clone.__init__(data)
    return clone
|
通过data新建一个stock_block
Arguments:
data {[type]} -- [description]
Returns:
[type] -- [description]
|
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, z_shift=None):
    """Generates an orbital path around the data scene

    Parameters
    ----------
    factor : float
        A scaling factor when building the orbital extent
    n_points : int
        number of points on the orbital path
    viewup : list(float)
        the normal to the orbital plane
    z_shift : float, optional
        shift the plane up/down from the center of the scene by this amount
    """
    if viewup is None:
        viewup = rcParams['camera']['viewup']
    center = list(self.center)
    bnds = list(self.bounds)
    if z_shift is None:
        # Default: lift the orbit above the scene by factor * its z-extent.
        z_shift = (bnds[5] - bnds[4]) * factor
    center[2] = center[2] + z_shift
    # The radius covers the larger of the scaled x- and y-extents.
    radius = (bnds[1] - bnds[0]) * factor
    y = (bnds[3] - bnds[2]) * factor
    if y > radius:
        radius = y
    return vtki.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
|
Generates an orbital path around the data scene
Parameters
----------
factor : float
    A scaling factor when building the orbital extent
n_points : int
    number of points on the orbital path
viewup : list(float)
    the normal to the orbital plane
z_shift : float, optional
    shift the plane up/down from the center of the scene by this amount
|
def multiply(traj, result_list):
    """Example simulation multiplying two trajectory parameters.

    Instead of storing the product on the trajectory directly, the value is
    written into a shared list at the trajectory's run index; results are
    added to the trajectory only at the very end.

    :param traj:
        Trajectory containing the parameters in a particular combination;
        it also serves as a container for results.
    :param result_list: shared list indexed by ``traj.v_idx``
    """
    product = traj.x * traj.y
    result_list[traj.v_idx] = product
|
Example of a sophisticated simulation that involves multiplying two values.
This time we will store the value in a shared list and only in the end add the result.
:param traj:
Trajectory containing
the parameters in a particular combination,
it also serves as a container for results.
|
def add(self, pattern, function, method=None, type_cast=None):
    """Register a path pattern with its handler.

    Args:
        pattern (str): Regex pattern to match a certain path.
        function (function): Function to associate with this path.
        method (str, optional): Usually one of GET, POST, PUT, DELETE,
            though any string that fits your situation works.
            Defaults to None.
        type_cast (dict, optional): Mapping between a param name and one
            of `int`, `float` or `bool`; the value matched under that
            param name will then be cast to the given type.
            Defaults to None.
    """
    entry = {
        'pattern': pattern,
        'function': function,
        'method': method,
        'type_cast': type_cast or {},
    }
    # The store is shared between threads; guard the append.
    with self._lock:
        self._data_store.append(entry)
|
Function for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path.
function (function): Function to associate with this path.
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
provided param name will then be cast to the given type.
Defaults to None.
|
def OnActivateReader(self, event):
    """Called when the user activates a reader in the tree.

    Dispatches on the tree item's payload: a Card is activated directly,
    a Reader is forwarded to the dialog panel. The event is always
    propagated via Skip().
    """
    item = event.GetItem()
    if item:
        itemdata = self.readertreepanel.readertreectrl.GetItemPyData(item)
        if isinstance(itemdata, smartcard.Card.Card):
            self.ActivateCard(itemdata)
        elif isinstance(itemdata, smartcard.reader.Reader.Reader):
            self.dialogpanel.OnActivateReader(itemdata)
    event.Skip()
|
Called when the user activates a reader in the tree.
|
def on_demand_annotation(twitter_app_key, twitter_app_secret, user_twitter_id):
    """A service that leverages twitter lists for on-demand annotation of popular users.

    TODO: Do this.
    """
    # Log into my application.
    twitter = login(twitter_app_key, twitter_app_secret)

    # Fetch (up to 1000) lists that the target user is a member of.
    memberships = twitter.get_list_memberships(user_id=user_twitter_id, count=1000)
    for membership in memberships:
        print(membership)

    return memberships
|
A service that leverages twitter lists for on-demand annotation of popular users.
TODO: Do this.
|
def notification_selected_sm_changed(self, model, prop_name, info):
    """If a new state machine is selected, make sure the tab is open.

    Observer callback: reads the selected state machine id from the model,
    switches the notebook to the corresponding page and tries to preserve
    the tab label colors across the page switch.
    """
    selected_state_machine_id = self.model.selected_state_machine_id
    if selected_state_machine_id is None:
        return

    page_id = self.get_page_num(selected_state_machine_id)

    # to retrieve the current tab colors before switching pages
    number_of_pages = self.view["notebook"].get_n_pages()
    old_label_colors = list(range(number_of_pages))
    for p in range(number_of_pages):
        page = self.view["notebook"].get_nth_page(p)
        # Tab label widget is the first child inside the tab's container.
        label = self.view["notebook"].get_tab_label(page).get_child().get_children()[0]
        # old_label_colors[p] = label.get_style().fg[Gtk.StateType.NORMAL]
        old_label_colors[p] = label.get_style_context().get_color(Gtk.StateType.NORMAL)

    if not self.view.notebook.get_current_page() == page_id:
        self.view.notebook.set_current_page(page_id)

    # set the old colors
    for p in range(number_of_pages):
        page = self.view["notebook"].get_nth_page(p)
        label = self.view["notebook"].get_tab_label(page).get_child().get_children()[0]
        # Gtk TODO — NOTE(review): `style` is fetched but never applied here;
        # the color-restore step appears unfinished — confirm intended behavior.
        style = label.get_style_context()
|
If a new state machine is selected, make sure the tab is open
|
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    Args:
        population: The current population of solutions.
        fitnesses: The fitness associated with each solution in the population.

    Returns:
        list; a list of solutions.
    """
    # This strategy ignores the previous population entirely and simply
    # draws a fresh batch of solutions.
    new_population = []
    for _ in range(self._population_size):
        new_population.append(self._next_solution())
    return new_population
|
Make a new population after each optimization iteration.
Args:
population: The population current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions.
|
def finalize(self):
    """Finalize the simulation for this consumer.

    Runs the base-class finalization first, then decodes the accumulated
    ``self.result`` buffer into its final string form via ``self.decoder``.
    """
    super(StringWriterConsumer, self).finalize()
    self.result = self.decoder(self.result)
|
finalize simulation for consumer
|
def fen(self, *, shredder: bool = False, en_passant: str = "legal", promoted: Optional[bool] = None) -> str:
    """
    Gets a FEN representation of the position.

    A FEN string (e.g.,
    ``rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1``) is the
    EPD part (:func:`~chess.Board.board_fen()`, :data:`~chess.Board.turn`,
    castling rights, en passant square) followed by the
    :data:`~chess.Board.halfmove_clock` and the
    :data:`~chess.Board.fullmove_number`.

    :param shredder: Use :func:`~chess.Board.castling_shredder_fen()`
        and encode castling rights by the file of the rook
        (like ``HAha``) instead of the default
        :func:`~chess.Board.castling_xfen()` (like ``KQkq``).
    :param en_passant: By default, only fully legal en passant squares
        are included (:func:`~chess.Board.has_legal_en_passant()`).
        Pass ``fen`` to strictly follow the FEN specification
        (always include the en passant square after a two-step pawn move)
        or ``xfen`` to follow the X-FEN specification
        (:func:`~chess.Board.has_pseudo_legal_en_passant()`).
    :param promoted: Mark promoted pieces like ``Q~``. By default, this is
        only enabled in chess variants where this is relevant.
    """
    epd_part = self.epd(shredder=shredder, en_passant=en_passant, promoted=promoted)
    return "{} {} {}".format(epd_part, self.halfmove_clock, self.fullmove_number)
|
Gets a FEN representation of the position.
A FEN string (e.g.,
``rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1``) consists
of the position part :func:`~chess.Board.board_fen()`, the
:data:`~chess.Board.turn`, the castling part
(:data:`~chess.Board.castling_rights`),
the en passant square (:data:`~chess.Board.ep_square`),
the :data:`~chess.Board.halfmove_clock`
and the :data:`~chess.Board.fullmove_number`.
:param shredder: Use :func:`~chess.Board.castling_shredder_fen()`
and encode castling rights by the file of the rook
(like ``HAha``) instead of the default
:func:`~chess.Board.castling_xfen()` (like ``KQkq``).
:param en_passant: By default, only fully legal en passant squares
are included (:func:`~chess.Board.has_legal_en_passant()`).
Pass ``fen`` to strictly follow the FEN specification
(always include the en passant square after a two-step pawn move)
or ``xfen`` to follow the X-FEN specification
(:func:`~chess.Board.has_pseudo_legal_en_passant()`).
:param promoted: Mark promoted pieces like ``Q~``. By default, this is
only enabled in chess variants where this is relevant.
|
def day(self):
    '''Set the unit to day (86400 seconds) and return self for chaining.'''
    seconds_per_day = 86400
    self.magnification = seconds_per_day
    self._update(self.baseNumber, self.magnification)
    return self
|
set unit to day
|
def compareBIMfiles(beforeFileName, afterFileName, outputFileName):
    """Compare two BIM files for differences.

    :param beforeFileName: the name of the file before modification.
    :param afterFileName: the name of the file after modification.
    :param outputFileName: the name of the output file (containing the
                           differences between the ``before`` and the
                           ``after`` files).

    :type beforeFileName: str
    :type afterFileName: str
    :type outputFileName: str

    :returns: the markers present in ``before`` but absent from ``after``
              (``beforeBIM - afterBIM``). NOTE(review): the original
              docstring said "the number of differences", but the code
              returns the difference itself, not a count — confirm which
              the callers expect.

    The ``bim`` files contain the list of markers in a given dataset. The
    ``before`` file should have more markers than the ``after`` file. The
    ``after`` file should be a subset of the markers in the ``before`` file.
    """
    # Creating a minimal options object (mimics an argparse namespace)
    options = Dummy()
    options.before = beforeFileName
    options.after = afterFileName
    options.out = outputFileName
    # Checking the options (validates the input files)
    CompareBIM.checkArgs(options)
    # Reading the BIM files — presumably returned as sets, since ``-`` is
    # applied below; TODO confirm readBIM's return type.
    beforeBIM = CompareBIM.readBIM(options.before)
    afterBIM = CompareBIM.readBIM(options.after)
    # Finding the differences and writing them to the output file
    CompareBIM.compareSNPs(beforeBIM, afterBIM, options.out)
    return beforeBIM - afterBIM
|
Compare two BIM files for differences.
:param beforeFileName: the name of the file before modification.
:param afterFileName: the name of the file after modification.
:param outputFileName: the name of the output file (containing the
differences between the ``before`` and the ``after``
files.
:type beforeFileName: str
:type afterFileName: str
:type outputFileName: str
:returns: the number of differences between the two files.
The ``bim`` files contain the list of markers in a given dataset. The
``before`` file should have more markers than the ``after`` file. The
``after`` file should be a subset of the markers in the ``before`` file.
|
def split_on(word: str, section: str) -> Tuple[str, str]:
    """
    Given a string, split on a section, and return the two sections as a tuple.

    The first element contains everything up to and including ``section``;
    the second element is the remainder of the string.

    :param word: the string to split.
    :param section: the substring to split on; must occur in ``word``,
        otherwise ``ValueError`` propagates from ``str.index``.
    :return: a 2-tuple ``(head, tail)``.

    >>> split_on('hamrye', 'ham')
    ('ham', 'rye')
    """
    # Locate the section once instead of three separate index() scans.
    end = word.index(section) + len(section)
    return word[:end], word[end:]
|
Given a string, split on a section, and return the two sections as a tuple.
:param word:
:param section:
:return:
>>> split_on('hamrye', 'ham')
('ham', 'rye')
|
def canvasReleaseEvent(self, e):
    """Handle a canvas mouse-release: capture of e has finished.

    :param e: A Qt event object.
    :type: QEvent
    """
    _ = e  # NOQA
    # Stop emitting points and announce that the rectangle is complete.
    self.is_emitting_point = False
    self.rectangle_created.emit()
|
Handle canvas release events has finished capturing e.
:param e: A Qt event object.
:type: QEvent
|
def ctrl_srfc_pt_send(self, target, bitfieldPt, force_mavlink1=False):
    '''
    This message sets the control surfaces for selective passthrough mode.

    target          : The system setting the commands (uint8_t)
    bitfieldPt      : Bitfield containing the passthrough configuration, see CONTROL_SURFACE_FLAG ENUM. (uint16_t)
    '''
    # Encode the message first, then hand it to the transport.
    msg = self.ctrl_srfc_pt_encode(target, bitfieldPt)
    return self.send(msg, force_mavlink1=force_mavlink1)
|
This message sets the control surfaces for selective passthrough mode.
target : The system setting the commands (uint8_t)
bitfieldPt : Bitfield containing the passthrough configuration, see CONTROL_SURFACE_FLAG ENUM. (uint16_t)
|
def progression_sinusoidal(week, start_weight, final_weight, start_week,
                           end_week,
                           periods=2, scale=0.025, offset=0):
    """A sinusoidal progression function going through the points
    ('start_week', 'start_weight') and ('end_week', 'final_weight'),
    evaluated in 'week'. Computes a linear progression and modulates it
    with a sinusoid.

    Parameters
    ----------
    week
        The week to evaluate the function at.
    start_weight
        The weight at 'start_week'.
    final_weight
        The weight at 'end_week'.
    start_week
        The number of the first week, typically 1.
    end_week
        The number of the final week, e.g. 8.
    periods
        Number of sinusoidal periods in the time range.
    scale
        The scale (amplitude) of the sinusoidal term.
    offset
        The offset (shift) of the sinusoid.

    Returns
    -------
    weight
        The weight at 'week'.

    Examples
    -------
    >>> progression_sinusoidal(1, 100, 120, 1, 8)
    100.0
    >>> progression_sinusoidal(8, 100, 120, 1, 8)
    120.0
    >>> progression_sinusoidal(4, 100, 120, 1, 8)
    106.44931454758678
    """
    # Base trend: straight line between the two endpoint weights.
    base = progression_linear(week, start_weight, final_weight,
                              start_week, end_week)

    # Modulate the trend with a sinusoid of the requested period count.
    time_period = end_week - start_week
    phase = ((week - offset - start_week) * (math.pi * 2) /
             (time_period / periods))
    return base * (1 + scale * math.sin(phase))
|
A sinusoidal progression function going through the points
('start_week', 'start_weight') and ('end_week', 'final_weight'), evaluated
in 'week'. This function calls a linear progression function
and multiplies it by a sinusoid.
Parameters
----------
week
The week to evaluate the linear function at.
start_weight
The weight at 'start_week'.
final_weight
The weight at 'end_week'.
start_week
The number of the first week, typically 1.
end_week
The number of the final week, e.g. 8.
periods
Number of sinusoidal periods in the time range.
scale
The scale (amplitude) of the sinusoidal term.
offset
The offset (shift) of the sinusoid.
Returns
-------
weight
The weight at 'week'.
Examples
-------
>>> progression_sinusoidal(1, 100, 120, 1, 8)
100.0
>>> progression_sinusoidal(8, 100, 120, 1, 8)
120.0
>>> progression_sinusoidal(4, 100, 120, 1, 8)
106.44931454758678
|
def mask(args):
    """
    %prog mask data.bin samples.ids STR.ids meta.tsv
    OR
    %prog mask data.tsv meta.tsv

    Compute P-values based on meta and data. The `data.bin` should be the matrix
    containing filtered loci and the output mask.tsv will have the same
    dimension.
    """
    p = OptionParser(mask.__doc__)
    opts, args = p.parse_args(args)
    # Two invocation modes: 4 args = binary STR matrix, 2 args = TRED tsv.
    if len(args) not in (2, 4):
        sys.exit(not p.print_help())
    if len(args) == 4:
        databin, sampleids, strids, metafile = args
        df, m, samples, loci = read_binfile(databin, sampleids, strids)
        mode = "STRs"
    elif len(args) == 2:
        databin, metafile = args
        df = pd.read_csv(databin, sep="\t", index_col=0)
        # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
        # newer pandas needs .values / .to_numpy() — confirm pinned version.
        m = df.as_matrix()
        samples = df.index
        loci = list(df.columns)
        mode = "TREDs"
    pf = "{}_{}_SEARCH".format(mode, timestamp())
    final_columns, percentiles = read_meta(metafile)
    maskfile = pf + ".mask.tsv"
    # One task per locus: (column index, column values, locus percentile).
    run_args = []
    for i, locus in enumerate(loci):
        a = m[:, i]
        percentile = percentiles[locus]
        run_args.append((i, a, percentile))
    # TREDs mode always recomputes; STRs mode only when the input is newer.
    if mode == "TREDs" or need_update(databin, maskfile):
        cpus = min(8, len(run_args))
        write_mask(cpus, samples, final_columns, run_args, filename=maskfile)
        logging.debug("File `{}` written.".format(maskfile))
|
%prog mask data.bin samples.ids STR.ids meta.tsv
OR
%prog mask data.tsv meta.tsv
Compute P-values based on meta and data. The `data.bin` should be the matrix
containing filtered loci and the output mask.tsv will have the same
dimension.
|
def saveCustomParams(self, data):
    """
    Send custom dictionary to Polyglot to save and be retrieved on startup.

    :param data: Dictionary of key value pairs to store in Polyglot database.
    """
    LOGGER.info('Sending customParams to Polyglot.')
    self.send({'customparams': data})
|
Send custom dictionary to Polyglot to save and be retrieved on startup.
:param data: Dictionary of key value pairs to store in Polyglot database.
|
def delete(args):
    """Delete nodes from the cluster
    """
    cluster = Cluster.from_node(ClusterNode.from_uri(args.cluster))
    targets = [ClusterNode.from_uri(uri) for uri in args.nodes]
    echo("Deleting...")
    for target in targets:
        cluster.delete_node(target)
    # Block until the cluster has finished rebalancing.
    cluster.wait()
|
Delete nodes from the cluster
|
def bedtools_merge(data, sample):
    """
    Get all contiguous genomic regions with one or more overlapping
    reads. This is the shell command we'll eventually run

        bedtools bamtobed -i 1A_0.sorted.bam | bedtools merge [-d 100]
    -i <input_bam>  : specifies the input file to bed'ize
    -d <int>        : For PE set max distance between reads

    Returns the raw stdout of ``bedtools merge`` (one region per line).
    Raises IPyradWarningExit if the merge step fails.
    """
    LOGGER.info("Entering bedtools_merge: %s", sample.name)
    mappedreads = os.path.join(data.dirs.refmapping,
                               sample.name+"-mapped-sorted.bam")
    ## command to call `bedtools bamtobed`, and pipe output to stdout
    ## Usage:   bedtools bamtobed [OPTIONS] -i <bam>
    ## Usage:   bedtools merge [OPTIONS] -i <bam>
    cmd1 = [ipyrad.bins.bedtools, "bamtobed", "-i", mappedreads]
    cmd2 = [ipyrad.bins.bedtools, "merge", "-i", "-"]
    ## If PE the -d flag to tell bedtools how far apart to allow mate pairs.
    ## If SE the -d flag is negative, specifying that SE reads need to
    ## overlap by at least a specific number of bp. This prevents the
    ## stairstep syndrome when a + and - read are both extending from
    ## the same cutsite. Passing a negative number to `merge -d` gets this done.
    if 'pair' in data.paramsdict["datatype"]:
        check_insert_size(data, sample)
        #cmd2.insert(2, str(data._hackersonly["max_inner_mate_distance"]))
        cmd2.insert(2, str(data._hackersonly["max_inner_mate_distance"]))
        cmd2.insert(2, "-d")
    else:
        cmd2.insert(2, str(-1 * data._hackersonly["min_SE_refmap_overlap"]))
        cmd2.insert(2, "-d")
    ## pipe output from bamtobed into merge
    LOGGER.info("stdv: bedtools merge cmds: %s %s", cmd1, cmd2)
    proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
    proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)
    result = proc2.communicate()[0]
    ## close the upstream pipe end so proc1 can receive SIGPIPE if proc2 exits
    proc1.stdout.close()
    ## check for errors and do cleanup
    if proc2.returncode:
        raise IPyradWarningExit("error in %s: %s", cmd2, result)
    ## Write the bedfile out, because it's useful sometimes.
    if os.path.exists(ipyrad.__debugflag__):
        with open(os.path.join(data.dirs.refmapping, sample.name + ".bed"), 'w') as outfile:
            outfile.write(result)
    ## Report the number of regions we're returning
    nregions = len(result.strip().split("\n"))
    LOGGER.info("bedtools_merge: Got # regions: %s", nregions)
    return result
|
Get all contiguous genomic regions with one or more overlapping
reads. This is the shell command we'll eventually run
bedtools bamtobed -i 1A_0.sorted.bam | bedtools merge [-d 100]
-i <input_bam> : specifies the input file to bed'ize
-d <int> : For PE set max distance between reads
|
def log_response(self, response):
    """
    Helper method provided to enable the logging of responses in case if
    the :attr:`HttpProtocol.access_log` is enabled.

    :param response: Response generated for the current request
    :type response: :class:`sanic.response.HTTPResponse` or
        :class:`sanic.response.StreamingHTTPResponse`
    :return: None
    """
    if not self.access_log:
        return
    extra = {"status": getattr(response, "status", 0)}
    # Body size is only known for plain (non-streaming) responses.
    extra["byte"] = (
        len(response.body) if isinstance(response, HTTPResponse) else -1
    )
    req = self.request
    if req is None:
        extra["host"] = "UNKNOWN"
        extra["request"] = "nil"
    else:
        extra["host"] = (
            "{0}:{1}".format(req.ip, req.port) if req.ip else "UNKNOWN"
        )
        extra["request"] = "{0} {1}".format(req.method, req.url)
    access_logger.info("", extra=extra)
|
Helper method provided to enable the logging of responses in case if
the :attr:`HttpProtocol.access_log` is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None
|
def pdf(self, x, e=0., w=1., a=0.):
    """
    Skew-normal probability density function.
    see: https://en.wikipedia.org/wiki/Skew_normal_distribution

    :param x: input value
    :param e: location parameter
    :param w: scale parameter
    :param a: shape (skew) parameter
    :return: density at x
    """
    # Standardize, then apply pdf(z) * cdf(a*z) * 2/w.
    z = (x - e) / w
    return 2. / w * stats.norm.pdf(z) * stats.norm.cdf(a * z)
|
probability density function
see: https://en.wikipedia.org/wiki/Skew_normal_distribution
:param x: input value
:param e:
:param w:
:param a:
:return:
|
def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
|
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
|
def determine_result(self, returncode, returnsignal, output, isTimeout):
    """
    Parse the output of the tool and extract the verification result.
    This method always needs to be overridden.
    If the tool gave a result, this method needs to return one of the
    benchexec.result.RESULT_* strings.
    Otherwise an arbitrary string can be returned that will be shown to the user
    and should give some indication of the failure reason
    (e.g., "CRASH", "OUT_OF_MEMORY", etc.).
    """
    # Error/abort cases first: no output, timeout, or a failed pre-run.
    if not output:
        return 'ERROR - no output'
    last = output[-1]
    if isTimeout:
        return 'TIMEOUT'
    if returncode != 0:
        return 'ERROR - Pre-run'
    if last is None:
        return 'ERROR - no output'
    # Map the tool's verdict line to the benchexec result constants.
    if 'result: true' in last:
        return result.RESULT_TRUE_PROP
    if 'result: false' in last:
        return result.RESULT_FALSE_REACH
    return result.RESULT_UNKNOWN
|
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
|
def send_batches(self, batch_list):
    """Sends a list of batches to the validator.

    Args:
        batch_list (:obj:`BatchList`): the list of batches

    Returns:
        dict: the json result data, as a dict
    """
    # Serialize protobuf messages; raw bytes pass straight through.
    payload = batch_list
    if isinstance(payload, BaseMessage):
        payload = payload.SerializeToString()
    return self._post('/batches', payload)
|
Sends a list of batches to the validator.
Args:
batch_list (:obj:`BatchList`): the list of batches
Returns:
dict: the json result data, as a dict
|
def OnTextColor(self, event):
    """Text color choice event handler"""
    rgb = event.GetValue().GetRGB()
    # Broadcast the new color to interested listeners.
    post_command_event(self, self.TextColorMsg, color=rgb)
|
Text color choice event handler
|
def get_form_value(self, form_key, object_brain_uid, default=None):
    """Returns a value from the request's form for the given uid, if any
    """
    form = self.request.form
    if form_key not in form:
        return default
    # Resolve the object/brain to a plain UID if needed.
    uid = object_brain_uid
    if not api.is_uid(uid):
        uid = api.get_uid(object_brain_uid)
    values = form.get(form_key)
    if isinstance(values, list):
        if not values:
            return default
        if len(values) > 1:
            logger.warn("Multiple set of values for {}".format(form_key))
        values = values[0]
    return values.get(uid, default)
|
Returns a value from the request's form for the given uid, if any
|
def _remove_add_key(self, key):
"""Move a key to the end of the linked list and discard old entries."""
if not hasattr(self, '_queue'):
return # haven't initialized yet, so don't bother
if key in self._queue:
self._queue.remove(key)
self._queue.append(key)
if self.maxsize == 0:
return
while len(self._queue) > self.maxsize:
del self[self._queue[0]]
|
Move a key to the end of the linked list and discard old entries.
|
def reynolds(target, u0, b, temperature='pore.temperature'):
    r"""
    Uses exponential model by Reynolds [1] for the temperature dependance of
    shear viscosity

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.

    u0, b : float, array_like
        Coefficients of the viscosity exponential model
        (mu = u0*Exp(b*T)) where T is the temperature in Kelvin.
        NOTE(review): the classic Reynolds model is mu = u0*exp(-b*T);
        this implementation computes exp(+b*T), so a negative ``b`` must
        be passed for the conventional decreasing-with-temperature
        behavior — confirm the intended sign convention.

    temperature : string
        The dictionary key containing the temperature values (K).  Can be
        either a pore or throat array.

    [1] Reynolds O. (1886). Phil Trans Royal Soc London, v. 177, p.157.
    """
    value = u0*sp.exp(b*target[temperature])
    return value
|
r"""
Uses exponential model by Reynolds [1] for the temperature dependance of
shear viscosity
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
u0, b : float, array_like
Coefficients of the viscosity exponential model (mu = u0*Exp(-b*T)
where T is the temperature in Kelvin
temperature : string
The dictionary key containing the temperature values (K). Can be
either a pore or throat array.
[1] Reynolds O. (1886). Phil Trans Royal Soc London, v. 177, p.157.
|
def address_by_interface(ifname):
    """Returns the IPv4 address of the given interface name, e.g. 'eth0'

    Parameters
    ----------
    ifname : str
        Name of the interface whose address is to be returned. Required.
        Interface names longer than 15 characters are truncated (IFNAMSIZ).

    Taken from this Stack Overflow answer: https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python#24196955
    """
    # Use a context manager so the helper socket's fd is closed
    # (the original implementation leaked it).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        packed = struct.pack('256s', bytes(ifname[:15], 'utf-8'))
        # SIOCGIFADDR returns the interface address in bytes 20:24.
        info = fcntl.ioctl(s.fileno(), 0x8915, packed)
    return socket.inet_ntoa(info[20:24])
|
Returns the IP address of the given interface name, e.g. 'eth0'
Parameters
----------
ifname : str
Name of the interface whose address is to be returned. Required.
Taken from this Stack Overflow answer: https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python#24196955
|
def status(name, sig=None):
    '''
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to PID or empty
    string is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check
        sig (str): Signature to use to find the service via ps

    Returns:
        string: PID if running, empty otherwise
        dict: Maps service name to PID if running, empty string otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name> [service signature]
    '''
    # An explicit signature bypasses name resolution entirely.
    if sig:
        return __salt__['status.pid'](sig)

    is_glob = bool(re.search(r'\*|\?|\[.+\]', name))
    services = fnmatch.filter(get_all(), name) if is_glob else [name]
    results = {service: __salt__['status.pid'](service) for service in services}
    # Globs return the whole mapping; a plain name returns its single PID.
    return results if is_glob else results[name]
|
Return the status for a service.
If the name contains globbing, a dict mapping service name to PID or empty
string is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Signature to use to find the service via ps
Returns:
string: PID if running, empty otherwise
dict: Maps service name to PID if running, empty string otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
|
def decode(self, covertext):
    """Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.

    Inverts ``encode``: the fixed-length covertext prefix is unranked via
    the DFA back into ciphertext bytes, the embedded length header is
    decrypted to recover the message length, and the ciphertext is
    decrypted.  Returns ``(plaintext, remaining_buffer)`` where
    ``remaining_buffer`` holds trailing bytes beyond one full message.

    :raises InvalidInputException: if ``covertext`` is not a string.
    :raises DecodeFailureError: if ``covertext`` is shorter than
        ``self._fixed_slice``.
    """
    if not isinstance(covertext, str):
        raise InvalidInputException('Input must be of type string.')
    insufficient = (len(covertext) < self._fixed_slice)
    if insufficient:
        raise DecodeFailureError(
            "Covertext is shorter than self._fixed_slice, can't decode.")
    # Number of whole bytes the DFA's capacity can carry.
    maximumBytesToRank = int(math.floor(self.getCapacity() / 8.0))
    # Map the fixed-length covertext prefix back to its integer rank.
    rank_payload = self._dfa.rank(covertext[:self._fixed_slice])
    X = fte.bit_ops.long_to_bytes(rank_payload)
    # NOTE(review): ``string.rjust`` is the Python 2 string-module helper
    # (left-pads X with NUL bytes to full width); this code is Python 2 only.
    X = string.rjust(X, maximumBytesToRank, '\x00')
    # Decrypt the first block to expose the 8-byte message-length field.
    msg_len_header = self._encrypter.decryptOneBlock(
        X[:DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT])
    msg_len_header = msg_len_header[8:16]
    msg_len = fte.bit_ops.bytes_to_long(
        msg_len_header[:DfaEncoderObject._COVERTEXT_HEADER_LEN_PLAINTEXT])
    # Ciphertext body = payload after the 16-byte header, plus any
    # covertext beyond the fixed slice.
    retval = X[16:16 + msg_len]
    retval += covertext[self._fixed_slice:]
    # Split off trailing bytes that belong to the next message.
    ctxt_len = self._encrypter.getCiphertextLen(retval)
    remaining_buffer = retval[ctxt_len:]
    retval = retval[:ctxt_len]
    retval = self._encrypter.decrypt(retval)
    return retval, remaining_buffer
|
Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.
|
def add_progress(self, count, symbol='#',
                 color=None, on_color=None, attrs=None):
    """Add a section of progress to the progressbar.

    The progress is captured by "count" and displayed as a fraction of
    the statusbar width proportional to this count over the total
    progress displayed. The progress will be displayed using the
    "symbol" character and the foreground and background colours and
    display style determined by the "color", "on_color" and "attrs"
    parameters. For these, use the colorama package to set up the
    formatting.
    """
    # Delegate straight to the underlying progress widget.
    self._progress.add_progress(count, symbol, color, on_color, attrs)
|
Add a section of progress to the progressbar.
The progress is captured by "count" and displayed as a fraction
of the statusbar width proportional to this count over the total
progress displayed. The progress will be displayed using the "symbol"
character and the foreground and background colours and display style
determined by the "fg", "bg" and "style" parameters. For these,
use the colorama package to set up the formatting.
|
def adapt(self, d, x):
    """
    Adapt weights according one desired value and its input.

    **Args:**

    * `d` : desired value (float)

    * `x` : input array (1-dimensional array)
    """
    # Shift the input memory one column right and insert the newest
    # sample at column 0 (most-recent-first ordering); same for d.
    self.x_mem[:,1:] = self.x_mem[:,:-1]
    self.x_mem[:,0] = x
    self.d_mem[1:] = self.d_mem[:-1]
    self.d_mem[0] = d
    # Estimate outputs and errors over the whole memory window.
    self.y_mem = np.dot(self.x_mem.T, self.w)
    self.e_mem = self.d_mem - self.y_mem
    # Regularized normal equations: solve (X^T X + eps*I) Z = I and form
    # dw = X Z e — a recursive-least-squares style weight update.
    dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
    dw_part2 = np.linalg.solve(dw_part1, self.ide)
    dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
    self.w += self.mu * dw
|
Adapt weights according one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.