code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def alias_config_alias_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
alias = ET.SubElement(alias_config, "alias")
name = ET.SubElement(alias, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def save_to_mat_file(self, parameter_space,
                     result_parsing_function,
                     filename, runs):
    """
    Export the results for the requested parameter space as a .mat file.

    Args:
        parameter_space (dict): parameter/list-of-values pairs; scalar
            values are wrapped into single-element lists in place.
        result_parsing_function (function): user-defined function, taking a
            result dictionary as argument, that parses the result files and
            returns a list of values.
        filename (path): name of the output .mat file.
        runs (int): number of runs gathered for each parameter combination.
    """
    # Normalize scalar entries to one-element lists (mutates the caller's
    # dict, matching the original contract).
    for key, value in parameter_space.items():
        if not isinstance(value, list):
            parameter_space[key] = [value]
    # Label every non-singleton dimension, plus a trailing 'runs' axis.
    dimension_labels = [{key: str(values)}
                        for key, values in parameter_space.items()
                        if len(values) > 1]
    dimension_labels.append({'runs': range(runs)})
    results = self.get_results_as_numpy_array(parameter_space,
                                              result_parsing_function,
                                              runs=runs)
    return savemat(filename, {'results': results,
                              'dimension_labels': dimension_labels})
|
Return the results relative to the desired parameter space in the form
of a .mat file.
Args:
parameter_space (dict): dictionary containing
parameter/list-of-values pairs.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
filename (path): name of output .mat file.
runs (int): number of runs to gather for each parameter
combination.
|
def group_membership_create(self, data, **kwargs):
    """Create a group membership via the Zendesk API.

    See https://developer.zendesk.com/rest_api/docs/core/group_memberships#create-membership
    """
    return self.call("/api/v2/group_memberships.json",
                     method="POST", data=data, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/group_memberships#create-membership
|
def render_unregistered(error=None):
    """
    Render the index template for an unregistered user.

    Args:
        error (str, default None): Optional error message.

    Returns:
        str: Template filled with data.
    """
    context = {
        "registered": False,
        "error": error,
        "seeder_data": None,
        "url_id": None,
    }
    return template(read_index_template(), **context)
|
Render template file for the unregistered user.
Args:
error (str, default None): Optional error message.
Returns:
str: Template filled with data.
|
def generate_df(js_dict, naming, value="value"):
    """Decode JSON-stat dict into pandas.DataFrame object. Helper method \
    that should be called inside from_json_stat().

    Args:
        js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
            previously deserialized into a python object by \
            json.load() or json.loads(), for example.
        naming(string): dimension naming. Possible values: 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        output(DataFrame): pandas.DataFrame with converted data.
    """
    # Removed a dead `values = []` assignment that was immediately
    # overwritten by get_values() below.
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # Pair each category row with its corresponding value.
    output = pd.DataFrame([category + [values[i]]
                           for i, category in
                           enumerate(get_df_row(dimensions, naming))])
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
|
Decode JSON-stat dict into pandas.DataFrame object. Helper method \
that should be called inside from_json_stat().
Args:
js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
previously deserialized into a python object by \
json.load() or json.loads(), for example.
naming(string): dimension naming. Possible values: 'label' or 'id.'
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
output(DataFrame): pandas.DataFrame with converted data.
|
def _handle_github(self):
    """Handle exception and submit it as GitHub issue."""
    # Assemble the colored menu shown to the user; _BUG is the shared
    # bug-report preamble defined elsewhere in this module.
    menu = (
        _BUG
        + click.style('1. Open an issue by typing "open";\n', fg='green')
        + click.style('2. Print human-readable information by typing '
                      '"print";\n', fg='yellow')
        + click.style('3. See the full traceback without submitting details '
                      '(default: "ignore").\n\n', fg='red')
        + 'Please select an action by typing its name'
    )
    choice = click.prompt(menu,
                          type=click.Choice(['open', 'print', 'ignore']),
                          default='ignore')
    # Dispatch to self._process_open / _process_print / _process_ignore.
    getattr(self, '_process_' + choice)()
|
Handle exception and submit it as GitHub issue.
|
def strip_to_chains(self, chains, break_at_endmdl = True):
    '''Throw away all ATOM/HETATM/ANISOU/TER lines for chains that are not in the chains list.

    :param chains: iterable of single-character chain IDs to keep; compared
        against PDB column 22 (line index 21).
    :param break_at_endmdl: if True, truncate at the first ENDMDL record so
        only the first NMR model is kept.
    :raises Exception: if ``chains`` is empty or None.
    '''
    if chains:
        chains = set(chains)
        # Remove any structure lines not associated with the chains
        # (a line survives if it is not a structure record at all, or if
        # its chain ID character at index 21 is one of the kept chains).
        self.lines = [l for l in self.lines if not(l.startswith('ATOM ') or l.startswith('HETATM') or l.startswith('ANISOU') or l.startswith('TER')) or l[21] in chains]
        # For some Rosetta protocols, only one NMR model should be kept
        if break_at_endmdl:
            new_lines = []
            for l in self.lines:
                if l.startswith('ENDMDL'):
                    # Keep the ENDMDL record itself, then stop copying.
                    new_lines.append(l)
                    break
                new_lines.append(l)
            self.lines = new_lines
            # NOTE(review): the derived structure-line cache is only refreshed
            # on this branch; presumably callers passing break_at_endmdl=False
            # do not need it updated — TODO confirm.
            self._update_structure_lines()
            # todo: this logic should be fine if no other member elements rely on these lines e.g. residue mappings otherwise we need to update or clear those elements here
    else:
        raise Exception('The chains argument needs to be supplied.')
|
Throw away all ATOM/HETATM/ANISOU/TER lines for chains that are not in the chains list.
|
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
    """
    Update a module item.

    Update and return an existing module item by issuing
    PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id}.
    All ``module_item_*`` keyword arguments are optional form fields.
    """
    path = {}
    data = {}
    params = {}
    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id
    # REQUIRED - PATH - module_id
    """ID"""
    path["module_id"] = module_id
    # REQUIRED - PATH - id
    """ID"""
    path["id"] = id
    # OPTIONAL - module_item[title]
    """The name of the module item"""
    if module_item_title is not None:
        data["module_item[title]"] = module_item_title
    # OPTIONAL - module_item[position]
    """The position of this item in the module (1-based)"""
    if module_item_position is not None:
        data["module_item[position]"] = module_item_position
    # OPTIONAL - module_item[indent]
    """0-based indent level; module items may be indented to show a hierarchy"""
    if module_item_indent is not None:
        data["module_item[indent]"] = module_item_indent
    # OPTIONAL - module_item[external_url]
    """External url that the item points to. Only applies to 'ExternalUrl' type."""
    if module_item_external_url is not None:
        data["module_item[external_url]"] = module_item_external_url
    # OPTIONAL - module_item[new_tab]
    """Whether the external tool opens in a new tab. Only applies to
    'ExternalTool' type."""
    if module_item_new_tab is not None:
        data["module_item[new_tab]"] = module_item_new_tab
    # OPTIONAL - module_item[completion_requirement][type]
    """Completion requirement for this module item.
    "must_view": Applies to all item types
    "must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
    "must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
    Inapplicable types will be ignored"""
    if module_item_completion_requirement_type is not None:
        # BUGFIX: "min_score" is a valid requirement type per the doc above,
        # but was missing from the validation list, rejecting valid requests.
        self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit", "min_score"])
        data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
    # OPTIONAL - module_item[completion_requirement][min_score]
    """Minimum score required to complete, Required for completion_requirement
    type 'min_score'."""
    if module_item_completion_requirement_min_score is not None:
        data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
    # OPTIONAL - module_item[published]
    """Whether the module item is published and visible to students."""
    if module_item_published is not None:
        data["module_item[published]"] = module_item_published
    # OPTIONAL - module_item[module_id]
    """Move this item to another module by specifying the target module id here.
    The target module must be in the same course."""
    if module_item_module_id is not None:
        data["module_item[module_id]"] = module_item_module_id
    self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
|
Update a module item.
Update and return an existing module item
|
def _set_boutons_interface(self, buttons):
    """Add one toolbar action per (id, function, description, is_active) tuple."""
    for action_id, callback, description, enabled in buttons:
        new_action = self.addAction(QIcon(self.get_icon(action_id)), description)
        new_action.setEnabled(enabled)
        new_action.triggered.connect(callback)
|
Display buttons given by the list of tuples (id,function,description,is_active)
|
def get_vlan_brief_output_vlan_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
output = ET.SubElement(get_vlan_brief, "output")
vlan = ET.SubElement(output, "vlan")
vlan_id_key = ET.SubElement(vlan, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
interface = ET.SubElement(vlan, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> 'Callable[[str, bool], NetworkedConfigObject]':
    """
    Generates a factory for NetworkedConfigObject using the specified hooks.

    Returns a callable ``(url, safe_load=True) -> NetworkedConfigObject``
    rather than a config object itself; the original return annotation
    claimed ``NetworkedConfigObject``, which was incorrect.

    NOTE(review): ``**kwargs`` is accepted but never used — presumably kept
    for interface compatibility; confirm before removing.
    """
    def NetworkedConfigObjectGenerator(url, safe_load: bool=True):
        # Bind the captured hooks into a freshly constructed networked config.
        return NetworkedConfigObject(url=url, load_hook=load_hook,
                                     safe_load=safe_load,
                                     normal_class_load_hook=normal_class_load_hook,
                                     normal_class_dump_hook=normal_class_dump_hook)
    return NetworkedConfigObjectGenerator
|
Generates a NetworkedConfigObject using the specified hooks.
|
def get_motor_offsets(SERVO_OUTPUT_RAW, ofs, motor_ofs):
    '''Return magnetometer offsets corrected for motor interference.

    Scales ``motor_ofs`` (per-axis interference offsets at full throttle) by
    the normalized average motor PWM derived from SERVO_OUTPUT_RAW, and adds
    the result to ``ofs``. The original docstring ("calculate magnetic field
    strength from raw magnetometer") described a different function.

    :param SERVO_OUTPUT_RAW: MAVLink SERVO_OUTPUT_RAW message (servo1..4_raw).
    :param ofs: (x, y, z) base magnetometer offsets.
    :param motor_ofs: (x, y, z) interference offsets at full throttle.
    :returns: (x, y, z) tuple of throttle-corrected offsets.
    '''
    import mavutil
    # Global mavfile provides the RC3 throttle calibration parameters.
    self = mavutil.mavfile_global
    m = SERVO_OUTPUT_RAW
    # Average the four motor PWM outputs.
    motor_pwm = m.servo1_raw + m.servo2_raw + m.servo3_raw + m.servo4_raw
    motor_pwm *= 0.25
    rc3_min = self.param('RC3_MIN', 1100)
    rc3_max = self.param('RC3_MAX', 1900)
    # Normalize to [0, 1] throttle fraction, clamped at both ends.
    motor = (motor_pwm - rc3_min) / (rc3_max - rc3_min)
    if motor > 1.0:
        motor = 1.0
    if motor < 0.0:
        motor = 0.0
    motor_offsets0 = motor_ofs[0] * motor
    motor_offsets1 = motor_ofs[1] * motor
    motor_offsets2 = motor_ofs[2] * motor
    ofs = (ofs[0] + motor_offsets0, ofs[1] + motor_offsets1, ofs[2] + motor_offsets2)
    return ofs
|
calculate magnetic field strength from raw magnetometer
|
def _apply_rate(self, max_rate, aggressive=False):
    """
    Try to adjust the rate (characters/second)
    of the fragments of the list,
    so that it does not exceed the given ``max_rate``.

    This is done by testing whether some slack
    can be borrowed from the fragment before
    the faster current one.
    If ``aggressive`` is ``True``,
    the slack might be retrieved from the fragment after
    the faster current one,
    if the previous fragment could not contribute enough slack.
    """
    self.log(u"Called _apply_rate")
    self.log([u" Aggressive: %s", aggressive])
    self.log([u" Max rate: %.3f", max_rate])
    # regular_fragments yields (index, fragment) pairs.
    regular_fragments = list(self.smflist.regular_fragments)
    if len(regular_fragments) <= 1:
        self.log(u" The list contains at most one regular fragment, returning")
        return
    # A 0.001 char/s tolerance avoids flagging fragments that sit exactly
    # at the cap due to rounding; fragments with rate None are skipped.
    faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
    if len(faster_fragments) == 0:
        self.log(u" No regular fragment faster than max rate, returning")
        return
    self.log_warn(u" Some fragments have rate faster than max rate:")
    self.log([u" %s", [i for i, f in faster_fragments]])
    self.log(u"Fixing rate for faster fragments...")
    for frag_index, fragment in faster_fragments:
        self.smflist.fix_fragment_rate(frag_index, max_rate, aggressive=aggressive)
    self.log(u"Fixing rate for faster fragments... done")
    # Re-scan with the same threshold: fix_fragment_rate is best-effort and
    # may have left some fragments above the cap; only warn in that case.
    faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
    if len(faster_fragments) > 0:
        self.log_warn(u" Some fragments still have rate faster than max rate:")
        self.log([u" %s", [i for i, f in faster_fragments]])
|
Try to adjust the rate (characters/second)
of the fragments of the list,
so that it does not exceed the given ``max_rate``.
This is done by testing whether some slack
can be borrowed from the fragment before
the faster current one.
If ``aggressive`` is ``True``,
the slack might be retrieved from the fragment after
the faster current one,
if the previous fragment could not contribute enough slack.
|
def grow(files: hug.types.multiple, in_ext: hug.types.text="short", out_ext: hug.types.text="html",
         out_dir: hug.types.text="", recursive: hug.types.smart_boolean=False):
    """Grow up your markup"""
    if files == ['-']:
        # Stream mode: convert stdin and emit straight to stdout.
        print(text(sys.stdin.read()))
        return
    print(INTRO)
    source_files = iter_source_code(files, in_ext) if recursive else files
    for source_name in source_files:
        with open(source_name, 'r') as source:
            # Output path: same stem under out_dir, with the new extension.
            stem = ".".join(source_name.split('.')[:-1])
            target_name = "{0}.{1}".format(os.path.join(out_dir, stem), out_ext)
            with open(target_name, 'w') as target:
                print(" |-> [{2}]: {3} '{0}' -> '{1}' till it's not short...".format(source_name, target_name,
                                                                                    'HTML', 'Growing'))
                target.write(text(source.read()))
    print(" |")
    print(" | >>> Done Growing! :) <<<")
    print("")
|
Grow up your markup
|
def for_kind(kind_map, type_, fallback_key):
    """
    Create an Options object from any mapping.

    Looks up ``type_`` in ``kind_map``, falling back to ``fallback_key``
    when absent; dict configs must define an 'element' key and are expanded
    as keyword arguments, any other config is passed positionally.
    """
    if type_ in kind_map:
        config = kind_map[type_]
    elif fallback_key in kind_map:
        config = kind_map[fallback_key]
    else:
        raise ConfigException('"%s" is not in the config and has no fallback' % type_)
    if not isinstance(config, dict):
        return Options(type_, config)
    if 'element' not in config:
        raise ConfigException('"%s" does not define an element' % type_)
    return Options(type_, **config)
|
Create an Options object from any mapping.
|
def display_col_dp(dp_list, attr_name):
    """
    Print the value associated with ``attr_name`` for every
    DataProperty instance in ``dp_list``.
    """
    header = "---------- {:s} ----------".format(attr_name)
    values = [getattr(item, attr_name) for item in dp_list]
    print()
    print(header)
    print(values)
|
show a value associated with an attribute for each
DataProperty instance in the dp_list
|
async def send_rpc(self, msg, _context):
    """Send an RPC to a service on behalf of a client."""
    service = msg.get('name')
    rpc_id = msg.get('rpc_id')
    payload = msg.get('payload')
    timeout = msg.get('timeout')
    # Forward the command, then wait (up to timeout) for the matching result.
    response_id = await self.service_manager.send_rpc_command(service, rpc_id,
                                                              payload, timeout)
    try:
        return await self.service_manager.rpc_results.get(response_id,
                                                          timeout=timeout)
    except asyncio.TimeoutError:
        self._logger.warning("RPC 0x%04X on service %s timed out after %f seconds",
                             rpc_id, service, timeout)
        return dict(result='timeout', response=b'')
|
Send an RPC to a service on behalf of a client.
|
def get_beam(self, ra, dec):
    """
    Get the psf as a :class:`AegeanTools.fits_image.Beam` object.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    beam : :class:`AegeanTools.fits_image.Beam`
        The psf at the given location, or None when the local psf values
        are not all finite.
    """
    if self.data is None:
        # No psf map loaded: fall back to the global beam.
        return self.wcshelper.beam
    psf = self.get_psf_sky(ra, dec)
    if not all(np.isfinite(psf)):
        return None
    return Beam(psf[0], psf[1], psf[2])
|
Get the psf as a :class:`AegeanTools.fits_image.Beam` object.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
The psf at the given location.
|
def create_run(cmd, project, exp, grp):
    """
    Create a new 'run' in the database.

    Opens a new session, inserts a Run row describing this execution and
    commits it. Both the run and the open session are handed back; the
    caller owns the session and is responsible for committing any further
    changes.

    Args:
        cmd: The command that has been executed.
        project: The project this run belongs to.
        exp: The experiment this run belongs to.
        grp: The run_group (uuid) we belong to.

    Returns:
        (run, session): the inserted Run tuple and the session it was
        created in.
    """
    from benchbuild.utils import schema as s
    session = s.Session()
    new_run = s.Run(
        command=str(cmd),
        project_name=project.name,
        project_group=project.group,
        experiment_name=exp,
        run_group=str(grp),
        experiment_group=project.experiment.id)
    session.add(new_run)
    session.commit()
    return (new_run, session)
|
Create a new 'run' in the database.
This creates a new transaction in the database and creates a new
run in this transaction. Afterwards we return both the transaction as
well as the run itself. The user is responsible for committing it when
the time comes.
Args:
cmd: The command that has been executed.
prj: The project this run belongs to.
exp: The experiment this run belongs to.
grp: The run_group (uuid) we belong to.
Returns:
The inserted tuple representing the run and the session opened with
the new run. Don't forget to commit it at some point.
|
def update(self):
    """Refresh the size entry of every monitored folder and return the list."""
    # Nothing to do when no folder is being monitored.
    if not self.__folder_list:
        return self.__folder_list
    for index in range(len(self.get())):
        try:
            self.__folder_list[index]['size'] = self.__folder_size(self.path(index))
        except OSError as e:
            logger.debug('Cannot get folder size ({}). Error: {}'.format(self.path(index), e))
            # '!' marks permission-denied (errno 13); '?' any other failure.
            self.__folder_list[index]['size'] = '!' if e.errno == 13 else '?'
    return self.__folder_list
|
Update the command result attributed.
|
def clean_series(series, *args, **kwargs):
    """Ensure all datetimes are valid Timestamp objects and dtype is np.datetime64[ns].

    Non-object series are returned unchanged. For object series, datetimes
    are clipped into the Timestamp-representable range, and if any strings
    are present each element is encoded and coerced to float/int where
    possible.
    """
    # Only object-dtype series can hold mixed/unclean values.
    if series.dtype != np.dtype('O'):
        return series
    has_datetimes = any_generated(
        (isinstance(item, datetime.datetime) for item in series))
    if has_datetimes:
        series = series.apply(clip_datetime)
    has_strings = any_generated(
        (isinstance(item, basestring) for item in series))
    if has_strings:
        series = series.apply(encode)
        series = series.apply(try_float_int)
    return series
|
Ensure all datetimes are valid Timestamp objects and dtype is np.datetime64[ns]
>>> from datetime import timedelta
>>> clean_series(pd.Series([datetime.datetime(1, 1, 1), 9, '1942', datetime.datetime(1970, 10, 23)]))
0 1677-09-22 00:12:44+00:00
1 9
2 1942
3 1970-10-23 00:00:00+00:00
dtype: object
>>> clean_series(pd.Series([datetime.datetime(1, 1, 1), datetime.datetime(3000, 10, 23)]))
0 1677-09-22 00:12:44+00:00
1 2262-04-11 23:47:16.854775+00:00
dtype: datetime64[ns, UTC]
|
def threenum(h5file, var, post_col='mult'):
    """Calculate the three number summary (min, max, weighted mean) for a variable.

    The three number summary is the minimum, maximum and the mean of the
    data. Traditionally one would summarise data with the five number
    summary (max, min, 1st, 2nd/median, 3rd quartile), but quantiles are
    hard to calculate without sorting the data, which is hard to do
    out-of-core.

    Args:
        h5file: path of the HDF5 file to read.
        var: name of the dataset holding the values.
        post_col: name of the dataset holding the weights (default 'mult').

    Returns:
        (minval, maxval, mean) tuple; the mean is weighted by ``post_col``.
    """
    # Use a context manager so the file is closed even if a chunk read or
    # reduction raises (the original leaked the handle on exceptions).
    with h5py.File(h5file, 'r') as f:
        d = f[var]
        w = f[post_col]
        step = d.chunks[0]
        n = d.shape[0]
        maxval = -np.abs(d[0])
        minval = np.abs(d[0])
        total = 0
        wsum = 0
        for x in range(0, n, step):
            # Mask out NaN/inf entries before accumulating.
            finite = ~np.logical_or(np.isnan(d[x:x+step]), np.isinf(d[x:x+step]))
            d_c = d[x:x+step][finite]
            w_c = w[x:x+step][finite]
            chunk_max = np.max(d_c)
            chunk_min = np.min(d_c)
            maxval = chunk_max if chunk_max > maxval else maxval
            minval = chunk_min if chunk_min < minval else minval
            total += np.sum(w_c*d_c)
            wsum += np.sum(w_c)
    mean = total/float(wsum)
    return (minval, maxval, mean)
|
Calculates the three number summary for a variable.
The three number summary is the minimum, maximum and the mean
of the data. Traditionally one would summarise data with the
five number summary: max, min, 1st, 2nd (median), 3rd quartile.
But quantiles are hard to calculate without sorting the data
which is hard to do out-of-core.
|
def build(cls, builder, *args, build_loop=None, **kwargs):
    """ Build a hardware control API and initialize the adapter in one call

    :param builder: the builder method to use (e.g.
                    :py:meth:`hardware_control.API.build_hardware_simulator`)
    :param build_loop: optional loop on which to run a coroutine builder
    :param args: Args to forward to the builder method
    :param kwargs: Kwargs to forward to the builder method
    """
    loop = asyncio.new_event_loop()
    kwargs['loop'] = loop
    # Strip any event loop passed positionally; the fresh loop created
    # above is injected through kwargs instead.
    forwarded = [value for value in args
                 if not isinstance(value, asyncio.AbstractEventLoop)]
    if asyncio.iscoroutinefunction(builder):
        runner = build_loop or asyncio.get_event_loop()
        api = runner.run_until_complete(builder(*forwarded, **kwargs))
    else:
        api = builder(*forwarded, **kwargs)
    return cls(api, loop)
|
Build a hardware control API and initialize the adapter in one call
:param builder: the builder method to use (e.g.
:py:meth:`hardware_control.API.build_hardware_simulator`)
:param args: Args to forward to the builder method
:param kwargs: Kwargs to forward to the builder method
|
def rename(name, new_name):
    '''
    .. versionadded:: 2017.7.0

    Renames a container. Returns ``True`` if successful, and raises an error if
    the API returns one. If unsuccessful and the API returns no error (should
    not happen), then ``False`` will be returned.

    name
        Name or ID of existing container

    new_name
        New name to assign to container

    CLI Example:

    .. code-block:: bash

        salt myminion docker.rename foo bar
    '''
    container_id = inspect_container(name)['Id']
    log.debug('Renaming container \'%s\' (ID: %s) to \'%s\'',
              name, container_id, new_name)
    _client_wrapper('rename', container_id, new_name)
    # Confirm the rename took: the new name must resolve to the same ID.
    return inspect_container(new_name)['Id'] == container_id
|
.. versionadded:: 2017.7.0
Renames a container. Returns ``True`` if successful, and raises an error if
the API returns one. If unsuccessful and the API returns no error (should
not happen), then ``False`` will be returned.
name
Name or ID of existing container
new_name
New name to assign to container
CLI Example:
.. code-block:: bash
salt myminion docker.rename foo bar
|
def read_namespaced_replication_controller_status(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_replication_controller_status  # noqa: E501

    read status of the specified ReplicationController  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_replication_controller_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ReplicationController (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1ReplicationController
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    delegate = self.read_namespaced_replication_controller_status_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous call: hand back the request thread directly.
        return delegate(name, namespace, **kwargs)  # noqa: E501
    # Synchronous call: unwrap and return the response data.
    (data) = delegate(name, namespace, **kwargs)  # noqa: E501
    return data
|
read_namespaced_replication_controller_status # noqa: E501
read status of the specified ReplicationController # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replication_controller_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicationController (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1ReplicationController
If the method is called asynchronously,
returns the request thread.
|
def key(self, direction, mechanism, purviews=False, _prefix=None):
    """Cache key. This is the call signature of |Subsystem.find_mice()|."""
    parts = (self.subsystem_hash, _prefix, direction, mechanism, purviews)
    return "subsys:{}:{}:{}:{}:{}".format(*parts)
|
Cache key. This is the call signature of |Subsystem.find_mice()|.
|
def encode(self):
    """
    Return binary string representation of object.

    Walks self.format (a mapping of numeric type tag -> (attribute name,
    marshaller)) in ascending tag order and emits, for each field, a
    little-endian header of tag (uint16) and payload size (uint32),
    followed by the marshalled payload when the attribute is set.
    Fields whose attribute is None are emitted with size 0; the
    0xFFFF end-of-block tag is always emitted header-only.

    NOTE: this is Python 2 code — ``unicode`` and ``str(bytearray)``
    below have no direct Python 3 equivalent.

    :rtype: bytearray
    """
    buf = bytearray()
    for typ in sorted(self.format.keys()):
        encoded = None
        if typ != 0xFFFF: # end of block
            (name, marshall) = self.format[typ]
            value = getattr(self, name, None)
            if value is not None:
                try:
                    encoded = marshall.encode(value)
                    self.log.debug("Encoded field [{0}] to value {1!r}".format(name, encoded))
                except:
                    # Log with full context before propagating; a failed
                    # field makes the whole record unencodable.
                    self.log.exception("Error encoding key/value: key={0}, value={1!r}".format(name, value))
                    raise
        # Note, there is an assumption here that encode() func is returning
        # a byte string (so len = num bytes). That should be a safe assumption.
        size = len(encoded) if encoded is not None else 0
        packed = struct.pack('<H', typ)
        packed += struct.pack('<I', size)
        if encoded is not None:
            # Normalize to a plain byte string before packing (py2 semantics:
            # str(bytearray) yields bytes; unicode is UTF-8 encoded).
            if isinstance(encoded, bytearray):
                encoded = str(encoded)
            elif isinstance(encoded, unicode):
                encoded = encoded.encode('utf-8')
            packed += struct.pack('<%ds' % size, encoded)
        buf += packed
    return buf
|
Return binary string representation of object.
:rtype: str
|
def blocking_start(self, waiting_func=None):
    """Start the queuing thread and block until it completes.

    A thin wrapper around start() and wait_for_completion(). When run by
    the main thread, a KeyboardInterrupt (which SIGTERM and SIGHUP have
    been translated to) triggers an orderly shutdown of the worker
    threads via stop().
    """
    try:
        self.start()
        self.wait_for_completion(waiting_func)
    # it only ends if someone hits ^C or sends SIGHUP or SIGTERM -
    # any of which will get translated into a KeyboardInterrupt
    except KeyboardInterrupt:
        while True:
            try:
                self.stop()
            except KeyboardInterrupt:
                # Further interrupts during shutdown are acknowledged but
                # ignored; keep trying to stop cleanly.
                self.logger.warning('We heard you the first time. There '
                                    'is no need for further keyboard or signal '
                                    'interrupts. We are waiting for the '
                                    'worker threads to stop. If this app '
                                    'does not halt soon, you may have to send '
                                    'SIGKILL (kill -9)')
            else:
                break
|
this function is just a wrapper around the start and
wait_for_completion methods. It starts the queuing thread and then
waits for it to complete. If run by the main thread, it will detect
the KeyboardInterrupt exception (which is what SIGTERM and SIGHUP
have been translated to) and will order the threads to die.
|
def GetRunlevelsLSB(states):
    """Accepts a string and returns the set of valid numeric LSB runlevel strings.

    Note: despite the original docstring, the result is a set, not a list.
    """
    if not states:
        return set()
    valid = {"0", "1", "2", "3", "4", "5", "6"}
    _LogInvalidRunLevels(states, valid)
    return valid.intersection(states.split())
|
Accepts a string and returns a list of strings of numeric LSB runlevels.
|
def merge_from(self, other):
    """Merge information from another NumberFormat object into this one."""
    # Scalar fields: other's value wins whenever it is set.
    for attr in ('pattern',
                 'format',
                 'national_prefix_formatting_rule',
                 'national_prefix_optional_when_formatting',
                 'domestic_carrier_code_formatting_rule'):
        incoming = getattr(other, attr)
        if incoming is not None:
            setattr(self, attr, incoming)
    # List field: accumulate rather than replace.
    self.leading_digits_pattern.extend(other.leading_digits_pattern)
|
Merge information from another NumberFormat object into this one.
|
def _init_security(self):
"""
Initialize a secure connection to the server.
"""
if not self._starttls():
raise SecurityError("Could not start TLS connection")
# _ssh_handshake() will throw an exception upon failure
self._ssl_handshake()
if not self._auth():
raise SecurityError("Could not authorize connection")
|
Initialize a secure connection to the server.
|
def get(self):
    '''
    An endpoint to determine salt-api capabilities

    .. http:get:: /

        :reqheader Accept: |req_accept|

        :status 200: |200|
        :status 401: |401|
        :status 406: |406|

    **Example request:**

    .. code-block:: bash

        curl -i localhost:8000

    .. code-block:: text

        GET / HTTP/1.1
        Host: localhost:8000
        Accept: application/json

    **Example response:**

    .. code-block:: text

        HTTP/1.1 200 OK
        Content-Type: application/json
        Content-Length: 83

        {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"}
    '''
    payload = {"clients": list(self.saltclients.keys()),
               "return": "Welcome"}
    self.write(self.serialize(payload))
|
An endpoint to determine salt-api capabilities
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 83
{"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"}
|
def paint(self, painter, option, index):
    """
    Reimplements the :meth:`QStyledItemDelegate.paint` method.
    """
    # Pick the stylesheet matching the current widget state.
    if option.state & QStyle.State_MouseOver:
        sheet = self.__style.hover
    elif option.state & QStyle.State_Selected:
        sheet = self.__style.highlight
    else:
        sheet = self.__style.default
    self.__label.setStyleSheet(sheet)
    cell_data = index.model().data(index, Qt.DisplayRole)
    self.__label.setText(umbra.ui.common.QVariant_to_string(cell_data))
    self.__label.setFixedSize(option.rect.size())
    # Render the label at the cell's position and restore the painter state.
    painter.save()
    painter.translate(option.rect.topLeft())
    self.__label.render(painter)
    painter.restore()
|
Reimplements the :meth:`QStyledItemDelegate.paint` method.
|
def get_unique_sample_files(file_samples):
    """Filter file_sample data frame to only keep one file per sample.

    Params
    ------
    file_samples : `pandas.DataFrame`
        A data frame containing a mapping between file IDs and sample
        barcodes, as returned by :meth:`get_file_samples`.

    Returns
    -------
    `pandas.DataFrame`
        The filtered data frame.

    Notes
    -----
    To remove redundant files in a consistent fashion, rows are sorted by
    file ID and the first file for each sample is kept.
    """
    assert isinstance(file_samples, pd.DataFrame)
    logger.info('Original number of files: %d', len(file_samples.index))
    # Sort by file ID so that "keep first" below is deterministic.
    df = file_samples.sort_values('file_id')
    # Some samples have multiple files with the same barcode (different
    # aliquots); keep only the first of each.
    df = df.drop_duplicates('sample_barcode', keep='first')
    logger.info('Number of files after removing duplicates from different '
                'aliquots: %d', len(df.index))
    # Some samples also have multiple files differing only by vial; compare
    # on the first 15 barcode characters (barcode minus the vial tag).
    df['sample_barcode15'] = df['sample_barcode'].apply(lambda x: x[:15])
    df = df.drop_duplicates('sample_barcode15', keep='first')
    logger.info('Number of files after removing duplicates from different '
                'vials: %d', len(df.index))
    # Drop the auxiliary column and restore the original (numerical) order.
    df = df.drop('sample_barcode15', axis=1)
    return df.sort_index()
|
Filter file_sample data frame to only keep one file per sample.
Params
------
file_samples : `pandas.DataFrame`
A data frame containing a mapping between file IDs and sample barcodes.
This type of data frame is returned by :meth:`get_file_samples`.
Returns
-------
`pandas.DataFrame`
The filtered data frame.
Notes
-----
In order to remove redundant files in a consistent fashion, the samples are
sorted by file ID, and then the first file for each sample is kept.
|
def write(self, s):
    """Encode the unicode string ``s`` and send it to the pseudoterminal.

    The text is encoded with this process's configured encoding before
    being handed to the underlying byte-oriented ``write``.

    Returns the number of bytes written.
    """
    encoded = s.encode(self.encoding)
    return super(PtyProcessUnicode, self).write(encoded)
|
Write the unicode string ``s`` to the pseudoterminal.
Returns the number of bytes written.
|
def sphere_constrained_cubic(dr, a, alpha):
    """
    Sphere generated by a cubic interpolant constrained to be (1,0) on
    (r0-sqrt(3)/2, r0+sqrt(3)/2), the size of the cube in the (111) direction.
    """
    sqrt3 = np.sqrt(3)
    # Quadratic-term coefficient fixed by the endpoint constraints.
    quad_coeff = a*0.5/sqrt3*(1 - 0.6*sqrt3*alpha)/(0.15 + a*a)
    # Restrict the signed distance to the interpolation interval.
    r = np.clip(dr, -0.5*sqrt3, 0.5*sqrt3)
    upper = r + 0.5*sqrt3
    lower = r - 0.5*sqrt3
    return alpha*lower*upper*r + quad_coeff*lower*upper - lower/sqrt3
|
Sphere generated by a cubic interpolant constrained to be (1,0) on
(r0-sqrt(3)/2, r0+sqrt(3)/2), the size of the cube in the (111) direction.
|
def clone(self, folder, git_repository):
    """Create the theme destination folder and clone the given git repo in it.

    :param folder: path of the git managed theme folder to create
    :param git_repository: git url of the theme repository to clone
    """
    # NOTE(review): os.makedirs raises FileExistsError when `folder` already
    # exists, so this only "ensures" a fresh destination — confirm intent.
    os.makedirs(folder)
    # Clone the repository into the newly created folder.
    git.Git().clone(git_repository, folder)
|
Ensures theme destination folder and clone git specified repo in it.
:param git_repository: git url of the theme folder
:param folder: path of the git managed theme folder
|
def proximal_convex_conj_kl_cross_entropy(space, lam=1, g=None):
    r"""Proximal factory of the convex conj of cross entropy KL divergence.

    Function returning the proximal factory of the convex conjugate of the
    functional F, where F is the cross entropy Kullback-Leibler (KL)
    divergence given by::

        F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x)

    with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative.
    Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator
    function for nonnegativity.

    Parameters
    ----------
    space : `TensorSpace`
        Space X which is the domain of the functional F
    lam : positive float, optional
        Scaling factor.
    g : ``space`` element, optional
        Data term, positive. If None it is taken as the one-element.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    See Also
    --------
    proximal_convex_conj_kl : proximal for related functional

    Notes
    -----
    The functional is given by the expression

    .. math::
        F(x) = \sum_i (x_i \ln(pos(x_i)) - x_i \ln(g_i) + g_i - x_i) +
        I_{x \geq 0}(x)

    The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the
    domain of :math:`F` such that :math:`F` is defined over whole space
    :math:`X`. The non-negativity thresholding :math:`pos` is used to define
    :math:`F` in the real numbers.

    Note that the functional is not well-defined without a prior g. Hence, if g
    is omitted this will be interpreted as if g is equal to the one-element.

    The convex conjugate :math:`F^*` of :math:`F` is

    .. math::
        F^*(p) = \sum_i g_i (exp(p_i) - 1)

    where :math:`p` is the variable dual to :math:`x`.

    The proximal operator of the convex conjugate of :math:`F` is

    .. math::
        \mathrm{prox}_{\sigma (\lambda F)^*}(x) = x - \lambda
        W(\frac{\sigma}{\lambda} g e^{x/\lambda})

    where :math:`\sigma` is the step size-like parameter, :math:`\lambda` is
    the weighting in front of the function :math:`F`, and :math:`W` is the
    Lambert W function (see, for example, the
    `Wikipedia article <https://en.wikipedia.org/wiki/Lambert_W_function>`_).

    For real-valued input x, the Lambert :math:`W` function is defined only for
    :math:`x \geq -1/e`, and it has two branches for values
    :math:`-1/e \leq x < 0`. However, for intended use-cases, where
    :math:`\lambda` and :math:`g` are positive, the argument of :math:`W`
    will always be positive.

    `Wikipedia article on Kullback Leibler divergence
    <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_.
    For further information about the functional, see for example `this article
    <http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_.

    The KL cross entropy functional :math:`F`, described above, is related to
    another functional, also known as KL divergence. This functional is often
    used as data discrepancy term in inverse problems, when data is corrupted
    with Poisson noise. This functional is obtained by changing place of the
    prior and the variable. See the See Also section.
    """
    lam = float(lam)

    # Validate the prior up front so the factory fails fast, not at call time.
    if g is not None and g not in space:
        raise TypeError('{} is not an element of {}'.format(g, space))

    class ProximalConvexConjKLCrossEntropy(Operator):

        """Proximal operator of conjugate of cross entropy KL divergence."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
                Step size-like parameter of the proximal operator.
            """
            self.sigma = float(sigma)
            super(ProximalConvexConjKLCrossEntropy, self).__init__(
                domain=space, range=space, linear=False)

        def _call(self, x, out):
            """Return ``self(x, out=out)``."""
            # Lazy import to improve `import odl` time
            import scipy.special

            if g is None:
                # If g is None, it is taken as the one element
                # Different branches of lambertw are not an issue, see Notes
                lambw = scipy.special.lambertw(
                    (self.sigma / lam) * np.exp(x / lam))
            else:
                # Different branches of lambertw are not an issue, see Notes
                lambw = scipy.special.lambertw(
                    (self.sigma / lam) * g * np.exp(x / lam))

            # lambertw always returns complex output; discard the imaginary
            # part for real-valued spaces.
            # NOTE(review): `np.issubsctype` is deprecated in recent NumPy;
            # `np.issubdtype` looks like the drop-in replacement — confirm.
            if not np.issubsctype(self.domain.dtype, np.complexfloating):
                lambw = lambw.real

            lambw = x.space.element(lambw)

            # out = x - lam * W(...)  (see the proximal formula in Notes)
            out.lincomb(1, x, -lam, lambw)

    return ProximalConvexConjKLCrossEntropy
|
r"""Proximal factory of the convex conj of cross entropy KL divergence.
Function returning the proximal factory of the convex conjugate of the
functional F, where F is the cross entropy Kullback-Leibler (KL)
divergence given by::
F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x)
with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative.
Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator
function for nonnegativity.
Parameters
----------
space : `TensorSpace`
Space X which is the domain of the functional F
lam : positive float, optional
Scaling factor.
g : ``space`` element, optional
Data term, positive. If None it is taken as the one-element.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized.
See Also
--------
proximal_convex_conj_kl : proximal for related functional
Notes
-----
The functional is given by the expression
.. math::
F(x) = \sum_i (x_i \ln(pos(x_i)) - x_i \ln(g_i) + g_i - x_i) +
I_{x \geq 0}(x)
The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the
domain of :math:`F` such that :math:`F` is defined over whole space
:math:`X`. The non-negativity thresholding :math:`pos` is used to define
:math:`F` in the real numbers.
Note that the functional is not well-defined without a prior g. Hence, if g
is omitted this will be interpreted as if g is equal to the one-element.
The convex conjugate :math:`F^*` of :math:`F` is
.. math::
F^*(p) = \sum_i g_i (exp(p_i) - 1)
where :math:`p` is the variable dual to :math:`x`.
The proximal operator of the convex conjugate of :math:`F` is
.. math::
\mathrm{prox}_{\sigma (\lambda F)^*}(x) = x - \lambda
W(\frac{\sigma}{\lambda} g e^{x/\lambda})
where :math:`\sigma` is the step size-like parameter, :math:`\lambda` is
the weighting in front of the function :math:`F`, and :math:`W` is the
Lambert W function (see, for example, the
`Wikipedia article <https://en.wikipedia.org/wiki/Lambert_W_function>`_).
For real-valued input x, the Lambert :math:`W` function is defined only for
:math:`x \geq -1/e`, and it has two branches for values
:math:`-1/e \leq x < 0`. However, for intended use-cases, where
:math:`\lambda` and :math:`g` are positive, the argument of :math:`W`
will always be positive.
`Wikipedia article on Kullback Leibler divergence
<https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_.
For further information about the functional, see for example `this article
<http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_.
The KL cross entropy functional :math:`F`, described above, is related to
another functional, also known as KL divergence. This functional
is often used as data discrepancy term in inverse problems, when data is
corrupted with Poisson noise. This functional is obtained by changing place
of the prior and the variable. See the See Also section.
|
def put_content(self, content):
    """
    Upload ``content`` as the body of a ``PUT`` request (or the configured
    HTTP method, when one is set).

    :raise: An :exc:`requests.RequestException` if it is not 2xx.
    """
    verb = self.method if self.method else 'PUT'
    response = requests.request(verb, self.url, data=content,
                                **self.storage_args)
    if self.raise_for_status:
        response.raise_for_status()
|
Makes a ``PUT`` request with the content in the body.
:raise: An :exc:`requests.RequestException` if it is not 2xx.
|
def broker_metadata(self, broker_id):
    """Look up metadata for a broker node.

    Arguments:
        broker_id (int): node_id for a broker to check

    Returns:
        BrokerMetadata or None if not found
    """
    # Prefer the discovered cluster brokers; fall back to bootstrap brokers.
    known = self._brokers.get(broker_id)
    if known:
        return known
    return self._bootstrap_brokers.get(broker_id)
|
Get BrokerMetadata
Arguments:
broker_id (int): node_id for a broker to check
Returns:
BrokerMetadata or None if not found
|
def show_qt(qt_class, modal=False, onshow_event=None, force_style=False):
    """
    Show and raise a pyqt window, avoiding duplicates: when an instance of
    qt_class already exists among the anchor window's children, it is reused
    and raised instead of creating a second one.

    qt_class should be a class/subclass of QMainWindow, QDialog or any
    top-level widget.

    onshow_event, if given, is called with the window right before it is
    shown on screen; handy with modal windows.

    Returns the qt_class instance.
    """
    win = None
    parent = anchor()
    # Reuse an existing instance when present (the last matching child wins).
    for child in parent.children():
        if isinstance(child, qt_class):
            win = child
    if win is None:
        win = qt_class(parent)
    if force_style:
        set_style(win, not isinstance(win, QtGui.QMenu))
    # Let Qt delete the widget (not just hide it) when it is closed.
    win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    # Place the window at the current mouse position.
    cursor_pos = QtGui.QCursor.pos()
    win.move(cursor_pos.x(), cursor_pos.y())
    if onshow_event:
        onshow_event(win)
    if modal:
        win.exec_()
    else:
        win.show()
        win.raise_()  # ensures the window is on top
    return win
|
Shows and raise a pyqt window ensuring it's not duplicated
(if it's duplicated then raise the old one).
qt_class argument should be a class/subclass of QMainWindow, QDialog or any
top-level widget.
onshow_event provides a way to pass a function to execute before the window
is shown on screen; it should be handy with modal windows.
Returns the qt_class instance.
|
def get_cond_latents_at_level(cond_latents, level, hparams):
    """Returns a single or list of conditional latents at level 'level'.

    Returns None when cond_latents is empty/None or the encoder type is
    not recognised.
    """
    if not cond_latents:
        return None
    encoder = hparams.latent_dist_encoder
    if encoder in ("conv_net", "conv3d_net"):
        # One latent per conditioning frame: pick this level from each.
        return [latent[level] for latent in cond_latents]
    if encoder in ("pointwise", "conv_lstm"):
        return cond_latents[level]
|
Returns a single or list of conditional latents at level 'level'.
|
def timeit(unit='s'):
    """
    Decorator factory that reports a function's elapsed running time.

    :param unit: time unit, one of 's', 'm', 'h' (seconds, minutes, hours)
    """
    def decorator(func):
        @wraps(func)
        def timed(*args, **kwargs):
            started = time.time()
            result = func(*args, **kwargs)
            # Report elapsed time under the decorated function's name.
            _format(unit, time.time() - started, func.__name__ + '()')
            return result
        return timed
    return decorator
|
测试函数耗时
:param unit: 时间单位,有 's','m','h' 可选(seconds,minutes,hours)
|
def clear(self):
    """
    Remove all entries from the cache and delete their data files on disk.

    :return:
    """
    # Delete the backing content file of every cached entry.
    for path in (entry['content'] for entry in self.metadata.values()):
        os.remove(path)
    self.metadata = {}
    # Persist the now-empty metadata.
    self._flush()
|
Remove all entries from the cache and delete all data.
:return:
|
def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column:
    """Bind a column to the model with the given name.

    This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
    attach a new column to an existing model:

    .. code-block:: python

        import bloop.models

        class User(BaseModel):
            id = Column(String, hash_key=True)
            email = Column(String, dynamo_name="e")

        bound = bloop.models.bind_column(User, "email", email)
        assert bound is email

        # rebind with force, and use a copy
        bound = bloop.models.bind_column(User, "email", email, force=True, copy=True)
        assert bound is not email

    If an existing index refers to this column, it will be updated to point to the new column
    using :meth:`~bloop.models.refresh_index`, including recalculating the index projection.
    Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary.

    If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises
    :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
    existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive
    calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
    provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`.

    :param model:
        The model to bind the column to.
    :param name:
        The name to bind the column as. In effect, used for ``setattr(model, name, column)``
    :param column:
        The column to bind to the model.
    :param force:
        Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
    :param recursive:
        Bind to each subclass of this model. Default is False.
    :param copy:
        Use a copy of the column instead of the column directly. Default is False.
    :return:
        The bound column. This is a new column when ``copy`` is True, otherwise the input column.
    """
    if not subclassof(model, BaseModel):
        raise InvalidModel(f"{model} is not a subclass of BaseModel")
    meta = model.Meta
    if copy:
        column = copyfn(column)
    # TODO elif column.model is not None: logger.warning(f"Trying to rebind column bound to {column.model}")
    column._name = name
    safe_repr = unbound_repr(column)

    # Guard against name, dynamo_name collisions; if force=True, unbind any matches
    same_dynamo_name = (
        util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or
        util.index(meta.indexes, "dynamo_name").get(column.dynamo_name)
    )
    same_name = (
        meta.columns_by_name.get(column.name) or
        util.index(meta.indexes, "name").get(column.name)
    )

    if column.hash_key and column.range_key:
        raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.")

    if force:
        if same_name:
            unbind(meta, name=column.name)
        if same_dynamo_name:
            unbind(meta, dynamo_name=column.dynamo_name)
    else:
        if same_name:
            raise InvalidModel(
                f"The column {safe_repr} has the same name as an existing column "
                f"or index {same_name}. Did you mean to bind with force=True?")
        if same_dynamo_name:
            # BUG FIX: previously interpolated `same_name`, which is falsy in
            # this branch and rendered as "None"; report the actual
            # dynamo_name conflict instead.
            raise InvalidModel(
                f"The column {safe_repr} has the same dynamo_name as an existing "
                f"column or index {same_dynamo_name}. Did you mean to bind with force=True?")
        if column.hash_key and meta.hash_key:
            raise InvalidModel(
                f"Tried to bind {safe_repr} but {meta.model} "
                f"already has a different hash_key: {meta.hash_key}")
        if column.range_key and meta.range_key:
            raise InvalidModel(
                f"Tried to bind {safe_repr} but {meta.model} "
                f"already has a different range_key: {meta.range_key}")

    # success!
    # --------------------------------
    column.model = meta.model
    meta.columns.add(column)
    meta.columns_by_name[name] = column
    setattr(meta.model, name, column)

    if column.hash_key:
        meta.hash_key = column
        meta.keys.add(column)
    if column.range_key:
        meta.range_key = column
        meta.keys.add(column)

    # Re-point any index that referenced the (possibly replaced) column.
    try:
        for index in meta.indexes:
            refresh_index(meta, index)
    except KeyError as e:
        raise InvalidModel(
            f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e

    if recursive:
        for subclass in util.walk_subclasses(meta.model):
            try:
                bind_column(subclass, name, column, force=False, recursive=False, copy=True)
            except InvalidModel:
                # Subclasses with their own conflicting definitions keep them.
                pass

    return column
|
Bind a column to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new column to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
bound = bloop.models.bind_column(User, "email", email)
assert bound is email
# rebind with force, and use a copy
bound = bloop.models.bind_column(User, "email", email, force=True, copy=True)
assert bound is not email
If an existing index refers to this column, it will be updated to point to the new column
using :meth:`~bloop.models.refresh_index`, including recalculating the index projection.
Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary.
If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`.
:param model:
The model to bind the column to.
:param name:
The name to bind the column as. In effect, used for ``setattr(model, name, column)``
:param column:
The column to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
Bind to each subclass of this model. Default is False.
:param copy:
Use a copy of the column instead of the column directly. Default is False.
:return:
The bound column. This is a new column when ``copy`` is True, otherwise the input column.
|
def DeserializeExclusiveData(self, reader):
    """
    Deserialize full object.

    Args:
        reader (neo.IO.BinaryReader):

    Raises:
        Exception: If the version is non-zero or if there are no claims.
    """
    self.Type = TransactionType.ClaimTransaction

    # Claim transactions only exist at version 0.
    if self.Version != 0:
        raise Exception('Format Exception')

    claim_count = reader.ReadVarInt()
    refs = []
    for _ in range(claim_count):
        ref = CoinReference()
        ref.Deserialize(reader)
        refs.append(ref)
    self.Claims = refs

    # A claim transaction without claims is malformed.
    if not self.Claims:
        raise Exception('Format Exception')
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Raises:
Exception: If the transaction type is incorrect or if there are no claims.
|
def install(*pkgs, **kwargs):
    '''
    Installs a single or multiple packages via nix

    :type pkgs: list(str)
    :param pkgs:
        packages to install
    :param bool attributes:
        Pass the list of packages or single package as attributes, not package names.
        default: False

    :return: Installed packages. Example element: ``gcc-3.3.2``
    :rtype: list(str)

    .. code-block:: bash

        salt '*' nix.install package [package2 ...]
        salt '*' nix.install attributes=True attr.name [attr.name2 ...]
    '''
    attributes = kwargs.get('attributes', False)

    if not pkgs:
        # Fixed message: "Plese ... to upgrade" -> "Please ... to install"
        return 'Please specify a package or packages to install'

    cmd = _quietnix()
    cmd.append('--install')

    # Use the flag read above instead of re-reading kwargs.
    if attributes:
        cmd.extend(_zip_flatten('--attr', pkgs))
    else:
        cmd.extend(pkgs)

    out = _run(cmd)

    # nix reports each installed package on stderr as "installing '<pkg>'";
    # collect the names after the leading "installing" token.
    installs = list(itertools.chain.from_iterable(
        [s.split()[1:] for s in out['stderr'].splitlines()
         if s.startswith('installing')]
    ))

    return [_strip_quotes(s) for s in installs]
|
Installs a single or multiple packages via nix
:type pkgs: list(str)
:param pkgs:
packages to update
:param bool attributes:
Pass the list of packages or single package as attributes, not package names.
default: False
:return: Installed packages. Example element: ``gcc-3.3.2``
:rtype: list(str)
.. code-block:: bash
salt '*' nix.install package [package2 ...]
salt '*' nix.install attributes=True attr.name [attr.name2 ...]
|
def inst(self, *instructions):
    """
    Mutates the Program object by appending new instructions.

    This function accepts a number of different valid forms, e.g.

        >>> p = Program()
        >>> p.inst(H(0)) # A single instruction
        >>> p.inst(H(0), H(1)) # Multiple instructions
        >>> p.inst([H(0), H(1)]) # A list of instructions
        >>> p.inst(H(i) for i in range(4)) # A generator of instructions
        >>> p.inst(("H", 1)) # A tuple representing an instruction
        >>> p.inst("H 0") # A string representing an instruction
        >>> q = Program()
        >>> p.inst(q) # Another program

    It can also be chained:
        >>> p = Program()
        >>> p.inst(H(0)).inst(H(1))

    :param instructions: A list of Instruction objects, e.g. Gates
    :return: self for method chaining
    """
    for instruction in instructions:
        # Lists and generators are flattened by recursing on each element.
        if isinstance(instruction, list):
            self.inst(*instruction)
        elif isinstance(instruction, types.GeneratorType):
            self.inst(*instruction)
        elif isinstance(instruction, tuple):
            # Tuple form: ("NAME", ...) — either a MEASURE or a gate.
            if len(instruction) == 0:
                raise ValueError("tuple should have at least one element")
            elif len(instruction) == 1:
                self.inst(instruction[0])
            else:
                op = instruction[0]
                if op == "MEASURE":
                    # ("MEASURE", qubit) or ("MEASURE", qubit, classical_reg)
                    if len(instruction) == 2:
                        self.measure(instruction[1], None)
                    else:
                        self.measure(instruction[1], instruction[2])
                else:
                    params = []
                    possible_params = instruction[1]
                    rest = instruction[2:]
                    # The element after the gate name is a parameter list if
                    # it is a list; otherwise it is the first qubit argument.
                    if isinstance(possible_params, list):
                        params = possible_params
                    else:
                        rest = [possible_params] + list(rest)
                    self.gate(op, params, rest)
        elif isinstance(instruction, string_types):
            # Raw Quil text: parse it, then recurse on the parsed result.
            self.inst(run_parser(instruction.strip()))
        elif isinstance(instruction, Program):
            if id(self) == id(instruction):
                raise ValueError("Nesting a program inside itself is not supported")
            # Merge another program: gate definitions first, then instructions.
            for defgate in instruction._defined_gates:
                self.inst(defgate)
            for instr in instruction._instructions:
                self.inst(instr)

        # Implementation note: these two base cases are the only ones which modify the program
        elif isinstance(instruction, DefGate):
            defined_gate_names = [gate.name for gate in self._defined_gates]
            if instruction.name in defined_gate_names:
                warnings.warn("Gate {} has already been defined in this program"
                              .format(instruction.name))

            self._defined_gates.append(instruction)
        elif isinstance(instruction, AbstractInstruction):
            self._instructions.append(instruction)
            # Invalidate the cached synthesized form; rebuilt lazily later.
            self._synthesized_instructions = None
        else:
            raise TypeError("Invalid instruction: {}".format(instruction))

    return self
|
Mutates the Program object by appending new instructions.
This function accepts a number of different valid forms, e.g.
>>> p = Program()
>>> p.inst(H(0)) # A single instruction
>>> p.inst(H(0), H(1)) # Multiple instructions
>>> p.inst([H(0), H(1)]) # A list of instructions
>>> p.inst(H(i) for i in range(4)) # A generator of instructions
>>> p.inst(("H", 1)) # A tuple representing an instruction
>>> p.inst("H 0") # A string representing an instruction
>>> q = Program()
>>> p.inst(q) # Another program
It can also be chained:
>>> p = Program()
>>> p.inst(H(0)).inst(H(1))
:param instructions: A list of Instruction objects, e.g. Gates
:return: self for method chaining
|
def reload(self):
    """
    Re-read the configuration from disk.

    Re-opens the underlying file (closing the old handle first if it is
    still open) and then loads it via self.load().
    """
    previous = self.fd
    if not previous.closed:
        previous.close()
    self.fd = open(previous.name, 'r')
    self.load()
|
Automatically reloads the config file.
This is just an alias for self.load().
|
def cressman_point(sq_dist, values, radius):
    r"""Generate a Cressman interpolation value for a point.

    The calculated value is based on the given distances and search radius.

    Parameters
    ----------
    sq_dist: (N, ) ndarray
        Squared distance between observations and grid point
    values: (N, ) ndarray
        Observation values in same order as sq_dist
    radius: float
        Maximum distance to search for observations to use for
        interpolation.

    Returns
    -------
    value: float
        Interpolation value for grid point.
    """
    weights = tools.cressman_weights(sq_dist, radius)
    total = np.sum(weights)
    # Weighted average of the observations, weights normalized to sum to 1.
    interpolated = 0
    for wt, val in zip(weights, values):
        interpolated += val * (wt / total)
    return interpolated
|
r"""Generate a Cressman interpolation value for a point.
The calculated value is based on the given distances and search radius.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distance between observations and grid point
values: (N, ) ndarray
Observation values in same order as sq_dist
radius: float
Maximum distance to search for observations to use for
interpolation.
Returns
-------
value: float
Interpolation value for grid point.
|
def set_dependent_orders(
    self,
    accountID,
    tradeSpecifier,
    **kwargs
):
    """
    Create, replace and cancel a Trade's dependent Orders (Take Profit,
    Stop Loss and Trailing Stop Loss) through the Trade itself

    Args:
        accountID:
            Account Identifier
        tradeSpecifier:
            Specifier for the Trade
        takeProfit:
            The specification of the Take Profit to create/modify/cancel.
            If takeProfit is set to null, the Take Profit Order will be
            cancelled if it exists. If takeProfit is not provided, the
            existing Take Profit Order will not be modified. If a sub-
            field of takeProfit is not specified, that field will be set to
            a default value on create, and be inherited by the replacing
            order on modify.
        stopLoss:
            The specification of the Stop Loss to create/modify/cancel. If
            stopLoss is set to null, the Stop Loss Order will be cancelled
            if it exists. If stopLoss is not provided, the existing Stop
            Loss Order will not be modified. If a sub-field of stopLoss is
            not specified, that field will be set to a default value on
            create, and be inherited by the replacing order on modify.
        trailingStopLoss:
            The specification of the Trailing Stop Loss to
            create/modify/cancel. If trailingStopLoss is set to null, the
            Trailing Stop Loss Order will be cancelled if it exists. If
            trailingStopLoss is not provided, the existing Trailing Stop
            Loss Order will not be modified. If a sub-field of
            trailingStopLoss is not specified, that field will be set to a
            default value on create, and be inherited by the replacing
            order on modify.

    Returns:
        v20.response.Response containing the results from submitting the
        request
    """
    # Build the PUT endpoint for this trade's dependent orders.
    request = Request(
        'PUT',
        '/v3/accounts/{accountID}/trades/{tradeSpecifier}/orders'
    )

    request.set_path_param(
        'accountID',
        accountID
    )

    request.set_path_param(
        'tradeSpecifier',
        tradeSpecifier
    )

    # Only forward the dependent-order fields the caller explicitly passed;
    # omitted fields leave the corresponding orders untouched server-side.
    body = EntityDict()

    if 'takeProfit' in kwargs:
        body.set('takeProfit', kwargs['takeProfit'])

    if 'stopLoss' in kwargs:
        body.set('stopLoss', kwargs['stopLoss'])

    if 'trailingStopLoss' in kwargs:
        body.set('trailingStopLoss', kwargs['trailingStopLoss'])

    request.set_body_dict(body.dict)

    response = self.ctx.request(request)

    # Responses without a JSON body are returned untouched.
    if response.content_type is None:
        return response

    if not response.content_type.startswith("application/json"):
        return response

    jbody = json.loads(response.raw_body)

    parsed_body = {}

    #
    # Parse responses as defined by the API specification
    #
    if str(response.status) == "200":
        # 200: success — each present field is parsed into its v20
        # transaction type.
        if jbody.get('takeProfitOrderCancelTransaction') is not None:
            parsed_body['takeProfitOrderCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['takeProfitOrderCancelTransaction'],
                    self.ctx
                )

        if jbody.get('takeProfitOrderTransaction') is not None:
            parsed_body['takeProfitOrderTransaction'] = \
                self.ctx.transaction.TakeProfitOrderTransaction.from_dict(
                    jbody['takeProfitOrderTransaction'],
                    self.ctx
                )

        if jbody.get('takeProfitOrderFillTransaction') is not None:
            parsed_body['takeProfitOrderFillTransaction'] = \
                self.ctx.transaction.OrderFillTransaction.from_dict(
                    jbody['takeProfitOrderFillTransaction'],
                    self.ctx
                )

        if jbody.get('takeProfitOrderCreatedCancelTransaction') is not None:
            parsed_body['takeProfitOrderCreatedCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['takeProfitOrderCreatedCancelTransaction'],
                    self.ctx
                )

        if jbody.get('stopLossOrderCancelTransaction') is not None:
            parsed_body['stopLossOrderCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['stopLossOrderCancelTransaction'],
                    self.ctx
                )

        if jbody.get('stopLossOrderTransaction') is not None:
            parsed_body['stopLossOrderTransaction'] = \
                self.ctx.transaction.StopLossOrderTransaction.from_dict(
                    jbody['stopLossOrderTransaction'],
                    self.ctx
                )

        if jbody.get('stopLossOrderFillTransaction') is not None:
            parsed_body['stopLossOrderFillTransaction'] = \
                self.ctx.transaction.OrderFillTransaction.from_dict(
                    jbody['stopLossOrderFillTransaction'],
                    self.ctx
                )

        if jbody.get('stopLossOrderCreatedCancelTransaction') is not None:
            parsed_body['stopLossOrderCreatedCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['stopLossOrderCreatedCancelTransaction'],
                    self.ctx
                )

        if jbody.get('trailingStopLossOrderCancelTransaction') is not None:
            parsed_body['trailingStopLossOrderCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['trailingStopLossOrderCancelTransaction'],
                    self.ctx
                )

        if jbody.get('trailingStopLossOrderTransaction') is not None:
            parsed_body['trailingStopLossOrderTransaction'] = \
                self.ctx.transaction.TrailingStopLossOrderTransaction.from_dict(
                    jbody['trailingStopLossOrderTransaction'],
                    self.ctx
                )

        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = \
                jbody.get('relatedTransactionIDs')

        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = \
                jbody.get('lastTransactionID')

    elif str(response.status) == "400":
        # 400: one or more of the order specifications was rejected.
        if jbody.get('takeProfitOrderCancelRejectTransaction') is not None:
            parsed_body['takeProfitOrderCancelRejectTransaction'] = \
                self.ctx.transaction.OrderCancelRejectTransaction.from_dict(
                    jbody['takeProfitOrderCancelRejectTransaction'],
                    self.ctx
                )

        if jbody.get('takeProfitOrderRejectTransaction') is not None:
            parsed_body['takeProfitOrderRejectTransaction'] = \
                self.ctx.transaction.TakeProfitOrderRejectTransaction.from_dict(
                    jbody['takeProfitOrderRejectTransaction'],
                    self.ctx
                )

        if jbody.get('stopLossOrderCancelRejectTransaction') is not None:
            parsed_body['stopLossOrderCancelRejectTransaction'] = \
                self.ctx.transaction.OrderCancelRejectTransaction.from_dict(
                    jbody['stopLossOrderCancelRejectTransaction'],
                    self.ctx
                )

        if jbody.get('stopLossOrderRejectTransaction') is not None:
            parsed_body['stopLossOrderRejectTransaction'] = \
                self.ctx.transaction.StopLossOrderRejectTransaction.from_dict(
                    jbody['stopLossOrderRejectTransaction'],
                    self.ctx
                )

        if jbody.get('trailingStopLossOrderCancelRejectTransaction') is not None:
            parsed_body['trailingStopLossOrderCancelRejectTransaction'] = \
                self.ctx.transaction.OrderCancelRejectTransaction.from_dict(
                    jbody['trailingStopLossOrderCancelRejectTransaction'],
                    self.ctx
                )

        if jbody.get('trailingStopLossOrderRejectTransaction') is not None:
            parsed_body['trailingStopLossOrderRejectTransaction'] = \
                self.ctx.transaction.TrailingStopLossOrderRejectTransaction.from_dict(
                    jbody['trailingStopLossOrderRejectTransaction'],
                    self.ctx
                )

        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = \
                jbody.get('lastTransactionID')

        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = \
                jbody.get('relatedTransactionIDs')

        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    # 401/404/405 responses only carry an error code and message.
    elif str(response.status) == "401":
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    elif str(response.status) == "404":
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    elif str(response.status) == "405":
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    #
    # Unexpected response status
    #
    else:
        parsed_body = jbody

    response.body = parsed_body

    return response
|
Create, replace and cancel a Trade's dependent Orders (Take Profit,
Stop Loss and Trailing Stop Loss) through the Trade itself
Args:
accountID:
Account Identifier
tradeSpecifier:
Specifier for the Trade
takeProfit:
The specification of the Take Profit to create/modify/cancel.
If takeProfit is set to null, the Take Profit Order will be
cancelled if it exists. If takeProfit is not provided, the
existing Take Profit Order will not be modified. If a sub-
field of takeProfit is not specified, that field will be set to
a default value on create, and be inherited by the replacing
order on modify.
stopLoss:
The specification of the Stop Loss to create/modify/cancel. If
stopLoss is set to null, the Stop Loss Order will be cancelled
if it exists. If stopLoss is not provided, the existing Stop
Loss Order will not be modified. If a sub-field of stopLoss is
not specified, that field will be set to a default value on
create, and be inherited by the replacing order on modify.
trailingStopLoss:
The specification of the Trailing Stop Loss to
create/modify/cancel. If trailingStopLoss is set to null, the
Trailing Stop Loss Order will be cancelled if it exists. If
trailingStopLoss is not provided, the existing Trailing Stop
Loss Order will not be modified. If a sub-field of
trailingStopLoss is not specified, that field will be set to a
default value on create, and be inherited by the replacing
order on modify.
Returns:
v20.response.Response containing the results from submitting the
request
|
def _remove_unicode_encoding(xml_file):
    '''
    attempts to remove the "encoding='unicode'" from an xml file
    as lxml does not support that on a windows node currently
    see issue #38100
    '''
    with salt.utils.files.fopen(xml_file, 'rb') as handle:
        raw_content = handle.read()
    # The file is UTF-16 encoded; strip the first offending encoding declaration.
    modified_xml = re.sub(r' encoding=[\'"]+unicode[\'"]+', '',
                          raw_content.decode('utf-16'), count=1)
    return lxml.etree.parse(six.StringIO(modified_xml))
|
attempts to remove the "encoding='unicode'" from an xml file
as lxml does not support that on a windows node currently
see issue #38100
|
def _kl_half_normal_half_normal(a, b, name=None):
    """Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.

    Args:
      a: Instance of a `HalfNormal` distribution object.
      b: Instance of a `HalfNormal` distribution object.
      name: (optional) Name to use for created operations.
        default is "kl_half_normal_half_normal".

    Returns:
      Batchwise KL(a || b)
    """
    with tf.name_scope(name or "kl_half_normal_half_normal"):
        # Consistent with
        # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119
        log_scale_ratio = tf.math.log(b.scale) - tf.math.log(a.scale)
        variance_term = (a.scale**2 - b.scale**2) / (2 * b.scale**2)
        return log_scale_ratio + variance_term
|
Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
|
def _evaluate_rhs(cls, funcs, nodes, problem):
    """
    Compute the value of the right-hand side of the system of ODEs.

    Parameters
    ----------
    funcs : list(function)
        Basis functions approximating the solution.
    nodes : numpy.ndarray
        Points at which the right-hand side is evaluated.
    problem : TwoPointBVPLike

    Returns
    -------
    evaluated_rhs : list(float)
    """
    evaluated_funcs = cls._evaluate_functions(funcs, nodes)
    return problem.rhs(nodes, *evaluated_funcs, **problem.params)
|
Compute the value of the right-hand side of the system of ODEs.
Parameters
----------
basis_funcs : list(function)
nodes : numpy.ndarray
problem : TwoPointBVPLike
Returns
-------
evaluated_rhs : list(float)
|
def transfer(self, user):
    """Transfers app to given username's account.

    :param user: username of the new owner.
    :return: True when the API call succeeded (``response.ok``).
    """
    response = self._h._http_resource(
        method='PUT',
        resource=('apps', self.name),
        data={'app[transfer_owner]': user},
    )
    return response.ok
|
Transfers app to given username's account.
|
def sensorupdate(self, data):
    """
    Given a dict of sensors and values, updates those sensors with the
    values in Scratch.

    :param data: mapping of sensor name -> value; keys and values are
        str()-converted and escaped before being sent.
    :raises TypeError: if ``data`` is not a dict.
    """
    if not isinstance(data, dict):
        raise TypeError('Expected a dict')
    msg = 'sensor-update '
    # Iterate key/value pairs directly instead of re-indexing by key.
    for key, value in data.items():
        msg += '"%s" "%s" ' % (self._escape(str(key)),
                               self._escape(str(value)))
    self._send(msg)
|
Given a dict of sensors and values, updates those sensors with the
values in Scratch.
|
def SkipAhead(self, file_object, number_of_characters):
    """Skips ahead a number of characters.

    Args:
      file_object (dfvfs.FileIO): file-like object.
      number_of_characters (int): number of characters.
    """
    lines_size = len(self.lines)
    # Consume whole buffered chunks until the remaining skip fits inside
    # the current buffer (self.lines).
    while number_of_characters >= lines_size:
        number_of_characters -= lines_size
        # Discard the current buffer and refill it from the file.
        self.lines = ''
        self.ReadLines(file_object)
        lines_size = len(self.lines)
        if lines_size == 0:
            # Nothing more could be read: end of file reached before the
            # skip was exhausted.
            return
    # Drop the (remaining) skipped prefix from the buffer.
    self.lines = self.lines[number_of_characters:]
|
Skips ahead a number of characters.
Args:
file_object (dfvfs.FileIO): file-like object.
number_of_characters (int): number of characters.
|
def seek_in_frame(self, pos, *args, **kwargs):
    """
    Seeks relative to the total offset of the current contextual frames.
    """
    absolute_position = self._total_offset + pos
    super().seek(absolute_position, *args, **kwargs)
|
Seeks relative to the total offset of the current contextual frames.
|
def get_thumbnail(original, size, **options):
    """
    Creates or gets an already created thumbnail for the given image with the given size and
    options.

    :param original: File-path, url or base64-encoded string of the image that you want a
                     thumbnail.
    :param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
                 ``x200``.
    :param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
    :param force: If set to ``True`` the thumbnail will be created even if it exists before.
    :param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
                    saving the thumbnail.
    :param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
                     up if necessary.
    :param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails.
                      Supports all values supported by pillow. In other engines there is a best
                      effort translation from pillow modes to the modes supported by the current
                      engine.
    :param format: Overrides the format the thumbnail will be saved in. This will override both the
                   detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
    :return: A Thumbnail object
    """
    engine = get_engine()
    cache = get_cache_backend()
    original = SourceFile(original)
    # 'crop' is read before evaluate_options since it also feeds the cache key.
    crop = options.get('crop', None)
    options = engine.evaluate_options(options)
    thumbnail_name = generate_filename(original, size, crop)
    if settings.THUMBNAIL_DUMMY:
        # Dummy mode: return a placeholder thumbnail; nothing is cached.
        engine = DummyEngine()
        return engine.get_thumbnail(thumbnail_name, engine.parse_size(size), crop, options)
    cached = cache.get(thumbnail_name)
    force = options is not None and 'force' in options and options['force']
    if not force and cached:
        # Cache hit and no forced regeneration requested.
        return cached
    thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
    if force or not thumbnail.exists:
        size = engine.parse_size(size)
        thumbnail.image = engine.get_thumbnail(original, size, crop, options)
        thumbnail.save(options)
        # Also render the configured alternative resolutions (e.g. 2x variants).
        for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
            resolution_size = engine.calculate_alternative_resolution_size(resolution, size)
            image = engine.get_thumbnail(original, resolution_size, crop, options)
            thumbnail.save_alternative_resolution(resolution, image, options)
    cache.set(thumbnail)
    return thumbnail
|
Creates or gets an already created thumbnail for the given image with the given size and
options.
    :param original: File-path, url or base64-encoded string of the image that you want a
thumbnail.
:param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
``x200``.
:param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
:param force: If set to ``True`` the thumbnail will be created even if it exists before.
:param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
saving the thumbnail.
:param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
up if necessary.
:param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails.
Supports all values supported by pillow. In other engines there is a best
effort translation from pillow modes to the modes supported by the current
engine.
:param format: Overrides the format the thumbnail will be saved in. This will override both the
detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
:return: A Thumbnail object
|
def new(self):
    # type: () -> None
    '''
    A method to create a new UDF Implementation Use Volume Descriptor Implementation Use field.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor Implementation Use field already initialized')

    self.char_set = _unicodecharset()
    self.log_vol_ident = _ostaunicode_zero_pad('CDROM', 128)
    # The three LV info fields are 36 zero bytes each (bytes are immutable,
    # so sharing the object is safe).
    self.lv_info1 = self.lv_info2 = self.lv_info3 = b'\x00' * 36
    self.impl_ident = UDFEntityID()
    self.impl_ident.new(0, b'*pycdlib', b'')
    self.impl_use = b'\x00' * 128
    self._initialized = True
|
A method to create a new UDF Implementation Use Volume Descriptor Implementation Use field.
Parameters:
None:
Returns:
Nothing.
|
def plotlyviz(
    scomplex,
    colorscale=None,
    title="Kepler Mapper",
    graph_layout="kk",
    color_function=None,
    color_function_name=None,
    dashboard=False,
    graph_data=False,
    factor_size=3,
    edge_linewidth=1.5,
    node_linecolor="rgb(200,200,200)",
    width=600,
    height=500,
    bgcolor="rgba(240, 240, 240, 0.95)",
    left=10,
    bottom=35,
    summary_height=300,
    summary_width=600,
    summary_left=20,
    summary_right=20,
    hist_left=25,
    hist_right=25,
    member_textbox_width=800,
    filename=None,
):
    """
    Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.

    The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
    to the colorbar (the name of the color function, if any),
    and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:

    To display more info on the generated kmapper-graph, define two more FigureWidget(s):
    the global node distribution figure, and a dummy figure
    that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.

    A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
    we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.

    Parameters
    -----------
    scomplex: dict
        Simplicial complex is the output from the KeplerMapper `map` method.
    title: str
        Title of output graphic
    graph_layout: igraph layout;
        recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
    colorscale:
         Plotly colorscale(colormap) to color graph nodes
    dashboard: bool, default is False
        If true, display complete dashboard of node information
    graph_data: bool, default is False
        If true, display graph metadata
    factor_size: double, default is 3
        a factor for the node size
    edge_linewidth : double, default is 1.5
    node_linecolor: color str, default is "rgb(200,200,200)"
    width: int, default is 600,
    height: int, default is 500,
    bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
    left: int, default is 10,
    bottom: int, default is 35,
    summary_height: int, default is 300,
    summary_width: int, default is 600,
    summary_left: int, default is 20,
    summary_right: int, default is 20,
    hist_left: int, default is 25,
    hist_right: int, default is 25,
    member_textbox_width: int, default is 800,
    filename: str, default is None
        if filename is given, the graphic will be saved to that file.

    Returns
    ---------
    result: plotly.FigureWidget
        A FigureWidget that can be shown or edited. See the Plotly Demo notebook for examples of use.
    """
    # Fall back to the package-level default colorscale.
    if not colorscale:
        colorscale = default_colorscale
    # Convert the simplicial complex into a plotly-friendly graph plus metadata.
    kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
        scomplex,
        colorscale=colorscale,
        color_function=color_function,
        color_function_name=color_function_name,
    )
    annotation = get_kmgraph_meta(mapper_summary)
    plgraph_data = plotly_graph(
        kmgraph,
        graph_layout=graph_layout,
        colorscale=colorscale,
        factor_size=factor_size,
        edge_linewidth=edge_linewidth,
        node_linecolor=node_linecolor,
    )
    layout = plot_layout(
        title=title,
        width=width,
        height=height,
        annotation_text=annotation,
        bgcolor=bgcolor,
        left=left,
        bottom=bottom,
    )
    result = go.FigureWidget(data=plgraph_data, layout=layout)
    if color_function_name:
        # batch_update defers widget redraws until both changes are applied.
        with result.batch_update():
            result.data[1].marker.colorbar.title = color_function_name
            result.data[1].marker.colorbar.titlefont.size = 10
    if dashboard or graph_data:
        # Build the auxiliary figures (node histogram + summary panel) and
        # wire them to hover events of the main graph widget.
        fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
        fw_summary = summary_fig(
            mapper_summary,
            width=summary_width,
            height=summary_height,
            left=summary_left,
            right=summary_right,
        )
        fw_graph = result
        result = hovering_widgets(
            kmgraph, fw_graph, member_textbox_width=member_textbox_width
        )
        if graph_data:
            result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
    if filename:
        # NOTE(review): saving uses plotly.io; requires the orca/kaleido backend.
        pio.write_image(result, filename)
    return result
|
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
        A FigureWidget that can be shown or edited. See the Plotly Demo notebook for examples of use.
|
def redefined_by_decorator(node):
    """return True if the object is a method redefined via decorator.

    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if not node.decorators:
        return False
    # A decorator like ``x.setter`` is an Attribute whose expression name
    # matches the function's own name.
    return any(
        isinstance(decorator, astroid.Attribute)
        and getattr(decorator.expr, "name", None) == node.name
        for decorator in node.decorators.nodes
    )
|
return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
|
def nextLunarEclipse(date):
    """ Returns the Datetime of the maximum phase of the
    next global lunar eclipse.
    """
    eclipse_info = swe.lunarEclipseGlobal(date.jd, backward=False)
    maximum_jd = eclipse_info['maximum']
    return Datetime.fromJD(maximum_jd, date.utcoffset)
|
Returns the Datetime of the maximum phase of the
next global lunar eclipse.
|
def recurse_taxonomy_map(tax_id_map, tax_id, parent=False):
    """
    Takes the output dict from make_taxonomy_map and an input tax_id
    and recurses either up or down through the tree to get /all/ children
    (or parents) of the given tax_id.

    :param tax_id_map: mapping of tax_id -> iterable of direct child tax_ids.
    :param tax_id: the tax_id whose subtree (or ancestry) is wanted.
    :param parent: if True, walk upward instead (not implemented yet;
        returns None, matching the previous behavior).
    :returns: list of ``tax_id`` plus all transitively reachable children
        (deduplicated, in no particular order), or None when ``parent``.
    """
    if parent:
        # TODO: allow filtering on tax_id and its parents, too
        return None
    # Iterative depth-first walk with a set for O(1) membership tests.
    # The previous recursive version used a list for ``visited`` (O(n)
    # lookups per node) and could hit the recursion limit on deep trees.
    visited = {tax_id}
    stack = [tax_id]
    while stack:
        current = stack.pop()
        # Leaf nodes are simply absent from the map.
        for child in tax_id_map.get(current, ()):
            if child not in visited:
                visited.add(child)
                stack.append(child)
    return list(visited)
|
Takes the output dict from make_taxonomy_map and an input tax_id
and recurses either up or down through the tree to get /all/ children
(or parents) of the given tax_id.
|
def escape_latex(text):
    r"""Escape characters of given text.

    This function takes the given text and escapes characters
    that have a special meaning in LaTeX: # $ % ^ & _ { } ~ \

    :param text: UTF-8 encoded bytes to escape.
    :returns: UTF-8 encoded bytes with LaTeX special characters escaped.
    """
    # ``bytes.decode`` already yields a text (unicode) string on both
    # Python 2 and 3; the former ``unicode(...)`` wrapper was redundant on
    # Python 2 and raised NameError on Python 3.
    text = text.decode('utf-8')
    CHARS = {
        '&': r'\&',
        '%': r'\%',
        '$': r'\$',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '~': r'\~{}',
        '^': r'\^{}',
        '\\': r'\textbackslash{}',
    }
    escaped = "".join(CHARS.get(char, char) for char in text)
    return escaped.encode('utf-8')
|
r"""Escape characters of given text.
This function takes the given text and escapes characters
that have a special meaning in LaTeX: # $ % ^ & _ { } ~ \
|
def import_sequence_flow_to_graph(diagram_graph, sequence_flows, process_id, flow_element):
    """
    Adds a new edge to graph and a record to sequence_flows dictionary.
    Input parameter is object of class xml.dom.Element.
    Edges are identified by pair of sourceRef and targetRef attributes of BPMNFlow element. We also
    provide a dictionary, that maps sequenceFlow ID attribute with its sourceRef and targetRef.
    Method adds basic attributes of sequenceFlow element to edge. Those elements are:

    - id - added as edge attribute, we assume that this is a required value,
    - name - optional attribute, empty string by default.

    :param diagram_graph: NetworkX graph representing a BPMN process diagram,
    :param sequence_flows: dictionary (associative list) of sequence flows existing in diagram.
        Key attribute is sequenceFlow ID, value is a dictionary consisting of three key-value pairs: "name" (sequence
        flow name), "sourceRef" (ID of node, that is a flow source) and "targetRef" (ID of node, that is a flow target),
    :param process_id: string object, representing an ID of process element,
    :param flow_element: object representing a BPMN XML 'sequenceFlow' element.
    """
    flow_id = flow_element.getAttribute(consts.Consts.id)
    # 'name' is optional per the BPMN schema; default to an empty string.
    name = flow_element.getAttribute(consts.Consts.name) if flow_element.hasAttribute(consts.Consts.name) else ""
    source_ref = flow_element.getAttribute(consts.Consts.source_ref)
    target_ref = flow_element.getAttribute(consts.Consts.target_ref)
    sequence_flows[flow_id] = {consts.Consts.name: name, consts.Consts.source_ref: source_ref,
                               consts.Consts.target_ref: target_ref}
    # Edges are keyed by (source, target); flow attributes are stored on the edge.
    diagram_graph.add_edge(source_ref, target_ref)
    diagram_graph[source_ref][target_ref][consts.Consts.id] = flow_id
    diagram_graph[source_ref][target_ref][consts.Consts.process] = process_id
    diagram_graph[source_ref][target_ref][consts.Consts.name] = name
    diagram_graph[source_ref][target_ref][consts.Consts.source_ref] = source_ref
    diagram_graph[source_ref][target_ref][consts.Consts.target_ref] = target_ref
    # An optional conditionExpression child marks this as a conditional flow.
    for element in utils.BpmnImportUtils.iterate_elements(flow_element):
        if element.nodeType != element.TEXT_NODE:
            tag_name = utils.BpmnImportUtils.remove_namespace_from_tag_name(element.tagName)
            if tag_name == consts.Consts.condition_expression:
                condition_expression = element.firstChild.nodeValue
                diagram_graph[source_ref][target_ref][consts.Consts.condition_expression] = {
                    consts.Consts.id: element.getAttribute(consts.Consts.id),
                    consts.Consts.condition_expression: condition_expression
                }
    '''
    # Add incoming / outgoing nodes to corresponding elements. May be redundant action since this information is
    added when processing nodes, but listing incoming / outgoing nodes under node element is optional - this way
    we can make sure this info will be imported.
    '''
    if consts.Consts.outgoing_flow not in diagram_graph.node[source_ref]:
        diagram_graph.node[source_ref][consts.Consts.outgoing_flow] = []
    outgoing_list = diagram_graph.node[source_ref][consts.Consts.outgoing_flow]
    if flow_id not in outgoing_list:
        outgoing_list.append(flow_id)
    if consts.Consts.incoming_flow not in diagram_graph.node[target_ref]:
        diagram_graph.node[target_ref][consts.Consts.incoming_flow] = []
    incoming_list = diagram_graph.node[target_ref][consts.Consts.incoming_flow]
    if flow_id not in incoming_list:
        incoming_list.append(flow_id)
|
Adds a new edge to graph and a record to sequence_flows dictionary.
Input parameter is object of class xml.dom.Element.
Edges are identified by pair of sourceRef and targetRef attributes of BPMNFlow element. We also
provide a dictionary, that maps sequenceFlow ID attribute with its sourceRef and targetRef.
Method adds basic attributes of sequenceFlow element to edge. Those elements are:
- id - added as edge attribute, we assume that this is a required value,
- name - optional attribute, empty string by default.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param sequence_flows: dictionary (associative list) of sequence flows existing in diagram.
    Key attribute is sequenceFlow ID, value is a dictionary consisting of three key-value pairs: "name" (sequence
flow name), "sourceRef" (ID of node, that is a flow source) and "targetRef" (ID of node, that is a flow target),
:param process_id: string object, representing an ID of process element,
:param flow_element: object representing a BPMN XML 'sequenceFlow' element.
|
def collage(img_spec,
            num_rows=2,
            num_cols=6,
            rescale_method='global',
            cmap='gray',
            annot=None,
            padding=5,
            bkground_thresh=None,
            output_path=None,
            figsize=None,
            **kwargs):
    """Produces a collage of various slices from different orientations in the given 3D image.

    Parameters
    ----------
    img_spec : path or array-like
        Image to display; loaded via ``read_image``.
    num_rows, num_cols : int
        Grid shape used for each of the three orientations (stacked vertically).
    rescale_method : str
        Intensity rescaling strategy passed to ``check_rescaling_collage``.
    cmap : str
        Matplotlib colormap name.
    annot : str, optional
        Figure-level annotation text displayed as a suptitle.
    padding : int
        Amount cropped around the image before display.
    bkground_thresh : optional
        Background threshold forwarded to ``read_image``.
    output_path : str, optional
        If given, the figure is saved to ``output_path + '.png'``
        (spaces replaced with underscores).
    figsize : sequence, optional
        Figure size; defaults to a size derived from the grid shape.
    kwargs : dict
        Accepted but currently unused.  # NOTE(review): confirm intent

    Returns
    -------
    fig : matplotlib Figure
    """
    num_rows, num_cols, padding = check_params(num_rows, num_cols, padding)
    img = read_image(img_spec, bkground_thresh=bkground_thresh)
    img = crop_image(img, padding)
    # A single global (min, max) is used for all slices so intensities are comparable.
    img, (min_value, max_value) = check_rescaling_collage(img, rescale_method,
                                                          return_extrema=True)
    num_slices_per_view = num_rows * num_cols
    slices = pick_slices(img, num_slices_per_view)
    plt.style.use('dark_background')
    # One row-group of the grid per anatomical axis.
    num_axes = 3
    if figsize is None:
        figsize = [3 * num_axes * num_rows, 3 * num_cols]
    fig, ax = plt.subplots(num_axes * num_rows, num_cols, figsize=figsize)
    # displaying some annotation text if provided
    if annot is not None:
        fig.suptitle(annot, backgroundcolor='black', color='g')
    display_params = dict(interpolation='none', cmap=cmap,
                          aspect='equal', origin='lower',
                          vmin=min_value, vmax=max_value)
    ax = ax.flatten()
    ax_counter = 0
    for dim_index in range(3):
        for slice_num in slices[dim_index]:
            plt.sca(ax[ax_counter])
            ax_counter = ax_counter + 1
            slice1 = get_axis(img, dim_index, slice_num)
            # slice1 = crop_image(slice1, padding)
            plt.imshow(slice1, **display_params)
            plt.axis('off')
    fig.tight_layout()
    if output_path is not None:
        output_path = output_path.replace(' ', '_')
        fig.savefig(output_path + '.png', bbox_inches='tight')
    # plt.close()
    return fig
|
Produces a collage of various slices from different orientations in the given 3D image
|
def register(self, item):
    """
    Registers a new orb object to this schema.  This could be a column, index, or collector -- including
    a virtual object defined through the orb.virtual decorator.

    :param item: <variant> orb.Index, orb.Collector, orb.Column, or a callable
        carrying an ``__orb__`` attribute (e.g. a decorated virtual object).
    :return:
    """
    # Virtual objects expose their underlying orb definition via ``__orb__``.
    if callable(item) and hasattr(item, '__orb__'):
        item = item.__orb__
    key = item.name()
    model = self.__model
    # create class methods for indexes
    if isinstance(item, orb.Index):
        self.__indexes[key] = item
        item.setSchema(self)
        # Expose the index on the model as a classmethod, unless the
        # attribute name is already taken.
        if model and not hasattr(model, key):
            setattr(model, key, classmethod(item))
    # create instance methods for collectors
    elif isinstance(item, orb.Collector):
        self.__collectors[key] = item
        item.setSchema(self)
    # create instance methods for columns
    elif isinstance(item, orb.Column):
        self.__columns[key] = item
        item.setSchema(self)
|
Registers a new orb object to this schema. This could be a column, index, or collector -- including
a virtual object defined through the orb.virtual decorator.
:param item: <variant>
:return:
|
def selected(self, sel):
    """Called when this item has been selected (sel=True) OR deselected (sel=False)"""
    ParameterItem.selected(self, sel)
    widget = self.widget
    if widget is None:
        return
    # Show the editor when selected and writable; otherwise optionally hide it.
    wants_editor = sel and self.param.writable()
    if wants_editor:
        self.showEditor()
    elif self.hideWidget:
        self.hideEditor()
|
Called when this item has been selected (sel=True) OR deselected (sel=False)
|
def sizeof_fmt(num, suffix='B'):
    """
    Adapted from https://stackoverflow.com/a/1094933

    Re: precision - display enough decimals to show progress on a slow (<5 MB/s) Internet connection
    """
    precision = {'': 0, 'Ki': 0, 'Mi': 0, 'Gi': 3, 'Ti': 6, 'Pi': 9, 'Ei': 12, 'Zi': 15}
    value = num
    for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) >= 1024.0:
            value /= 1024.0
            continue
        format_string = "{number:.%df} {unit}{suffix}" % precision[unit]
        return format_string.format(number=value, unit=unit, suffix=suffix)
    # Anything that survived every division is in the yobibyte range.
    return "%.18f %s%s" % (value, 'Yi', suffix)
|
Adapted from https://stackoverflow.com/a/1094933
Re: precision - display enough decimals to show progress on a slow (<5 MB/s) Internet connection
|
def update_nginx_from_config(nginx_config):
    """Write the given config to disk as a Dusty sub-config
    in the Nginx includes directory. Then, either start nginx
    or tell it to reload its config to pick up what we've
    just written.

    :param nginx_config: dict with 'http' and 'stream' config strings.
    """
    import shutil

    logging.info('Updating nginx with new Dusty config')
    temp_dir = tempfile.mkdtemp()
    try:
        os.mkdir(os.path.join(temp_dir, 'html'))
        _write_nginx_config(constants.NGINX_BASE_CONFIG, os.path.join(temp_dir, constants.NGINX_PRIMARY_CONFIG_NAME))
        _write_nginx_config(nginx_config['http'], os.path.join(temp_dir, constants.NGINX_HTTP_CONFIG_NAME))
        _write_nginx_config(nginx_config['stream'], os.path.join(temp_dir, constants.NGINX_STREAM_CONFIG_NAME))
        _write_nginx_config(constants.NGINX_502_PAGE_HTML, os.path.join(temp_dir, 'html', constants.NGINX_502_PAGE_NAME))
        sync_local_path_to_vm(temp_dir, constants.NGINX_CONFIG_DIR_IN_VM)
    finally:
        # The staging dir was previously leaked on every call; the VM sync
        # copies its contents, so it is safe to remove afterwards.
        shutil.rmtree(temp_dir, ignore_errors=True)
|
Write the given config to disk as a Dusty sub-config
in the Nginx includes directory. Then, either start nginx
or tell it to reload its config to pick up what we've
just written.
|
def compute_edge_widths(self):
    """Compute the edge widths.

    If ``self.edge_width`` is a string, treat it as the name of an edge
    attribute and look it up per edge; otherwise use the value as-is.
    """
    # isinstance (rather than ``type(...) is str``) also accepts
    # str subclasses, the idiomatic Python type check.
    if isinstance(self.edge_width, str):
        edges = self.graph.edges
        self.edge_widths = [edges[n][self.edge_width] for n in self.edges]
    else:
        self.edge_widths = self.edge_width
|
Compute the edge widths.
|
def convertShape(shapeString):
    """ Convert xml shape string into float tuples.

    This method converts the 2d or 3d shape string from SUMO's xml file
    into a list containing 3d float-tuples. Non-existent z coordinates
    default to zero. If shapeString is empty, an empty list will be
    returned.
    """
    converted = []
    for pointString in shapeString.split():
        coords = tuple(float(part) for part in pointString.split(","))
        if len(coords) == 2:
            # Pad missing z coordinate with zero.
            converted.append(coords + (0.,))
        elif len(coords) == 3:
            converted.append(coords)
        else:
            raise ValueError(
                'Invalid shape point "%s", should be either 2d or 3d' % pointString)
    return converted
|
Convert xml shape string into float tuples.
This method converts the 2d or 3d shape string from SUMO's xml file
    into a list containing 3d float-tuples. Non-existent z coordinates default
to zero. If shapeString is empty, an empty list will be returned.
|
def _get_hourly_data(self, day_date, p_p_id):
    """Get hourly consumption, power and weather data for one day.

    :param day_date: date string for the day to fetch (format as the
        Hydro-Quebec portal expects it -- presumably ISO-like; confirm
        against the caller).
    :param p_p_id: portlet id used by the portal's resource endpoints.
    :return: dict with 'processed_hourly_data' (per-hour dicts with
        hour/lower/high/total/temp keys) and 'raw_hourly_data'
        (the raw API lists for Energy, Power and Weather).
    :raises PyHydroQuebecError: when a request cannot be performed.
    :raises PyHydroQuebecAnnualError: when a response cannot be decoded.
    """
    # First request: hourly energy/power consumption figures.
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesConsommationHoraires",
              "p_p_cacheability": "cacheLevelPage",
              "p_p_col_id": "column-2",
              "p_p_col_count": 1,
              "date": day_date,
              }
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get hourly data")
    try:
        # The portal serves JSON with a 'text/json' content type.
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecAnnualError("Could not get hourly data")
    hourly_consumption_data = json_output['results']['listeDonneesConsoEnergieHoraire']
    hourly_power_data = json_output['results']['listeDonneesConsoPuissanceHoraire']
    # Second request: hourly temperatures for the same day.
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesMeteoHoraires",
              "p_p_cacheability": "cacheLevelPage",
              "p_p_col_id": "column-2",
              "p_p_col_count": 1,
              "dateDebut": day_date,
              "dateFin": day_date,
              }
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get hourly data")
    try:
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecAnnualError("Could not get hourly data")
    hourly_weather_data = []
    if not json_output.get('results'):
        # Missing Temperature data from Hydro-Quebec (but don't crash the app for that)
        hourly_weather_data = [None]*24
    else:
        hourly_weather_data = json_output['results'][0]['listeTemperaturesHeure']
    # Add temp in data
    processed_hourly_data = [{'hour': data['heure'],
                              'lower': data['consoReg'],
                              'high': data['consoHaut'],
                              'total': data['consoTotal'],
                              'temp': hourly_weather_data[i]}
                             for i, data in enumerate(hourly_consumption_data)]
    raw_hourly_data = {'Energy': hourly_consumption_data,
                       'Power': hourly_power_data,
                       'Weather': hourly_weather_data}
    hourly_data = {'processed_hourly_data': processed_hourly_data,
                   'raw_hourly_data': raw_hourly_data}
    return hourly_data
|
Get Hourly Data.
|
def _is_chunk_markdown(source):
"""Return whether a chunk contains Markdown contents."""
lines = source.splitlines()
if all(line.startswith('# ') for line in lines):
# The chunk is a Markdown *unless* it is commented Python code.
source = '\n'.join(line[2:] for line in lines
if not line[2:].startswith('#')) # skip headers
if not source:
return True
# Try to parse the chunk: if it fails, it is Markdown, otherwise,
# it is Python.
return not _is_python(source)
return False
|
Return whether a chunk contains Markdown contents.
|
def cmd_notice(self, connection, sender, target, payload):
    """
    Sends a message
    """
    msg_target, topic, content = self.parse_payload(payload)

    def on_ack(ack_sender, ack_payload):
        # Log the acknowledgement of the notice.
        logging.info("NOTICE ACK from %s: %s", ack_sender, ack_payload)

    self.__herald.notice(msg_target, topic, content, on_ack)
|
Sends a message
|
def create_tasks(self, wfk_file, scr_input):
    """
    Create the SCR tasks and register them in self.

    Args:
        wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
        scr_input: Input for the screening calculation.
    """
    # This work must still be empty: tasks are created exactly once.
    assert len(self) == 0
    wfk_file = self.wfk_file = os.path.abspath(wfk_file)
    # Build a temporary work in the tmpdir that will use a shell manager
    # to run ABINIT in order to get the list of q-points for the screening.
    shell_manager = self.manager.to_shell_manager(mpi_procs=1)
    w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
    fake_input = scr_input.deepcopy()
    fake_task = w.register(fake_input)
    w.allocate()
    w.build()
    # Create the symbolic link and add the magic value
    # nqpdm = -1 to the input to get the list of q-points.
    fake_task.inlink_file(wfk_file)
    fake_task.set_vars({"nqptdm": -1})
    # Blocking run of the fake task; afterwards the netcdf output exists.
    fake_task.start_and_wait()
    # Parse the section with the q-points
    with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
        qpoints = reader.read_value("reduced_coordinates_of_kpoints")
    # print(qpoints)
    # Now we can register the task for the different q-points
    for qpoint in qpoints:
        qptdm_input = scr_input.deepcopy()
        qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
        new_task = self.register_scr_task(qptdm_input, manager=self.manager)
        # Add the garbage collector.
        if self.flow.gc is not None:
            new_task.set_gc(self.flow.gc)
    self.allocate()
|
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
|
def frame_iv(algorithm, sequence_number):
    """Builds the deterministic IV for a body frame.

    :param algorithm: Algorithm for which to build IV
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param int sequence_number: Frame sequence number
    :returns: Generated IV
    :rtype: bytes
    :raises ActionNotAllowedError: if sequence number of out bounds
    """
    if not 1 <= sequence_number <= MAX_FRAME_COUNT:
        raise ActionNotAllowedError(
            "Invalid frame sequence number: {actual}\nMust be between 1 and {max}".format(
                actual=sequence_number, max=MAX_FRAME_COUNT
            )
        )
    # Zero-pad up to the algorithm's IV length, then the big-endian counter.
    padding = b"\x00" * (algorithm.iv_len - 4)
    return padding + struct.pack(">I", sequence_number)
|
Builds the deterministic IV for a body frame.
:param algorithm: Algorithm for which to build IV
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param int sequence_number: Frame sequence number
:returns: Generated IV
:rtype: bytes
:raises ActionNotAllowedError: if sequence number of out bounds
|
def _request_one_trial_job(self):
    """Fetch one trial job, i.e., one hyperparameter configuration, and send it.

    If this function is called, a command will be sent by BOHB:
    a. If there is a parameter ready to run, sends "NewTrialJob" with a dict:
        {
            'parameter_id': id of new hyperparameter
            'parameter_source': 'algorithm'
            'parameters': value of new hyperparameter
        }
    b. If BOHB has no parameter waiting, sends "NoMoreTrialJobs" with
        {
            'parameter_id': '-1_0_0',
            'parameter_source': 'algorithm',
            'parameters': ''
        }
    """
    if not self.generated_hyper_configs:
        # Nothing queued: tell the dispatcher there is no work right now.
        empty_response = {
            'parameter_id': '-1_0_0',
            'parameter_source': 'algorithm',
            'parameters': ''
        }
        send(CommandType.NoMoreTrialJobs, json_tricks.dumps(empty_response))
        return
    next_config = self.generated_hyper_configs.pop()
    param_id, param_value = next_config[0], next_config[1]
    trial_response = {
        'parameter_id': param_id,
        'parameter_source': 'algorithm',
        'parameters': param_value
    }
    # Remember what we handed out so results can be matched back later.
    self.parameters[param_id] = param_value
    send(CommandType.NewTrialJob, json_tricks.dumps(trial_response))
    self.credit -= 1
|
get one trial job, i.e., one hyperparameter configuration.
If this function is called, Command will be sent by BOHB:
a. If there is a parameter need to run, will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB don't have parameter waiting, will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
|
def atstart(callback, *args, **kwargs):
    '''Schedule a callback to run before the main hook.

    Callbacks run in the order they were added.  This lets modules and
    classes perform initialization and inject behavior.  In particular:

        - Run common code before all of your hooks, such as logging
          the hook name or interesting relation data.
        - Defer object or module initialization that requires a hook
          context until we know there actually is a hook context,
          making testing easier.
        - Rather than requiring charm authors to include boilerplate to
          invoke your helper's behavior, have it run automatically if
          your object is instantiated or module imported.

    This is not at all useful after your hook framework has been launched.
    '''
    global _atstart
    # Stash the callable together with its arguments for later invocation.
    _atstart.append((callback, args, kwargs))
|
Schedule a callback to run before the main hook.
Callbacks are run in the order they were added.
This is useful for modules and classes to perform initialization
and inject behavior. In particular:
- Run common code before all of your hooks, such as logging
the hook name or interesting relation data.
- Defer object or module initialization that requires a hook
context until we know there actually is a hook context,
making testing easier.
- Rather than requiring charm authors to include boilerplate to
invoke your helper's behavior, have it run automatically if
your object is instantiated or module imported.
This is not at all useful after your hook framework as been launched.
|
def delete_evpn_local(route_type, route_dist, **kwargs):
    """Deletes/withdraws EVPN route from VRF identified by *route_dist*.

    :param route_type: EVPN route type of the route to withdraw.
    :param route_dist: Route Distinguisher identifying the VRF table.
    :param kwargs: Additional route attributes forwarded to the VRF table
        update and echoed back in the success response.
    :returns: Single-element list describing the withdrawn route.
    :raises PrefixError: if the core service reports a BgpCoreError.
    """
    try:
        tm = CORE_MANAGER.get_core_service().table_manager
        tm.update_vrf_table(route_dist,
                            route_family=VRF_RF_L2_EVPN,
                            route_type=route_type, is_withdraw=True, **kwargs)
        # Send success response.
        # BUGFIX: dict.update() returns None, so the original
        # `[{...}.update(kwargs)]` always returned [None]; build the merged
        # dict explicitly before returning it.
        route_info = {EVPN_ROUTE_TYPE: route_type,
                      ROUTE_DISTINGUISHER: route_dist,
                      VRF_RF: VRF_RF_L2_EVPN}
        route_info.update(kwargs)
        return [route_info]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
|
Deletes/withdraws EVPN route from VRF identified by *route_dist*.
|
def fastqWrite(fileHandleOrFile, name, seq, qualValues, mode="w"):
    """Write a single FASTQ record. If qualValues is None or '*' then a '*'
    placeholder is written instead of a quality string.

    :param fileHandleOrFile: open file handle, or a path string (in which
        case the file is opened here and closed before returning)
    :param name: sequence name for the '@' header line
    :param seq: sequence string; only roman alphabet characters and '-' allowed
    :param qualValues: iterable of integer Phred+33 values (33-126), or
        None/'*' for no quality data
    :param mode: file mode used when *fileHandleOrFile* is a path
    :raises RuntimeError: on invalid sequence characters, a qual/sequence
        length mismatch, or qual values outside 33-126
    """
    fileHandle = _getFileHandle(fileHandleOrFile, mode)
    assert isinstance(seq, str)
    # For safety and sanity only roman alphabet characters (and '-') are
    # allowed in the sequence.
    for ch in seq:
        if not (('A' <= ch <= 'Z') or ('a' <= ch <= 'z') or ch == '-'):
            raise RuntimeError("Invalid FASTQ character, ASCII code = \'%d\', char = '%s' found in input sequence %s" % (ord(ch), ch, name))
    if qualValues is not None and qualValues != '*':
        if len(seq) != len(qualValues):
            raise RuntimeError("Got a mismatch between the number of sequence characters (%s) and number of qual values (%s) for sequence: %s " % (len(seq), len(qualValues), name))
        for q in qualValues:
            if q < 33 or q > 126:
                raise RuntimeError("Got a qual value out of range %s (range is 33 to 126)" % q)
        fileHandle.write("@%s\n%s\n+\n%s\n" % (name, seq, "".join([chr(q) for q in qualValues])))
    else:
        fileHandle.write("@%s\n%s\n+\n*\n" % (name, seq))
    # Only close the handle if we opened it ourselves from a path.
    if isinstance(fileHandleOrFile, str):
        fileHandle.close()
|
Writes out fastq file. If qualValues is None or '*' then prints a '*' instead.
|
def filter_duplicate(self, url):
    """
    URL deduplication: raise if *url* was already seen, otherwise record it.
    A no-op when ``self.filterDuplicate`` is false.
    """
    if not self.filterDuplicate:
        return
    if url in self.historys:
        raise Exception('duplicate excepiton: %s is duplicate' % url)
    self.historys.add(url)
|
URL deduplication (raises on a previously seen URL when filtering is enabled)
|
def delete_Variable(self, name):
    '''
    Remove a variable from the object and drop it from the parameter list.

    :parameter name: name of the parameter to delete
    :returns: the value that was stored under *name*
    '''
    self.message(1, 'Deleting variable {0}'.format(name))
    # Boolean-mask out the deleted name from the parameter array.
    keep_mask = self.par_list != name
    self.par_list = self.par_list[keep_mask]
    return self.__dict__.pop(name)
|
pops a variable from class and delete it from parameter list
:parameter name: name of the parameter to delete
|
def tempfilename(**kwargs):
    """
    Reserve a temporary file for future use.

    Creates (and immediately closes) a NamedTemporaryFile with ``delete=False``
    and yields its name.  This is useful if you want to get a temporary file
    name, write to it in the future and ensure that if an exception is thrown
    the temporary file is removed.

    :param kwargs: forwarded to :class:`tempfile.NamedTemporaryFile`
        (``delete`` is always forced to False)
    """
    kwargs.update(delete=False)
    # Create the file *outside* the try block: if NamedTemporaryFile itself
    # fails there is nothing to clean up, and the original code would have
    # raised a masking NameError on the unbound `f` in its except clause.
    f = NamedTemporaryFile(**kwargs)
    f.close()
    try:
        yield f.name
    except Exception:
        # Ensure we clean up after ourself
        if os.path.exists(f.name):
            os.unlink(f.name)
        raise
|
Reserve a temporary file for future use.
This is useful if you want to get a temporary file name, write to it in the
future and ensure that if an exception is thrown the temporary file is removed.
|
def delete_network(context, id):
    """Delete a network.

    : param context: neutron api request context
    : param id: UUID representing the network to delete.
    :raises NetworkNotFound: if no network with *id* exists.
    :raises NotAuthorized: if a non-admin deletes a provider network.
    :raises NetworkInUse: if the network still has ports attached.
    """
    LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id))
    # All lookups and deletes happen inside one DB transaction so a failure
    # midway leaves the network record intact.
    with context.session.begin():
        net = db_api.network_find(context=context, limit=None, sorts=['id'],
                                  marker=None, page_reverse=False, id=id,
                                  scope=db_api.ONE)
        if not net:
            raise n_exc.NetworkNotFound(net_id=id)
        # Only admins may delete provider networks.
        if not context.is_admin:
            if STRATEGY.is_provider_network(net.id):
                raise n_exc.NotAuthorized(net_id=id)
        # Refuse to delete while ports are still attached.
        if net.ports:
            raise n_exc.NetworkInUse(net_id=id)
        # Tear down via the network's backing plugin driver first, then the
        # subnets, and finally the DB record itself.
        net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
        net_driver.delete_network(context, id)
        for subnet in net["subnets"]:
            subnets._delete_subnet(context, subnet)
        db_api.network_delete(context, net)
|
Delete a network.
: param context: neutron api request context
: param id: UUID representing the network to delete.
|
def draw(self):
    """
    N.draw()

    Set all of N's stochastics to random values drawn from the normal
    approximation to the posterior.
    """
    # Sample standard-normal deviates, one per column of the factor matrix.
    n_devs = self._sig.shape[1]
    devs = normal(size=n_devs)
    # Shift the correlated draw by the posterior mean.
    sample = self._mu + inner(self._sig, devs)
    self._set_stochastics(sample)
|
N.draw()
Sets all N's stochastics to random values drawn from
the normal approximation to the posterior.
|
def get_my_feed(self, limit=150, offset=20, sort="updated", nid=None):
    """Retrieve the authenticated user's feed.

    :type limit: int
    :param limit: Number of posts from feed to get, starting from ``offset``
    :type offset: int
    :param offset: Offset starting from bottom of feed
    :type sort: str
    :param sort: How to sort the retrieved feed; the only currently known
        value is "updated"
    :type nid: str
    :param nid: Optional network ID to read the feed from, overriding the
        ``network_id`` the class was created with
    """
    payload = {"limit": limit, "offset": offset, "sort": sort}
    response = self.request(
        method="network.get_my_feed",
        nid=nid,
        data=payload,
    )
    return self._handle_error(response, "Could not retrieve your feed.")
|
Get my feed
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
|
def load_mayaplugins():
    """Loads the maya plugins (not jukebox plugins) of the pipeline

    :returns: None
    :rtype: None
    :raises: None
    """
    mpp = os.environ.get('MAYA_PLUG_IN_PATH')
    if mpp is not None:
        # BUGFIX: the join result was previously discarded, so the pipeline
        # plugin path never made it into the restored variable.
        mpp = ';'.join([mpp, MAYA_PLUGIN_PATH])
    else:
        mpp = MAYA_PLUGIN_PATH
    # To simply load all plugins inside our plugin path, we override the
    # plugin path temporarily.
    os.environ['MAYA_PLUG_IN_PATH'] = MAYA_PLUGIN_PATH
    cmds.loadPlugin(allPlugins=True)
    # Then restore MAYA_PLUG_IN_PATH, now including our plugin path.
    # NOTE: this ignores the order of paths in MAYA_PLUG_IN_PATH completely
    os.environ['MAYA_PLUG_IN_PATH'] = mpp
|
Loads the maya plugins (not jukebox plugins) of the pipeline
:returns: None
:rtype: None
:raises: None
|
def insert_inexistence(self, table, kwargs, condition):
    """.. :py:method::

    Insert a row only if no row matching *condition* already exists
    (``INSERT ... SELECT ... WHERE NOT EXISTS``).

    :param table: target table name; interpolated into the SQL unescaped,
        so it must not come from untrusted input
    :param kwargs: dict of column-name -> value pairs to insert
    :param condition: dict turned into the NOT EXISTS sub-query's WHERE
        clause by ``self.parse_condition``

    Usage::

        >>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'}, {'id': '12de3wrv'})
        insert into hospital (id, province) select '12de3wrv', 'shanghai' where not exists (select 1 from hospital where id='12de3wrv' limit 1);
    """
    sql = "insert into " + table + " ({}) "
    select = "select {} "
    # NOTE: .format() binds only to the last string literal in the
    # concatenation, filling its "{}" with the parsed condition.
    condition = "where not exists (select 1 from " + table + "{} limit 1);".format( self.parse_condition(condition) )
    keys, values = [], []
    # Split the column names and values in matching order.
    # NOTE(review): dict.iteritems() is Python 2 only.
    [ (keys.append(k), values.append(v)) for k, v in kwargs.iteritems() ]
    # Values are passed separately as query parameters ('%s' placeholders).
    sql = sql.format(', '.join(keys)) + select.format( ', '.join(['%s']*len(values)) ) + condition
    super(PGWrapper, self).execute(sql, values, result=False)
|
.. :py:method::
Usage::
>>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'}, {'id': '12de3wrv'})
insert into hospital (id, province) select '12de3wrv', 'shanghai' where not exists (select 1 from hospital where id='12de3wrv' limit 1);
|
def search(self, search_phrase, limit=None):
    """Search for datasets, and expand the hits into database records.

    :param search_phrase: free-text search phrase; may contain structured
        terms understood by SearchTermParser (e.g. a `source` term).
    :param limit: optional maximum number of dataset hits to consider.
    :return: hit objects (with ``vid`` and ``bundle`` attached), sorted by
        descending score.
    """
    from ambry.identity import ObjectNumber
    from ambry.orm.exc import NotFoundError
    from ambry.library.search_backends.base import SearchTermParser
    results = []
    stp = SearchTermParser()
    # Because of the split between searching for partitions and bundles, some terms don't behave right.
    # The source term should be a limit on everything, but it isn't part of the partition doc,
    # so we check for it here.
    parsed_terms = stp.parse(search_phrase)
    for r in self.search_datasets(search_phrase, limit):
        # Fall back to deriving the dataset vid from the first partition.
        vid = r.vid or ObjectNumber.parse(next(iter(r.partitions))).as_dataset
        r.vid = vid
        try:
            r.bundle = self.library.bundle(r.vid)
            # Apply the `source` term as a post-filter on the bundle's source.
            if 'source' not in parsed_terms or parsed_terms['source'] in r.bundle.dataset.source:
                results.append(r)
        except NotFoundError:
            # Skip hits whose bundle can no longer be resolved.
            pass
    return sorted(results, key=lambda r : r.score, reverse=True)
|
Search for datasets, and expand to database records
|
def dump_registers_peek(registers, data, separator = ' ', width = 16):
    """
    Dump data pointed to by the given registers, if any.

    @type  registers: dict( str S{->} int )
    @param registers: Dictionary mapping register names to their values.
        This value is returned by L{Thread.get_context}.

    @type  data: dict( str S{->} str )
    @param data: Dictionary mapping register names to the data they point to.
        This value is returned by L{Thread.peek_pointers_in_registers}.

    @type  separator: str
    @param separator: Separator between hex bytes in each dumped line.

    @type  width: int
    @param width: Number of bytes per dumped line.

    @rtype:  str
    @return: Text suitable for logging.
    """
    if None in (registers, data):
        return ''
    # sorted() replaces the Python-2-era compat.keys() + list.sort() combo,
    # and joining a list avoids quadratic string concatenation.
    lines = []
    for reg_name in sorted(data):
        dumped = HexDump.hexline(data[reg_name], separator, width)
        lines.append('%s -> %s\n' % (reg_name.lower(), dumped))
    return ''.join(lines)
|
Dump data pointed to by the given registers, if any.
@type registers: dict( str S{->} int )
@param registers: Dictionary mapping register names to their values.
This value is returned by L{Thread.get_context}.
@type data: dict( str S{->} str )
@param data: Dictionary mapping register names to the data they point to.
This value is returned by L{Thread.peek_pointers_in_registers}.
@rtype: str
@return: Text suitable for logging.
|
def _clean_tx(response_dict):
''' Pythonize a blockcypher API response '''
confirmed_txrefs = []
for confirmed_txref in response_dict.get('txrefs', []):
confirmed_txref['confirmed'] = parser.parse(confirmed_txref['confirmed'])
confirmed_txrefs.append(confirmed_txref)
response_dict['txrefs'] = confirmed_txrefs
unconfirmed_txrefs = []
for unconfirmed_txref in response_dict.get('unconfirmed_txrefs', []):
unconfirmed_txref['received'] = parser.parse(unconfirmed_txref['received'])
unconfirmed_txrefs.append(unconfirmed_txref)
response_dict['unconfirmed_txrefs'] = unconfirmed_txrefs
return response_dict
|
Pythonize a blockcypher API response
|
def setup(self, database):
"""Setup connection to database."""
self.db = database
self.hgnc_collection = database.hgnc_gene
self.user_collection = database.user
self.whitelist_collection = database.whitelist
self.institute_collection = database.institute
self.event_collection = database.event
self.case_collection = database.case
self.panel_collection = database.gene_panel
self.hpo_term_collection = database.hpo_term
self.disease_term_collection = database.disease_term
self.variant_collection = database.variant
self.acmg_collection = database.acmg
self.clinvar_collection = database.clinvar
self.clinvar_submission_collection = database.clinvar_submission
self.exon_collection = database.exon
self.transcript_collection = database.transcript
|
Setup connection to database.
|
def pack(o, stream, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.pack and ensures that the passed object is unwrapped if it is
a proxy.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
orig_enc_func = kwargs.pop('default', lambda x: x)
def _enc_func(obj):
obj = ThreadLocalProxy.unproxy(obj)
return orig_enc_func(obj)
return msgpack_module.pack(o, stream, default=_enc_func, **kwargs)
|
.. versionadded:: 2018.3.4
Wraps msgpack.pack and ensures that the passed object is unwrapped if it is
a proxy.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
|
def delete_instance(self, instance_id):
    '''
    method for removing an instance from AWS EC2

    Deletes user tags, stops the instance and then terminates it.

    :param instance_id: string of instance id on AWS
    :return: string reporting state of instance
    :raises AWSConnectionError: if any EC2 API call fails
    '''

    import re

    title = '%s.delete_instance' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'instance_id': instance_id
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # report query
    self.iam.printer('Removing instance %s from AWS region %s.' % (instance_id, self.iam.region_name))

    # retrieve state
    old_state = self.check_instance_state(instance_id)

    # discover user tags associated with instance id
    # (AWS-reserved "aws:" tags cannot be deleted, so filter them out)
    aws_tag_pattern = re.compile('aws:')
    tag_list = []
    try:
        response = self.connection.describe_tags(
            Filters=[ { 'Name': 'resource-id', 'Values': [ instance_id ] } ]
        )
        for tag_record in response['Tags']:
            if not aws_tag_pattern.findall(tag_record['Key']):
                tag_list.append({
                    'Key': tag_record['Key'],
                    'Value': tag_record['Value']
                })
    # NOTE: previously a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        raise AWSConnectionError(title)

    # remove tags from instance
    try:
        self.connection.delete_tags(
            Resources=[ instance_id ],
            Tags=tag_list
        )
        self.iam.printer('Tags have been deleted from %s.' % instance_id)
    except Exception:
        raise AWSConnectionError(title)

    # stop instance
    try:
        self.connection.stop_instances(
            InstanceIds=[ instance_id ]
        )
    except Exception:
        raise AWSConnectionError(title)

    # terminate instance
    try:
        response = self.connection.terminate_instances(
            InstanceIds=[ instance_id ]
        )
        new_state = response['TerminatingInstances'][0]['CurrentState']['Name']
    except Exception:
        raise AWSConnectionError(title)

    # report outcome and return new state
    self.iam.printer('Instance %s was %s.' % (instance_id, old_state))
    self.iam.printer('Instance %s is %s.' % (instance_id, new_state))
    return new_state
|
method for removing an instance from AWS EC2
:param instance_id: string of instance id on AWS
:return: string reporting state of instance
|
def train(self, recall=0.95, index_predicates=True): # pragma: no cover
    """
    Learn the final classifier and blocking rules from the labeled
    training pairs.

    Keyword arguments:

    recall -- The proportion of true dupe pairs in our training
              data that the learned blocks must cover. If we lower
              the recall, there will be pairs of true dupes that
              we will never directly compare.

              recall should be a float between 0.0 and 1.0, the default
              is 0.95

    index_predicates -- Should dedupe consider predicates that
                        rely upon indexing the data. Index predicates can
                        be slower and take substantial memory.

                        Defaults to True.
    """
    # Fit the pairwise classifier on distance vectors of the labeled pairs.
    examples, y = flatten_training(self.training_pairs)
    self.classifier.fit(self.data_model.distances(examples), y)
    # Learn blocking predicates that cover the requested recall, then
    # rebuild the blocker with fresh indices.
    self.predicates = self.active_learner.learn_predicates(
        recall, index_predicates)
    self.blocker = blocking.Blocker(self.predicates)
    self.blocker.resetIndices()
|
Keyword arguments:
maximum_comparisons -- The maximum number of comparisons a
blocking rule is allowed to make.
Defaults to 1000000
recall -- The proportion of true dupe pairs in our training
data that that we the learned blocks must cover. If
we lower the recall, there will be pairs of true
dupes that we will never directly compare.
recall should be a float between 0.0 and 1.0, the default
is 0.95
index_predicates -- Should dedupe consider predicates that
rely upon indexing the data. Index predicates can
be slower and take substantial memory.
Defaults to True.
|
def get_supported_filepaths(filepaths, supported_extensions, max_depth=float('inf')):
    """Collect filepaths that end with one of the supported extensions.

    Parameters:
        filepaths (list): Filepaths to check; directories are walked for
            matching files.
        supported_extensions (tuple or str): Supported file extensions or a
            single file extension.
        max_depth (int): The depth in the directory tree to walk.
            A depth of '0' limits the walk to the top directory.
            Default: No limit.

    Returns:
        A list of supported filepaths.
    """
    matches = []
    for candidate in filepaths:
        # Translate cygwin-style paths when running on Windows.
        if os.name == 'nt' and CYGPATH_RE.match(candidate):
            candidate = convert_cygwin_path(candidate)
        if os.path.isdir(candidate):
            for dirpath, _dirs, filenames in walk_depth(candidate, max_depth):
                matches.extend(
                    os.path.join(dirpath, filename)
                    for filename in filenames
                    if filename.lower().endswith(supported_extensions)
                )
        elif os.path.isfile(candidate) and candidate.lower().endswith(supported_extensions):
            matches.append(candidate)
    return matches
|
Get filepaths with supported extensions from given filepaths.
Parameters:
filepaths (list or str): Filepath(s) to check.
supported_extensions (tuple or str): Supported file extensions or a single file extension.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit.
Returns:
A list of supported filepaths.
|
def PC_PI_calc(P, TOP, POP):
    """
    Calculate percent chance agreement for Scott's Pi.

    :param P: condition positive
    :type P : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param POP: population
    :type POP : dict
    :return: percent chance agreement as float, or the string "None" on any
        computation error (e.g. a zero population count)
    """
    try:
        return sum(((P[key] + TOP[key]) / (2 * POP[key])) ** 2 for key in P)
    except Exception:
        return "None"
|
Calculate percent chance agreement for Scott's Pi.
:param P: condition positive
:type P : dict
:param TOP: test outcome positive
:type TOP : dict
:param POP: population
:type POP:dict
:return: percent chance agreement as float
|
def pylint_amnesty(pylint_output):
    """
    Add ``# pylint: disable`` clauses to add exceptions to all existing
    pylint errors in a codebase.
    """
    # Group the reported errors by filename, then by line number.
    errors_by_file = defaultdict(lambda: defaultdict(set))
    for err in parse_pylint_output(pylint_output):
        errors_by_file[err.filename][err.linenum].add(err)

    for filename in sorted(errors_by_file):
        try:
            source = open(filename)
        except IOError:
            LOG.warning(u"Unable to open %s for edits", filename, exc_info=True)
            continue
        # Read the whole file, annotating each line with disables for the
        # errors reported on it, then rewrite the file in place.
        with source:
            rewritten = []
            for linenum, line in enumerate(source, start=1):
                rewritten.extend(
                    fix_pylint(line, errors_by_file[filename][linenum])
                )
        with open(filename, 'w') as target:
            target.writelines(rewritten)
|
Add ``# pylint: disable`` clauses to add exceptions to all existing pylint errors in a codebase.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.