code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def on_button_release(self, event):
    """Write back changes after a move operation.

    If one or more items have been moved, the new positions are stored in the
    corresponding meta data and a signal notifying the change is emitted. If
    nothing was moved, the previous selection is restored.

    :param event: The button release event
    """
    # Maps affected model -> (change description, affects_children flag, view)
    affected_models = {}
    for inmotion in self._movable_items:
        inmotion.move((event.x, event.y))
        # Position of the item's north-west handle relative to its parent
        rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item,
                                                    inmotion.item.handles()[NW])
        if isinstance(inmotion.item, StateView):
            state_v = inmotion.item
            state_m = state_v.model
            self.view.canvas.request_update(state_v)
            # Only write meta data (and later emit) if the position actually changed
            if state_m.get_meta_data_editor()['rel_pos'] != rel_pos:
                state_m.set_meta_data_editor('rel_pos', rel_pos)
                affected_models[state_m] = ("position", True, state_v)
        elif isinstance(inmotion.item, NameView):
            state_v = inmotion.item
            # A NameView has no own model; use the model of its parent StateView
            state_m = self.view.canvas.get_parent(state_v).model
            self.view.canvas.request_update(state_v)
            if state_m.get_meta_data_editor()['name']['rel_pos'] != rel_pos:
                state_m.set_meta_data_editor('name.rel_pos', rel_pos)
                affected_models[state_m] = ("name_position", False, state_v)
        elif isinstance(inmotion.item, TransitionView):
            transition_v = inmotion.item
            transition_m = transition_v.model
            self.view.canvas.request_update(transition_v)
            current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v)
            old_waypoints = transition_m.get_meta_data_editor()['waypoints']
            if current_waypoints != old_waypoints:
                transition_m.set_meta_data_editor('waypoints', current_waypoints)
                affected_models[transition_m] = ("waypoints", False, transition_v)
    if len(affected_models) == 1:
        # Exactly one item moved: emit the specific change for its model
        model = next(iter(affected_models))
        change, affects_children, view = affected_models[model]
        self.view.graphical_editor.emit('meta_data_changed', model, change, affects_children)
    elif len(affected_models) > 1:
        # if more than one item has been moved, we need to call the meta_data_changed signal on a common parent
        common_parents = None
        for change, affects_children, view in affected_models.values():
            parents_of_view = set(self.view.canvas.get_ancestors(view))
            if common_parents is None:
                common_parents = parents_of_view
            else:
                common_parents = common_parents.intersection(parents_of_view)
        assert len(common_parents) > 0, "The selected elements do not have common parent element"
        for state_v in common_parents:
            # Find most nested state_v: skip any common parent that still has
            # another common parent among its children
            children_of_state_v = self.view.canvas.get_all_children(state_v)
            if any(common_parent in children_of_state_v for common_parent in common_parents):
                continue
            self.view.graphical_editor.emit('meta_data_changed', state_v.model, "positions", True)
            break
    if not affected_models and self._old_selection is not None:
        # The selection is handled differently depending on whether states were moved or not
        # If no move operation was performed, we reset the selection to that is was before the button-press event
        # and let the state machine selection handle the selection
        self.view.unselect_all()
        self.view.select_item(self._old_selection)
        self.view.handle_new_selection(self._item)
    self._move_name_v = False
    self._old_selection = None
    return super(MoveItemTool, self).on_button_release(event)
|
Write back changes
If one or more items have been moved, the new positions are stored in the corresponding meta data and a signal
notifying the change is emitted.
:param event: The button event
|
def attribute_exists(self, attribute, section):
    """
    Return whether the given attribute exists in the given section.

    The attribute name is compared with its namespace stripped, against the
    section's attributes (also namespace-stripped).

    Usage::

        >>> sections_file_parser.attribute_exists("Attribute 1", "Section A")
        True
        >>> sections_file_parser.attribute_exists("Attribute 2", "Section A")
        False

    :param attribute: Attribute to check existence.
    :type attribute: unicode
    :param section: Section to search attribute into.
    :type section: unicode
    :return: Attribute existence.
    :rtype: bool
    """
    stripped_attribute = foundations.namespace.remove_namespace(attribute, root_only=True)
    if stripped_attribute in self.get_attributes(section, strip_namespaces=True):
        LOGGER.debug("> '{0}' attribute exists in '{1}' section.".format(attribute, section))
        return True
    LOGGER.debug("> '{0}' attribute doesn't exists in '{1}' section.".format(attribute, section))
    return False
|
Checks if given attribute exists.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = content
>>> sections_file_parser.parse()
<foundations.parsers.SectionsFileParser object at 0x234564563>
>>> sections_file_parser.attribute_exists("Attribute 1", "Section A")
True
>>> sections_file_parser.attribute_exists("Attribute 2", "Section A")
False
:param attribute: Attribute to check existence.
:type attribute: unicode
:param section: Section to search attribute into.
:type section: unicode
:return: Attribute existence.
:rtype: bool
|
def preShear(self, h, v):
    """Apply a pre-shear transformation in place and return this matrix.

    :param h: horizontal shear factor
    :param v: vertical shear factor
    :return: self, updated in place
    """
    # Keep the original a/b components; c/d updates must use the old values
    old_a = self.a
    old_b = self.b
    self.a = old_a + v * self.c
    self.b = old_b + v * self.d
    self.c = self.c + h * old_a
    self.d = self.d + h * old_b
    return self
|
Calculate pre shearing and replace current matrix.
|
def add_target(self, name=None):
    """
    Add an SCons target to this nest.

    The decorated function is immediately called with each of the output
    directories and current control dictionaries; each result is added to
    the respective control dictionary for later nests to access.

    :param name: Name for the target in the nest (default: function name).
    """
    def deco(func):
        target_key = name or func.__name__

        def nestfunc(control):
            # Resolve the output directory for this control dictionary
            out_dir = os.path.join(self.dest_dir, control['OUTDIR'])
            return [func(out_dir, control)]

        self.nest.add(target_key, nestfunc, create_dir=False)
        self._register_alias(target_key)
        return func
    return deco
|
Add an SCons target to this nest.
The function decorated will be immediately called with each of the
output directories and current control dictionaries. Each result will
be added to the respective control dictionary for later nests to
access.
:param name: Name for the target in the name (default: function name).
|
def logstop(self):
    """Fully stop logging and close log file.

    In order to start logging again, a new logstart() call needs to be
    made, possibly (though not necessarily) with a new filename, mode and
    other options."""
    if self.logfile is not None:
        self.logfile.close()
        self.logfile = None
    else:
        # Nothing to close if logging was never started (Python 2 print statement)
        print "Logging hadn't been started."
    # Mark logging inactive regardless of whether a file was open
    self.log_active = False
|
Fully stop logging and close log file.
In order to start logging again, a new logstart() call needs to be
made, possibly (though not necessarily) with a new filename, mode and
other options.
|
def update_ip(self, ip, record_type='A', domains=None, subdomains=None):
    """Update the IP address in all records, specified by type, to the value of ip.

    Returns True if no exceptions occurred during the update. If no domains are
    provided, all domains returned from self.get_domains() will be updated. By
    default, only A records are updated.

    :param ip: The new IP address (eg. '123.1.2.255')
    :param record_type: The type of records to update (eg. 'A')
    :param domains: A list of the domains you want to update (eg. ['123.com','abc.net'])
    :param subdomains: A list of the subdomains you want to update (eg. ['www','dev'])
    :type ip: str or unicode
    :type record_type: str or unicode
    :type domains: str, list of str
    :type subdomains: str, list of str
    :return: True if no exceptions occurred
    """
    # Bug fix: the old code referenced the Python-2-only name ``unicode``
    # unconditionally when matching subdomains, raising NameError on Python 3.
    # Determine the applicable string types once, up front.
    if sys.version_info < (3, 0):
        string_types = (str, unicode)  # noqa: F821 -- Python 2 only
    else:
        string_types = (str,)
    if domains is None:
        domains = self.get_domains()
    elif isinstance(domains, string_types):
        domains = [domains]
    else:
        # we have a tuple, set, or something else, try to convert it to a list
        domains = list(domains)
    for domain in domains:
        records = self.get_records(domain, record_type=record_type)
        for record in records:
            r_name = str(record['name'])
            r_ip = str(record['data'])
            if r_ip == ip:
                continue  # record already points at the desired IP
            # Update when no subdomain filter is given, when the filter is a
            # single string equal to this record's name, or when the record's
            # name is contained in the filter collection.
            if (subdomains is None or
                    (isinstance(subdomains, string_types) and r_name == subdomains) or
                    r_name in subdomains):
                record.update(data=str(ip))
                self.update_record(domain, record)
    # If we didn't get any exceptions, return True to let the user know
    return True
|
Update the IP address in all records, specified by type, to the value of ip. Returns True if no
exceptions occurred during the update. If no domains are provided, all domains returned from
self.get_domains() will be updated. By default, only A records are updated.
:param record_type: The type of records to update (eg. 'A')
:param ip: The new IP address (eg. '123.1.2.255')
:param domains: A list of the domains you want to update (eg. ['123.com','abc.net'])
:param subdomains: A list of the subdomains you want to update (eg. ['www','dev'])
:type record_type: str or unicode
:type ip: str or unicode
:type domains: str, list of str
:type subdomains: str, list of str
:return: True if no exceptions occurred
|
def initialize(self, runtime=None):
    """Initializes this manager.

    A manager is initialized once at the time of creation.

    arg: runtime (osid.OsidRuntimeManager): the runtime environment
    raise: CONFIGURATION_ERROR - an error with implementation
        configuration
    raise: ILLEGAL_STATE - this manager has already been initialized
        by the OsidRuntime
    raise: NullArgument - runtime is null
    raise: OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    """
    # A second call to initialize() is an ILLEGAL_STATE per the OSID contract
    if self._runtime is not None:
        raise IllegalState()
    self._runtime = runtime
    config = runtime.get_configuration()
    # Optional host override from the runtime configuration
    host_value = config.get_value_by_parameter(
        Id('parameter:hostName@dlkit_service')).get_string_value()
    if host_value is not None:
        self._host = host_value
    # Optional application key from the runtime configuration
    app_key_value = config.get_value_by_parameter(
        Id('parameter:appKey@dlkit_service')).get_string_value()
    if app_key_value is not None:
        self._app_key = app_key_value
|
Initializes this manager.
A manager is initialized once at the time of creation.
arg: runtime (osid.OsidRuntimeManager): the runtime
environment
raise: CONFIGURATION_ERROR - an error with implementation
configuration
raise: ILLEGAL_STATE - this manager has already been
initialized by the OsidRuntime
raise: NullArgument - runtime is null
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
implementation notes: In addition to loading its runtime
configuration an implementation may create shared resources such
as connection pools to be shared among all sessions of this
service and released when this manager is closed. Providers must
thread-protect any data stored in the manager. To maximize
interoperability, providers should not honor a second call to
initialize() and must set an ILLEGAL_STATE error.
|
def _encrypt(self, archive):
    """Encrypts the compressed archive using GPG.

    If encryption fails for any reason, it should be logged by sos but not
    cause execution to stop. The assumption is that the unencrypted archive
    would still be of use to the user, and/or that the end user has another
    means of securing the archive.

    Returns the name of the encrypted archive, or raises an exception to
    signal that encryption failed and the unencrypted archive name should
    be used.
    """
    # e.g. sosreport-foo.tar.xz -> secured-sosreport-foo.tar.xz.gpg
    arc_name = archive.replace("sosreport-", "secured-sosreport-")
    arc_name += ".gpg"
    enc_cmd = "gpg --batch -o %s " % arc_name
    env = None
    if self.enc_opts["key"]:
        # need to assume a trusted key here to be able to encrypt the
        # archive non-interactively
        enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"]
        enc_cmd += archive
    if self.enc_opts["password"]:
        # prevent change of gpg options using a long password, but also
        # prevent the addition of quote characters to the passphrase
        passwd = "%s" % self.enc_opts["password"].replace('\'"', '')
        # pass the passphrase via the environment and pipe it to gpg's
        # stdin so it never appears on the command line
        env = {"sos_gpg": passwd}
        enc_cmd += "-c --passphrase-fd 0 "
        enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd
        enc_cmd += archive
        # NOTE(review): if both "key" and "password" are set, the archive
        # name is appended twice to the command line — confirm the two
        # options are mutually exclusive at the call site
    r = sos_get_command_output(enc_cmd, timeout=0, env=env)
    if r["status"] == 0:
        return arc_name
    elif r["status"] == 2:
        # gpg exit status 2 here is mapped to a key/passphrase problem
        if self.enc_opts["key"]:
            msg = "Specified key not in keyring"
        else:
            msg = "Could not read passphrase"
    else:
        # TODO: report the actual error from gpg. Currently, we cannot as
        # sos_get_command_output() does not capture stderr
        msg = "gpg exited with code %s" % r["status"]
    raise Exception(msg)
|
Encrypts the compressed archive using GPG.
If encryption fails for any reason, it should be logged by sos but not
cause execution to stop. The assumption is that the unencrypted archive
would still be of use to the user, and/or that the end user has another
means of securing the archive.
Returns the name of the encrypted archive, or raises an exception to
signal that encryption failed and the unencrypted archive name should
be used.
|
def request(
    self,
    url: str,
    method: str,
    raise_for_status: bool = True,
    path_to_errors: tuple = None,
    *args,
    **kwargs
) -> tuple:
    """
    A wrapper method for :meth:`~requests.Session.request`, which adds some defaults and logging

    :param url: The URL to send the reply to
    :param method: The method to use
    :param raise_for_status: Should an exception be raised for a failed response. Default is **True**
    :param path_to_errors: Nested JSON keys to follow when extracting error
        messages from a failed response body
    :param args: Additional args to be sent to the request
    :param kwargs: Additional args to be sent to the request
    :return: Tuple of (:class:`requests.Response` or None, list of errors or None)
    """
    # Reuse a caller-supplied session if given, otherwise create a fresh one.
    # NOTE(review): "session" is read but not popped from kwargs, so it is also
    # forwarded to session.request() below — confirm this is intended
    session = kwargs.get("session", requests.Session())
    log.debug(
        "sending a %s request to %s with args: %s kwargs: %s",
        method.upper(),
        url,
        args,
        kwargs,
    )
    rsp = session.request(method, url, *args, **kwargs)
    log.debug("response: %s", rsp.text)
    errors = None
    if raise_for_status:
        try:
            rsp.raise_for_status()
        except requests.RequestException as e:
            if e.response is not None:
                rsp = e.response
                if path_to_errors:
                    try:
                        # Walk the configured key path into the JSON error payload
                        errors = rsp.json()
                        for arg in path_to_errors:
                            if errors.get(arg):
                                errors = errors[arg]
                    except json.decoder.JSONDecodeError:
                        # Body is not JSON; fall back to the raw text
                        errors = [rsp.text]
                else:
                    errors = [rsp.text]
                if not isinstance(errors, list):
                    errors = [errors]
            else:
                # No response attached (e.g. connection error)
                rsp = None
                errors = [str(e)]
            log.debug("errors when trying to access %s: %s", url, errors)
    log.debug("returning response %s, errors %s", rsp, errors)
    return rsp, errors
|
A wrapper method for :meth:`~requests.Session.request``, which adds some defaults and logging
:param url: The URL to send the reply to
:param method: The method to use
:param raise_for_status: Should an exception be raised for a failed response. Default is **True**
:param args: Additional args to be sent to the request
:param kwargs: Additional args to be sent to the request
:return: Dict of response body or original :class:`requests.Response`
|
def p_bound_terminal(self, p):
    """bound_terminal : unbound_terminal"""
    # NOTE: the docstring above is the PLY grammar rule; do not edit it.
    terminal = p[1][0]
    if terminal.literal in ('*', '**'):
        # Wrap wildcard terminals in an auto-numbered binding segment pair
        p[0] = [
            _Segment(_BINDING, '$%d' % self.binding_var_count),
            terminal,
            _Segment(_END_BINDING, ''),
        ]
        self.binding_var_count += 1
    else:
        p[0] = p[1]
|
bound_terminal : unbound_terminal
|
def prepare_video_params(self, title=None, tags='Others', description='',
                         copyright_type='original', public_type='all',
                         category=None, watch_password=None,
                         latitude=None, longitude=None, shoot_time=None
                         ):
    """Build the parameter dict for an upload/create request.

    Only two essential parameters need to be provided: title and tags;
    all other video params are optional. Full param spec:
    http://cloud.youku.com/docs?id=110#create .

    Args:
        title: string; defaults to the file name, truncated to 80 chars.
        tags: string, 1-10 tags joined with comma.
        description: string, truncated to 2000 characters.
        copyright_type: string, 'original' or 'reproduced'.
        public_type: string, 'all' or 'friend' or 'password'.
        category: string, optional category name.
        watch_password: string, required if public_type is 'password'.
        latitude: double (0.0 is accepted).
        longitude: double (0.0 is accepted).
        shoot_time: datetime.

    Returns:
        dict of params that the upload/create method needs.
    """
    params = {}
    if title is None:
        title = self.file_name
    elif len(title) > 80:
        title = title[:80]
    if len(description) > 2000:
        description = description[:2000]
    params['title'] = title
    params['tags'] = tags
    params['description'] = description
    params['copyright_type'] = copyright_type
    params['public_type'] = public_type
    if category:
        params['category'] = category
    if watch_password:
        params['watch_password'] = watch_password
    # Bug fix: use explicit None checks so that 0.0 (equator / prime
    # meridian) is not silently dropped as falsy
    if latitude is not None:
        params['latitude'] = latitude
    if longitude is not None:
        params['longitude'] = longitude
    if shoot_time:
        params['shoot_time'] = shoot_time
    return params
|
util method for create video params to upload.
Only need to provide a minimum of two essential parameters:
title and tags, other video params are optional. All params spec
see: http://cloud.youku.com/docs?id=110#create .
Args:
title: string, 2-50 characters.
tags: string, 1-10 tags joind with comma.
description: string, less than 2000 characters.
copyright_type: string, 'original' or 'reproduced'
public_type: string, 'all' or 'friend' or 'password'
watch_password: string, if public_type is password.
latitude: double.
longitude: double.
shoot_time: datetime.
Returns:
dict params that upload/create method need.
|
def detect_fts(conn, table):
    "Detect if table has a corresponding FTS virtual table and return it"
    matches = conn.execute(detect_fts_sql(table)).fetchall()
    # First column of the first row holds the FTS table name, if any
    return matches[0][0] if matches else None
|
Detect if table has a corresponding FTS virtual table and return it
|
def generate(self):
    '''
    Generate noise samples.

    Returns:
        `np.ndarray` of samples with shape
        (batch_size, channel, seq_len, dim).
    '''
    sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim))
    for batch in range(self.__batch_size):
        for i in range(len(self.__program_list)):
            program_key = self.__program_list[i]
            # Pick a random MIDI data frame and restrict it to this program
            key = np.random.randint(low=0, high=len(self.__midi_df_list))
            midi_df = self.__midi_df_list[key]
            midi_df = midi_df[midi_df.program == program_key]
            if midi_df.shape[0] < self.__seq_len:
                # Not enough notes for a full sequence; leave zeros in place
                continue
            # Random onset within the piece, leaving room for the whole
            # sequence. NOTE(review): assumes end.max() - seq_len*time_fraction
            # >= start.min(); otherwise high < low — confirm upstream data
            row = np.random.uniform(
                low=midi_df.start.min(),
                high=midi_df.end.max() - (self.__seq_len * self.__time_fraction)
            )
            for seq in range(self.__seq_len):
                start = row + (seq * self.__time_fraction)
                end = row + ((seq+1) * self.__time_fraction)
                # Notes whose onset falls inside this time slice
                df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)]
                sampled_arr[batch, i, seq] = self.__convert_into_feature(df)
    return sampled_arr
|
Generate noise samples.
Returns:
`np.ndarray` of samples.
|
def fields(cls):
    """
    Return the tuple of ``attrs`` attributes for a class.

    The tuple also allows accessing the fields by their names (see below for
    examples).

    :param type cls: Class to introspect.
    :raise TypeError: If *cls* is not a class.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.
    :rtype: tuple (with name accessors) of :class:`attr.Attribute`

    .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
       by name.
    """
    if not isclass(cls):
        raise TypeError("Passed object must be a class.")
    attrs = getattr(cls, "__attrs_attrs__", None)
    if attrs is not None:
        return attrs
    # The attribute tuple is only present on attrs-decorated classes
    raise NotAnAttrsClassError(
        "{cls!r} is not an attrs-decorated class.".format(cls=cls)
    )
|
Return the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
|
def fix_music(file_name):
    '''
    Add metadata (album art, artist/album/title tags, lyrics) to one
    '.mp3' file and rename it to "<artist> - <title>.mp3" on success.

    Song details are fetched from Spotify first, falling back to scraping
    LetsSingIt; album art is fetched from Google with a Bing fallback.
    '''
    setup()
    if not Py3:
        # Python 2: encode the path to bytes before handing it to mutagen
        file_name = file_name.encode('utf-8')
    # NOTE(review): 'tags' is never used after this call — confirm whether
    # File() is needed for a side effect or is dead code
    tags = File(file_name)
    log.log(file_name)
    log.log('> Adding metadata')
    try:
        artist, album, song_name, lyrics, match_bool, score = get_details_spotify(
            file_name)  # Try finding details through spotify
    except Exception:
        artist, album, song_name, lyrics, match_bool, score = get_details_letssingit(
            file_name)  # Use bad scraping method as last resort
    try:
        log.log_indented('* Trying to extract album art from Google.com')
        albumart = albumsearch.img_search_google(artist+' '+album)
    except Exception:
        log.log_indented('* Trying to extract album art from Bing.com')
        albumart = albumsearch.img_search_bing(artist+' '+album)
    if match_bool:
        # Confident match: write art, tags and lyrics, then rename the file
        add_albumart(albumart, file_name)
        add_details(file_name, song_name, artist, album, lyrics)
        try:
            rename(file_name, artist+' - '+song_name+'.mp3')
        except Exception:
            log.log_error("Couldn't rename file")
            pass
    else:
        log.log_error(
            "* Couldn't find appropriate details of your song", indented=True)
    log.log("Match score: %s/10.0" % round(score * 10, 1))
    log.log(LOG_LINE_SEPERATOR)
    log.log_success()
|
Searches for '.mp3' files in directory (optionally recursive)
and checks whether they already contain album art and album name tags or not.
|
def wait_until_finished(
    self, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
    """Block until this task instance is finished.

    Args:
        refresh_period (int, optional): How many seconds to wait
            before checking the task's status. Defaults to 5
            seconds.

    Returns:
        :class:`saltant.models.base_task_instance.BaseTaskInstance`:
            This task instance model after it finished.
    """
    # Delegate the polling loop to the manager, keyed by our UUID
    return self.manager.wait_until_finished(
        uuid=self.uuid,
        refresh_period=refresh_period,
    )
|
Wait until a task instance with the given UUID is finished.
Args:
refresh_period (int, optional): How many seconds to wait
before checking the task's status. Defaults to 5
seconds.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
This task instance model after it finished.
|
def refresh(self):
    """
    Explicitly refresh the user interface.

    Useful when changing widgets dynamically.
    """
    logger.debug("refresh user interface")
    try:
        with self.refresh_lock:
            self.draw_screen()
    except AssertionError:
        # draw_screen() asserts when the application is not running; ignore
        logger.warning("application is not running")
|
explicitely refresh user interface; useful when changing widgets dynamically
|
def from_path(cls, path, suffix=''):
    """
    Convenience method to run critic2 analysis on a folder containing
    typical VASP output files.

    This method will:

    1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped
       counterparts.
    2. If AECCAR* files are present, constructs a temporary reference
       file as AECCAR0 + AECCAR2
    3. Runs critic2 analysis twice: once for charge, and a second time
       for the charge difference (magnetization density).

    :param path: path to folder to search in
    :param suffix: specific suffix to look for (e.g. '.relax1' for
        'CHGCAR.relax1.gz')
    :return: an instance of this class
    """
    def _get_filepath(filename, warning, path=path, suffix=suffix):
        # Return the best-matching file path, or None (with a warning)
        paths = glob.glob(os.path.join(path, filename + suffix + '*'))
        if not paths:
            warnings.warn(warning)
            return None
        if len(paths) > 1:
            # using reverse=True because, if multiple files are present,
            # they likely have suffixes 'static', 'relax', 'relax2', etc.
            # and this would give 'static' over 'relax2' over 'relax'
            # however, better to use 'suffix' kwarg to avoid this!
            paths.sort(reverse=True)
            # Bug fix: report the file actually chosen (paths[0]) rather
            # than the search folder, which is what 'path' still held here
            warnings.warn('Multiple files detected, using {}'.format(
                os.path.basename(paths[0])))
        return paths[0]
    chgcar_path = _get_filepath('CHGCAR', 'Could not find CHGCAR!')
    chgcar = Chgcar.from_file(chgcar_path)
    aeccar0_path = _get_filepath('AECCAR0', 'Could not find AECCAR0, interpret Bader results with caution.')
    aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None
    aeccar2_path = _get_filepath('AECCAR2', 'Could not find AECCAR2, interpret Bader results with caution.')
    aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None
    # Reference charge for Bader-style analysis: sum of core contributions
    chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None
    return cls(chgcar.structure, chgcar, chgcar_ref)
|
Convenience method to run critic2 analysis on a folder containing
typical VASP output files.
This method will:
1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped
counterparts.
2. If AECCAR* files are present, constructs a temporary reference
file as AECCAR0 + AECCAR2
3. Runs critic2 analysis twice: once for charge, and a second time
for the charge difference (magnetization density).
:param path: path to folder to search in
:param suffix: specific suffix to look for (e.g. '.relax1' for
'CHGCAR.relax1.gz')
:return:
|
def close_event(self, id, **kwargs):  # noqa: E501
    """Close a specific event # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.close_event(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerEvent
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths make the identical delegate call; when
    # async_req is set the helper returns the request thread instead of data.
    return self.close_event_with_http_info(id, **kwargs)  # noqa: E501
|
Close a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.close_event(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerEvent
If the method is called asynchronously,
returns the request thread.
|
def get_binary_property(value, is_bytes=False):
    """Get `BINARY` property."""
    table = unidata.ascii_binary if is_bytes else unidata.unicode_binary
    aliases = unidata.unicode_alias['binary']
    if value.startswith('^'):
        # Negated property: resolve the alias of the name after '^'
        name = value[1:]
        key = '^' + aliases.get(name, name)
    else:
        key = aliases.get(value, value)
    return table[key]
|
Get `BINARY` property.
|
def outgoing_args(self, nodeid):
    """
    Return the arguments going from *nodeid* to other predications.

    Valid arguments include regular variable arguments and scopal
    (label-selecting or HCONS) arguments. MOD/EQ links, intrinsic
    arguments, and constant arguments are not included.

    Args:
        nodeid: the nodeid of the EP that is the arguments' source
    Returns:
        dict: `{role: tgt}`
    """
    variables = self._vars
    hcons = self._hcons
    args = self.args(nodeid)  # args is a copy; we can edit it
    for role, target in list(args.items()):
        # drop constant arguments and the intrinsic argument
        if role == IVARG_ROLE or target not in variables:
            del args[role]
            continue
        refs = variables[target]['refs']
        # keep only HCONS arguments or those pointing to another IV or LBL
        if not (target in hcons or IVARG_ROLE in refs or 'LBL' in refs):
            del args[role]
    return args
|
Return the arguments going from *nodeid* to other predications.
Valid arguments include regular variable arguments and scopal
(label-selecting or HCONS) arguments. MOD/EQ
links, intrinsic arguments, and constant arguments are not
included.
Args:
nodeid: the nodeid of the EP that is the arguments' source
Returns:
dict: `{role: tgt}`
|
def console_get_char(con: tcod.console.Console, x: int, y: int) -> int:
    """Return the character at the x,y of this console.

    .. deprecated:: 8.4
        Array access performs significantly faster than using this function.
        See :any:`Console.ch`.
    """
    c_console = _console(con)
    return lib.TCOD_console_get_char(c_console, x, y)
|
Return the character at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.ch`.
|
def gtd7(Input, flags, output):
    '''The standard model subroutine (GTD7) always computes the
    ‘‘thermospheric’’ mass density by explicitly summing the masses of
    the species in equilibrium at the thermospheric temperature T(z).

    Args:
        Input: object providing at least alt (km) and g_lat — presumably an
            nrlmsise_input instance; TODO confirm against the caller.
        flags: switch object with an sw array controlling model options.
        output: nrlmsise_output-like object; its d (densities) and t
            (temperatures) arrays are written in place.
    '''
    mn3 = 5
    zn3 = [32.5,20.0,15.0,10.0,0.0]
    mn2 = 4
    zn2 = [72.5,55.0,45.0,32.5]
    zmix = 62.5
    soutput = nrlmsise_output()
    tselec(flags);
    #/* Latitude variation of gravity (none for sw[2]=0) */
    xlat=Input.g_lat;
    if (flags.sw[2]==0): # pragma: no cover
        xlat=45.0;
    glatf(xlat, gsurf, re);
    xmm = pdm[2][4];
    #/* THERMOSPHERE / MESOSPHERE (above zn2[0]) */
    # Run the thermospheric part (gts7) at max(alt, zn2[0]); temporarily
    # clamp Input.alt and restore it afterwards
    if (Input.alt>zn2[0]):
        altt=Input.alt;
    else:
        altt=zn2[0];
    tmp=Input.alt;
    Input.alt=altt;
    gts7(Input, flags, soutput);
    altt=Input.alt;
    Input.alt=tmp;
    if (flags.sw[0]): # pragma: no cover #/* metric adjustment */
        dm28m= dm28*1.0E6;
    else:
        dm28m = dm28;
    output.t[0]=soutput.t[0];
    output.t[1]=soutput.t[1];
    if (Input.alt>=zn2[0]):
        # Above zn2[0] the thermospheric result is the final answer
        for i in range(9):
            output.d[i]=soutput.d[i];
        return
    #/* LOWER MESOSPHERE/UPPER STRATOSPHERE (between zn3[0] and zn2[0])
    #* Temperature at nodes and gradients at end nodes
    #* Inverse temperature a linear function of spherical harmonics
    #*/
    meso_tgn2[0]=meso_tgn1[1];
    meso_tn2[0]=meso_tn1[4];
    meso_tn2[1]=pma[0][0]*pavgm[0]/(1.0-flags.sw[20]*glob7s(pma[0], Input, flags));
    meso_tn2[2]=pma[1][0]*pavgm[1]/(1.0-flags.sw[20]*glob7s(pma[1], Input, flags));
    meso_tn2[3]=pma[2][0]*pavgm[2]/(1.0-flags.sw[20]*flags.sw[22]*glob7s(pma[2], Input, flags));
    meso_tgn2[1]=pavgm[8]*pma[9][0]*(1.0+flags.sw[20]*flags.sw[22]*glob7s(pma[9], Input, flags))*meso_tn2[3]*meso_tn2[3]/(pow((pma[2][0]*pavgm[2]),2.0));
    meso_tn3[0]=meso_tn2[3];
    if (Input.alt<zn3[0]):
        #/* LOWER STRATOSPHERE AND TROPOSPHERE (below zn3[0])
        #* Temperature at nodes and gradients at end nodes
        #* Inverse temperature a linear function of spherical harmonics
        #*/
        meso_tgn3[0]=meso_tgn2[1];
        meso_tn3[1]=pma[3][0]*pavgm[3]/(1.0-flags.sw[22]*glob7s(pma[3], Input, flags));
        meso_tn3[2]=pma[4][0]*pavgm[4]/(1.0-flags.sw[22]*glob7s(pma[4], Input, flags));
        meso_tn3[3]=pma[5][0]*pavgm[5]/(1.0-flags.sw[22]*glob7s(pma[5], Input, flags));
        meso_tn3[4]=pma[6][0]*pavgm[6]/(1.0-flags.sw[22]*glob7s(pma[6], Input, flags));
        meso_tgn3[1]=pma[7][0]*pavgm[7]*(1.0+flags.sw[22]*glob7s(pma[7], Input, flags)) *meso_tn3[4]*meso_tn3[4]/(pow((pma[6][0]*pavgm[6]),2.0));
    #/* LINEAR TRANSITION TO FULL MIXING BELOW zn2[0] */
    dmc=0;
    if (Input.alt>zmix):
        dmc = 1.0 - (zn2[0]-Input.alt)/(zn2[0] - zmix);
    dz28=soutput.d[2];
    #/**** N2 density ****/
    dmr=soutput.d[2] / dm28m - 1.0;
    tz = [0.0]
    output.d[2]=densm(Input.alt,dm28m,xmm, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
    output.d[2]=output.d[2] * (1.0 + dmr*dmc);
    #/**** HE density ****/
    dmr = soutput.d[0] / (dz28 * pdm[0][1]) - 1.0;
    output.d[0] = output.d[2] * pdm[0][1] * (1.0 + dmr*dmc);
    #/**** O density ****/
    output.d[1] = 0;
    output.d[8] = 0;
    #/**** O2 density ****/
    dmr = soutput.d[3] / (dz28 * pdm[3][1]) - 1.0;
    output.d[3] = output.d[2] * pdm[3][1] * (1.0 + dmr*dmc);
    #/**** AR density ***/
    dmr = soutput.d[4] / (dz28 * pdm[4][1]) - 1.0;
    output.d[4] = output.d[2] * pdm[4][1] * (1.0 + dmr*dmc);
    #/**** Hydrogen density ****/
    output.d[6] = 0;
    #/**** Atomic nitrogen density ****/
    output.d[7] = 0;
    #/**** Total mass density */
    output.d[5] = 1.66E-24 * (4.0 * output.d[0] + 16.0 * output.d[1] + 28.0 * output.d[2] + 32.0 * output.d[3] + 40.0 * output.d[4] + output.d[6] + 14.0 * output.d[7]);
    if (flags.sw[0]): # pragma: no cover
        output.d[5]=output.d[5]/1000;
    #/**** temperature at altitude ****/
    global dd
    dd = densm(Input.alt, 1.0, 0, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
    output.t[1]=tz[0];
    return
|
The standard model subroutine (GTD7) always computes the
‘‘thermospheric’’ mass density by explicitly summing the masses of
the species in equilibrium at the thermospheric temperature T(z).
|
def calculate_sunrise_sunset(self, month, day, depression=0.833,
                             is_solar_time=False):
    """Calculate sunrise, noon and sunset for the given month and day.

    Return:
        A dictionary. Keys are ("sunrise", "noon", "sunset")
    """
    # Anchor the calculation at noon of the requested day
    noon_datetime = DateTime(month, day, hour=12, leap_year=self.is_leap_year)
    return self.calculate_sunrise_sunset_from_datetime(
        noon_datetime, depression, is_solar_time)
|
Calculate sunrise, noon and sunset.
Return:
A dictionary. Keys are ("sunrise", "noon", "sunset")
|
def parse_pattern(pattern):
    """Parse number format patterns"""
    if isinstance(pattern, NumberPattern):
        return pattern

    def _match_number(subpattern):
        # The regex splits a subpattern into (prefix, number, suffix).
        match = number_re.search(subpattern)
        if match is None:
            raise ValueError('Invalid number pattern %r' % subpattern)
        return match.groups()

    # A ';' separates an explicit negative subpattern from the positive one.
    if ';' in pattern:
        pos_pattern, neg_pattern = pattern.split(';', 1)
        pos_prefix, number, pos_suffix = _match_number(pos_pattern)
        neg_prefix, _, neg_suffix = _match_number(neg_pattern)
    else:
        pos_prefix, number, pos_suffix = _match_number(pattern)
        # Default negative form: the positive pattern with a leading minus.
        neg_prefix = '-' + pos_prefix
        neg_suffix = pos_suffix

    # Split off a scientific-notation exponent, if present.
    if 'E' in number:
        number, exp = number.split('E', 1)
    else:
        exp = None

    # Significant-digit ('@') patterns may not mix with explicit min digits.
    if '@' in number and '.' in number and '0' in number:
        raise ValueError('Significant digit patterns can not contain '
                         '"@" or "0"')

    if '.' in number:
        integer, fraction = number.rsplit('.', 1)
    else:
        integer, fraction = number, ''

    def parse_precision(digits):
        """Calculate the min and max allowed digits"""
        lo = hi = 0
        for ch in digits:
            if ch in '@0':
                lo += 1
                hi += 1
            elif ch == '#':
                hi += 1
            elif ch == ',':
                continue
            else:
                break
        return lo, hi

    int_prec = parse_precision(integer)
    frac_prec = parse_precision(fraction)
    if exp:
        exp_plus = exp.startswith('+')
        exp_prec = parse_precision(exp.lstrip('+'))
    else:
        exp_plus = None
        exp_prec = None
    grouping = babel.numbers.parse_grouping(integer)
    return NumberPattern(pattern, (pos_prefix, neg_prefix),
                         (pos_suffix, neg_suffix), grouping,
                         int_prec, frac_prec,
                         exp_prec, exp_plus)
|
Parse number format patterns
|
def enviar_dados_venda(self, dados_venda):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.enviar_dados_venda`.
    :return: A SAT response specialized in ``EnviarDadosVenda``.
    :rtype: satcfe.resposta.enviardadosvenda.RespostaEnviarDadosVenda
    """
    payload = dados_venda.documento()
    response = self._http_post('enviardadosvenda', dados_venda=payload)
    retorno = response.json().get('retorno')
    return RespostaEnviarDadosVenda.analisar(retorno)
|
Sobrepõe :meth:`~satcfe.base.FuncoesSAT.enviar_dados_venda`.
:return: Uma resposta SAT especializada em ``EnviarDadosVenda``.
:rtype: satcfe.resposta.enviardadosvenda.RespostaEnviarDadosVenda
|
def customCompute(self, recordNum, patternNZ, classification):
    """Return the inference value for one input sample.

    Learning itself only happens in compute() (when learning is enabled),
    which is invoked when the network is run.

    .. warning:: Deprecated; kept only for backward compatibility and will
       be removed. Use :meth:`nupic.engine.Network.run` instead, which calls
       :meth:`~nupic.regions.sdr_classifier_region.compute`.

    :param recordNum: (int) Record number of the input sample.
    :param patternNZ: (list) of the active indices from the output below
    :param classification: (dict) of the classification information:
        * ``bucketIdx``: index of the encoder bucket
        * ``actValue``: actual value going into the encoder
    :returns: (dict) inference results, one entry per step in ``self.steps``;
        key is the step count, value is an array of relative likelihoods per
        ``bucketIdx`` starting from 0. For example:
        ::
            {'actualValues': [0.0, 1.0, 2.0, 3.0]
               1 : [0.1, 0.3, 0.2, 0.7]
               4 : [0.2, 0.4, 0.3, 0.5]}
    """
    # Models restored from old checkpoints may lack the flag entirely.
    self._computeFlag = getattr(self, "_computeFlag", False)
    if self._computeFlag:
        # Mixing the deprecated entry point with compute() is an error:
        # escalate the DeprecationWarning so it raises.
        warnings.simplefilter('error', DeprecationWarning)
        warnings.warn("The customCompute() method should not be "
                      "called at the same time as the compute() "
                      "method. The compute() method is called "
                      "whenever network.run() is called.",
                      DeprecationWarning)
    return self._sdrClassifier.compute(recordNum,
                                       patternNZ,
                                       classification,
                                       self.learningMode,
                                       self.inferenceMode)
|
Just return the inference value from one input sample. The actual
learning happens in compute() -- if, and only if learning is enabled --
which is called when you run the network.
.. warning:: This method is deprecated and exists only to maintain backward
compatibility. This method is deprecated, and will be removed. Use
:meth:`nupic.engine.Network.run` instead, which will call
:meth:`~nupic.regions.sdr_classifier_region.compute`.
:param recordNum: (int) Record number of the input sample.
:param patternNZ: (list) of the active indices from the output below
:param classification: (dict) of the classification information:
* ``bucketIdx``: index of the encoder bucket
* ``actValue``: actual value going into the encoder
:returns: (dict) containing inference results, one entry for each step in
``self.steps``. The key is the number of steps, the value is an
array containing the relative likelihood for each ``bucketIdx``
starting from 0.
For example:
::
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
|
def clear(self):
    '''
    Reset the current HyperLogLog to empty.
    '''
    # Zero every register; int8 matches the register storage width.
    self.reg = np.zeros(self.m, dtype=np.int8)
|
Reset the current HyperLogLog to empty.
|
def enable_cloud_integration(self, id, **kwargs):  # noqa: E501
    """Enable a specific cloud integration  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.enable_cloud_integration(id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerCloudIntegration
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand the request thread back to the caller.
        return self.enable_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
    data = self.enable_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
    return data
|
Enable a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enable_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
|
def get_paths(folder):
    '''Return *_phase.txt files in `folder`'''
    # Resolve to an absolute path, then search the whole tree recursively.
    root = pathlib.Path(folder).resolve()
    return sorted(root.rglob("*_phase.txt"))
|
Return *_phase.txt files in `folder`
|
def get_cube(self, name):
    """ Given a cube name, construct that cube and return it. Do not
    overwrite this method unless you need to. """
    engine = self.get_engine()
    model = self.get_cube_model(name)
    return Cube(engine, name, model)
|
Given a cube name, construct that cube and return it. Do not
overwrite this method unless you need to.
|
def load_if(s):
    """Load either a filename, or a string representation of yml/json."""
    # str.endswith accepts a tuple of suffixes -- one call instead of two.
    if s.endswith(('.json', '.yml')):
        return load(s)
    return loads(s)
|
Load either a filename, or a string representation of yml/json.
|
def __download_from_s3(self, key, dest_dir):
"""Private method for downloading from S3
This private helper method takes a key and the full path to
the destination directory, assumes that the args have been
validated by the public caller methods, and attempts to
download the specified key to the dest_dir.
:param key: (str) S3 key for the file to be downloaded
:param dest_dir: (str) Full path destination directory
:return: (str) Downloaded file destination if the file was
downloaded successfully, None otherwise.
"""
log = logging.getLogger(self.cls_logger + '.__download_from_s3')
filename = key.split('/')[-1]
if filename is None:
log.error('Could not determine the filename from key: %s', key)
return None
destination = dest_dir + '/' + filename
log.info('Attempting to download %s from bucket %s to destination %s',
key, self.bucket_name, destination)
max_tries = 10
count = 1
while count <= max_tries:
log.info('Attempting to download file %s, try %s of %s', key,
count, max_tries)
try:
self.s3client.download_file(
Bucket=self.bucket_name, Key=key, Filename=destination)
except ClientError:
if count >= max_tries:
_, ex, trace = sys.exc_info()
msg = 'Unable to download key {k} from S3 bucket {b}:\n{e}'.format(
k=key, b=self.bucket_name, e=str(ex))
log.error(msg)
raise S3UtilError, msg, trace
else:
log.warn('Download failed, re-trying...')
count += 1
time.sleep(5)
continue
else:
log.info('Successfully downloaded %s from S3 bucket %s to: %s',
key,
self.bucket_name,
destination)
return destination
|
Private method for downloading from S3
This private helper method takes a key and the full path to
the destination directory, assumes that the args have been
validated by the public caller methods, and attempts to
download the specified key to the dest_dir.
:param key: (str) S3 key for the file to be downloaded
:param dest_dir: (str) Full path destination directory
:return: (str) Downloaded file destination if the file was
downloaded successfully, None otherwise.
|
def plot_projected_dos(self, pdos_indices=None, legend=None):
    """Plot projected DOS

    Parameters
    ----------
    pdos_indices : list of list, optional
        Sets of indices of atoms whose projected DOS are summed over.
        The indices start with 0. An example is as follows:
            pdos_indices=[[0, 1], [2, 3, 4, 5]]
        Default is None, which means
            pdos_indices=[[i] for i in range(natom)]
    legend : list of instances such as str or int, optional
        The str(instance) are shown in legend.
        It has to be len(pdos_indices)==len(legend). Default is None.
        When None, legend is not shown.
    """
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    # Ticks on all four sides, pointing inward.
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_ticks_position('both')
        axis.set_tick_params(which='both', direction='in')
    self._pdos.plot(ax,
                    indices=pdos_indices,
                    legend=legend,
                    draw_grid=False)
    ax.set_ylim((0, None))
    return plt
|
Plot projected DOS
Parameters
----------
pdos_indices : list of list, optional
Sets of indices of atoms whose projected DOS are summed over.
        The indices start with 0. An example is as follows:
pdos_indices=[[0, 1], [2, 3, 4, 5]]
Default is None, which means
pdos_indices=[[i] for i in range(natom)]
legend : list of instances such as str or int, optional
The str(instance) are shown in legend.
It has to be len(pdos_indices)==len(legend). Default is None.
When None, legend is not shown.
|
def delete_terms_indexes(es, index_name: str = "terms_*"):
    """Delete all terms indexes"""
    try:
        es.indices.delete(index=index_name)
    except Exception as e:
        # Deliberate broad catch: index cleanup is best-effort and must
        # never propagate a failure to the caller.
        log.error(f"Could not delete all terms indices: {e}")
|
Delete all terms indexes
|
def header_little_endian(self):
    """Return the header_little_endian attribute of the BFD file being
    processed.

    Raises BfdException if the BFD pointer was never initialized.
    """
    if self._ptr:
        return _bfd.get_bfd_attribute(
            self._ptr, BfdAttributes.HEADER_LITTLE_ENDIAN)
    raise BfdException("BFD not initialized")
|
Return the header_little_endian attribute of the BFD file being
processed.
|
def drop(self):
    """
    Remove this node from the taxonomy, maintaining child subtrees by
    adding them to the node's parent, and moving sequences at this node
    to the parent.
    Not valid for root node.
    """
    if self.is_root:
        raise ValueError("Cannot drop root node!")
    parent = self.parent
    # Re-home every child subtree under the grandparent.
    for child in self.children:
        child.parent = parent
    parent.children.update(self.children)
    self.children = set()
    # Sequences held at this node move up as well.
    parent.sequence_ids |= self.sequence_ids
    self.sequence_ids = set()
    parent.remove_child(self)
|
Remove this node from the taxonomy, maintaining child subtrees by
adding them to the node's parent, and moving sequences at this node
to the parent.
Not valid for root node.
|
def returner(load):
    '''
    Return data to the local job cache
    '''
    serial = salt.payload.Serial(__opts__)
    # A standalone minion job arrives with jid 'req'; allocate a real jid.
    if load['jid'] == 'req':
        load['jid'] = prep_jid(nocache=load.get('nocache', False))
    jid_dir = salt.utils.jid.jid_dir(load['jid'], _job_dir(), __opts__['hash_type'])
    if os.path.exists(os.path.join(jid_dir, 'nocache')):
        # Caching was disabled for this jid; nothing to store.
        return
    hn_dir = os.path.join(jid_dir, load['id'])
    try:
        os.makedirs(hn_dir)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # Minion has already returned this jid and it should be dropped
            log.error(
                'An extra return was detected from minion %s, please verify '
                'the minion, this could be a replay attack', load['id']
            )
            return False
        if err.errno == errno.ENOENT:
            log.error(
                'An inconsistency occurred, a job was received with a job id '
                '(%s) that is not present in the local cache', load['jid']
            )
            return False
        raise
    payload = dict((key, load[key])
                   for key in ['return', 'retcode', 'success'] if key in load)
    # Use atomic open here to avoid the file being read before it's
    # completely written to. Refs #1935
    serial.dump(
        payload,
        salt.utils.atomicfile.atomic_open(
            os.path.join(hn_dir, RETURN_P), 'w+b'
        )
    )
    if 'out' in load:
        serial.dump(
            load['out'],
            salt.utils.atomicfile.atomic_open(
                os.path.join(hn_dir, OUT_P), 'w+b'
            )
        )
|
Return data to the local job cache
|
def merge_dicts(dict_a, dict_b):
    """Deep merge of two dicts.

    Values from ``dict_b`` win on conflicts; when both sides hold a dict
    for the same key, the two are merged recursively.

    Fixes over the previous implementation: ``dict_b`` is no longer
    mutated (it used to ``pop`` merged keys out of it), and a non-dict
    value in ``dict_a`` paired with a dict in ``dict_b`` no longer
    crashes the recursion -- ``dict_b``'s value simply wins.

    :param dict_a: base dict
    :param dict_b: overriding dict
    :return: a new merged dict; neither input is modified
    """
    merged = {}
    for key, value in dict_a.items():
        other = dict_b.get(key)
        if isinstance(value, dict) and isinstance(other, dict):
            # Both sides are dicts for this key: recurse.
            merged[key] = merge_dicts(value, other)
        elif key not in dict_b:
            merged[key] = value
        # else: dict_b's value wins; it is written by the loop below.
    for key, value in dict_b.items():
        if key not in merged:
            merged[key] = value
    return merged
|
Deep merge of two dicts
|
def calcSMAfromT(self, epsilon=0.7):
    """ Calculates the semi-major axis based on planet temperature
    """
    # Build the mean-planet-temperature relation, then read off the axis.
    mean_temp = eq.MeanPlanetTemp(self.albedo(), self.star.T, self.star.R,
                                  epsilon, self.T)
    return mean_temp.a
|
Calculates the semi-major axis based on planet temperature
|
def click_text(self, text, exact_match=False):
    """Click text identified by ``text``.
    By default tries to click first text involves given ``text``, if you would
    like to click exactly matching text, then set ``exact_match`` to `True`.
    If there are multiple use of ``text`` and you do not want first one,
    use `locator` with `Get Web Elements` instead.
    """
    element = self._element_find_by_text(text, exact_match)
    element.click()
|
Click text identified by ``text``.
By default tries to click first text involves given ``text``, if you would
like to click exactly matching text, then set ``exact_match`` to `True`.
If there are multiple use of ``text`` and you do not want first one,
use `locator` with `Get Web Elements` instead.
|
def xor_key(first, second, trafaret):
    """
    xor_key - takes `first` and `second` key names and `trafaret`.
    Checks if we have only `first` or only `second` in data, not both,
    and at least one.
    Then checks key value against trafaret.
    """
    trafaret = t.Trafaret._trafaret(trafaret)

    def check_(value):
        if (first in value) ^ (second in value):
            key = first if first in value else second
            yield first, t.catch_error(trafaret, value[key]), (key,)
        elif first in value and second in value:
            yield first, t.DataError(error='correct only if {} is not defined'.format(second)), (first,)
            yield second, t.DataError(error='correct only if {} is not defined'.format(first)), (second,)
        else:
            # Bug fix: interpolate the actual key names; the original passed
            # the literal strings 'second'/'first' to format().
            yield first, t.DataError(error='is required if {} is not defined'.format(second)), (first,)
            yield second, t.DataError(error='is required if {} is not defined'.format(first)), (second,)
    return check_
|
xor_key - takes `first` and `second` key names and `trafaret`.
Checks if we have only `first` or only `second` in data, not both,
and at least one.
Then checks key value against trafaret.
|
def create_inputs(inspecs):
    """Create input :obj:`nnabla.Variable` from :obj:`Inspec`.
    Args:
        inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.
    Returns:
        :obj:`list` of :obj:`nnabla.Variable`: Input variables.
    """
    variables = []
    for spec in inspecs:
        var = nn.Variable(spec.shape, need_grad=spec.need_grad)
        # Fill the variable's data buffer using the spec's initializer.
        var.d = spec.init(var.shape)
        variables.append(var)
    return variables
|
Create input :obj:`nnabla.Variable` from :obj:`Inspec`.
Args:
inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.
Returns:
:obj:`list` of :obj:`nnabla.Variable`: Input variables.
|
def and_join(strings):
    """Join the given ``strings`` by commas with last `' and '` conjunction.

    >>> and_join(['Korea', 'Japan', 'China', 'Taiwan'])
    'Korea, Japan, China, and Taiwan'

    :param strings: a list of words to join
    :type string: :class:`collections.abc.Sequence`
    :returns: a joined string
    :rtype: :class:`str`, :class:`basestring`
    """
    if not strings:
        return ''
    if len(strings) == 1:
        return strings[0]
    last = len(strings) - 1
    # Prefix only the final word with 'and ', then comma-join everything.
    decorated = ('and ' + word if index == last else word
                 for index, word in enumerate(strings))
    return ', '.join(decorated)
|
    Join the given ``strings`` by commas with last `' and '` conjunction.
>>> and_join(['Korea', 'Japan', 'China', 'Taiwan'])
'Korea, Japan, China, and Taiwan'
:param strings: a list of words to join
:type string: :class:`collections.abc.Sequence`
:returns: a joined string
:rtype: :class:`str`, :class:`basestring`
|
def add_missing_price_information_message(request, item):
    """
    Add a message to the Django messages store indicating that we failed to retrieve price information about an item.
    :param request: The current request.
    :param item: The item for which price information is missing. Example: a program title, or a course.
    """
    support_link = get_configuration_value('ENTERPRISE_SUPPORT_URL', settings.ENTERPRISE_SUPPORT_URL)
    platform_name = get_configuration_value('PLATFORM_NAME', settings.PLATFORM_NAME)
    # The translatable template is format()-ed so translators never see HTML.
    message = _(
        '{strong_start}We could not gather price information for {em_start}{item}{em_end}.{strong_end} '
        '{span_start}If you continue to have these issues, please contact '
        '{link_start}{platform_name} support{link_end}.{span_end}'
    ).format(
        item=item,
        em_start='<em>',
        em_end='</em>',
        link_start='<a href="{support_link}" target="_blank">'.format(support_link=support_link),
        platform_name=platform_name,
        link_end='</a>',
        span_start='<span>',
        span_end='</span>',
        strong_start='<strong>',
        strong_end='</strong>',
    )
    messages.warning(request, message)
|
Add a message to the Django messages store indicating that we failed to retrieve price information about an item.
:param request: The current request.
:param item: The item for which price information is missing. Example: a program title, or a course.
|
def new_address(self, sender=None, nonce=None):
    """Create a fresh 160bit address"""
    if sender is not None and nonce is None:
        nonce = self.get_nonce(sender)
    candidate = self.calculate_new_address(sender, nonce)
    # Senderless addresses: retry while the candidate collides with an
    # existing account (iterative form of the original recursion).
    while sender is None and candidate in self:
        candidate = self.calculate_new_address(sender, nonce)
    return candidate
|
Create a fresh 160bit address
|
def animate_correlation_matrix(sync_output_dynamic, animation_velocity = 75, colormap = 'cool', save_movie = None):
    """!
    @brief Shows animation of correlation matrix between oscillators during simulation.
    @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.
    @param[in] animation_velocity (uint): Interval between frames in milliseconds.
    @param[in] colormap (string): Name of colormap that is used by matplotlib ('gray', 'pink', 'cool', spring', etc.).
    @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.
    """
    figure = plt.figure()
    # Seed the image artist with the matrix at simulation step 0.
    initial_matrix = sync_output_dynamic.allocate_correlation_matrix(0)
    artist = plt.imshow(initial_matrix, cmap=plt.get_cmap(colormap),
                        interpolation='kaiser', vmin=0.0, vmax=1.0)

    def init_frame():
        return [artist]

    def frame_generation(index_dynamic):
        # Each frame swaps in the correlation matrix for that dynamic index.
        artist.set_data(sync_output_dynamic.allocate_correlation_matrix(index_dynamic))
        return [artist]

    correlation_animation = animation.FuncAnimation(
        figure, frame_generation, len(sync_output_dynamic),
        init_func=init_frame, interval=animation_velocity,
        repeat_delay=1000, blit=True)

    if save_movie is not None:
        correlation_animation.save(save_movie, writer='ffmpeg', fps=15, bitrate=1500)
    else:
        plt.show()
|
!
@brief Shows animation of correlation matrix between oscillators during simulation.
@param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.
@param[in] animation_velocity (uint): Interval between frames in milliseconds.
@param[in] colormap (string): Name of colormap that is used by matplotlib ('gray', 'pink', 'cool', spring', etc.).
@param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.
|
async def wasSet(self, node, oldv):
    '''
    Fire the onset() handlers for this property.
    Args:
        node (synapse.lib.node.Node): The node whose property was set.
        oldv (obj): The previous value of the property.
    '''
    for func in self.onsets:
        try:
            # NOTE(review): ornot() presumably awaits coroutine handlers and
            # plain-calls synchronous ones -- confirm against s_coro.
            await s_coro.ornot(func, node, oldv)
        except asyncio.CancelledError:
            # Never swallow task cancellation.
            raise
        except Exception:
            # One failing handler must not stop the remaining handlers.
            logger.exception('onset() error for %s' % (self.full,))
|
Fire the onset() handlers for this property.
Args:
node (synapse.lib.node.Node): The node whose property was set.
oldv (obj): The previous value of the property.
|
def _make_cache_key(key_prefix):
"""Make cache key from prefix
Borrowed from Flask-Cache extension
"""
if callable(key_prefix):
cache_key = key_prefix()
elif '%s' in key_prefix:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix
cache_key = cache_key.encode('utf-8')
return cache_key
|
Make cache key from prefix
Borrowed from Flask-Cache extension
|
def fig_kernel_lfp_EITN_II(savefolders, params, transient=200, T=[800., 1000.], X='L5E',
                           lags=[20, 20], channels=[0,3,7,11,13]):
    '''
    Calculate the spike-triggered average (STA) of the LFP, extract LFP
    kernels from the spikegen simulation output, and reconstruct the LFP
    from those kernels as a population-rate predictor.

    Arguments
    ::
        savefolders : list of simulation output folder names, iterated below
        params : parameter/analysis object providing electrodeParams, tstop,
            savefolder paths, population ids X and sizes N_X
        transient : the time in milliseconds, after which the analysis should begin
            so as to avoid any starting transients
        T : two-element list, plotted time interval in ms
        X : id of presynaptic trigger population
        lags : list of STA lags in ms, one entry per savefolder
        channels : electrode channel indices shown in the PSD panels

    Returns
    ::
        (fig, PSD_LFP_reconst, PSD_data)

    NOTE(review): mutable default arguments (T, lags, channels) are shared
    across calls; safe only while never mutated in place -- confirm.
    '''
    # Electrode geometry
    zvec = np.r_[params.electrodeParams['z']]
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    ana_params.set_PLOS_2column_fig_style(ratio=0.5)
    # Start the figure
    fig = plt.figure()
    fig.subplots_adjust(left=0.06, right=0.95, bottom=0.08, top=0.9, hspace=0.23, wspace=0.55)
    # create grid_spec
    gs = gridspec.GridSpec(len(channels), 7)
    ###########################################################################
    # spikegen "network" activity
    ############################################################################
    # path to simulation files
    savefolder = 'simulation_output_spikegen'
    params.savefolder = os.path.join(os.path.split(params.savefolder)[0],
                                     savefolder)
    params.figures_path = os.path.join(params.savefolder, 'figures')
    params.spike_output_path = os.path.join(params.savefolder,
                                            'processed_nest_output')
    params.networkSimParams['spike_output_path'] = params.spike_output_path
    # Get the spikegen LFP:
    f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5'))
    srate = f['srate'].value
    tvec = np.arange(f['data'].shape[1]) * 1000. / srate
    # slice
    inds = (tvec < params.tstop) & (tvec >= transient)
    data_sg_raw = f['data'].value.astype(float)
    data_sg = data_sg_raw[:, inds]
    f.close()
    # kernel width
    kwidth = 20
    # create some dummy spike times
    activationtimes = np.array([x*100 for x in range(3,11)] + [200])
    networkSimSpikegen = CachedNetwork(**params.networkSimParams)
    x, y = networkSimSpikegen.get_xy([transient, params.tstop])
    ############################################################################
    ## Part A: spatiotemporal kernels, all presynaptic populations
    #############################################################################
    #
    #titles = ['TC',
    #          'L23E/I',
    #          'LFP kernels \n L4E/I',
    #          'L5E/I',
    #          'L6E/I',
    #          ]
    #
    #COUNTER = 0
    #for i, X__ in enumerate(([['TC']]) + zip(params.X[1::2], params.X[2::2])):
    #    ax = fig.add_subplot(gs[:len(channels), i])
    #    if i == 0:
    #        phlp.annotate_subplot(ax, ncols=7, nrows=4, letter=alphabet[0], linear_offset=0.02)
    #
    #    for j, X_ in enumerate(X__):
    #        # create spikegen histogram for population Y
    #        cinds = np.arange(activationtimes[np.arange(-1, 8)][COUNTER]-kwidth,
    #                          activationtimes[np.arange(-1, 8)][COUNTER]+kwidth+2)
    #        x0_sg = np.histogram(x[X_], bins=cinds)[0].astype(float)
    #
    #        if X_ == ('TC'):
    #            color='r'
    #        else:
    #            color=('r', 'b')[j]
    #
    #
    #        # plot kernel as correlation of spikegen LFP signal with delta spike train
    #        xcorr, vlimround = plotting_correlation(params,
    #                                                x0_sg/x0_sg.sum()**2,
    #                                                data_sg_raw[:, cinds[:-1]]*1E3,
    #                                                ax, normalize=False,
    #                                                lag=kwidth,
    #                                                color=color,
    #                                                scalebar=False)
    #        if i > 0:
    #            ax.set_yticklabels([])
    #
    #        ## Create scale bar
    #        ax.plot([kwidth, kwidth],
    #            [-1500 + j*3*100, -1400 + j*3*100], lw=2, color=color,
    #            clip_on=False)
    #        ax.text(kwidth*1.08, -1450 + j*3*100, '%.1f $\mu$V' % vlimround,
    #                rotation='vertical', va='center')
    #
    #    ax.set_xlim((-5, kwidth))
    #    ax.set_xticks([-20, 0, 20])
    #    ax.set_xticklabels([-20, 0, 20])
    #
    #    COUNTER += 1
    #
    #    ax.set_title(titles[i])
    ################################################
    # Iterate over savefolders
    ################################################
    for i, (savefolder, lag) in enumerate(zip(savefolders, lags)):
        # path to simulation files
        params.savefolder = os.path.join(os.path.split(params.savefolder)[0],
                                         savefolder)
        params.figures_path = os.path.join(params.savefolder, 'figures')
        params.spike_output_path = os.path.join(params.savefolder,
                                                'processed_nest_output')
        params.networkSimParams['spike_output_path'] = params.spike_output_path
        #load spike as database inside function to avoid buggy behaviour
        networkSim = CachedNetwork(**params.networkSimParams)
        # Get the Compound LFP: LFPsum : data[nchannels, timepoints ]
        f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5'))
        data_raw = f['data'].value
        srate = f['srate'].value
        tvec = np.arange(data_raw.shape[1]) * 1000. / srate
        # slice
        inds = (tvec < params.tstop) & (tvec >= transient)
        data = data_raw[:, inds]
        # subtract mean
        dataT = data.T - data.mean(axis=1)
        data = dataT.T
        f.close()
        # Get the spikegen LFP:
        f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5'))
        data_sg_raw = f['data'].value
        f.close()
        #
        #
        #
        #
        #########################################################################
        ## Part B: STA LFP
        #########################################################################
        #
        #titles = ['staLFP(%s)\n(spont.)' % X, 'staLFP(%s)\n(AC. mod.)' % X]
        #ax = fig.add_subplot(gs[:len(channels), 5 + i])
        #if i == 0:
        #    phlp.annotate_subplot(ax, ncols=15, nrows=4, letter=alphabet[i+1],
        #                          linear_offset=0.02)
        #
        #collect the spikes x is the times, y is the id of the cell.
        x, y = networkSim.get_xy([0,params.tstop])
        #
        ## Get the spikes for the population of interest given as 'Y'
        bins = np.arange(0, params.tstop+2) + 0.5
        x0_raw = np.histogram(x[X], bins=bins)[0]
        x0 = x0_raw[inds].astype(float)
        #
        ## correlation between firing rate and LFP deviation
        ## from mean normalized by the number of spikes
        #xcorr, vlimround = plotting_correlation(params,
        #                                        x0/x0.sum(),
        #                                        data*1E3,
        #                                        ax, normalize=False,
        #                                        #unit='%.3f mV',
        #                                        lag=lag,
        #                                        scalebar=False,
        #                                        color='k',
        #                                        title=titles[i],
        #                                        )
        #
        ## Create scale bar
        #ax.plot([lag, lag],
        #        [-1500, -1400], lw=2, color='k',
        #        clip_on=False)
        #ax.text(lag*1.08, -1450, '%.1f $\mu$V' % vlimround,
        #        rotation='vertical', va='center')
        #
        #
        #[Xind] = np.where(np.array(networkSim.X) == X)[0]
        #
        ## create spikegen histogram for population Y
        #x0_sg = np.zeros(x0.shape, dtype=float)
        #x0_sg[activationtimes[Xind]] += params.N_X[Xind]
        #
        #
        #ax.set_yticklabels([])
        #ax.set_xticks([-lag, 0, lag])
        #ax.set_xticklabels([-lag, 0, lag])
        ###########################################################################
        # Part C, F: LFP and reconstructed LFP
        ############################################################################
        # create grid_spec
        gsb = gridspec.GridSpec(len(channels), 8)
        ax = fig.add_subplot(gsb[:, (i*4):(i*4+2)])
        phlp.annotate_subplot(ax, ncols=8/2., nrows=4, letter=alphabet[i*3+2],
                              linear_offset=0.02)
        # extract kernels, force negative lags to be zero
        kernels = np.zeros((len(params.N_X), 16, kwidth*2))
        # Each population's kernel is read out of the spikegen LFP around its
        # activation time and normalized by the population size.
        for j in range(len(params.X)):
            kernels[j, :, kwidth:] = data_sg_raw[:, (j+2)*100:kwidth+(j+2)*100]/params.N_X[j]
        LFP_reconst_raw = np.zeros(data_raw.shape)
        # Reconstruction: convolve each population's firing-rate histogram
        # with its per-channel kernel and sum the contributions.
        for j, pop in enumerate(params.X):
            x0_raw = np.histogram(x[pop], bins=bins)[0].astype(float)
            for ch in range(kernels.shape[1]):
                LFP_reconst_raw[ch] += np.convolve(x0_raw, kernels[j, ch],
                                                   'same')
        # slice
        LFP_reconst = LFP_reconst_raw[:, inds]
        # subtract mean
        LFP_reconstT = LFP_reconst.T - LFP_reconst.mean(axis=1)
        LFP_reconst = LFP_reconstT.T
        vlimround = plot_signal_sum(ax, params,
                                    fname=os.path.join(params.savefolder,
                                                       'LFPsum.h5'),
                                    unit='mV', scalebar=True,
                                    T=T, ylim=[-1550, 50],
                                    color='k', label='$real$',
                                    rasterized=False)
        plot_signal_sum(ax, params, fname=LFP_reconst_raw,
                        unit='mV', scaling_factor= 1., scalebar=False,
                        vlimround=vlimround,
                        T=T, ylim=[-1550, 50],
                        color='r', label='$reconstr$',
                        rasterized=False)
        ax.set_title('LFP & population \n rate predictor')
        if i > 0:
            ax.set_yticklabels([])
        ###########################################################################
        # Part D,G: Correlation coefficient
        ############################################################################
        ax = fig.add_subplot(gsb[:, i*4+2:i*4+3])
        phlp.remove_axis_junk(ax)
        phlp.annotate_subplot(ax, ncols=8./1, nrows=4, letter=alphabet[i*3+3],
                              linear_offset=0.02)
        # Per-channel correlation between measured and reconstructed LFP.
        cc = np.zeros(len(zvec))
        for ch in np.arange(len(zvec)):
            cc[ch] = np.corrcoef(data[ch], LFP_reconst[ch])[1, 0]
        ax.barh(zvec, cc, height=90, align='center', color='1', linewidth=0.5)
        ax.set_ylim([-1550, 50])
        ax.set_yticklabels([])
        ax.set_yticks(zvec)
        ax.set_xlim([0.0, 1.])
        ax.set_xticks([0.0, 0.5, 1])
        ax.yaxis.tick_left()
        ax.set_xlabel('$cc$ (-)', labelpad=0.1)
        ax.set_title('corr. \n coef.')
        print 'correlation coefficients:'
        print cc
        ###########################################################################
        # Part E,H: Power spectra
        ############################################################################
        #compute PSDs ratio between ground truth and estimate
        freqs, PSD_data = calc_signal_power(params, fname=data,
                                            transient=transient, Df=None, mlab=True,
                                            NFFT=256, noverlap=128,
                                            window=plt.mlab.window_hanning)
        freqs, PSD_LFP_reconst = calc_signal_power(params, fname=LFP_reconst,
                                                   transient=transient, Df=None, mlab=True,
                                                   NFFT=256, noverlap=128,
                                                   window=plt.mlab.window_hanning)
        zv = np.r_[params.electrodeParams['z']]
        zv = np.r_[zv, zv[-1] + np.diff(zv)[-1]]
        inds = freqs >= 1  # frequencies greater than 1 Hz
        for j, ch in enumerate(channels):
            ax = fig.add_subplot(gsb[j, (i*4+3):(i*4+4)])
            if j == 0:
                phlp.annotate_subplot(ax, ncols=8./1, nrows=4.5*len(channels),
                                      letter=alphabet[i*3+4], linear_offset=0.02)
                ax.set_title('PSD')
            phlp.remove_axis_junk(ax)
            ax.loglog(freqs[inds], PSD_data[ch, inds], 'k', label='LFP', clip_on=True)
            ax.loglog(freqs[inds], PSD_LFP_reconst[ch, inds], 'r', label='predictor', clip_on=True)
            ax.set_xlim([4E0,4E2])
            ax.set_ylim([1E-8, 1E-4])
            ax.tick_params(axis='y', which='major', pad=0)
            ax.set_yticks([1E-8,1E-6,1E-4])
            ax.yaxis.set_minor_locator(plt.NullLocator())
            ax.text(0.8, 0.9, 'ch. %i' % (ch+1),
                    horizontalalignment='left',
                    verticalalignment='center',
                    fontsize=6,
                    transform=ax.transAxes)
            if j == 0:
                ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.)
            if j > 0:
                ax.set_yticklabels([])
            if j == len(channels)-1:
                ax.set_xlabel(r'$f$ (Hz)', labelpad=0.)
            else:
                ax.set_xticklabels([])
    return fig, PSD_LFP_reconst, PSD_data
|
    This function calculates the STA of LFP, extracts kernels and reconstructs the LFP from kernels.
Arguments
::
transient : the time in milliseconds, after which the analysis should begin
so as to avoid any starting transients
X : id of presynaptic trigger population
|
def log_to_ganttplot(execution_history_items):
    """
    Example how to use the DataFrame representation
    """
    import matplotlib.pyplot as plt
    import matplotlib.dates as dates
    import numpy as np

    frame = log_to_DataFrame(execution_history_items)
    # De-duplicate state paths preserving first-appearance order, then map
    # each path to a row index for the gantt bars.
    _, first_idx = np.unique(frame.path_by_name, return_index=True)
    ordered_unique_states = np.array(frame.path_by_name)[np.sort(first_idx)]
    name2idx = {name: row for row, name in enumerate(ordered_unique_states)}

    calldate = dates.date2num(frame.timestamp_call.dt.to_pydatetime())
    returndate = dates.date2num(frame.timestamp_return.dt.to_pydatetime())

    state2color = {'HierarchyState': 'k',
                   'ExecutionState': 'g',
                   'BarrierConcurrencyState': 'y',
                   'PreemptiveConcurrencyState': 'y'}

    fig, ax = plt.subplots(1, 1)
    ax.barh(bottom=[name2idx[k] for k in frame.path_by_name], width=returndate - calldate,
            left=calldate, align='center', color=[state2color[s] for s in frame.state_type], lw=0.0)
    plt.yticks(list(range(len(ordered_unique_states))), ordered_unique_states)
|
Example how to use the DataFrame representation
|
def AddArg(self, arg):
    """Adds a new arg to this expression.
    Args:
      arg: The argument to add (string).
    Returns:
      True if this arg is the last arg, False otherwise.
    Raises:
      ParseError: If there are too many args.
    """
    # Validate before mutating: the previous implementation appended first
    # and raised afterwards, leaving the surplus arg stored in self.args.
    if len(self.args) >= self.number_of_args:
        raise ParseError("Too many args for this expression.")
    self.args.append(arg)
    return len(self.args) == self.number_of_args
|
Adds a new arg to this expression.
Args:
arg: The argument to add (string).
Returns:
True if this arg is the last arg, False otherwise.
Raises:
ParseError: If there are too many args.
|
def get_area(self):
    """
    Return the total area: the sum over all cell areas of the mesh.
    """
    # get_cell_dimensions returns the per-cell area as its fourth element.
    _, _, _, cell_areas = self.mesh.get_cell_dimensions()
    return numpy.sum(cell_areas)
|
Compute area as the sum of the mesh cells area values.
|
def find_prefix(self, iri: Union[URIRef, Literal, str]) -> Union[None, str]:
    """ Return the prefix of the longest known namespace contained in *iri*.

    Searches ``common_namespaces`` for namespace URIs occurring inside the
    given iri and keeps the prefix of the *longest* matching URI, so a short
    namespace cannot accidentally shadow a more complete one. Returns
    ``None`` when no known namespace matches.

    Args: iri: iri to be searched to find a known uri in it.
    Example:
        In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
        Out [1]: "http://www.w3.org/2000/01/rdf-schema#"
        In [2]: print(find_prefix("http://made_up_uri/label"))
        Out [2]: None
    """
    target = str(iri)
    best_prefix, best_len = None, 0
    for prefix, uri in common_namespaces.items():
        # Longest match wins; strictly-greater keeps the first of equals.
        if uri in target and len(uri) > best_len:
            best_prefix, best_len = prefix, len(uri)
    return best_prefix
|
Finds if uri is in common_namespaces
Auto adds prefix if incoming iri has a uri in common_namespaces. If its not in the local
library, then it will just be spit out as the iri and not saved/condensed into qualified
names.
The reason for the maxes is to find the longest string match. This is to avoid accidentally
matching iris with small uris when really a more complete uri is the match.
Args: iri: iri to be searched to find a known uri in it.
Example:
In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
Out [1]: "http://www.w3.org/2000/01/rdf-schema#"
In [2]: print(find_prefix("http://made_up_uri/label"))
Out [2]: None
|
def replace(self, name, newname):
    """
    Replace all occurrences of name with newname.

    Both names must begin with a letter (followed by word characters);
    otherwise None is returned. Matches are only replaced when *name* is
    delimited by non-word characters or string boundaries, so substrings
    of longer identifiers are left untouched.
    """
    # Raw string literals: "\w" in a plain string is an invalid escape
    # sequence (SyntaxWarning since Python 3.12, slated to become an error).
    if not re.match(r"[a-zA-Z]\w*", name):
        return None
    if not re.match(r"[a-zA-Z]\w*", newname):
        return None

    def _replace(match):
        # Substitute only the named group, preserving the delimiters
        # captured around it.
        return match.group(0).replace(match.group('name'), newname)

    pattern = re.compile(r"(\W|^)(?P<name>" + name + r")(\W|$)")
    cut = re.sub(pattern, _replace, str(self))
    return Cut(cut)
|
Replace all occurrences of name with newname
|
def columns_used(self):
    """
    Returns all the columns used in this model for filtering
    and in the model expression.
    """
    # Concatenate the column names from both filter sets and the formula,
    # then drop duplicates while preserving order.
    all_columns = tz.concatv(
        util.columns_in_filters(self.fit_filters),
        util.columns_in_filters(self.predict_filters),
        util.columns_in_formula(self.model_expression))
    return list(tz.unique(all_columns))
|
Returns all the columns used in this model for filtering
and in the model expression.
|
def register_on_serial_port_changed(self, callback):
    """Set the callback function to consume on serial port changed events.

    Callback receives a ISerialPortChangedEvent object.

    Returns the callback_id
    """
    return self.event_source.register_callback(
        callback, library.VBoxEventType.on_serial_port_changed)
|
Set the callback function to consume on serial port changed events.
Callback receives a ISerialPortChangedEvent object.
Returns the callback_id
|
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object

    Args:
        s: a string containing a single PENMAN-serialized graph
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the Graph object described by *s*
    Example:
        >>> decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
    """
    # Instantiate the codec and delegate decoding in one step.
    return cls(**kwargs).decode(s)
|
Deserialize PENMAN-serialized *s* into its Graph object
Args:
s: a string containing a single PENMAN-serialized graph
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the Graph object described by *s*
Example:
>>> decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
|
def all(
    self,
    count=500,
    offset=0,
    type=None,
    inactive=None,
    emailFilter=None,
    tag=None,
    messageID=None,
    fromdate=None,
    todate=None,
):
    """
    Returns many bounces.

    :param int count: Number of bounces to return per request.
    :param int offset: Number of bounces to skip.
    :param str type: Filter by type of bounce.
    :param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce.
    :param str emailFilter: Filter by email address.
    :param str tag: Filter by tag.
    :param str messageID: Filter by messageID.
    :param date fromdate: Filter messages starting from the date specified (inclusive).
    :param date todate: Filter messages up to the date specified (inclusive).
    :return: A list of :py:class:`Bounce` instances.
    :rtype: `list`
    """
    # Collect the filter arguments and forward them as keywords.
    filters = {
        "count": count,
        "offset": offset,
        "type": type,
        "inactive": inactive,
        "emailFilter": emailFilter,
        "tag": tag,
        "messageID": messageID,
        "fromdate": fromdate,
        "todate": todate,
    }
    responses = self.call_many("GET", "/bounces/", **filters)
    return self.expand_responses(responses, "Bounces")
|
Returns many bounces.
:param int count: Number of bounces to return per request.
:param int offset: Number of bounces to skip.
:param str type: Filter by type of bounce.
:param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce.
:param str emailFilter: Filter by email address.
:param str tag: Filter by tag.
:param str messageID: Filter by messageID.
:param date fromdate: Filter messages starting from the date specified (inclusive).
:param date todate: Filter messages up to the date specified (inclusive).
:return: A list of :py:class:`Bounce` instances.
:rtype: `list`
|
def _get_menu_width(self, max_width, complete_state):
    """
    Return the width of the main column.
    """
    # Width of the widest completion, plus a 2-cell margin; never narrower
    # than MIN_WIDTH, never wider than max_width.
    widest = max(get_cwidth(c.display)
                 for c in complete_state.current_completions)
    return min(max_width, max(self.MIN_WIDTH, widest + 2))
|
Return the width of the main column.
|
def info_factory(name, libnames, headers, frameworks=None,
                 section=None, classname=None):
    """Create a system_info class.

    Parameters
    ----------
    name : str
        name of the library
    libnames : seq
        list of libraries to look for
    headers : seq
        list of headers to look for
    frameworks : seq, optional
        macOS frameworks to fall back on when no library names are given
    classname : str
        name of the returned class
    section : str
        section name in the site.cfg

    Returns
    -------
    a system_info-derived class with the given meta-parameters
    """
    if not classname:
        classname = '%s_info' % name
    if not section:
        section = name
    if not frameworks:
        # Bug fix: this previously assigned to a misspelled local
        # ('framesworks'), so `frameworks` stayed None instead of [].
        frameworks = []

    class _ret(system_info):
        def __init__(self):
            system_info.__init__(self)

        def library_extensions(self):
            return system_info.library_extensions(self)

        def calc_info(self):
            """ Compute the informations of the library """
            if libnames:
                libs = self.get_libs('libraries', '')
                if not libs:
                    libs = libnames
                # Look for the shared library
                lib_dirs = self.get_lib_dirs()
                tmp = None
                for d in lib_dirs:
                    tmp = self.check_libs(d, libs)
                    if tmp is not None:
                        info = tmp
                        break
                if tmp is None:
                    return
                # Look for the header file
                include_dirs = self.get_include_dirs()
                inc_dir = None
                for d in include_dirs:
                    p = self.combine_paths(d, headers)
                    if p:
                        inc_dir = os.path.dirname(p[0])
                        dict_append(info, include_dirs=[d])
                        break
                if inc_dir is None:
                    log.info('  %s not found' % name)
                    return
                self.set_info(**info)
            else:
                # No library names: look for macOS frameworks instead.
                if frameworks:
                    fargs = []
                    for f in frameworks:
                        p = "/System/Library/Frameworks/%s.framework" % f
                        if os.path.exists(p):
                            fargs.append("-framework")
                            fargs.append(f)
                    if fargs:
                        self.set_info(extra_link_args=fargs)

    _ret.__name__ = classname
    _ret.section = section
    return _ret
|
Create a system_info class.
Parameters
----------
name : str
name of the library
libnames : seq
list of libraries to look for
headers : seq
list of headers to look for
classname : str
name of the returned class
section : str
section name in the site.cfg
Returns
-------
a system_info-derived class with the given meta-parameters
|
def add_hookcall_monitoring(self, before, after):
    """ add before/after tracing functions for all hooks
    and return an undo function which, when called,
    will remove the added tracers.

    ``before(hook_name, hook_impls, kwargs)`` will be called ahead
    of all hook calls and receive a hookcaller instance, a list
    of HookImpl instances and the keyword arguments for the hook call.

    ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
    same arguments as ``before`` but also a :py:class:`_Result`` object
    which represents the result of the overall hook call.
    """
    # Installing the traced execution happens on construction; its undo
    # callable removes the tracers again.
    traced = _tracing._TracedHookExecution(self, before, after)
    return traced.undo
|
add before/after tracing functions for all hooks
and return an undo function which, when called,
will remove the added tracers.
``before(hook_name, hook_impls, kwargs)`` will be called ahead
of all hook calls and receive a hookcaller instance, a list
of HookImpl instances and the keyword arguments for the hook call.
``after(outcome, hook_name, hook_impls, kwargs)`` receives the
same arguments as ``before`` but also a :py:class:`_Result`` object
which represents the result of the overall hook call.
|
def create_geoms(self, gdefs, plot):
    """
    Add geoms to the guide definitions
    """
    # Keep only the definitions whose geom creation yielded something
    # (create_geoms returns a falsy value when there is nothing to draw).
    created = (gdef.create_geoms(plot) for gdef in gdefs)
    return [g for g in created if g]
|
Add geoms to the guide definitions
|
def unmarshal(self, values, bind_client=None):
    """
    Cast the list.
    """
    if values is None:
        return None
    # Unmarshal each element through the parent class implementation.
    parent = super(EntityCollection, self)
    return [parent.unmarshal(v, bind_client=bind_client) for v in values]
|
Cast the list.
|
def openidf(fname, idd=None, epw=None):
    """automatically set idd and open idf file. Uses version from idf to set correct idd

    It will work under the following circumstances:

    - the IDF file should have the VERSION object.
    - Needs the version of EnergyPlus installed that matches the IDF version.
    - Energyplus should be installed in the default location.

    Parameters
    ----------
    fname : str, StringIO or IOBase
        Filepath IDF file,
        File handle of IDF file open to read
        StringIO with IDF contents within
    idd : str, StringIO or IOBase
        This is an optional argument. easyopen will find the IDD without this arg
        Filepath IDD file,
        File handle of IDD file open to read
        StringIO with IDD contents within
    epw : str
        path name to the weather file. This arg is needed to run EneryPlus from eppy.
    """
    # Thin wrapper: delegate everything to eppy's easyopen.
    from eppy import easyopen
    return easyopen.easyopen(fname, idd=idd, epw=epw)
|
automatically set idd and open idf file. Uses version from idf to set correct idd
It will work under the following circumstances:
- the IDF file should have the VERSION object.
- Needs the version of EnergyPlus installed that matches the IDF version.
- Energyplus should be installed in the default location.
Parameters
----------
fname : str, StringIO or IOBase
Filepath IDF file,
File handle of IDF file open to read
StringIO with IDF contents within
idd : str, StringIO or IOBase
This is an optional argument. easyopen will find the IDD without this arg
Filepath IDD file,
File handle of IDD file open to read
StringIO with IDD contents within
epw : str
path name to the weather file. This arg is needed to run EneryPlus from eppy.
|
def get_subparser(self, name):
    """
    Convenience method to get a certain subparser

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparsers corresponding to `name`
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
|
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
|
def map_value(self, value, gid):
    """
    Return the value for a group id, applying requested mapping.

    Map only groups related to a filter, ie when the basename of
    the group is identical to the name of a filter. Depending on
    configuration the value is anonymized, has its IP replaced by a
    resolved hostname, or is mapped from a uid to a user name;
    otherwise it is returned unchanged.
    """
    # First capture of base_gid_pattern: the group's base name, which is
    # the filter name per the docstring above.
    base_gid = self.base_gid_pattern.search(gid).group(1)
    if self.anonymyze:
        try:
            if value in self._maps[base_gid]:
                # Already anonymized: reuse the same token for this value.
                return self._maps[base_gid][value]
            else:
                # Allocate the next token, wrapping the counter at mapmax;
                # tokens look like "BASE_0001" (width controlled by mapexp).
                k = (len(self._maps[base_gid]) + 1) % self.mapmax
                new_item = u'{0}_{1:0{2}d}'.format(base_gid.upper(), k, self.mapexp)
                self._maps[base_gid][value] = new_item
                return new_item
        except KeyError:
            # No anonymization map exists for this group: pass through.
            return value
    elif base_gid in ['client', 'mail', 'from', 'rcpt', 'user'] and self.ip_lookup:
        ip_match = self.ip_pattern.search(value)
        if ip_match is None:
            return value
        host = self.gethost(ip_match.group(1))
        # Skip substitution when resolution failed (host equals the IP) or
        # the value already begins with the resolved hostname.
        if host == ip_match.group(1) or value.startswith(host):
            return value
        # Splice the resolved hostname in place of the matched IP address.
        return u''.join([
            value[:ip_match.start(1)],
            self.gethost(ip_match.group(1)),
            value[ip_match.end(1):]])
    elif (base_gid == 'user' or base_gid == 'uid') and self.uid_lookup:
        return self.getuname(value)
    else:
        return value
|
Return the value for a group id, applying requested mapping.
Map only groups related to a filter, ie when the basename of
the group is identical to the name of a filter.
|
def request(self, hash_, quickkey, doc_type, page=None,
            output=None, size_id=None, metadata=None,
            request_conversion_only=None):
    """Query conversion server

    hash_: 4 characters of file hash
    quickkey: File quickkey
    doc_type: "i" for image, "d" for documents
    page: The page to convert. If page is set to 'initial', the first
          10 pages of the document will be provided. (document)
    output: "pdf", "img", or "swf" (document)
    size_id: 0,1,2 (document)
             0-9, a-f, z (image)
    metadata: Set to 1 to get metadata dict
    request_conversion_only: Request conversion w/o content
    """
    # Only the first four characters of the hash are used.
    hash_ = hash_[:4]
    query = QueryParams({
        'quickkey': quickkey,
        'doc_type': doc_type,
        'page': page,
        'output': output,
        'size_id': size_id,
        'metadata': metadata,
        'request_conversion_only': request_conversion_only
    })
    url = '{}?{}&{}'.format(API_ENDPOINT, hash_, urlencode(query))
    response = self.http.get(url, stream=True)
    # 204 means the server refuses to convert this document.
    if response.status_code == 204:
        raise ConversionServerError("Unable to fulfill request. "
                                    "The document will not be converted.",
                                    response.status_code)
    response.raise_for_status()
    if response.headers['content-type'] == 'application/json':
        return response.json()
    return response
|
Query conversion server
hash_: 4 characters of file hash
quickkey: File quickkey
doc_type: "i" for image, "d" for documents
page: The page to convert. If page is set to 'initial', the first
10 pages of the document will be provided. (document)
output: "pdf", "img", or "swf" (document)
size_id: 0,1,2 (document)
0-9, a-f, z (image)
metadata: Set to 1 to get metadata dict
request_conversion_only: Request conversion w/o content
|
def queryset(self, request, queryset):
    """
    Return the filtered queryset based on the value provided in the query string.

    source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
    """
    status = self.value()
    if status is None:
        # No filter selected: show everything.
        return queryset.all()
    return queryset.filter(subscriptions__status=status).distinct()
|
Return the filtered queryset based on the value provided in the query string.
source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
|
def _map_update(
self,
prior_mean,
prior_cov,
global_cov_scaled,
new_observation):
"""Maximum A Posterior (MAP) update of a parameter
Parameters
----------
prior_mean : float or 1D array
Prior mean of parameters.
prior_cov : float or 1D array
Prior variance of scalar parameter, or
prior covariance of multivariate parameter
global_cov_scaled : float or 1D array
Global prior variance of scalar parameter, or
global prior covariance of multivariate parameter
new_observation : 1D or 2D array, with shape [n_dim, n_subj]
New observations on parameters.
Returns
-------
posterior_mean : float or 1D array
Posterior mean of parameters.
posterior_cov : float or 1D array
Posterior variance of scalar parameter, or
posterior covariance of multivariate parameter
"""
common = np.linalg.inv(prior_cov + global_cov_scaled)
observation_mean = np.mean(new_observation, axis=1)
posterior_mean = prior_cov.dot(common.dot(observation_mean)) +\
global_cov_scaled.dot(common.dot(prior_mean))
posterior_cov =\
prior_cov.dot(common.dot(global_cov_scaled))
return posterior_mean, posterior_cov
|
Maximum A Posterior (MAP) update of a parameter
Parameters
----------
prior_mean : float or 1D array
Prior mean of parameters.
prior_cov : float or 1D array
Prior variance of scalar parameter, or
prior covariance of multivariate parameter
global_cov_scaled : float or 1D array
Global prior variance of scalar parameter, or
global prior covariance of multivariate parameter
new_observation : 1D or 2D array, with shape [n_dim, n_subj]
New observations on parameters.
Returns
-------
posterior_mean : float or 1D array
Posterior mean of parameters.
posterior_cov : float or 1D array
Posterior variance of scalar parameter, or
posterior covariance of multivariate parameter
|
def coord2healpix(coords, frame, nside, nest=True):
    """
    Calculate HEALPix indices from an astropy SkyCoord. Assume the HEALPix
    system is defined on the coordinate frame ``frame``.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): The input coordinates.
        frame (:obj:`str`): The frame in which the HEALPix system is defined.
        nside (:obj:`int`): The HEALPix nside parameter to use. Must be a power of 2.
        nest (Optional[:obj:`bool`]): ``True`` (the default) if nested HEALPix ordering
            is desired. ``False`` for ring ordering.

    Returns:
        An array of pixel indices (integers), with the same shape as the input
        SkyCoord coordinates (:obj:`coords.shape`).

    Raises:
        :obj:`dustexceptions.CoordFrameError`: If the specified frame is not supported.
    """
    # Transform into the HEALPix frame first, if necessary.
    c = coords if coords.frame.name == frame else coords.transform_to(frame)
    # Angular frames: convert (lon, lat) to (phi, colatitude theta).
    if hasattr(c, 'ra'):
        return hp.pixelfunc.ang2pix(nside, 0.5*np.pi - c.dec.rad, c.ra.rad,
                                    nest=nest)
    if hasattr(c, 'l'):
        return hp.pixelfunc.ang2pix(nside, 0.5*np.pi - c.b.rad, c.l.rad,
                                    nest=nest)
    # Cartesian frames: use the vector form directly.
    if hasattr(c, 'x'):
        return hp.pixelfunc.vec2pix(nside, c.x.kpc, c.y.kpc, c.z.kpc, nest=nest)
    if hasattr(c, 'w'):
        return hp.pixelfunc.vec2pix(nside, c.w.kpc, c.u.kpc, c.v.kpc, nest=nest)
    raise dustexceptions.CoordFrameError(
        'No method to transform from coordinate frame "{}" to HEALPix.'.format(
            frame))
|
Calculate HEALPix indices from an astropy SkyCoord. Assume the HEALPix
system is defined on the coordinate frame ``frame``.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The input coordinates.
frame (:obj:`str`): The frame in which the HEALPix system is defined.
nside (:obj:`int`): The HEALPix nside parameter to use. Must be a power of 2.
nest (Optional[:obj:`bool`]): ``True`` (the default) if nested HEALPix ordering
is desired. ``False`` for ring ordering.
Returns:
An array of pixel indices (integers), with the same shape as the input
SkyCoord coordinates (:obj:`coords.shape`).
Raises:
:obj:`dustexceptions.CoordFrameError`: If the specified frame is not supported.
|
def lookup(self, host_value):
    """Get a host value matching the given value.

    :param host_value: a value of the host of a type that can be
        listed by the service
    :returns: an instance of AddressListItem representing
        a matched value, or None when the value is invalid or unmatched
    :raises InvalidHostError: if the argument is not a valid
        host string
    """
    try:
        host_object = self._host_factory(host_value)
    except InvalidHostError:
        # Not a parseable host value: treat as "no match".
        return None
    host_item, classification = self._get_match_and_classification(
        host_object)
    if host_item is None:
        return None
    return AddressListItem(
        host_item.to_unicode(),
        self,
        classification
    )
|
Get a host value matching the given value.
:param host_value: a value of the host of a type that can be
listed by the service
:returns: an instance of AddressListItem representing
a matched value
:raises InvalidHostError: if the argument is not a valid
host string
|
def _hide_column(self, column):
    '''Hides a column by prefixing the name with \'__\''''
    # Normalize to a plain column name, then rename to a free "__"-name.
    name = _ensure_string_from_expression(column)
    hidden_name = self._find_valid_name('__' + name)
    self._rename(name, hidden_name)
|
Hides a column by prefixing the name with \'__\
|
def create_question_dialog(self, text, second_text):
    """
    Show a YES/NO question dialog with title *text* and secondary
    message *second_text*; return the user's response code.
    """
    dialog = self.create_message_dialog(
        text, buttons=Gtk.ButtonsType.YES_NO, icon=Gtk.MessageType.QUESTION
    )
    dialog.format_secondary_text(second_text)
    # Run modally, then tear the dialog down before returning the answer.
    answer = dialog.run()
    dialog.destroy()
    return answer
|
Function creates a question dialog with title text
and second_text
|
def recv_sub(self, id_, name, params):
    """DDP sub handler: forward the subscription to the API."""
    self.api.sub(id_, name, *params)
|
DDP sub handler.
|
def vpc_peering_connection_present(name, requester_vpc_id=None, requester_vpc_name=None,
                                   peer_vpc_id=None, peer_vpc_name=None, conn_name=None,
                                   peer_owner_id=None, peer_region=None, region=None,
                                   key=None, keyid=None, profile=None):
    '''
    Ensure a VPC peering connection exists: report it when pending on the
    peer, accept it when pending from the peer, or request a new one.

    name
        Name of the state

    requester_vpc_id
        ID of the requesting VPC. Exclusive with requester_vpc_name.

    requester_vpc_name
        Name tag of the requesting VPC. Exclusive with requester_vpc_id.

    peer_vpc_id
        ID of the VPC to create VPC peering connection with. This can be a VPC in
        another account. Exclusive with peer_vpc_name.

    peer_vpc_name
        Name tag of the VPC to create VPC peering connection with. This can only
        be a VPC in the same account, else resolving it into a vpc ID will fail.
        Exclusive with peer_vpc_id.

    conn_name
        The name to use for this VPC peering connection.

    peer_owner_id
        ID of the owner of the peer VPC. Defaults to your account ID, so a value
        is required if peering with a VPC in a different account.

    peer_region
        Region of peer VPC. For inter-region vpc peering connections. Not required
        for intra-region peering connections.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.

    .. versionadded:: 2016.11.0

    Example:

    .. code-block:: yaml

        ensure peering twixt local vpc and the other guys:
          boto_vpc.vpc_peering_connection_present:
            - requester_vpc_name: my_local_vpc
            - peer_vpc_name: some_other_guys_vpc
            - conn_name: peering_from_here_to_there
            - peer_owner_id: 012345654321
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    # A peering request with this name already exists and is pending.
    if __salt__['boto_vpc.is_peering_connection_pending'](conn_name=conn_name, region=region,
                                                          key=key, keyid=keyid, profile=profile):
        # If our own VPC initiated the pending request, there is nothing to
        # do but wait for the peer to accept it.
        if __salt__['boto_vpc.peering_connection_pending_from_vpc'](conn_name=conn_name,
                                                                    vpc_id=requester_vpc_id,
                                                                    vpc_name=requester_vpc_name,
                                                                    region=region, key=key,
                                                                    keyid=keyid, profile=profile):
            ret['comment'] = ('VPC peering {0} already requested - pending '
                              'acceptance by {1}'.format(conn_name, peer_owner_id
                                                         or peer_vpc_name or peer_vpc_id))
            log.info(ret['comment'])
            return ret
        # Pending request came from the other side: accept it.
        return accept_vpc_peering_connection(name=name, conn_name=conn_name,
                                             region=region, key=key, keyid=keyid,
                                             profile=profile)
    # No pending request: create a new peering request.
    return request_vpc_peering_connection(name=name, requester_vpc_id=requester_vpc_id,
                                          requester_vpc_name=requester_vpc_name,
                                          peer_vpc_id=peer_vpc_id, peer_vpc_name=peer_vpc_name,
                                          conn_name=conn_name, peer_owner_id=peer_owner_id,
                                          peer_region=peer_region, region=region, key=key,
                                          keyid=keyid, profile=profile)
|
name
Name of the state
requester_vpc_id
ID of the requesting VPC. Exclusive with requester_vpc_name.
requester_vpc_name
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
peer_vpc_id
    ID of the VPC to create VPC peering connection with. This can be a VPC in
    another account. Exclusive with peer_vpc_name.
peer_vpc_name
    Name tag of the VPC to create VPC peering connection with. This can only
    be a VPC in the same account, else resolving it into a vpc ID will fail.
    Exclusive with peer_vpc_id.
conn_name
The name to use for this VPC peering connection.
peer_owner_id
ID of the owner of the peer VPC. Defaults to your account ID, so a value
is required if peering with a VPC in a different account.
peer_region
Region of peer VPC. For inter-region vpc peering connections. Not required
for intra-region peering connections.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.11.0
Example:
.. code-block:: yaml
ensure peering twixt local vpc and the other guys:
boto_vpc.vpc_peering_connection_present:
- requester_vpc_name: my_local_vpc
- peer_vpc_name: some_other_guys_vpc
- conn_name: peering_from_here_to_there
- peer_owner_id: 012345654321
|
def list_required(self, type=None, service=None): # pylint: disable=redefined-builtin
    """
    Displays all packages required by the current role
    based on the documented services provided.

    type: optional package type filter (one of PACKAGE_TYPES, e.g.
        system/python/ruby); empty means all types.
    service: optional service name filter; empty means all enabled satchels.
    Returns the de-duplicated list of package names in discovery order.
    """
    from burlap.common import (
        required_system_packages,
        required_python_packages,
        required_ruby_packages,
    )
    service = (service or '').strip().upper()
    type = (type or '').lower().strip()
    assert not type or type in PACKAGE_TYPES, 'Unknown package type: %s' % (type,)
    packages_set = set()
    packages = []
    version = self.os_version
    for _service, satchel in self.all_other_enabled_satchels.items():
        _service = _service.strip().upper()
        if service and service != _service:
            continue
        _new = []
        if not type or type == SYSTEM:
            #TODO:deprecated, remove
            _new.extend(required_system_packages.get(
                _service, {}).get((version.distro, version.release), []))
            try:
                _pkgs = satchel.packager_system_packages
                if self.verbose:
                    print('pkgs:')
                    pprint(_pkgs, indent=4)
                # Prefer the exact (distro, release) key over the bare distro.
                for _key in [(version.distro, version.release), version.distro]:
                    if self.verbose:
                        print('checking key:', _key)
                    if _key in _pkgs:
                        if self.verbose:
                            print('satchel %s requires:' % satchel, _pkgs[_key])
                        _new.extend(_pkgs[_key])
                        break
            except AttributeError:
                pass
        if not type or type == PYTHON:
            #TODO:deprecated, remove
            _new.extend(required_python_packages.get(
                _service, {}).get((version.distro, version.release), []))
            try:
                _pkgs = satchel.packager_python_packages
                # NOTE(review): unlike the system branch above there is no
                # break here, so both the (distro, release) and bare distro
                # keys may contribute - confirm this is intended.
                for _key in [(version.distro, version.release), version.distro]:
                    if _key in _pkgs:
                        _new.extend(_pkgs[_key])
            except AttributeError:
                pass
            # Bug fix: this debug print was unconditional; guard it behind
            # the verbose flag like every other diagnostic in this method.
            if self.verbose:
                print('_new:', _new)
        if not type or type == RUBY:
            #TODO:deprecated, remove
            _new.extend(required_ruby_packages.get(
                _service, {}).get((version.distro, version.release), []))
        # De-duplicate while preserving first-seen order.
        for _ in _new:
            if _ in packages_set:
                continue
            packages_set.add(_)
            packages.append(_)
    if self.verbose:
        for package in sorted(packages):
            print('package:', package)
    return packages
|
Displays all packages required by the current role
based on the documented services provided.
|
async def on_raw_kick(self, message):
    """ KICK command. """
    kicker, kickermeta = self._parse_user(message.source)
    self._sync_user(kicker, kickermeta)

    # The kick reason is optional in the message parameters.
    if len(message.params) > 2:
        channels, targets, reason = message.params
    else:
        channels, targets = message.params
        reason = None

    # Both the channel list and the target list are comma-separated.
    for channel in channels.split(','):
        for raw_target in targets.split(','):
            target, targetmeta = self._parse_user(raw_target)
            self._sync_user(target, targetmeta)

            if self.is_same_nick(target, self.nickname):
                # We were kicked ourselves: drop the whole channel.
                self._destroy_channel(channel)
            elif self.in_channel(channel):
                # Someone else was kicked: update the channel's nick list.
                self._destroy_user(target, channel)

            await self.on_kick(channel, target, kicker, reason)
|
KICK command.
|
def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files):
    """
    Function called by TenetoBIDS.derive_temporalnetwork for concurrent processing.

    Derives the time-varying connectivity for one input file, saves it as a
    BIDS derivative (.tsv + sidecar .json), and optionally correlates the
    derived edges against the matching confound file, writing an HTML report.

    Parameters
    ----------
    f : str
        path of the input tabular time-series file
    i : int
        index of *f* in the selected-files list; used to pick confound_files[i]
    tag : str
        tag used when building the derivative file name
    params : dict
        parameters forwarded to teneto.timeseries.derive_temporalnetwork
        (mutated in place: report settings and defaults are added)
    confounds_exist : bool
        whether a confound file is available for this input
    confound_files : list
        confound file paths, indexed by *i*
    """
    data = load_tabular_file(f, index_col=True, header=True)
    fs, _ = drop_bids_suffix(f)
    save_name, save_dir, _ = self._save_namepaths_bids_derivatives(
        fs, tag, 'tvc', 'tvcconn')
    # Replace the 'from-subject-fc' placeholders with the subject's own
    # functional-connectivity matrix, when requested.
    if 'weight-var' in params.keys():
        if params['weight-var'] == 'from-subject-fc':
            fc_files = self.get_selected_files(
                quiet=1, pipeline='functionalconnectivity', forfile=f)
            if len(fc_files) == 1:
                # Could change to load_data call
                params['weight-var'] = load_tabular_file(
                    fc_files[0]).values
            else:
                raise ValueError('Cannot correctly find FC files')
    if 'weight-mean' in params.keys():
        if params['weight-mean'] == 'from-subject-fc':
            fc_files = self.get_selected_files(
                quiet=1, pipeline='functionalconnectivity', forfile=f)
            if len(fc_files) == 1:
                # Could change to load_data call
                params['weight-mean'] = load_tabular_file(
                    fc_files[0]).values
            else:
                raise ValueError('Cannot correctly find FC files')
    # Always produce a derivation report alongside the output.
    params['report'] = 'yes'
    params['report_path'] = save_dir + '/report/'
    params['report_filename'] = save_name + '_derivationreport.html'
    if not os.path.exists(params['report_path']):
        os.makedirs(params['report_path'])
    if 'dimord' not in params:
        params['dimord'] = 'time,node'
    # Derive the time-varying connectivity and save it as a derivative.
    dfc = teneto.timeseries.derive_temporalnetwork(data.values, params)
    dfc_net = TemporalNetwork(from_array=dfc, nettype='wu')
    dfc_net.network.to_csv(save_dir + save_name + '.tsv', sep='\t')
    # Write the sidecar JSON describing how the tvc was derived.
    sidecar = get_sidecar(f)
    sidecar['tvc'] = params
    if 'weight-var' in sidecar['tvc']:
        sidecar['tvc']['weight-var'] = True
        sidecar['tvc']['fc source'] = fc_files
    if 'weight-mean' in sidecar['tvc']:
        sidecar['tvc']['weight-mean'] = True
        sidecar['tvc']['fc source'] = fc_files
    sidecar['tvc']['inputfile'] = f
    sidecar['tvc']['description'] = 'Time varying connectivity information.'
    # NOTE(review): `fs` is reused here as a file handle, shadowing the
    # suffix-stripped path computed above (no longer needed at this point).
    with open(save_dir + save_name + '.json', 'w') as fs:
        json.dump(sidecar, fs)
    if confounds_exist:
        analysis_step = 'tvc-derive'
        df = pd.read_csv(confound_files[i], sep='\t')
        df = df.fillna(df.median())
        # Vectorize the upper triangle of the connectivity array:
        # one column per edge, one row per time point.
        ind = np.triu_indices(dfc.shape[0], k=1)
        dfc_df = pd.DataFrame(dfc[ind[0], ind[1], :].transpose())
        # If windowed, prune df so that it matches with dfc_df
        if len(df) != len(dfc_df):
            df = df.iloc[int(np.round((params['windowsize']-1)/2)): int(np.round((params['windowsize']-1)/2)+len(dfc_df))]
            df.reset_index(inplace=True, drop=True)
        # NOW CORRELATE DF WITH DFC BUT ALONG INDEX NOT DF.
        # Pearson correlation of each edge time-series with each confound,
        # computed column-wise via z-scored dot products.
        dfc_df_z = (dfc_df - dfc_df.mean())
        df_z = (df - df.mean())
        R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(
            df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0)
        R_df_describe = R_df.describe()
        desc_index = R_df_describe.index
        confound_report_dir = params['report_path'] + \
            '/' + save_name + '_confoundcorr/'
        confound_report_figdir = confound_report_dir + 'figures/'
        if not os.path.exists(confound_report_figdir):
            os.makedirs(confound_report_figdir)
        # Build a small HTML report: summary stats plus one distribution
        # plot per confound column.
        report = '<html><body>'
        report += '<h1> Correlation of ' + analysis_step + ' and confounds.</h1>'
        for c in R_df.columns:
            fig, ax = plt.subplots(1)
            ax = sns.distplot(
                R_df[c], hist=False, color='m', ax=ax, kde_kws={"shade": True})
            fig.savefig(confound_report_figdir + c + '.png')
            plt.close(fig)
            report += '<h2>' + c + '</h2>'
            for ind_name, r in enumerate(R_df_describe[c]):
                report += str(desc_index[ind_name]) + ': '
                report += str(r) + '<br>'
            report += 'Distribution of corrlation values:'
            report += '<img src=' + \
                os.path.abspath(confound_report_figdir) + \
                '/' + c + '.png><br><br>'
        report += '</body></html>'
        with open(confound_report_dir + save_name + '_confoundcorr.html', 'w') as file:
            file.write(report)
|
Function called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
|
def from_settings(cls, settings):
    """Build a Mongodb source from a settings mapping.

    Args:
        settings (dict): must contain a non-empty 'mongodb' connection URI
            and a non-empty 'collection' name; every other key is forwarded
            as an option to the Mongodb source.

    Returns:
        Mongodb: the configured source.

    Raises:
        Exception: if 'mongodb'/'collection' are missing or empty, or if
            the URI path carries no database name.
    """
    # Validate the two mandatory keys up front.
    # (Fixed idiom: use `x not in d` instead of `not x in d`.)
    if ('mongodb' not in settings or 'collection' not in settings
            or settings['mongodb'] == '' or settings['collection'] == ''):
        raise Exception(
            "Erroneous mongodb settings, "
            "needs a collection and mongodb setting",
            settings)
    cx_uri = urlparse.urlsplit(settings["mongodb"])
    # The database name is the URI path (minus the leading '/'); drop any
    # query string that may be glued onto it.
    db_name = cx_uri.path
    if '?' in db_name:
        db_name = db_name.split('?', 1)[0]
    db_name = db_name[1:]
    if db_name == "":
        raise Exception(
            "Erroneous mongodb settings, "
            "missing db_name", settings)
    # Rebuild the URI with a bare "/" path so it only addresses the server.
    cx_uri = urlparse.urlunsplit(
        (cx_uri.scheme, cx_uri.netloc, "/", cx_uri.query, cx_uri.fragment))
    # Everything except the two reserved keys becomes source options.
    options = copy.deepcopy(settings)
    del options['mongodb']
    del options['collection']
    return Mongodb(
        cls.connection_for_uri(cx_uri),
        db_name, settings['collection'], options)
|
Read Mongodb Source configuration from the provided settings
|
def assign(var, new_val, assign_fn=assign_slice):
    """Assign a new value to a variable.

    Args:
        var: either a Variable operation or its output Tensor.
        new_val: a Tensor
        assign_fn: a function from
            (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation

    Returns:
        an Operation

    Raises:
        ValueError: if var is not a Variable and var.operation is not a Variable
    """
    # Accept either the Variable op itself or its output Tensor.
    target = var.operation if isinstance(var, Tensor) else var
    if not isinstance(target, Variable):
        raise ValueError("var must be a mtf.Variable or its output Tensor.")
    return Assign([target], [new_val], assign_fn=assign_fn)
|
Assign a new value to a variable.
Args:
var: either a Variable operation or its output Tensor.
new_val: a Tensor
assign_fn: a function from
(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
Returns:
an Operation
Raises:
ValueError: if var is not a Variable and var.operation is not a Variable
|
def date_between(self, start_date='-30y', end_date='today'):
    """Return a Date drawn between two given dates.

    Both bounds accept date strings recognized by strtotime().

    :param start_date Defaults to 30 years ago
    :param end_date Defaults to "today"
    :example Date('1999-02-02')
    :return Date
    """
    lower = self._parse_date(start_date)
    upper = self._parse_date(end_date)
    return self.date_between_dates(date_start=lower, date_end=upper)
|
Get a Date object based on a random date between two given dates.
Accepts date strings that can be recognized by strtotime().
:param start_date Defaults to 30 years ago
:param end_date Defaults to "today"
:example Date('1999-02-02')
:return Date
|
def heating_degree_days(T, T_base=F2K(65), truncate=True):
    r'''Compute the degree days for a measured temperature over a period.

    .. math::
        \text{heating degree days} = max(T - T_{base}, 0)

    Parameters
    ----------
    T : float
        Measured temperature; sometimes an average over a length of time is
        used, other times the average of the lowest and highest temperature
        in a period are used, [K]
    T_base : float, optional
        Reference temperature for the degree day calculation, defaults
        to 65 °F (18.33 °C, 291.483 K), the value most used in the US, [K]
    truncate : bool
        If truncate is True, no negative values will be returned; if
        negative, the value is truncated to 0, [-]

    Returns
    -------
    heating_degree_days : float
        Degree above the base temperature multiplied by the length of time
        of the measurement, normally days [day*K]

    Notes
    -----
    Some common base temperatures are 18 °C (Canada), 15.5 °C (EU),
    17 °C (Denmark, Finland), 12 °C Switzerland. The base temperature
    should always be presented with the results. Any time unit works, not
    just days; the calculation behaves the same.

    Examples
    --------
    >>> heating_degree_days(303.8)
    12.31666666666672
    >>> heating_degree_days(273)
    0.0
    >>> heating_degree_days(322, T_base=300)
    22

    References
    ----------
    .. [1] "Heating Degree Day." Wikipedia, January 24, 2018.
       https://en.wikipedia.org/w/index.php?title=Heating_degree_day&oldid=822187764.
    '''
    excess = T - T_base
    if truncate:
        # Clamp negative differences to zero, preserving positive values
        # (and their original type) unchanged.
        return max(excess, 0.0)
    return excess
|
r'''Calculates the heating degree days for a period of time.
.. math::
\text{heating degree days} = max(T - T_{base}, 0)
Parameters
----------
T : float
Measured temperature; sometimes an average over a length of time is used,
other times the average of the lowest and highest temperature in a
period are used, [K]
T_base : float, optional
Reference temperature for the degree day calculation, defaults
to 65 °F (18.33 °C, 291.483 K), the value most used in the US, [K]
truncate : bool
If truncate is True, no negative values will be returned; if negative,
the value is truncated to 0, [-]
Returns
-------
heating_degree_days : float
Degree above the base temperature multiplied by the length of time of
the measurement, normally days [day*K]
Notes
-----
Some common base temperatures are 18 °C (Canada), 15.5 °C (EU),
17 °C (Denmark, Finland), 12 °C Switzerland. The base temperature
should always be presented with the results.
The time unit does not have to be days; it can be any time unit, and the
calculation behaves the same.
Examples
--------
>>> heating_degree_days(303.8)
12.31666666666672
>>> heating_degree_days(273)
0.0
>>> heating_degree_days(322, T_base=300)
22
References
----------
.. [1] "Heating Degree Day." Wikipedia, January 24, 2018.
https://en.wikipedia.org/w/index.php?title=Heating_degree_day&oldid=822187764.
|
def setHeight(self, vehID, height):
    """setHeight(string, double) -> None

    Sets the height in m for this vehicle.
    """
    # Single TraCI command: set the VAR_HEIGHT variable on the vehicle.
    command = tc.CMD_SET_VEHICLE_VARIABLE
    self._connection._sendDoubleCmd(command, tc.VAR_HEIGHT, vehID, height)
|
setHeight(string, double) -> None
Sets the height in m for this vehicle.
|
def get_apex(self, lat, height=None):
    """Calculate the apex height of the field line through `lat`.

    Parameters
    ----------
    lat : (float)
        Latitude in degrees
    height : (float or NoneType)
        Height above the surface of the earth in km, or None to use the
        reference height (default=None)

    Returns
    -------
    apex_height : (float)
        Height of the field line apex in km
    """
    lat = helpers.checklat(lat, name='alat')
    alt = self.refh if height is None else height
    # Apex radius relation: (RE + alt) / cos^2(lat), converted back to a
    # height by subtracting RE.
    cos2 = np.cos(np.radians(lat)) ** 2
    return (self.RE + alt) / cos2 - self.RE
|
Calculate apex height
Parameters
-----------
lat : (float)
Latitude in degrees
height : (float or NoneType)
Height above the surface of the earth in km or NoneType to use
reference height (default=None)
Returns
----------
apex_height : (float)
Height of the field line apex in km
|
def navigate(self):
    """Return the longitudes and latitudes of the scene.

    Reads 40 km tie-point positions from ``self._data["pos"]`` and, when
    python-geotiepoints is available, interpolates them to full 1 km
    resolution; otherwise falls back to the coarse 40 km grid.  Results
    are stored on ``self.lons`` / ``self.lats``.
    """
    tic = datetime.now()  # start timestamp, for the debug timing log below
    # Positions are stored scaled by 1e4; index 1 is longitude, index 0 is
    # latitude (presumably in degrees after scaling -- TODO confirm).
    lons40km = self._data["pos"][:, :, 1] * 1e-4
    lats40km = self._data["pos"][:, :, 0] * 1e-4
    try:
        from geotiepoints import SatelliteInterpolator
    except ImportError:
        # Optional dependency missing: keep the coarse grid instead of
        # failing outright.
        logger.warning("Could not interpolate lon/lats, "
                       "python-geotiepoints missing.")
        self.lons, self.lats = lons40km, lats40km
    else:
        # Tie points start at column 24 and repeat every 40 columns of a
        # 2048-column scan line; rows are already at full resolution.
        cols40km = np.arange(24, 2048, 40)
        cols1km = np.arange(2048)
        lines = lons40km.shape[0]
        rows40km = np.arange(lines)
        rows1km = np.arange(lines)
        # Linear along-track, cubic cross-track interpolation orders.
        along_track_order = 1
        cross_track_order = 3
        satint = SatelliteInterpolator(
            (lons40km, lats40km), (rows40km, cols40km), (rows1km, cols1km),
            along_track_order, cross_track_order)
        self.lons, self.lats = satint.interpolate()
    logger.debug("Navigation time %s", str(datetime.now() - tic))
|
Return the longitudes and latitudes of the scene.
|
def app_token(vault_client, app_id, user_id):
    """Exchange an app id / user id pair for a vault client token."""
    response = vault_client.auth_app_id(app_id, user_id)
    # Guard clause: a valid response must carry auth.client_token.
    if 'auth' not in response or 'client_token' not in response['auth']:
        raise aomi.exceptions.AomiCredentials('invalid apptoken')
    return response['auth']['client_token']
|
Returns a vault token based on the app and user id.
|
def delete(self, hdfs_path, recursive=False):
    """Delete the entry at `hdfs_path` (descending into it when `recursive`)."""
    # Thin delegation to the underlying HDFS client.
    result = self.client.delete(hdfs_path, recursive=recursive)
    return result
|
Delete a file located at `hdfs_path`.
|
def linearBlend(img1, img2, overlap, backgroundColor=None):
    '''
    Stitch 2 images vertically together.

    The overlap area of both images is smoothed with a linear fade from
    img1 to img2.

    @param img1: numpy.2dArray
    @param img2: numpy.2dArray of the same shape[1,2] as img1
    @param overlap: number of pixels both images overlap
    @returns: stitched-image
    '''
    height, width = img1.shape[:2]
    has_overlap = overlap >= 0
    if not has_overlap:
        overlap = -overlap
    # Column of weights fading 1 -> 0, replicated across the full width.
    weights = np.tile(np.expand_dims(np.linspace(1, 0, overlap), 1), width)
    if img2.ndim == 3:  # multi channel img like rgb
        # Replicate the weights for every channel.
        weights = np.dstack([weights] * img2.shape[2])
    if has_overlap:
        top_strip = img1[height - overlap:height, :]
        bottom_strip = img2[0:overlap, :]
    else:
        # No real overlap: synthesize strips from the average of up to the
        # last/first 5 rows of each image.
        top_strip = np.tile(img1[-min(height, 5):, :].mean(axis=0),
                            (overlap, 1)).reshape(weights.shape)
        bottom_strip = np.tile(img2[:min(img2.shape[0], 5), :].mean(axis=0),
                               (overlap, 1)).reshape(weights.shape)
    # Intermediate area is a weighted mixture of both strips.
    blended = (top_strip * weights
               + bottom_strip * (1 - weights)).astype(img1.dtype)
    # Where only one image holds background, take the other image's pixel.
    if backgroundColor is not None:
        only_top_bg = np.logical_and(top_strip == backgroundColor,
                                     bottom_strip != backgroundColor)
        blended[only_top_bg] = bottom_strip[only_top_bg]
        only_bottom_bg = np.logical_and(bottom_strip == backgroundColor,
                                        top_strip != backgroundColor)
        blended[only_bottom_bg] = top_strip[only_bottom_bg]
    if not has_overlap:
        overlap = 0
    return np.vstack((img1[0:height - overlap, :],
                      blended,
                      img2[overlap:, :]))
|
Stitch 2 images vertically together.
Smooth the overlap area of both images with a linear fade from img1 to img2
@param img1: numpy.2dArray
@param img2: numpy.2dArray of the same shape[1,2] as img1
@param overlap: number of pixels both images overlap
@returns: stitched-image
|
def set_level(logger=None, log_level=None):
    '''Set logging levels using logger names.

    :param logger: Name of the logger (None configures the root logger)
    :type logger: String
    :param log_level: A string or integer corresponding to a Python logging
        level; when None, the level is taken from the VERBOSITY environment
        variable (defaulting to WARNING)
    :type log_level: String
    :rtype: None
    '''
    # Bug fix: the previous implementation unconditionally overwrote the
    # caller-supplied log_level with the VERBOSITY environment variable,
    # making the parameter dead. Only fall back to the env var when no
    # explicit level is given.
    if log_level is None:
        log_level = logging.getLevelName(os.getenv('VERBOSITY', 'WARNING'))
    logging.getLogger(logger).setLevel(log_level)
|
Set logging levels using logger names.
:param logger: Name of the logger
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:rtype: None
|
def get_dated_items(self):
    """
    Extend the parent view's dated items with a 'week_end_day' entry
    (six days after the week start) in the extra context.
    """
    date_list, object_list, extra_context = super(
        EntryWeek, self).get_dated_items()
    self.object_list = object_list
    # Rebuild the date list at day granularity for this week's objects.
    self.date_list = self.get_date_list(self.object_list, 'day')
    extra_context['week_end_day'] = (
        extra_context['week'] + datetime.timedelta(days=6))
    return self.date_list, self.object_list, extra_context
|
Override get_dated_items to add a useful 'week_end_day'
variable in the extra context of the view.
|
def check_text(self, text):
    """Keep the OK button disabled while the layout name is empty."""
    # Enabled exactly when the (unicode) text is non-empty.
    is_nonempty = to_text_string(text) != u''
    self.button_ok.setEnabled(is_nonempty)
|
Disable empty layout name possibility
|
def find_fields(self, classname=".*", fieldname=".*", fieldtype=".*", accessflags=".*"):
    """
    Find fields by regex.

    :param classname: regular expression of the classname
    :param fieldname: regular expression of the fieldname
    :param fieldtype: regular expression of the fieldtype
    :param accessflags: regular expression of the access flags
    :rtype: generator of `FieldClassAnalysis`
    """
    for name, klass in self.classes.items():
        if not re.match(classname, name):
            continue
        for analysis in klass.get_fields():
            field = analysis.get_field()
            # All three field-level patterns must match to yield.
            if (re.match(fieldname, field.get_name())
                    and re.match(fieldtype, field.get_descriptor())
                    and re.match(accessflags, field.get_access_flags_string())):
                yield analysis
|
find fields by regex
:param classname: regular expression of the classname
:param fieldname: regular expression of the fieldname
:param fieldtype: regular expression of the fieldtype
:param accessflags: regular expression of the access flags
:rtype: generator of `FieldClassAnalysis`
|
def distribution_to_markdown(distribution):
    """Render a `distribution`'s metadata as markdown text.

    Args:
        distribution (dict): metadata of a `distribution`; must contain
            "title", may contain "description" and "field".

    Returns:
        str: markdown text describing the `distribution`.
    """
    text_template = """
### {title}
{description}
#### Campos del recurso
{fields}
"""
    # Each field becomes a markdown bullet; no "field" key means no list.
    if "field" in distribution:
        fields = "- " + "\n- ".join(
            map(field_to_markdown, distribution["field"]))
    else:
        fields = ""
    return text_template.format(
        title=distribution["title"],
        description=distribution.get("description", ""),
        fields=fields,
    )
|
Genera texto en markdown a partir de los metadatos de una
`distribution`.
Args:
distribution (dict): Diccionario con metadatos de una
`distribution`.
Returns:
str: Texto que describe una `distribution`.
|
def from_rational(
        cls,
        value,
        to_base,
        precision=None,
        method=RoundingMethods.ROUND_DOWN
):
    """
    Convert rational value to a base.

    :param Rational value: the value to convert
    :param int to_base: base of result, must be at least 2
    :param precision: number of digits in total or None
    :type precision: int or NoneType
    :param method: rounding method
    :type method: element of RoundingMethods.METHODS()
    :returns: the conversion result and its relation to actual result
    :rtype: Radix * int
    :raises BasesValueError: if to_base is less than 2

    Complexity: Uncalculated.
    """
    # pylint: disable=too-many-locals
    if to_base < 2:
        raise BasesValueError(to_base, "to_base", "must be at least 2")
    if precision is not None and precision < 0:
        raise BasesValueError(precision, "precision", "must be at least 0")
    # Zero is a special case: exact result (relation 0), only padded with
    # zeros up to the requested precision.
    if value == 0:
        non_repeating_part = [] if precision is None else precision * [0]
        return (Radix(0, [], non_repeating_part, [], to_base), 0)
    if value < 0:
        sign = -1
    else:
        sign = 1
    # Division runs on |value|; for negative values the rounding direction
    # is mirrored so the final signed result rounds in the intended
    # direction.
    div_method = method
    if sign == -1:
        value = abs(value)
        div_method = cls._reverse_rounding_method(method)
    numerator = Nats.convert_from_int(value.numerator, to_base)
    denominator = Nats.convert_from_int(value.denominator, to_base)
    (integer_part, non_repeating_part, repeating_part, relation) = \
        NatDivision.division(
            denominator,
            numerator,
            to_base,
            precision,
            div_method
        )
    # The relation was computed for |value|; flip it back for the sign.
    relation = relation * sign
    result = Radix(
        sign,
        integer_part,
        non_repeating_part,
        repeating_part,
        to_base
    )
    # Rounding may change the relation; prefer the rounding's verdict
    # unless it reports exactness (0), in which case the division's
    # relation still holds.
    if precision is not None:
        (result, rel) = result.rounded(precision, method)
        relation = relation if rel == 0 else rel
    return (result, relation)
|
Convert rational value to a base.
:param Rational value: the value to convert
:param int to_base: base of result, must be at least 2
:param precision: number of digits in total or None
:type precision: int or NoneType
:param method: rounding method
:type method: element of RoundingMethods.METHODS()
:returns: the conversion result and its relation to actual result
:rtype: Radix * int
:raises BasesValueError: if to_base is less than 2
Complexity: Uncalculated.
|
def url(**attributes):
    """Parses an URL and validates its attributes.

    Returns a validator callable which checks that a value is a URL and,
    for each keyword argument given, validates the same-named attribute of
    the parsed URL (e.g. scheme, netloc, path) against the supplied schema.
    """
    def check_url(value):
        # The value must be text before it can be parsed as a URL.
        validate(text, value)
        parsed = urlparse(value)
        # A URL without a network location (host part) is rejected.
        if not parsed.netloc:
            raise ValueError("'{0}' is not a valid URL".format(value))
        for name, schema in attributes.items():
            if not _hasattr(parsed, name):
                raise ValueError("Invalid URL attribute '{0}'".format(name))
            try:
                validate(schema, _getattr(parsed, name))
            except ValueError as err:
                # Re-raise with the attribute name for a clearer message.
                raise ValueError(
                    "Unable to validate URL attribute '{0}': {1}".format(
                        name, err
                    )
                )
        return True
    # Convert "http" to be either any("http", "https") for convenience
    # NOTE(review): `any` here appears to be this module's validator
    # combinator shadowing the builtin -- confirm against the full file.
    if attributes.get("scheme") == "http":
        attributes["scheme"] = any("http", "https")
    return check_url
|
Parses an URL and validates its attributes.
|
def deleteQueue(destinationRoot, queueArk, debug=False):
    """
    Remove the queue entry identified by `queueArk` from the server.
    """
    target_url = urlparse.urljoin(
        destinationRoot, "APP/queue/" + queueArk + "/")
    response, content = doWaitWebRequest(target_url, "DELETE")
    status = response.getcode()
    # Anything other than HTTP 200 is treated as a failure.
    if status != 200:
        raise Exception(
            "Error updating queue %s to url %s. Response code is %s\n%s" %
            (queueArk, target_url, status, content)
        )
|
Delete an entry from the queue
|
def get_variants(self, arch=None, types=None, recursive=False):
    """
    Return all variants of given arch and types, sorted by uid.

    Supported variant types:
        self     - include the top-level ("self") variant as well
        addon
        variant
        optional
    """
    wanted_types = types or []
    found = []
    if "self" in wanted_types:
        found.append(self)
    for child in self.variants.values():
        if wanted_types and child.type not in wanted_types:
            continue
        # "src" is implicitly accepted for every variant's arch set.
        if arch and arch not in child.arches.union(["src"]):
            continue
        found.append(child)
        if recursive:
            # Descend without re-including each child as "self".
            child_types = [t for t in wanted_types if t != "self"]
            found.extend(
                child.get_variants(types=child_types, recursive=True))
    found.sort(key=lambda v: v.uid)
    return found
|
Return all variants of given arch and types.
Supported variant types:
self - include the top-level ("self") variant as well
addon
variant
optional
|
def times_update(self, factor):
    """Multiply every element's multiplicity by `factor`, in place.

    A factor of 0 empties the multiset; a negative factor raises.
    The ``*=`` operator has the same effect. For a variant that returns a
    new multiset instead of modifying this one, see :meth:`times`.

    Args:
        factor: The non-negative factor to multiply each multiplicity with.
    """
    if factor < 0:
        raise ValueError("The factor must not be negative.")
    if factor == 0:
        # Multiplying by zero removes everything.
        self.clear()
        return
    elements = self._elements
    for key in elements:
        elements[key] *= factor
    self._total *= factor
|
Update each this multiset by multiplying each element's multiplicity with the given scalar factor.
>>> ms = Multiset('aab')
>>> ms.times_update(2)
>>> sorted(ms)
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*=`` operator for the same effect:
>>> ms = Multiset('ac')
>>> ms *= 3
>>> sorted(ms)
['a', 'a', 'a', 'c', 'c', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`times`.
Args:
factor: The factor to multiply each multiplicity with.
|
def get_session_id(self):
    """
    Get a unique id (shortish numeric string) to allow simple aggregation
    of log records from multiple sources. This id is used for the life of
    the running program to allow extraction from all logs.

    WARNING - this can give duplicate sessions when 2 apps hit it
    at the same time.
    """
    sessions_file = os.path.join(self.log_folder, '_sessions.txt')
    max_session = '0'
    try:
        with open(sessions_file, 'r') as f:
            # Bug fix: the old loop mixed `for _ in f` with f.readline(),
            # which consumed two lines per iteration and skipped every
            # other entry. Scan all lines and keep the last non-empty one.
            for line in f:
                stripped = line.strip()
                if stripped:
                    max_session = stripped
    except Exception:
        # No sessions file yet (or unreadable) -- start fresh.
        max_session = '1'
    # Random increment reduces (but does not eliminate) collisions between
    # concurrent processes - TODO FIX properly (e.g. file locking).
    this_session = str(int(max_session) + random.randint(9, 100)).zfill(9)
    with open(sessions_file, 'a') as f2:
        f2.write(this_session + '\n')
    return this_session
|
get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARNING - this can give duplicate sessions when 2 apps hit it
at the same time.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.