async def restart_stream(self):
"""
Restart the stream on error
"""
await self.response.release()
await asyncio.sleep(self._error_timeout)
await self.connect()
logger.info("Reconnected to the stream")
self._reconnecting = False
return {'stream_restart': True}
def ChunksExist(self, chunk_numbers):
"""Do we have this chunk in the index?"""
index_urns = {
self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number): chunk_number
for chunk_number in chunk_numbers
}
res = {chunk_number: False for chunk_number in chunk_numbers}
for metadata in aff4.FACTORY.Stat(index_urns):
res[index_urns[metadata["urn"]]] = True
return res
def keypress(self, size, key):
"""Handle keypresses for changing tabs."""
key = super().keypress(size, key)
num_tabs = len(self._widgets)
if key == self._keys['prev_tab']:
self._tab_index = (self._tab_index - 1) % num_tabs
self._update_tabs()
elif key == self._keys['next_tab']:
self._tab_index = (self._tab_index + 1) % num_tabs
self._update_tabs()
elif key == self._keys['close_tab']:
# Don't allow closing the Conversations tab
if self._tab_index > 0:
curr_tab = self._widgets[self._tab_index]
self._widgets.remove(curr_tab)
del self._widget_title[curr_tab]
self._tab_index -= 1
self._update_tabs()
else:
return key
def serialize_json_string(self, value):
"""
    Try to load an encoded JSON string back into an object.
    :param value: the value to deserialize
    :return: the decoded object, or ``value`` unchanged if it is not JSON
"""
# Check if the value might be a json string
if not isinstance(value, six.string_types):
return value
    # Make sure it starts with a brace or a bracket
    if not value.startswith(('{', '[')):
return value
# Try to load the string
try:
return json.loads(value)
    except ValueError:
return value
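A minimal standalone sketch of the same guard logic (hypothetical name, plain str in place of six.string_types), illustrating the startswith tuple form that avoids the `not a or b` precedence pitfall fixed above:

import json

def maybe_load_json(value):
    # Only attempt json.loads on strings that look like a JSON object or array.
    if not isinstance(value, str):
        return value
    if not value.startswith(('{', '[')):
        return value
    try:
        return json.loads(value)
    except ValueError:
        return value

assert maybe_load_json('{"a": 1}') == {'a': 1}
assert maybe_load_json('[1, 2]') == [1, 2]
assert maybe_load_json('plain text') == 'plain text'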
def end(target):
"""schedule a greenlet to be stopped immediately
:param target: the greenlet to end
:type target: greenlet
"""
if not isinstance(target, compat.greenlet):
raise TypeError("argument must be a greenlet")
if not target.dead:
schedule(target)
state.to_raise[target] = compat.GreenletExit()
def create_strategy(name=None):
"""
    Create a strategy, or just return it if it's already one.
    :param name: name of the strategy, or an existing Strategy instance
:return: Strategy
"""
import logging
from bonobo.execution.strategies.base import Strategy
if isinstance(name, Strategy):
return name
if name is None:
name = DEFAULT_STRATEGY
logging.debug("Creating execution strategy {!r}...".format(name))
try:
factory = STRATEGIES[name]
except KeyError as exc:
raise RuntimeError(
"Invalid strategy {}. Available choices: {}.".format(repr(name), ", ".join(sorted(STRATEGIES.keys())))
) from exc
return factory()
def etree(self, data, root=None):
'''Convert data structure into a list of etree.Element'''
result = self.list() if root is None else root
if isinstance(data, (self.dict, dict)):
for key, value in data.items():
if isinstance(value, (self.dict, dict)):
elem = self.element(key)
if elem is None:
continue
result.append(elem)
if 'attributes' in value:
for k, v in value['attributes'].items():
elem.set(k, self._tostring(v))
# else:
# raise ValueError('Cobra requires "attributes" key for each element')
if 'children' in value:
for v in value['children']:
self.etree(v, root=elem)
else:
elem = self.element(key)
if elem is None:
continue
elem.text = self._tostring(value)
result.append(elem)
else:
if root is not None:
root.text = self._tostring(data)
else:
elem = self.element(self._tostring(data))
if elem is not None:
result.append(elem)
return result
def replace_from_url(self, url, **kwds):
"""
    Endpoint: /photo/<id>/replace.json
Import a photo from the specified URL to replace this photo.
"""
result = self._client.photo.replace_from_url(self, url, **kwds)
self._replace_fields(result.get_fields())
def map(self, arg, na_action=None):
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
"""
new_values = super()._map_values(
arg, na_action=na_action)
return self._constructor(new_values,
index=self.index).__finalize__(self)
def huji_sample(orient_file, meths='FS-FD:SO-POM:SO-SUN', location_name='unknown',
samp_con="1", ignore_dip=True, data_model_num=3,
samp_file="samples.txt", site_file="sites.txt",
dir_path=".", input_dir_path=""):
"""
Convert HUJI sample file to MagIC file(s)
Parameters
----------
orient_file : str
input file name
meths : str
colon-delimited sampling methods, default FS-FD:SO-POM:SO-SUN
for more options, see info below
    location_name : str
location name, default "unknown"
samp_con : str
sample/site naming convention, default '1', see info below
ignore_dip : bool
set sample az/dip to 0, default True
data_model_num : int
MagIC data model 2 or 3, default 3
samp_file : str
sample file name to output (default : samples.txt)
site_file : str
        site file name to output (default : samples.txt is sites.txt)
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
Returns
--------
    type - Tuple : (True or False indicating if conversion was successful, file name written)
Info
--------
Sampling method codes:
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
    [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
    [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
"""
    try:
        samp_con, Z = samp_con.split("-")
    except ValueError:
        Z = 1
version_num = pmag.get_version()
if data_model_num == 2:
loc_col = "er_location_name"
site_col = "er_site_name"
samp_col = "er_sample_name"
citation_col = "er_citation_names"
class_col = "site_class"
lithology_col = "site_lithology"
definition_col = "site_definition"
type_col = "site_type"
sample_bed_dip_direction_col = "sample_bed_dip_direction"
sample_bed_dip_col = "sample_bed_dip"
site_bed_dip_direction_col = "site_bed_dip_direction"
site_bed_dip_col = "site_bed_dip"
sample_dip_col = "sample_dip"
sample_az_col = "sample_azimuth"
sample_lat_col = "sample_lat"
sample_lon_col = "sample_lon"
site_lat_col = "site_lat"
site_lon_col = "site_lon"
meth_col = "magic_method_codes"
software_col = "magic_software_packages"
else:
loc_col = "location"
site_col = "site"
samp_col = "sample"
citation_col = "citations"
class_col = "class"
lithology_col = "lithology"
definition_col = "definition"
type_col = "type"
sample_bed_dip_direction_col = 'bed_dip_direction'
sample_bed_dip_col = 'bed_dip'
site_bed_dip_direction_col = 'bed_dip_direction'
site_bed_dip_col = "bed_dip"
sample_dip_col = "dip"
sample_az_col = "azimuth"
sample_lat_col = "lat"
sample_lon_col = "lon"
site_lat_col = "lat"
site_lon_col = "lon"
meth_col = "method_codes"
software_col = "software_packages"
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
site_file = pmag.resolve_file_name(site_file, dir_path)
orient_file = pmag.resolve_file_name(orient_file, input_dir_path)
print("-I- reading in: {}".format(orient_file))
#
# read in file to convert
#
with open(orient_file, 'r') as azfile:
AzDipDat = azfile.readlines()
SampOut = []
SiteOut = []
for line in AzDipDat[1:]:
orec = line.split()
if len(orec) > 1:
labaz, labdip = pmag.orient(float(orec[1]), float(orec[2]), '3')
bed_dip_dir = (orec[3])
bed_dip = (orec[4])
SampRec = {}
SiteRec = {}
SampRec[loc_col] = location_name
SampRec[citation_col] = "This study"
SiteRec[loc_col] = location_name
SiteRec[citation_col] = "This study"
SiteRec[class_col] = ""
SiteRec[lithology_col] = ""
SiteRec[type_col] = ""
SiteRec[definition_col] = "s"
#
# parse information common to all orientation methods
#
SampRec[samp_col] = orec[0]
SampRec[sample_bed_dip_direction_col] = orec[3]
SampRec[sample_bed_dip_col] = orec[4]
SiteRec[site_bed_dip_direction_col] = orec[3]
SiteRec[site_bed_dip_col] = orec[4]
if not ignore_dip:
SampRec[sample_dip_col] = '%7.1f' % (labdip)
SampRec[sample_az_col] = '%7.1f' % (labaz)
else:
SampRec[sample_dip_col] = '0'
SampRec[sample_az_col] = '0'
SampRec[sample_lat_col] = orec[5]
SampRec[sample_lon_col] = orec[6]
SiteRec[site_lat_col] = orec[5]
SiteRec[site_lon_col] = orec[6]
SampRec[meth_col] = meths
# parse out the site name
site = pmag.parse_site(orec[0], samp_con, Z)
SampRec[site_col] = site
SampRec[software_col] = version_num
SiteRec[site_col] = site
SiteRec[software_col] = version_num
SampOut.append(SampRec)
SiteOut.append(SiteRec)
if data_model_num == 2:
pmag.magic_write(samp_file, SampOut, "er_samples")
pmag.magic_write(site_file, SiteOut, "er_sites")
else:
pmag.magic_write(samp_file, SampOut, "samples")
pmag.magic_write(site_file, SiteOut, "sites")
print("Sample info saved in ", samp_file)
print("Site info saved in ", site_file)
return True, samp_file
def matchBlocks(self, blocks, threshold=.5, *args, **kwargs):
"""
Partitions blocked data and generates a sequence of clusters,
where each cluster is a tuple of record ids
Keyword arguments:
blocks -- Sequence of tuples of records, where each tuple is a
set of records covered by a blocking predicate
    threshold -- Number between 0 and 1 (default is .5). We will
                 only consider record pairs as duplicates if their
                 estimated duplicate likelihood is greater than the
                 threshold.
                 Lowering the number will increase recall,
                 raising it will increase precision
"""
candidate_records = itertools.chain.from_iterable(self._blockedPairs(blocks))
matches = core.scoreDuplicates(candidate_records,
self.data_model,
self.classifier,
self.num_cores,
threshold=0)
logger.debug("matching done, begin clustering")
for cluster in self._cluster(matches, threshold, *args, **kwargs):
yield cluster
try:
match_file = matches.filename
del matches
os.remove(match_file)
except AttributeError:
pass
def pre_run_cell(self, cellno, code):
"""Executes before the user-entered code in `ipython` is run. This
intercepts loops and other problematic code that would produce lots of
database entries and streamlines it to produce only a single entry.
Args:
cellno (int): the cell number that is about to be executed.
code (str): python source code that is about to be executed.
"""
#First, we look for loops and list/dict comprehensions in the code. Find
#the id of the latest cell that was executed.
self.cellid = cellno
#If there is a loop somewhere in the code, it could generate millions of
#database entries and make the notebook unusable.
import ast
if findloop(ast.parse(code)):
#Disable the acorn logging systems so that we don't pollute the
#database.
from acorn.logging.decoration import set_streamlining
set_streamlining(True)
#Create the pre-execute entry for the database.
from time import time
self.pre = {
"m": "loop",
"a": None,
"s": time(),
"r": None,
"c": code,
}
def _from_deprecated_string(cls, serialized):
"""
Return an instance of `cls` parsed from its deprecated `serialized` form.
This will be called only if :meth:`OpaqueKey.from_string` is unable to
parse a key out of `serialized`, and only if `set_deprecated_fallback` has
been called to register a fallback class.
Args:
cls: The :class:`OpaqueKey` subclass.
serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.
Raises:
InvalidKeyError: Should be raised if `serialized` is not a valid serialized key
understood by `cls`.
"""
if serialized.count('/') != 2:
raise InvalidKeyError(cls, serialized)
return cls(*serialized.split('/'), deprecated=True)
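For illustration only: the deprecated form this fallback expects is a three-part, slash-separated string; the key value below is fabricated:

# Hypothetical deprecated key with exactly two slashes, as required above.
serialized = "MITx/6.002x/2012_Spring"
assert serialized.count('/') == 2
parts = serialized.split('/')  # the three positional args handed to cls(*parts, deprecated=True)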
def remodel_run(self, c=None, **global_optargs):
"""
Passes a connection from the connection pool so that we can call .run()
on a query without an explicit connection
"""
if not c:
with remodel.connection.get_conn() as conn:
return run(self, conn, **global_optargs)
else:
return run(self, c, **global_optargs)
def check_variable_names(self, ds):
"""
Ensures all variables have a standard_name set.
"""
msgs = []
count = 0
for k, v in ds.variables.items():
if 'standard_name' in v.ncattrs():
count += 1
else:
msgs.append("Variable '{}' missing standard_name attr".format(k))
return Result(BaseCheck.MEDIUM, (count, len(ds.variables)), 'Variable Names', msgs)
def joinRes(lstPrfRes, varPar, idxPos, inFormat='1D'):
"""Join results from different processing units (here cores).
Parameters
----------
lstPrfRes : list
Output of results from parallelization.
varPar : integer, positive
Number of cores that were used during parallelization
idxPos : integer, positive
        List position index at which the collected results are expected.
inFormat : string
Specifies whether input will be 1d or 2d.
Returns
-------
aryOut : numpy array
Numpy array with results collected from different cores
"""
if inFormat == '1D':
# initialize output array
aryOut = np.zeros((0,))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.append(aryOut, lstPrfRes[idxRes][idxPos])
elif inFormat == '2D':
# initialize output array
aryOut = np.zeros((0, lstPrfRes[0][idxPos].shape[-1]))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.concatenate((aryOut, lstPrfRes[idxRes][idxPos]),
axis=0)
return aryOut
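A small usage sketch of the default '1D' path, using joinRes as defined above with fabricated per-core results; slot idxPos=1 holds each worker's output array:

import numpy as np

lstPrfRes = [[None, np.array([1., 2.])],   # fabricated result from core 0
             [None, np.array([3., 4.])]]   # fabricated result from core 1
print(joinRes(lstPrfRes, varPar=2, idxPos=1))  # [1. 2. 3. 4.]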
def ASHRAE_k(ID):
r'''Returns thermal conductivity of a building or insulating material
from a table in [1]_. Thermal conductivity is independent of temperature
here. Many entries in the table are listed for varying densities, but the
appropriate ID from the table must be selected to account for that.
Parameters
----------
ID : str
ID corresponding to a material in the dictionary `ASHRAE`
Returns
-------
k : float
Thermal conductivity of the material, [W/m/K]
Examples
--------
>>> ASHRAE_k(ID='Mineral fiber')
0.036
References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
'''
values = ASHRAE[ID]
if values[2]:
return values[2]
else:
R = values[3]
t = values[4]/1000. # mm to m
return R_to_k(R, t)
def add_localedir_translations(self, localedir):
"""Merge translations from localedir."""
if localedir in self.localedirs:
return
self.localedirs.append(localedir)
full_localedir = os.path.join(localedir, 'locale')
if os.path.exists(full_localedir):
translation = self._new_gnu_trans(full_localedir)
self.merge(translation)
def with_(self, *relations):
"""
Set the relationships that should be eager loaded.
:return: The current Builder instance
:rtype: Builder
"""
if not relations:
return self
eagers = self._parse_relations(list(relations))
self._eager_load.update(eagers)
return self
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def add_handler(self, message_type, handler):
"""Manage callbacks for message handlers."""
if message_type not in self._handlers:
self._handlers[message_type] = []
if handler not in self._handlers[message_type]:
self._handlers[message_type].append(handler)
def register_resource(mod, view, **kwargs):
"""Register the resource on the resource name or a custom url"""
resource_name = view.__name__.lower()[:-8]
endpoint = kwargs.get('endpoint', "{}_api".format(resource_name))
plural_resource_name = inflect.engine().plural(resource_name)
path = kwargs.get('url', plural_resource_name).strip('/')
url = '/{}'.format(path)
setattr(view, '_url', url) # need this for 201 location header
view_func = view.as_view(endpoint)
mod.add_url_rule(url, view_func=view_func,
methods=['GET', 'POST', 'OPTIONS'])
mod.add_url_rule('{}/<obj_id>'.format(url),
view_func=view_func,
methods=['GET', 'PATCH', 'PUT', 'DELETE', 'OPTIONS'])
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
(len_t, val_t) = self.list_dtype(byte_order)
data = _np.asarray(data, dtype=val_t).ravel()
_write_array(stream, _np.array(data.size, dtype=len_t))
_write_array(stream, data)
def _to_pypi(self, docs_base, release):
"""Upload to PyPI."""
url = None
with self._zipped(docs_base) as handle:
reply = requests.post(self.params['url'], auth=get_pypi_auth(), allow_redirects=False,
files=dict(content=(self.cfg.project.name + '.zip', handle, 'application/zip')),
data={':action': 'doc_upload', 'name': self.cfg.project.name})
if reply.status_code in range(200, 300):
notify.info("{status_code} {reason}".format(**vars(reply)))
elif reply.status_code == 301:
url = reply.headers['location']
else:
data = self.cfg.copy()
data.update(self.params)
data.update(vars(reply))
notify.error("{status_code} {reason} for POST to {url}".format(**data))
return url
def create_BIP122_uri(
chain_id: str, resource_type: str, resource_identifier: str
) -> URI:
"""
See: https://github.com/bitcoin/bips/blob/master/bip-0122.mediawiki
"""
if resource_type != BLOCK:
raise ValueError("Invalid resource_type. Must be one of 'block'")
elif not is_block_or_transaction_hash(resource_identifier):
raise ValueError(
"Invalid resource_identifier. Must be a hex encoded 32 byte value"
)
elif not is_block_or_transaction_hash(chain_id):
raise ValueError("Invalid chain_id. Must be a hex encoded 32 byte value")
return URI(
parse.urlunsplit(
[
"blockchain",
remove_0x_prefix(chain_id),
f"{resource_type}/{remove_0x_prefix(resource_identifier)}",
"",
"",
]
)
)
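A quick standalone sketch of the URI layout the urlunsplit call above produces; the two 32-byte hex values are fabricated:

from urllib.parse import urlunsplit

chain_id = "00" * 32    # fabricated genesis hash, 0x prefix already removed
block_hash = "11" * 32  # fabricated block hash
print(urlunsplit(["blockchain", chain_id, "block/" + block_hash, "", ""]))
# blockchain://0000...0000/block/1111...1111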
def story_node_add_arc_element_update_characters_locations(sender, instance, created, *args, **kwargs):
'''
If an arc element is added to a story element node, add any missing elements or locations.
'''
arc_node = ArcElementNode.objects.get(pk=instance.pk)
logger.debug('Scanning arc_node %s' % arc_node)
if arc_node.arc_element_type == 'root':
logger.debug("root node. skipping...")
else:
logger.debug('Checking arc node for story element relationship...')
if arc_node.story_element_node:
logger.debug('Found a story element node for arc element...')
# This change was initiated by the arc element node as opposed to the story node.
story_node = arc_node.story_element_node
if arc_node.assoc_characters.count() > 0:
logger.debug('Found %d characters to add...' % arc_node.assoc_characters.count())
for character in arc_node.assoc_characters.all():
story_node.assoc_characters.add(character)
if arc_node.assoc_locations.count() > 0:
logger.debug('Found %d locations to add...' % arc_node.assoc_locations.count())
for location in arc_node.assoc_locations.all():
story_node.assoc_locations.add(location)
def httprettified(test=None, allow_net_connect=True):
"""decorator for test functions
.. tip:: Also available under the alias :py:func:`httpretty.activate`
:param test: a callable
example usage with `nosetests <https://nose.readthedocs.io/en/latest/>`_
.. testcode::
import sure
from httpretty import httprettified
@httprettified
def test_using_nosetests():
httpretty.register_uri(
httpretty.GET,
'https://httpbin.org/ip'
)
response = requests.get('https://httpbin.org/ip')
response.json().should.equal({
"message": "HTTPretty :)"
})
example usage with `unittest module <https://docs.python.org/3/library/unittest.html>`_
.. testcode::
import unittest
from sure import expect
from httpretty import httprettified
@httprettified
class TestWithPyUnit(unittest.TestCase):
def test_httpbin(self):
httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip')
response = requests.get('https://httpbin.org/ip')
expect(response.json()).to.equal({
"message": "HTTPretty :)"
})
"""
def decorate_unittest_TestCase_setUp(klass):
# Prefer addCleanup (added in python 2.7), but fall back
# to using tearDown if it isn't available
use_addCleanup = hasattr(klass, 'addCleanup')
original_setUp = (klass.setUp
if hasattr(klass, 'setUp')
else None)
def new_setUp(self):
httpretty.reset()
httpretty.enable(allow_net_connect)
if use_addCleanup:
self.addCleanup(httpretty.disable)
if original_setUp:
original_setUp(self)
klass.setUp = new_setUp
if not use_addCleanup:
            original_tearDown = (klass.tearDown
if hasattr(klass, 'tearDown')
else None)
def new_tearDown(self):
httpretty.disable()
httpretty.reset()
if original_tearDown:
original_tearDown(self)
klass.tearDown = new_tearDown
return klass
def decorate_test_methods(klass):
for attr in dir(klass):
if not attr.startswith('test_'):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(klass, attr, decorate_callable(attr_value))
return klass
def is_unittest_TestCase(klass):
try:
import unittest
return issubclass(klass, unittest.TestCase)
except ImportError:
return False
"A decorator for tests that use HTTPretty"
def decorate_class(klass):
if is_unittest_TestCase(klass):
return decorate_unittest_TestCase_setUp(klass)
return decorate_test_methods(klass)
def decorate_callable(test):
@functools.wraps(test)
def wrapper(*args, **kw):
with httprettized(allow_net_connect):
return test(*args, **kw)
return wrapper
if isinstance(test, ClassTypes):
return decorate_class(test)
elif callable(test):
return decorate_callable(test)
return decorate_callable
def parse_questions(raw_page):
"""Parse a StackExchange API raw response.
The method parses the API response retrieving the
questions from the received items
    :param raw_page: raw page of items from which to parse the questions
:returns: a generator of questions
"""
raw_questions = json.loads(raw_page)
questions = raw_questions['items']
for question in questions:
yield question
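A minimal usage sketch with a fabricated StackExchange-style payload:

import json

raw_page = json.dumps({"items": [{"question_id": 1}, {"question_id": 2}]})
for question in parse_questions(raw_page):
    print(question["question_id"])  # prints 1, then 2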
def unroll_state_saver(input_layer, name, state_shapes, template, lengths=None):
"""Unrolls the given function with state taken from the state saver.
Args:
input_layer: The input sequence.
name: The name of this layer.
state_shapes: A list of shapes, one for each state variable.
template: A template with unbound variables for input and states that
returns a RecurrentResult.
lengths: The length of each item in the batch. If provided, use this to
truncate computation.
Returns:
A sequence from applying the given template to each item in the input
sequence.
"""
state_saver = input_layer.bookkeeper.recurrent_state
state_names = [STATE_NAME % name + '_%d' % i
for i in xrange(len(state_shapes))]
if hasattr(state_saver, 'add_state'):
for state_name, state_shape in zip(state_names, state_shapes):
initial_state = tf.zeros(state_shape[1:], dtype=input_layer.dtype)
state_saver.add_state(state_name,
initial_state=initial_state,
batch_size=state_shape[0])
if lengths is not None:
max_length = tf.reduce_max(lengths)
else:
max_length = None
results = []
prev_states = []
for state_name, state_shape in zip(state_names, state_shapes):
my_shape = list(state_shape)
my_shape[0] = -1
prev_states.append(tf.reshape(state_saver.state(state_name), my_shape))
my_parameters = None
for i, layer in enumerate(input_layer.sequence):
with input_layer.g.name_scope('unroll_%00d' % i):
if i > 0 and max_length is not None:
# TODO(eiderman): Right now the everything after length is undefined.
# If we can efficiently propagate the last result to the end, then
# models with only a final output would require a single softmax
# computation.
# pylint: disable=cell-var-from-loop
result = control_flow_ops.cond(
i < max_length,
lambda: unwrap_all(*template(layer, *prev_states).flatten()),
lambda: unwrap_all(out, *prev_states))
out = result[0]
prev_states = result[1:]
else:
out, prev_states = template(layer, *prev_states)
if my_parameters is None:
my_parameters = out.layer_parameters
results.append(prettytensor.unwrap(out))
updates = [state_saver.save_state(state_name, prettytensor.unwrap(prev_state))
for state_name, prev_state in zip(state_names, prev_states)]
# Set it up so that update is evaluated when the result of this method is
# evaluated by injecting a dependency on an arbitrary result.
with tf.control_dependencies(updates):
results[0] = tf.identity(results[0])
return input_layer.with_sequence(results, parameters=my_parameters)
def mito(args):
"""
%prog mito chrM.fa input.bam
Identify mitochondrial deletions.
"""
p = OptionParser(mito.__doc__)
p.set_aws_opts(store="hli-mv-data-science/htang/mito-deletions")
p.add_option("--realignonly", default=False, action="store_true",
help="Realign only")
p.add_option("--svonly", default=False, action="store_true",
help="Run Realign => SV calls only")
p.add_option("--support", default=1, type="int",
help="Minimum number of supporting reads")
p.set_home("speedseq", default="/mnt/software/speedseq/bin")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
chrMfa, bamfile = args
store = opts.output_path
cleanup = not opts.nocleanup
if not op.exists(chrMfa):
logging.debug("File `{}` missing. Exiting.".format(chrMfa))
return
chrMfai = chrMfa + ".fai"
if not op.exists(chrMfai):
cmd = "samtools index {}".format(chrMfa)
sh(cmd)
if not bamfile.endswith(".bam"):
bamfiles = [x.strip() for x in open(bamfile)]
else:
bamfiles = [bamfile]
if store:
computed = ls_s3(store)
computed = [op.basename(x).split('.')[0] for x in computed if
x.endswith(".depth")]
remaining_samples = [x for x in bamfiles
if op.basename(x).split(".")[0] not in computed]
logging.debug("Already computed on `{}`: {}".
format(store, len(bamfiles) - len(remaining_samples)))
bamfiles = remaining_samples
logging.debug("Total samples: {}".format(len(bamfiles)))
for bamfile in bamfiles:
run_mito(chrMfa, bamfile, opts,
realignonly=opts.realignonly,
svonly=opts.svonly,
store=store, cleanup=cleanup)
def getMeta(self, uri):
"""Return meta information about an action. Cache the result as specified by the server"""
action = urlparse(uri).path
mediaKey = self.cacheKey + '_meta_' + action
mediaKey = mediaKey.replace(' ', '__')
meta = cache.get(mediaKey, None)
# Nothing found -> Retrieve it from the server and cache it
if not meta:
r = self.doQuery('meta/' + uri)
        if r.status_code == 200:  # Get the content if there is no problem; otherwise meta stays None
meta = r.json()
if 'expire' not in r.headers:
                expire = 5 * 60  # 5 minutes of cache if the server didn't specify anything
else:
expire = int((parser.parse(r.headers['expire']) - datetime.datetime.now(tzutc())).total_seconds()) # Use the server value for cache
if expire > 0: # Do the server want us to cache ?
cache.set(mediaKey, meta, expire)
return meta
def get_api_root_view(self, api_urls=None):
"""
Return a basic root view.
"""
api_root_dict = OrderedDict()
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
class APIRootView(views.APIView):
_ignore_model_permissions = True
exclude_from_schema = True
def get(self, request, *args, **kwargs):
# Return a plain {"name": "hyperlink"} response.
ret = OrderedDict()
namespace = request.resolver_match.namespace
for key, url_name in sorted(api_root_dict.items(), key=itemgetter(0)):
if namespace:
url_name = namespace + ':' + url_name
try:
ret[key] = reverse(
url_name,
args=args,
kwargs=kwargs,
request=request,
format=kwargs.get('format', None)
)
except NoReverseMatch:
# Don't bail out if eg. no list routes exist, only detail routes.
continue
return Response(ret)
return APIRootView.as_view()
def should_skip(filename, config, path='/'):
"""Returns True if the file should be skipped based on the passed in settings."""
for skip_path in config['skip']:
if posixpath.abspath(posixpath.join(path, filename)) == posixpath.abspath(skip_path.replace('\\', '/')):
return True
position = os.path.split(filename)
while position[1]:
if position[1] in config['skip']:
return True
position = os.path.split(position[0])
for glob in config['skip_glob']:
if fnmatch.fnmatch(filename, glob):
return True
return False
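A hedged usage sketch; the config dict below is hypothetical and carries only the two keys the function reads (the module imports os, posixpath, and fnmatch are assumed to be in place):

config = {'skip': ['build', 'docs/conf.py'], 'skip_glob': ['*_pb2.py']}
print(should_skip('proto/foo_pb2.py', config))  # True, via skip_glob
print(should_skip('build/module.py', config))   # True, 'build' is a skipped path component
print(should_skip('pkg/module.py', config))     # False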
def p_expr_list_assign(p):
'expr : LIST LPAREN assignment_list RPAREN EQUALS expr'
p[0] = ast.ListAssignment(p[3], p[6], lineno=p.lineno(1))
def get_parent_id(chebi_id):
'''Returns parent id'''
if len(__PARENT_IDS) == 0:
__parse_compounds()
return __PARENT_IDS[chebi_id] if chebi_id in __PARENT_IDS else float('NaN')
def libvlc_video_get_size(p_mi, num):
'''Get the pixel dimensions of a video.
@param p_mi: media player.
@param num: number of the video (starting from, and most commonly 0).
@return: px pixel width, py pixel height.
'''
f = _Cfunctions.get('libvlc_video_get_size', None) or \
_Cfunction('libvlc_video_get_size', ((1,), (1,), (2,), (2,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
return f(p_mi, num)
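A hedged usage sketch; `player` is an assumed, already-playing MediaPlayer instance:

# The (2,) ctypes flags above mark output parameters, so the wrapper
# returns the two out-values directly as a (width, height) tuple.
width, height = libvlc_video_get_size(player, 0)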
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
"""
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self
def load_indexes(self):
"""Add the proper indexes to the scout instance.
All indexes are specified in scout/constants/indexes.py
    If new indexes are defined there, re-run this method to create them as well.
"""
for collection_name in INDEXES:
existing_indexes = self.indexes(collection_name)
indexes = INDEXES[collection_name]
for index in indexes:
index_name = index.document.get('name')
if index_name in existing_indexes:
LOG.info("Deleting old index: %s" % index_name)
self.db[collection_name].drop_index(index_name)
LOG.info("creating indexes for {0} collection: {1}".format(
collection_name,
', '.join([index.document.get('name') for index in indexes])
))
self.db[collection_name].create_indexes(indexes)
def _stripe_object_to_subscription_items(cls, target_cls, data, subscription):
"""
Retrieves SubscriptionItems for a subscription.
If the subscription item doesn't exist already then it is created.
    :param target_cls: The target class to instantiate per subscription item.
    :type target_cls: ``SubscriptionItem``
    :param data: The data dictionary received from the Stripe API.
    :type data: dict
    :param subscription: The subscription object that should hold the subscription items.
    :type subscription: ``djstripe.models.Subscription``
"""
items = data.get("items")
if not items:
return []
subscriptionitems = []
for item_data in items.get("data", []):
item, _ = target_cls._get_or_create_from_stripe_object(item_data, refetch=False)
subscriptionitems.append(item)
return subscriptionitems
def get_input_list(self):
"""
Description:
Get input list
Returns an ordered list of all available input keys and names
"""
inputs = [' '] * len(self.command['input'])
for key in self.command['input']:
inputs[self.command['input'][key]['order']] = {"key":key, "name":self.command['input'][key]['name']}
return inputs
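For illustration, a hypothetical self.command shape this method expects, and the ordered list it would produce:

# Hypothetical command definition: each input carries an 'order' and a 'name'.
command = {'input': {
    'hdmi1': {'order': 1, 'name': 'HDMI 1'},
    'tv':    {'order': 0, 'name': 'TV'},
}}
# get_input_list() on an object holding this command would return:
# [{'key': 'tv', 'name': 'TV'}, {'key': 'hdmi1', 'name': 'HDMI 1'}]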
def support_in_progress_warcs():
'''
Monkey-patch pywb.warc.pathresolvers.PrefixResolver to include warcs still
being written to (warcs having ".open" suffix). This way if a cdx entry
references foo.warc.gz, pywb will try both foo.warc.gz and
foo.warc.gz.open.
'''
_orig_prefix_resolver_call = pywb.warc.pathresolvers.PrefixResolver.__call__
def _prefix_resolver_call(self, filename, cdx=None):
raw_results = _orig_prefix_resolver_call(self, filename, cdx)
results = []
for warc_path in raw_results:
results.append(warc_path)
results.append('%s.open' % warc_path)
return results
pywb.warc.pathresolvers.PrefixResolver.__call__ = _prefix_resolver_call
def get_package(self):
"""Get the URL or sandbox to release.
"""
directory = self.directory
develop = self.develop
scmtype = self.scmtype
self.scm = self.scms.get_scm(scmtype, directory)
if self.scm.is_valid_url(directory):
directory = self.urlparser.abspath(directory)
self.remoteurl = directory
self.isremote = self.push = True
else:
directory = abspath(expanduser(directory))
self.isremote = False
self.scm.check_valid_sandbox(directory)
self.setuptools.check_valid_package(directory)
name, version = self.setuptools.get_package_info(directory, develop)
print('Releasing', name, version)
if not self.skipcommit:
if self.scm.is_dirty_sandbox(directory):
self.scm.commit_sandbox(directory, name, version, self.push)
def nvmlDeviceGetTemperature(handle, sensor):
r"""
/**
* Retrieves the current temperature readings for the device, in degrees C.
*
* For all products.
*
* See \ref nvmlTemperatureSensors_t for details on available temperature sensors.
*
* @param device The identifier of the target device
* @param sensorType Flag that indicates which sensor reading to retrieve
* @param temp Reference in which to return the temperature reading
*
* @return
* - \ref NVML_SUCCESS if \a temp has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetTemperature
"""
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperature")
ret = fn(handle, _nvmlTemperatureSensors_t(sensor), byref(c_temp))
_nvmlCheckReturn(ret)
return bytes_to_str(c_temp.value)
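A hedged usage sketch in pynvml style; nvmlInit, nvmlDeviceGetHandleByIndex, and NVML_TEMPERATURE_GPU are assumed to come from the same wrapper module:

nvmlInit()
handle = nvmlDeviceGetHandleByIndex(0)  # first GPU
print(nvmlDeviceGetTemperature(handle, NVML_TEMPERATURE_GPU))  # e.g. 47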
def rangeChange(self, pw, ranges):
"""Adjusts the stimulus signal to keep it at the top of a plot,
    after any adjustment to the axes ranges takes place.
This is a slot for the undocumented pyqtgraph signal sigRangeChanged.
From what I can tell the arguments are:
:param pw: reference to the emitting object (plot widget in my case)
:type pw: object
:param ranges: I am only interested when this turns out to be a nested list of axis bounds
:type ranges: object
"""
if hasattr(ranges, '__iter__'):
# adjust the stim signal so that it falls in the correct range
yrange_size = ranges[1][1] - ranges[1][0]
stim_x, stim_y = self.stimPlot.getData()
if stim_y is not None:
stim_height = yrange_size*STIM_HEIGHT
# take it to 0
stim_y = stim_y - np.amin(stim_y)
# normalize
if np.amax(stim_y) != 0:
stim_y = stim_y/np.amax(stim_y)
# scale for new size
stim_y = stim_y*stim_height
# raise to right place in plot
stim_y = stim_y + (ranges[1][1] - (stim_height*1.1 + (stim_height*0.2)))
self.stimPlot.setData(stim_x, stim_y)
# rmax = self.rasterTop*yrange_size + ranges[1][0]
# rmin = self.rasterBottom*yrange_size + ranges[1][0]
self.updateRasterBounds()
def rollback(self):
"""Undoes the uninstall by moving stashed files back."""
for p in self._moves:
        logger.info("Moving to %s\n from %s", *p)
for new_path, path in self._moves:
try:
logger.debug('Replacing %s from %s', new_path, path)
if os.path.isfile(new_path):
os.unlink(new_path)
elif os.path.isdir(new_path):
rmtree(new_path)
renames(path, new_path)
except OSError as ex:
logger.error("Failed to restore %s", new_path)
logger.debug("Exception: %s", ex)
self.commit()
def add_user_js(self, js_list):
""" Adds supplementary user javascript files to the presentation. The
``js_list`` arg can be either a ``list`` or a string.
"""
if isinstance(js_list, string_types):
js_list = [js_list]
for js_path in js_list:
if js_path and js_path not in self.user_js:
if js_path.startswith("http:"):
self.user_js.append({
'path_url': js_path,
'contents': '',
})
elif not os.path.exists(js_path):
raise IOError('%s user js file not found' % (js_path,))
else:
with codecs.open(js_path,
encoding=self.encoding) as js_file:
self.user_js.append({
'path_url': utils.get_path_url(js_path,
self.relative),
'contents': js_file.read(),
})
def install(shell=None, prog_name=None, env_name=None, path=None, append=None, extra_env=None):
"""Install the completion
Parameters
----------
shell : Shell
The shell type targeted. It will be guessed with get_auto_shell() if the value is None (Default value = None)
prog_name : str
The program name on the command line. It will be automatically computed if the value is None
(Default value = None)
env_name : str
The environment variable name used to control the completion. It will be automatically computed if the value is
None (Default value = None)
path : str
The installation path of the code to be evaluated by the shell. The standard installation path is used if the
value is None (Default value = None)
append : bool
Whether to append the content to the file or to override it. The default behavior depends on the shell type
(Default value = None)
extra_env : dict
A set of environment variables and their values to be added to the generated code (Default value = None)
"""
prog_name = prog_name or click.get_current_context().find_root().info_name
shell = shell or get_auto_shell()
if append is None and path is not None:
append = True
if append is not None:
mode = 'a' if append else 'w'
else:
mode = None
if shell == 'fish':
path = path or os.path.expanduser('~') + '/.config/fish/completions/%s.fish' % prog_name
mode = mode or 'w'
elif shell == 'bash':
path = path or os.path.expanduser('~') + '/.bash_completion'
mode = mode or 'a'
elif shell == 'zsh':
ohmyzsh = os.path.expanduser('~') + '/.oh-my-zsh'
if os.path.exists(ohmyzsh):
path = path or ohmyzsh + '/completions/_%s' % prog_name
mode = mode or 'w'
else:
path = path or os.path.expanduser('~') + '/.zshrc'
mode = mode or 'a'
elif shell == 'powershell':
subprocess.check_call(['powershell', 'Set-ExecutionPolicy Unrestricted -Scope CurrentUser'])
        path = path or subprocess.check_output(['powershell', '-NoProfile', 'echo $profile']).strip()
mode = mode or 'a'
else:
raise click.ClickException('%s is not supported.' % shell)
d = os.path.dirname(path)
if not os.path.exists(d):
os.makedirs(d)
f = open(path, mode)
f.write(get_code(shell, prog_name, env_name, extra_env))
f.write("\n")
f.close()
return shell, path
def _get_bnl(self, C_AMP, vs30):
"""
Gets the nonlinear term, given by equation 8 of Atkinson & Boore 2006
"""
# Default case 8d
bnl = np.zeros_like(vs30)
if np.all(vs30 >= self.CONSTS["Vref"]):
return bnl
# Case 8a
bnl[vs30 < self.CONSTS["v1"]] = C_AMP["b1sa"]
    # Case 8b
idx = np.logical_and(vs30 > self.CONSTS["v1"],
vs30 <= self.CONSTS["v2"])
if np.any(idx):
bnl[idx] = (C_AMP["b1sa"] - C_AMP["b2sa"]) *\
(np.log(vs30[idx] / self.CONSTS["v2"]) /
np.log(self.CONSTS["v1"] / self.CONSTS["v2"])) + C_AMP["b2sa"]
# Case 8c
idx = np.logical_and(vs30 > self.CONSTS["v2"],
vs30 < self.CONSTS["Vref"])
if np.any(idx):
bnl[idx] = C_AMP["b2sa"] *\
np.log(vs30[idx] / self.CONSTS["Vref"]) /\
np.log(self.CONSTS["v2"] / self.CONSTS["Vref"])
return bnl
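Read directly off the branches above (cases 8a-8d, with the boundaries exactly as coded), the nonlinear term is:

\[
b_{nl}(V_{s30}) =
\begin{cases}
b_{1sa}, & V_{s30} < v_1 \\
(b_{1sa}-b_{2sa})\,\dfrac{\ln(V_{s30}/v_2)}{\ln(v_1/v_2)} + b_{2sa}, & v_1 < V_{s30} \le v_2 \\
b_{2sa}\,\dfrac{\ln(V_{s30}/V_{ref})}{\ln(v_2/V_{ref})}, & v_2 < V_{s30} < V_{ref} \\
0, & V_{s30} \ge V_{ref}
\end{cases}
\]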
def from_ros_pose_msg(pose_msg,
from_frame='unassigned',
to_frame='world'):
"""Creates a RigidTransform from a ROS pose msg.
Parameters
----------
pose_msg : :obj:`geometry_msgs.msg.Pose`
ROS pose message
"""
quaternion = np.array([pose_msg.orientation.w,
pose_msg.orientation.x,
pose_msg.orientation.y,
pose_msg.orientation.z])
position = np.array([pose_msg.position.x,
pose_msg.position.y,
pose_msg.position.z])
pose = RigidTransform(rotation=quaternion,
translation=position,
from_frame=from_frame,
to_frame=to_frame)
return pose
def parse_nem_file(nem_file) -> NEMFile:
""" Parse NEM file and return meter readings named tuple """
reader = csv.reader(nem_file, delimiter=',')
return parse_nem_rows(reader, file_name=nem_file)
def _report_problem(self, problem, level=logging.ERROR):
'''Report a given problem'''
problem = self.basename + ': ' + problem
if self._logger.isEnabledFor(level):
self._problematic = True
if self._check_raises:
raise DapInvalid(problem)
self._logger.log(level, problem)
def read_namespaced_pod_preset(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_pod_preset # noqa: E501
read the specified PodPreset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_preset(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
def wait_for_edge(channel, trigger, timeout=-1):
"""
This function is designed to block execution of your program until an edge
is detected.
:param channel: the channel based on the numbering system you have specified
(:py:attr:`GPIO.BOARD`, :py:attr:`GPIO.BCM` or :py:attr:`GPIO.SUNXI`).
:param trigger: The event to detect, one of: :py:attr:`GPIO.RISING`,
:py:attr:`GPIO.FALLING` or :py:attr:`GPIO.BOTH`.
:param timeout: (optional) maximum time to wait for an edge, in milliseconds; -1 (the default) blocks indefinitely.
In other words, the polling example above that waits for a button press
could be rewritten as:
.. code:: python
GPIO.wait_for_edge(channel, GPIO.RISING)
Note that you can detect edges of type :py:attr:`GPIO.RISING`,
:py:attr:`GPIO.FALLING` or :py:attr:`GPIO.BOTH`. The advantage of doing it
this way is that it uses a negligible amount of CPU, so there is plenty left
for other tasks.
If you only want to wait for a certain length of time, you can use the
timeout parameter:
.. code:: python
# wait for up to 5 seconds for a rising edge (timeout is in milliseconds)
channel = GPIO.wait_for_edge(channel, GPIO.RISING, timeout=5000)
if channel is None:
print('Timeout occurred')
else:
print('Edge detected on channel', channel)
"""
_check_configured(channel, direction=IN)
pin = get_gpio_pin(_mode, channel)
if event.blocking_wait_for_edge(pin, trigger, timeout) is not None:
return channel
|
This function is designed to block execution of your program until an edge
is detected.
:param channel: the channel based on the numbering system you have specified
(:py:attr:`GPIO.BOARD`, :py:attr:`GPIO.BCM` or :py:attr:`GPIO.SUNXI`).
:param trigger: The event to detect, one of: :py:attr:`GPIO.RISING`,
:py:attr:`GPIO.FALLING` or :py:attr:`GPIO.BOTH`.
:param timeout: (optional) maximum time to wait for an edge, in milliseconds; -1 (the default) blocks indefinitely.
In other words, the polling example above that waits for a button press
could be rewritten as:
.. code:: python
GPIO.wait_for_edge(channel, GPIO.RISING)
Note that you can detect edges of type :py:attr:`GPIO.RISING`,
:py:attr:`GPIO.FALLING` or :py:attr:`GPIO.BOTH`. The advantage of doing it
this way is that it uses a negligible amount of CPU, so there is plenty left
for other tasks.
If you only want to wait for a certain length of time, you can use the
timeout parameter:
.. code:: python
# wait for up to 5 seconds for a rising edge (timeout is in milliseconds)
channel = GPIO.wait_for_edge(channel, GPIO.RISING, timeout=5000)
if channel is None:
print('Timeout occurred')
else:
print('Edge detected on channel', channel)
|
def list_of_list(self):
"""
This will convert the data from a list of dicts to a list of lists
:return: list of lists
"""
ret = [[row.get(key, '') for key in self._col_names] for row in self]
return ReprListList(ret, col_names=self._col_names,
col_types=self._col_types,
width_limit=self._width_limit,
digits=self._digits,
convert_unicode=self._convert_unicode)
|
This will convert the data from a list of dicts to a list of lists
:return: list of lists
|
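To make the shape change concrete, a small sketch assuming a container whose _col_names is ['a', 'b']; missing keys fall back to '' via row.get(key, ''):

rows = [{'a': 1, 'b': 2}, {'a': 3}]
[[row.get(k, '') for k in ['a', 'b']] for row in rows]
# -> [[1, 2], [3, '']]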
def transform(row, table):
'Transform row "link" into full URL and add "state" based on "name"'
data = row._asdict()
data["link"] = urljoin("https://pt.wikipedia.org", data["link"])
data["name"], data["state"] = regexp_city_state.findall(data["name"])[0]
return data
|
Transform row "link" into full URL and add "state" based on "name"
|
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._filename):
old_umask = os.umask(0o177)
try:
open(self._filename, 'a+b').close()
finally:
os.umask(old_umask)
|
Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
|
def push_stream(cache, user_id, stream):
"""
Push a stream onto the stream stack in cache.
:param cache: werkzeug BasicCache-like object
:param user_id: id of user, used as key in cache
:param stream: stream object to push onto stack
:return: True on successful update,
False if failed to update,
None if invalid input was given
"""
stack = cache.get(user_id)
if stack is None:
stack = []
if stream:
stack.append(stream)
return cache.set(user_id, stack)
return None
|
Push a stream onto the stream stack in cache.
:param cache: werkzeug BasicCache-like object
:param user_id: id of user, used as key in cache
:param stream: stream object to push onto stack
:return: True on successful update,
False if failed to update,
None if invalid input was given
|
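A usage sketch with cachelib's SimpleCache standing in for the werkzeug BasicCache-like object (an assumption; any object with get/set semantics works):

from cachelib import SimpleCache

cache = SimpleCache()
push_stream(cache, 'user-1', {'id': 's1'})   # True: new stack created and saved
push_stream(cache, 'user-1', {'id': 's2'})   # True: appended to the existing stack
push_stream(cache, 'user-1', None)           # None: falsy stream is rejected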
def cancel_ride(self, ride_id, cancel_confirmation_token=None):
"""Cancel an ongoing ride on behalf of a user.
Params
ride_id (str)
The unique ID of the Ride Request.
cancel_confirmation_token (str)
Optional string containing the cancellation confirmation token.
Returns
(Response)
A Response object with successful status_code
if ride was canceled.
"""
args = {
"cancel_confirmation_token": cancel_confirmation_token
}
endpoint = 'v1/rides/{}/cancel'.format(ride_id)
return self._api_call('POST', endpoint, args=args)
|
Cancel an ongoing ride on behalf of a user.
Params
ride_id (str)
The unique ID of the Ride Request.
cancel_confirmation_token (str)
Optional string containing the cancellation confirmation token.
Returns
(Response)
A Response object with successful status_code
if ride was canceled.
|
def _initialize_from_dict(self, data):
"""
Loads serializer from a request object
"""
self._json = data
self._validate()
for name, value in self._json.items():
if name in self._properties:
if '$ref' in self._properties[name]:
if 'decimal' in self._properties[name]['$ref']:
value = Decimal(value)
# applying proper formatting when required
if 'format' in self._properties[name]:
format = self._properties[name]['format']
if 'date-time' == format:
value = utils.string_to_datetime(value)
elif 'date' == format:
value = utils.string_to_date(value)
setattr(self, name, value)
|
Loads serializer from a request object
|
def create_serving_logger() -> Logger:
"""Create a logger for serving.
This creates a logger named quart.serving.
"""
logger = getLogger('quart.serving')
if logger.level == NOTSET:
logger.setLevel(INFO)
logger.addHandler(serving_handler)
return logger
|
Create a logger for serving.
This creates a logger named quart.serving.
|
def listen_for_events():
"""Pubsub event listener
Listen for events in the pubsub bus and calls the process function
when somebody comes to play.
"""
import_event_modules()
conn = redis_connection.get_connection()
pubsub = conn.pubsub()
pubsub.subscribe("eventlib")
for message in pubsub.listen():
if message['type'] != 'message':
continue
data = loads(message["data"])
if 'name' in data:
event_name = data.pop('name')
process_external(event_name, data)
|
Pubsub event listener
Listens for events on the pubsub bus and calls the process function
when somebody comes to play.
|
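To exercise the listener from another process, publish a JSON payload carrying a 'name' key on the same channel; a sketch assuming a local Redis and that 'user.signup' is a registered event name:

import json
import redis

conn = redis.StrictRedis()
conn.publish('eventlib', json.dumps({'name': 'user.signup', 'email': 'a@b.c'}))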
def get_content_hashes(image_path,
level=None,
regexp=None,
include_files=None,
tag_root=True,
level_filter=None,
skip_files=None,
version=None,
include_sizes=True):
'''get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes
'''
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("REPLICATE",version=version,
skip_files=skip_files,
include_files=include_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
file_obj,tar = get_image_tar(image_path)
results = extract_guts(image_path=image_path,
tar=tar,
file_filter=file_filter,
tag_root=tag_root,
include_sizes=include_sizes)
delete_image_tar(file_obj, tar)
return results
|
get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes
|
def inverse_mod( a, m ):
"""Inverse of a mod m."""
if a < 0 or m <= a: a = a % m
# From Ferguson and Schneier, roughly:
c, d = a, m
uc, vc, ud, vd = 1, 0, 0, 1
while c != 0:
q, c, d = divmod( d, c ) + ( c, )
uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc
# At this point, d is the GCD, and ud*a+vd*m = d.
# If d == 1, this means that ud is an inverse.
assert d == 1
if ud > 0: return ud
else: return ud + m
|
Inverse of a mod m.
|
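A worked example: 4 is the inverse of 3 modulo 11, since 3 * 4 = 12 ≡ 1 (mod 11):

assert inverse_mod(3, 11) == 4
assert (3 * inverse_mod(3, 11)) % 11 == 1
inverse_mod(-8, 11)   # 4: -8 is first reduced to 3 (mod 11)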
def fasta(self):
"""
Create FASTA files of the PointFinder results to be fed into PointFinder
"""
logging.info('Extracting FASTA sequences matching PointFinder database')
for sample in self.runmetadata.samples:
# Ensure that there are sequence data to extract from the GenObject
if GenObject.isattr(sample[self.analysistype], 'sequences'):
# Set the name of the FASTA file
sample[self.analysistype].pointfinderfasta = \
os.path.join(sample[self.analysistype].outputdir,
'{seqid}_pointfinder.fasta'.format(seqid=sample.name))
# Create a list to store all the SeqRecords created
sequences = list()
with open(sample[self.analysistype].pointfinderfasta, 'w') as fasta:
for gene, sequence in sample[self.analysistype].sequences.items():
# Create a SeqRecord using a Seq() of the sequence - both SeqRecord and Seq are from BioPython
seq = SeqRecord(seq=Seq(sequence),
id=gene,
name=str(),
description=str())
sequences.append(seq)
# Write all the SeqRecords to file
SeqIO.write(sequences, fasta, 'fasta')
|
Create FASTA files of the PointFinder results to be fed into PointFinder
|
async def addRelation(self, endpoint1, endpoint2):
"""
:param endpoint1 string:
:param endpoint2 string:
Endpoint1 and Endpoint2 hold relation endpoints in the
"application:interface" form, where the application is always a
placeholder pointing to an application change, and the interface is
optional. Examples are "$deploy-42:web" or just "$deploy-42".
"""
endpoints = [endpoint1, endpoint2]
# resolve indirect references
for i in range(len(endpoints)):
parts = endpoints[i].split(':')
parts[0] = self.resolve(parts[0])
endpoints[i] = ':'.join(parts)
log.info('Relating %s <-> %s', *endpoints)
return await self.model.add_relation(*endpoints)
|
:param endpoint1 string:
:param endpoint2 string:
Endpoint1 and Endpoint2 hold relation endpoints in the
"application:interface" form, where the application is always a
placeholder pointing to an application change, and the interface is
optional. Examples are "$deploy-42:web" or just "$deploy-42".
|
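Resolution only touches the application part before the optional ':interface' suffix; a sketch of that string handling, with 'mysql' standing in for whatever self.resolve would return:

parts = '$deploy-42:web'.split(':')
parts[0] = 'mysql'    # hypothetical result of self.resolve('$deploy-42')
':'.join(parts)       # 'mysql:web'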
def transformer_wikitext103_l4k_memory_v0():
"""HParams for training languagemodel_wikitext103_l4k with memory."""
hparams = transformer_wikitext103_l4k_v0()
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = 64
hparams.split_targets_strided_training = True
hparams.add_hparam("memory_type", "transformer_xl")
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length)) # 262144
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
hparams.add_hparam("unconditional", True)
hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess
# By default, cache one chunk only (like Transformer-XL)
hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length)
return hparams
|
HParams for training languagemodel_wikitext103_l4k with memory.
|
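The batch-size arithmetic in the comment checks out if the base hparams use max_length = 4096 (an assumption here, since that value is inherited from transformer_wikitext103_l4k_v0):

target_tokens_per_batch = 4096
max_length = 4096      # assumed from the base config
chunk_length = 64
target_tokens_per_batch * (max_length // chunk_length)   # 262144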
def parsed_stack(self):
"""The parsed_stack property.
Returns:
(list). the property value. (defaults to: [])
"""
if 'parsedStack' in self._values:
return self._values['parsedStack']
self._values['parsedStack'] = copy.deepcopy(self._defaults['parsedStack'])
return self._values['parsedStack']
|
The parsed_stack property.
Returns:
(list). the property value. (defaults to: [])
|
def nucmer(args):
"""
%prog nucmer ref.fasta query.fasta
Run NUCMER using query against reference. Parallel implementation derived
from: <https://github.com/fritzsedlazeck/sge_mummer>
"""
from itertools import product
from jcvi.apps.grid import MakeManager
from jcvi.formats.base import split
p = OptionParser(nucmer.__doc__)
p.add_option("--chunks", type="int",
help="Split both query and subject into chunks")
p.set_params(prog="nucmer", params="-l 100 -c 500")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ref, query = args
cpus = opts.cpus
nrefs = nqueries = opts.chunks or int(cpus ** .5)
refdir = ref.split(".")[0] + "-outdir"
querydir = query.split(".")[0] + "-outdir"
reflist = split([ref, refdir, str(nrefs)]).names
querylist = split([query, querydir, str(nqueries)]).names
mm = MakeManager()
for i, (r, q) in enumerate(product(reflist, querylist)):
pf = "{0:04d}".format(i)
cmd = "nucmer -maxmatch"
cmd += " {0}".format(opts.extra)
cmd += " {0} {1} -p {2}".format(r, q, pf)
deltafile = pf + ".delta"
mm.add((r, q), deltafile, cmd)
print(cmd)
mm.write()
|
%prog nucmer ref.fasta query.fasta
Run NUCMER using query against reference. Parallel implementation derived
from: <https://github.com/fritzsedlazeck/sge_mummer>
|
def _metrics_get_endpoints(options):
""" Determine the start and end dates based on user-supplied options. """
if bool(options.start) ^ bool(options.end):
log.error('--start and --end must be specified together')
sys.exit(1)
if options.start and options.end:
start = options.start
end = options.end
else:
end = datetime.utcnow()
start = end - timedelta(options.days)
return start, end
|
Determine the start and end dates based on user-supplied options.
|
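The XOR guard fires exactly when one endpoint is given without the other; a quick illustration of the truth table it relies on:

bool('2019-01-01') ^ bool(None)           # True: only --start given, so exit(1)
bool('2019-01-01') ^ bool('2019-02-01')   # False: both given, range is used
bool(None) ^ bool(None)                   # False: neither given, default window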
def import_cert(name,
cert_format=_DEFAULT_FORMAT,
context=_DEFAULT_CONTEXT,
store=_DEFAULT_STORE,
exportable=True,
password='',
saltenv='base'):
'''
Import the certificate file into the given certificate store.
:param str name: The path of the certificate file to import.
:param str cert_format: The certificate format. Specify 'cer' for X.509, or
'pfx' for PKCS #12.
:param str context: The name of the certificate store location context.
:param str store: The name of the certificate store.
:param bool exportable: Mark the certificate as exportable. Only applicable
to pfx format.
:param str password: The password of the certificate. Only applicable to pfx
format. Note that if used interactively, the password will be seen by all minions.
To protect the password, use a state and get the password from pillar.
:param str saltenv: The environment the file resides in.
:return: A boolean representing whether all changes succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_pki.import_cert name='salt://cert.cer'
'''
cmd = list()
thumbprint = None
store_path = r'Cert:\{0}\{1}'.format(context, store)
cert_format = cert_format.lower()
_validate_cert_format(name=cert_format)
cached_source_path = __salt__['cp.cache_file'](name, saltenv)
if not cached_source_path:
_LOG.error('Unable to get cached copy of file: %s', name)
return False
if password:
cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password)
else:
cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format)
current_certs = get_certs(context=context, store=store)
if cert_props['thumbprint'] in current_certs:
_LOG.debug("Certificate thumbprint '%s' already present in store: %s",
cert_props['thumbprint'], store_path)
return True
if cert_format == 'pfx':
# In instances where an empty password is needed, we use a
# System.Security.SecureString object since ConvertTo-SecureString will
# not convert an empty string.
if password:
cmd.append(r"$Password = ConvertTo-SecureString "
r"-String '{0}'".format(password))
cmd.append(' -AsPlainText -Force; ')
else:
cmd.append('$Password = New-Object System.Security.SecureString; ')
cmd.append(r"Import-PfxCertificate "
r"-FilePath '{0}'".format(cached_source_path))
cmd.append(r" -CertStoreLocation '{0}'".format(store_path))
cmd.append(r" -Password $Password")
if exportable:
cmd.append(' -Exportable')
else:
cmd.append(r"Import-Certificate "
r"-FilePath '{0}'".format(cached_source_path))
cmd.append(r" -CertStoreLocation '{0}'".format(store_path))
_cmd_run(cmd=six.text_type().join(cmd))
new_certs = get_certs(context=context, store=store)
for new_cert in new_certs:
if new_cert not in current_certs:
thumbprint = new_cert
if thumbprint:
_LOG.debug('Certificate imported successfully: %s', name)
return True
_LOG.error('Unable to import certificate: %s', name)
return False
|
Import the certificate file into the given certificate store.
:param str name: The path of the certificate file to import.
:param str cert_format: The certificate format. Specify 'cer' for X.509, or
'pfx' for PKCS #12.
:param str context: The name of the certificate store location context.
:param str store: The name of the certificate store.
:param bool exportable: Mark the certificate as exportable. Only applicable
to pfx format.
:param str password: The password of the certificate. Only applicable to pfx
format. Note that if used interactively, the password will be seen by all minions.
To protect the password, use a state and get the password from pillar.
:param str saltenv: The environment the file resides in.
:return: A boolean representing whether all changes succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_pki.import_cert name='salt://cert.cer'
|
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._censoring_type = CensoringType.RIGHT
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
hazards_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
self.hazards_ = pd.Series(hazards_, index=X.columns, name="coef") / self._norm_std
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
if hasattr(self, "_concordance_score_"):
# we have already fit the model.
del self._concordance_score_
return self
|
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
|
def _user_yes_no_query(self, question):
""" Helper asking if the user want to download the file
Note:
Dowloading huge file can take a while
"""
sys.stdout.write('%s [y/n]\n' % question)
while True:
try:
return strtobool(raw_input().lower())
except ValueError:
sys.stdout.write('Please respond with \'y\' or \'n\'.\n')
|
Helper asking if the user wants to download the file
Note:
    Downloading a huge file can take a while
|
def advise(self, name, f, *a, **kw):
"""
Add an advice that will be handled later by the handle method.
Arguments:
name
The name of the advice group
f
A callable method or function.
The rest of the arguments will be passed as arguments and
keyword arguments to f when it's invoked.
"""
if name is None:
return
advice = (f, a, kw)
debug = self.get(DEBUG)
frame = currentframe()
if frame is None:
logger.debug('currentframe() failed to return frame')
else:
if name in self._called:
self.__advice_stack_frame_protection(frame)
if debug:
logger.debug(
"advise '%s' invoked by %s:%d",
name,
frame.f_back.f_code.co_filename, frame.f_back.f_lineno,
)
if debug > 1:
# use the memory address of the tuple which should
# be stable
self._frames[id(advice)] = ''.join(
format_stack(frame.f_back))
self._advices[name] = self._advices.get(name, [])
self._advices[name].append(advice)
|
Add an advice that will be handled later by the handle method.
Arguments:
name
The name of the advice group
f
A callable method or function.
The rest of the arguments will be passed as arguments and
keyword arguments to f when it's invoked.
|
def rpc_call(payload):
"""Simple Flask implementation for making asynchronous Rpc calls. """
# Send the request and store the requests Unique ID.
corr_id = RPC_CLIENT.send_request(payload)
# Wait until we have received a response.
while RPC_CLIENT.queue[corr_id] is None:
sleep(0.1)
# Return the response to the user.
return RPC_CLIENT.queue[corr_id]
|
Simple Flask implementation for making asynchronous RPC calls.
|
def sparse_var(X):
'''
Compute the per-column variance of a sparse matrix, using E[X^2] - E[X]^2
:param X: scipy.sparse matrix with samples as rows
:return: 1-D numpy array of column variances
'''
Xc = X.copy()
Xc.data **= 2
return np.array(Xc.mean(axis=0) - np.power(X.mean(axis=0), 2))[0]
|
Compute the per-column variance of a sparse matrix, using E[X^2] - E[X]^2
:param X: scipy.sparse matrix with samples as rows
:return: 1-D numpy array of column variances
|
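A quick check against the dense computation, which makes the E[X^2] - E[X]^2 identity concrete:

import numpy as np
from scipy.sparse import csr_matrix

X = csr_matrix(np.array([[1., 0.], [3., 4.]]))
sparse_var(X)                          # array([1., 4.])
np.asarray(X.todense()).var(axis=0)    # array([1., 4.]), computed densely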
def _create_state_data(self, context, resp_args, relay_state):
"""
Returns a dict containing the state needed in the response flow.
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
:param context: The current context
:param resp_args: Response arguments
:param relay_state: Request relay state
:return: A state as a dict
"""
if "name_id_policy" in resp_args and resp_args["name_id_policy"] is not None:
resp_args["name_id_policy"] = resp_args["name_id_policy"].to_string().decode("utf-8")
return {"resp_args": resp_args, "relay_state": relay_state}
|
Returns a dict containing the state needed in the response flow.
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
:param context: The current context
:param resp_args: Response arguments
:param relay_state: Request relay state
:return: A state as a dict
|
def Parse(self, parser_mediator):
"""Parsers the file entry and extracts event objects.
Args:
parser_mediator (ParserMediator): a parser mediator.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_entry = parser_mediator.GetFileEntry()
if not file_entry:
raise errors.UnableToParseFile('Invalid file entry')
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileEntry(parser_mediator, file_entry)
finally:
parser_mediator.PopFromParserChain()
|
Parses the file entry and extracts event objects.
Args:
parser_mediator (ParserMediator): a parser mediator.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def get_variable_groups(self, project, group_name=None, action_filter=None, top=None, continuation_token=None, query_order=None):
"""GetVariableGroups.
[Preview API] Get variable groups.
:param str project: Project ID or project name
:param str group_name: Name of variable group.
:param str action_filter: Action filter for the variable group. It specifies the action which can be performed on the variable groups.
:param int top: Number of variable groups to get.
:param int continuation_token: Gets the variable groups after the continuation token provided.
:param str query_order: Gets the results in the defined order. Default is 'IdDescending'.
:rtype: [VariableGroup]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if group_name is not None:
query_parameters['groupName'] = self._serialize.query('group_name', group_name, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
response = self._send(http_method='GET',
location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[VariableGroup]', self._unwrap_collection(response))
|
GetVariableGroups.
[Preview API] Get variable groups.
:param str project: Project ID or project name
:param str group_name: Name of variable group.
:param str action_filter: Action filter for the variable group. It specifies the action which can be performed on the variable groups.
:param int top: Number of variable groups to get.
:param int continuation_token: Gets the variable groups after the continuation token provided.
:param str query_order: Gets the results in the defined order. Default is 'IdDescending'.
:rtype: [VariableGroup]
|
def to_output(self, value):
"""Convert value to process output format."""
return {self.name: [self.inner.to_output(v)[self.name] for v in value]}
|
Convert value to process output format.
|
def comb(delay, tau=inf):
"""
Feedback comb filter for a given time constant (and delay).
``y[n] = x[n] + alpha * y[n - delay]``
Parameters
----------
delay :
Feedback delay (lag), in number of samples.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples, which
allows finding ``alpha = e ** (-delay / tau)``. Defaults to ``inf``
(infinite), which means alpha = 1.
Returns
-------
A ZFilter instance with the comb filter.
See Also
--------
freq2lag :
Frequency (in rad/sample) to delay (in samples) converter.
"""
alpha = e ** (-delay / tau)
return 1 / (1 - alpha * z ** -delay)
|
Feedback comb filter for a given time constant (and delay).
``y[n] = x[n] + alpha * y[n - delay]``
Parameters
----------
delay :
Feedback delay (lag), in number of samples.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples, which
allows finding ``alpha = e ** (-delay / tau)``. Defaults to ``inf``
(infinite), which means alpha = 1.
Returns
-------
A ZFilter instance with the comb filter.
See Also
--------
freq2lag :
Frequency (in rad/sample) to delay (in samples) converter.
|
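A plain time-domain sketch of the same recurrence, handy for checking values by hand (the real function instead returns a lazy ZFilter):

from math import e

def comb_apply(x, delay, tau=float('inf')):
    alpha = e ** (-delay / tau)    # 1.0 when tau is inf
    y = []
    for n, xn in enumerate(x):
        y.append(xn + (alpha * y[n - delay] if n >= delay else 0.0))
    return y

comb_apply([1.0, 0.0, 0.0, 0.0, 0.0], delay=2, tau=4)   # echoes decay by alpha = e**-0.5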
def update_hslice(self, blob):
"""Update the Hamiltonian slice proposal scale based
on the relative amount of time spent moving vs reflecting."""
nmove, nreflect = blob['nmove'], blob['nreflect']
ncontract = blob.get('ncontract', 0)
fmove = (1. * nmove) / (nmove + nreflect + ncontract + 2)
norm = max(self.fmove, 1. - self.fmove)
self.scale *= math.exp((fmove - self.fmove) / norm)
|
Update the Hamiltonian slice proposal scale based
on the relative amount of time spent moving vs reflecting.
|
def get_graphics(vm_, **kwargs):
'''
Returns the information on vnc for a given vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_graphics <domain>
'''
conn = __get_conn(**kwargs)
graphics = _get_graphics(_get_domain(conn, vm_))
conn.close()
return graphics
|
Returns the information on vnc for a given vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_graphics <domain>
|
def parameterized_expectations(model, verbose=False, initial_dr=None,
pert_order=1, with_complementarities=True,
grid={}, distribution={},
maxit=100, tol=1e-8, inner_maxit=10,
direct=False):
'''
Find global solution for ``model`` via parameterized expectations.
Controls must be expressed as a direct function of equilibrium objects.
Algorithm iterates over the expectations function in the arbitrage equation.
Parameters
----------
model : NumericModel
``dtcscc`` model to be solved
verbose : boolean
if True, display iterations
initial_dr : decision rule
initial guess for the decision rule
pert_order : {1}
if no initial guess is supplied, the perturbation solution at order
``pert_order`` is used as initial guess
grid : grid options
distribution : distribution options
maxit : maximum number of iterations
tol : tolerance criterion for successive approximations
inner_maxit : maximum number of iterations for the inner solver
direct : if True, solve with the direct method; if False, solve indirectly
Returns
-------
decision rule :
approximated solution
'''
t1 = time.time()
g = model.functions['transition']
h = model.functions['expectation']
d = model.functions['direct_response']
f = model.functions['arbitrage_exp'] # f(s, x, z, p, out)
parms = model.calibration['parameters']
if initial_dr is None:
if pert_order == 1:
initial_dr = approximate_controls(model)
if pert_order > 1:
raise Exception("Perturbation order > 1 not supported (yet).")
approx = model.get_grid(**grid)
grid = approx.grid
interp_type = approx.interpolation
dr = create_interpolator(approx, interp_type)
expect = create_interpolator(approx, interp_type)
distrib = model.get_distribution(**distribution)
nodes, weights = distrib.discretize()
N = grid.shape[0]
z = np.zeros((N, len(model.symbols['expectations'])))
x_0 = initial_dr(grid)
x_0 = x_0.real # just in case ...
h_0 = h(grid, x_0, parms)
it = 0
err = 10
err_0 = 10
verbit = True if verbose == 'full' else False
if with_complementarities is True:
lbfun = model.functions['controls_lb']
ubfun = model.functions['controls_ub']
lb = lbfun(grid, parms)
ub = ubfun(grid, parms)
else:
lb = None
ub = None
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
headline = headline.format('N', ' Error', 'Gain', 'Time')
stars = '-'*len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'
while err > tol and it <= maxit:
it += 1
t_start = time.time()
# dr.set_values(x_0)
expect.set_values(h_0)
# evaluate expectation over the future state
z[...] = 0
for i in range(weights.shape[0]):
e = nodes[i, :]
S = g(grid, x_0, e, parms)
z += weights[i]*expect(S)
if direct is True:
# Use control as direct function of arbitrage equation
new_x = d(grid, z, parms)
if with_complementarities is True:
new_x = np.minimum(new_x, ub)
new_x = np.maximum(new_x, lb)
else:
# Find control by solving arbitrage equation
def fun(x): return f(grid, x, z, parms)
sdfun = SerialDifferentiableFunction(fun)
if with_complementarities is True:
[new_x, nit] = ncpsolve(sdfun, lb, ub, x_0, verbose=verbit,
maxit=inner_maxit)
else:
[new_x, nit] = serial_newton(sdfun, x_0, verbose=verbit)
new_h = h(grid, new_x, parms)
# update error
err = (abs(new_h - h_0).max())
# Update guess for decision rule and expectations function
x_0 = new_x
h_0 = new_h
# print error infomation if `verbose`
err_SA = err/err_0
err_0 = err
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print(fmt_str.format(it, err, err_SA, elapsed))
if it == maxit:
import warnings
warnings.warn(UserWarning("Maximum number of iterations reached"))
# compute final time and do final printout if `verbose`
t2 = time.time()
if verbose:
print(stars)
print('Elapsed: {} seconds.'.format(t2 - t1))
print(stars)
# Interpolation for the decision rule
dr.set_values(x_0)
return dr
|
Find global solution for ``model`` via parameterized expectations.
Controls must be expressed as a direct function of equilibrium objects.
Algorithm iterates over the expectations function in the arbitrage equation.
Parameters
----------
model : NumericModel
``dtcscc`` model to be solved
verbose : boolean
if True, display iterations
initial_dr : decision rule
initial guess for the decision rule
pert_order : {1}
if no initial guess is supplied, the perturbation solution at order
``pert_order`` is used as initial guess
grid : grid options
distribution : distribution options
maxit : maximum number of iterations
tol : tolerance criterion for successive approximations
inner_maxit : maximum number of iterations for the inner solver
direct : if True, solve with the direct method; if False, solve indirectly
Returns
-------
decision rule :
approximated solution
|
def in_repo(self, filepath):
"""
This excludes repository directories because they cause some exceptions
occasionally.
"""
filepath = set(filepath.replace('\\', '/').split('/'))
for p in ('.git', '.hg', '.svn', '.cvs', '.bzr'):
if p in filepath:
return True
return False
|
This excludes repository directories because they cause some exceptions
occasionally.
|
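Because the path is split into components, only whole directory names match; a sketch assuming `checker` is an instance of the enclosing class:

checker.in_repo('project/.git/config')    # True: '.git' is a path component
checker.in_repo('project/git/config')     # False: 'git' alone does not match
checker.in_repo(r'project\.hg\hgrc')      # True: backslashes are normalized first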
def def_linear(fun):
"""Flags that a function is linear wrt all args"""
defjvp_argnum(fun, lambda argnum, g, ans, args, kwargs:
fun(*subval(args, argnum, g), **kwargs))
|
Flags that a function is linear wrt all args
|
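The flag encodes the multilinearity identity d/dt f(..., a + t*g, ...) = f(..., g, ...): the JVP in any slot is the function itself with that slot replaced by the tangent. A hand check with a bilinear function:

import numpy as np

A, B, G = np.eye(2), np.ones((2, 2)), 0.1 * np.eye(2)
fd = (np.dot(A + 1e-7 * G, B) - np.dot(A, B)) / 1e-7   # finite-difference JVP
np.allclose(fd, np.dot(G, B))                          # True: dot with the tangent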
def start_semester_view(request):
"""
Initiates a semester's worth of workshift, with the option to copy
workshift types from the previous semester.
"""
page_name = "Start Semester"
year, season = utils.get_year_season()
start_date, end_date = utils.get_semester_start_end(year, season)
semester_form = SemesterForm(
data=request.POST or None,
initial={
"year": year,
"season": season,
"start_date": start_date.strftime(date_formats[0]),
"end_date": end_date.strftime(date_formats[0]),
},
prefix="semester",
)
pool_forms = []
try:
prev_semester = Semester.objects.latest("end_date")
except Semester.DoesNotExist:
pass
else:
pools = WorkshiftPool.objects.filter(
semester=prev_semester,
is_primary=False,
)
for pool in pools:
form = StartPoolForm(
data=request.POST or None,
initial={
"title": pool.title,
"hours": pool.hours,
},
prefix="pool-{}".format(pool.pk),
)
pool_forms.append(form)
if semester_form.is_valid() and all(i.is_valid() for i in pool_forms):
# And save this semester
semester = semester_form.save()
for pool_form in pool_forms:
pool_form.save(semester=semester)
return HttpResponseRedirect(wurl("workshift:manage",
sem_url=semester.sem_url))
return render_to_response("start_semester.html", {
"page_name": page_name,
"semester_form": semester_form,
"pool_forms": pool_forms,
}, context_instance=RequestContext(request))
|
Initiates a semester's worth of workshift, with the option to copy
workshift types from the previous semester.
|
def join(*paths):
r"""
Wrapper around os.path.join that works with Windows drive letters.
>>> join('d:\\foo', '\\bar')
'd:\\bar'
"""
paths_with_drives = map(os.path.splitdrive, paths)
drives, paths = zip(*paths_with_drives)
# the drive we care about is the last one in the list
drive = next(filter(None, reversed(drives)), '')
return os.path.join(drive, os.path.join(*paths))
|
r"""
Wrapper around os.path.join that works with Windows drive letters.
>>> join('d:\\foo', '\\bar')
'd:\\bar'
|
def get_paths_cfg(
sys_file='pythran.cfg',
platform_file='pythran-{}.cfg'.format(sys.platform),
user_file='.pythranrc'
):
"""
>>> os.environ['HOME'] = '/tmp/test'
>>> get_paths_cfg()['user']
'/tmp/test/.pythranrc'
>>> os.environ['HOME'] = '/tmp/test'
>>> os.environ['XDG_CONFIG_HOME'] = '/tmp/test2'
>>> get_paths_cfg()['user']
'/tmp/test2/.pythranrc'
>>> os.environ['HOME'] = '/tmp/test'
>>> os.environ['XDG_CONFIG_HOME'] = '/tmp/test2'
>>> os.environ['PYTHRANRC'] = '/tmp/test3/pythranrc'
>>> get_paths_cfg()['user']
'/tmp/test3/pythranrc'
"""
sys_config_dir = os.path.dirname(__file__)
sys_config_path = os.path.join(sys_config_dir, sys_file)
platform_config_path = os.path.join(sys_config_dir, platform_file)
user_config_path = os.environ.get('PYTHRANRC', None)
if not user_config_path:
user_config_dir = os.environ.get('XDG_CONFIG_HOME', '~')
user_config_path = os.path.expanduser(
os.path.join(user_config_dir, user_file))
return {"sys": sys_config_path, "platform": platform_config_path, "user": user_config_path}
|
>>> os.environ['HOME'] = '/tmp/test'
>>> get_paths_cfg()['user']
'/tmp/test/.pythranrc'
>>> os.environ['HOME'] = '/tmp/test'
>>> os.environ['XDG_CONFIG_HOME'] = '/tmp/test2'
>>> get_paths_cfg()['user']
'/tmp/test2/.pythranrc'
>>> os.environ['HOME'] = '/tmp/test'
>>> os.environ['XDG_CONFIG_HOME'] = '/tmp/test2'
>>> os.environ['PYTHRANRC'] = '/tmp/test3/pythranrc'
>>> get_paths_cfg()['user']
'/tmp/test3/pythranrc'
|
def _get(self, end_point, params=None, **kwargs):
"""Send a HTTP GET request to a Todoist API end-point.
:param end_point: The Todoist API end-point.
:type end_point: str
:param params: The required request parameters.
:type params: dict
:param kwargs: Any optional parameters.
:type kwargs: dict
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
"""
return self._request(requests.get, end_point, params, **kwargs)
|
Send a HTTP GET request to a Todoist API end-point.
:param end_point: The Todoist API end-point.
:type end_point: str
:param params: The required request parameters.
:type params: dict
:param kwargs: Any optional parameters.
:type kwargs: dict
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
|
def transform_annotation(self, ann, duration):
'''Apply the structure agreement transformation.
Parameters
----------
ann : jams.Annotation
The segment annotation
duration : number > 0
The target duration
Returns
-------
data : dict
data['agree'] : np.ndarray, shape=(n, n), dtype=bool
'''
intervals, values = ann.to_interval_values()
intervals, values = adjust_intervals(intervals, values,
t_min=0, t_max=duration)
# Re-index the labels
ids, _ = index_labels(values)
rate = float(self.hop_length) / self.sr
# Sample segment labels on our frame grid
_, labels = intervals_to_samples(intervals, ids, sample_size=rate)
# Make the agreement matrix
return {'agree': np.equal.outer(labels, labels)}
|
Apply the structure agreement transformation.
Parameters
----------
ann : jams.Annotation
The segment annotation
duration : number > 0
The target duration
Returns
-------
data : dict
data['agree'] : np.ndarray, shape=(n, n), dtype=bool
|
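The agreement matrix is just a pairwise label comparison over the frame grid; a small example of the np.equal.outer step:

import numpy as np

labels = np.array([0, 0, 1])
np.equal.outer(labels, labels)
# array([[ True,  True, False],
#        [ True,  True, False],
#        [False, False,  True]])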
def get_training_image_text_data_iters(source_root: str,
source: str, target: str,
validation_source_root: str,
validation_source: str, validation_target: str,
vocab_target: vocab.Vocab,
vocab_target_path: Optional[str],
batch_size: int,
batch_by_words: bool,
batch_num_devices: int,
source_image_size: tuple,
max_seq_len_target: int,
bucketing: bool,
bucket_width: int,
use_feature_loader: bool = False,
preload_features: bool = False) -> Tuple['ParallelSampleIter',
'ParallelSampleIter',
'DataConfig', 'DataInfo']:
"""
Returns data iterators for training and validation data.
:param source_root: Path to source images since the file in source contains relative paths.
:param source: Path to source training data.
:param target: Path to target training data.
:param validation_source_root: Path to validation source images since the file in validation_source contains relative paths.
:param validation_source: Path to source validation data.
:param validation_target: Path to target validation data.
:param vocab_target: Target vocabulary.
:param vocab_target_path: Path to target vocabulary.
:param batch_size: Batch size.
:param batch_by_words: Size batches by words rather than sentences.
:param batch_num_devices: Number of devices batches will be parallelized across.
:param source_image_size: size to resize the image to (for iterator)
:param max_seq_len_target: Maximum target sequence length.
:param bucketing: Whether to use bucketing.
:param bucket_width: Size of buckets.
:param use_feature_loader: If True, features are loaded instead of images.
:param preload_features: If use_feature_loader is True, all features are preloaded into memory.
:return: Tuple of (training data iterator, validation data iterator, data config).
"""
logger.info("===============================")
logger.info("Creating training data iterator")
logger.info("===============================")
# define buckets
buckets = define_empty_source_parallel_buckets(max_seq_len_target, bucket_width) if bucketing else [
(0, max_seq_len_target)]
source_images = [FileListReader(source, source_root)]
target_sentences = SequenceReader(target, vocab_target, add_bos=True)
# 2. pass: Get data statistics only on target (source not considered)
data_statistics = get_data_statistics(source_readers=None,
target_reader=target_sentences,
buckets=buckets,
length_ratio_mean=1.0,
length_ratio_std=1.0,
source_vocabs=None,
target_vocab=vocab_target)
bucket_batch_sizes = define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words,
batch_num_devices,
data_statistics.average_len_target_per_bucket)
data_statistics.log(bucket_batch_sizes)
data_loader = RawListTextDatasetLoader(buckets=buckets,
eos_id=vocab_target[C.EOS_SYMBOL],
pad_id=C.PAD_ID)
training_data = data_loader.load(source_images[0], target_sentences,
data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes)
data_info = DataInfo(sources=source_images,
target=target,
source_vocabs=None,
target_vocab=vocab_target_path,
shared_vocab=False,
num_shards=1)
config_data = DataConfig(data_statistics=data_statistics,
max_seq_len_source=0,
max_seq_len_target=max_seq_len_target,
num_source_factors=len(source_images))
# Add useful stuff to config_data
config_data.source_root = source_root
config_data.validation_source_root = validation_source_root
config_data.use_feature_loader = use_feature_loader
train_iter = ImageTextSampleIter(data=training_data,
buckets=buckets,
batch_size=batch_size,
bucket_batch_sizes=bucket_batch_sizes,
image_size=source_image_size,
use_feature_loader=use_feature_loader,
preload_features=preload_features)
validation_iter = get_validation_image_text_data_iter(data_loader=data_loader,
validation_source_root=validation_source_root,
validation_source=validation_source,
validation_target=validation_target,
buckets=buckets,
bucket_batch_sizes=bucket_batch_sizes,
source_image_size=source_image_size,
vocab_target=vocab_target,
max_seq_len_target=max_seq_len_target,
batch_size=batch_size,
use_feature_loader=use_feature_loader,
preload_features=preload_features)
return train_iter, validation_iter, config_data, data_info
|
Returns data iterators for training and validation data.
:param source_root: Path to source images since the file in source contains relative paths.
:param source: Path to source training data.
:param target: Path to target training data.
:param validation_source_root: Path to validation source images since the file in validation_source contains relative paths.
:param validation_source: Path to source validation data.
:param validation_target: Path to target validation data.
:param vocab_target: Target vocabulary.
:param vocab_target_path: Path to target vocabulary.
:param batch_size: Batch size.
:param batch_by_words: Size batches by words rather than sentences.
:param batch_num_devices: Number of devices batches will be parallelized across.
:param source_image_size: size to resize the image to (for iterator)
:param max_seq_len_target: Maximum target sequence length.
:param bucketing: Whether to use bucketing.
:param bucket_width: Size of buckets.
:param use_feature_loader: If True, features are loaded instead of images.
:param preload_features: If use_feature_loader is True, all features are preloaded into memory.
:return: Tuple of (training data iterator, validation data iterator, data config).
|
def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel
|
Return a list of worksheets
|
def encrypt_files(selected_host, only_link, file_name):
"""
Encrypts a file with gpg and a randomly generated password
"""
if ENCRYPTION_DISABLED:
print('For encryption please install gpg')
exit()
passphrase = '%030x' % random.randrange(16**30)
source_filename = file_name
cmd = 'gpg --batch --symmetric --cipher-algo AES256 --passphrase-fd 0 ' \
'--output - {}'.format(source_filename)
encrypted_output = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE)
encrypted_data = encrypted_output.communicate(passphrase.encode())[0]
return upload_files(encrypted_data, selected_host, only_link, file_name)+'#'+passphrase
|
Encrypts a file with gpg and a randomly generated password
|
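The returned URL carries the passphrase after '#', so a recipient can decrypt with the symmetric counterpart. A hedged sketch mirroring the Popen pattern above (gpg >= 2.1 setups may additionally need --pinentry-mode loopback):

import shlex
from subprocess import PIPE, Popen

def decrypt_file(path, passphrase):
    cmd = 'gpg --batch --passphrase-fd 0 --output - --decrypt {}'.format(path)
    proc = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE)
    return proc.communicate(passphrase.encode())[0]   # decrypted bytes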
def _fetch_seq_ensembl(ac, start_i=None, end_i=None):
"""Fetch the specified sequence slice from Ensembl using the public
REST interface.
An interbase interval may be optionally provided with start_i and
end_i. However, the Ensembl REST interface does not currently
accept intervals, so the entire sequence is returned and sliced
locally.
>> len(_fetch_seq_ensembl('ENSP00000288602'))
766
>> _fetch_seq_ensembl('ENSP00000288602',0,10)
u'MAALSGGGGG'
>> _fetch_seq_ensembl('ENSP00000288602')[0:10]
u'MAALSGGGGG'
>> ac = 'ENSP00000288602'
>> _fetch_seq_ensembl(ac ,0, 10) == _fetch_seq_ensembl(ac)[0:10]
True
"""
url_fmt = "http://rest.ensembl.org/sequence/id/{ac}"
url = url_fmt.format(ac=ac)
r = requests.get(url, headers={"Content-Type": "application/json"})
r.raise_for_status()
seq = r.json()["seq"]
return seq if (start_i is None or end_i is None) else seq[start_i:end_i]
|
Fetch the specified sequence slice from Ensembl using the public
REST interface.
An interbase interval may be optionally provided with start_i and
end_i. However, the Ensembl REST interface does not currently
accept intervals, so the entire sequence is returned and sliced
locally.
>> len(_fetch_seq_ensembl('ENSP00000288602'))
766
>> _fetch_seq_ensembl('ENSP00000288602',0,10)
u'MAALSGGGGG'
>> _fetch_seq_ensembl('ENSP00000288602')[0:10]
u'MAALSGGGGG'
>> ac = 'ENSP00000288602'
>> _fetch_seq_ensembl(ac ,0, 10) == _fetch_seq_ensembl(ac)[0:10]
True
|
def set_locked_variable(self, key, access_key, value):
"""Set an already locked global variable
:param key: the key of the global variable to be set
:param access_key: the access key to the already locked global variable
:param value: the new value of the global variable
"""
return self.set_variable(key, value, per_reference=False, access_key=access_key)
|
Set an already locked global variable
:param key: the key of the global variable to be set
:param access_key: the access key to the already locked global variable
:param value: the new value of the global variable
|
def register_task_with_maintenance_window(WindowId=None, Targets=None, TaskArn=None, ServiceRoleArn=None, TaskType=None, TaskParameters=None, Priority=None, MaxConcurrency=None, MaxErrors=None, LoggingInfo=None, ClientToken=None):
"""
Adds a new task to a Maintenance Window.
See also: AWS API Documentation
:example: response = client.register_task_with_maintenance_window(
WindowId='string',
Targets=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
TaskArn='string',
ServiceRoleArn='string',
TaskType='RUN_COMMAND',
TaskParameters={
'string': {
'Values': [
'string',
]
}
},
Priority=123,
MaxConcurrency='string',
MaxErrors='string',
LoggingInfo={
'S3BucketName': 'string',
'S3KeyPrefix': 'string',
'S3Region': 'string'
},
ClientToken='string'
)
:type WindowId: string
:param WindowId: [REQUIRED]
The id of the Maintenance Window the task should be added to.
:type Targets: list
:param Targets: [REQUIRED]
The targets (either instances or tags). Instances are specified using Key=instanceids,Values=instanceid1,instanceid2. Tags are specified using Key=tag name,Values=tag value.
(dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.
Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:Amazon EC2 tagor InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command .
Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command .
(string) --
:type TaskArn: string
:param TaskArn: [REQUIRED]
The ARN of the task to execute.
:type ServiceRoleArn: string
:param ServiceRoleArn: [REQUIRED]
The role that should be assumed when executing the task.
:type TaskType: string
:param TaskType: [REQUIRED]
The type of task being registered.
:type TaskParameters: dict
:param TaskParameters: The parameters that should be passed to the task when it is executed.
(string) --
(dict) --Defines the values for a task parameter.
Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length.
(string) --
:type Priority: integer
:param Priority: The priority of the task in the Maintenance Window; the lower the number, the higher the priority. Tasks in a Maintenance Window are scheduled in priority order, with tasks that have the same priority scheduled in parallel.
:type MaxConcurrency: string
:param MaxConcurrency: [REQUIRED]
The maximum number of targets this task can be run for in parallel.
:type MaxErrors: string
:param MaxErrors: [REQUIRED]
The maximum number of errors allowed before this task stops being scheduled.
:type LoggingInfo: dict
:param LoggingInfo: A structure containing information about an Amazon S3 bucket to write instance-level logs to.
S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored.
S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder.
S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located.
:type ClientToken: string
:param ClientToken: User-provided idempotency token.
This field is autopopulated if not provided.
:rtype: dict
:return: {
'WindowTaskId': 'string'
}
"""
pass
|
Adds a new task to a Maintenance Window.
See also: AWS API Documentation
:example: response = client.register_task_with_maintenance_window(
WindowId='string',
Targets=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
TaskArn='string',
ServiceRoleArn='string',
TaskType='RUN_COMMAND',
TaskParameters={
'string': {
'Values': [
'string',
]
}
},
Priority=123,
MaxConcurrency='string',
MaxErrors='string',
LoggingInfo={
'S3BucketName': 'string',
'S3KeyPrefix': 'string',
'S3Region': 'string'
},
ClientToken='string'
)
:type WindowId: string
:param WindowId: [REQUIRED]
The id of the Maintenance Window the task should be added to.
:type Targets: list
:param Targets: [REQUIRED]
The targets (either instances or tags). Instances are specified using Key=instanceids,Values=instanceid1,instanceid2. Tags are specified using Key=tag name,Values=tag value.
(dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.
Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command.
Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command.
(string) --
:type TaskArn: string
:param TaskArn: [REQUIRED]
The ARN of the task to execute.
:type ServiceRoleArn: string
:param ServiceRoleArn: [REQUIRED]
The role that should be assumed when executing the task.
:type TaskType: string
:param TaskType: [REQUIRED]
The type of task being registered.
:type TaskParameters: dict
:param TaskParameters: The parameters that should be passed to the task when it is executed.
(string) --
(dict) --Defines the values for a task parameter.
Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length.
(string) --
:type Priority: integer
:param Priority: The priority of the task in the Maintenance Window; the lower the number, the higher the priority. Tasks in a Maintenance Window are scheduled in priority order, with tasks that have the same priority scheduled in parallel.
:type MaxConcurrency: string
:param MaxConcurrency: [REQUIRED]
The maximum number of targets this task can be run for in parallel.
:type MaxErrors: string
:param MaxErrors: [REQUIRED]
The maximum number of errors allowed before this task stops being scheduled.
:type LoggingInfo: dict
:param LoggingInfo: A structure containing information about an Amazon S3 bucket to write instance-level logs to.
S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored.
S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder.
S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located.
:type ClientToken: string
:param ClientToken: User-provided idempotency token.
This field is autopopulated if not provided.
:rtype: dict
:return: {
'WindowTaskId': 'string'
}
|
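Since this is a stub for the boto3 SSM API, a hedged usage sketch may help; the window ID, role ARN, and tag values below are placeholders, not real resources:
import boto3

ssm = boto3.client("ssm")
response = ssm.register_task_with_maintenance_window(
    WindowId="mw-0123456789abcdef0",  # placeholder window ID
    Targets=[{"Key": "tag:ServerRole", "Values": ["WebServer"]}],
    TaskArn="AWS-RunShellScript",     # an SSM document name serves as the task ARN
    ServiceRoleArn="arn:aws:iam::123456789012:role/MaintenanceWindowRole",  # placeholder
    TaskType="RUN_COMMAND",
    MaxConcurrency="2",
    MaxErrors="1",
)
print(response["WindowTaskId"])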
def init_db(sqlalchemy_url):
"""
Initialize database with gsshapy tables
"""
engine = create_engine(sqlalchemy_url)
start = time.time()
metadata.create_all(engine)
return time.time() - start
|
Initialize the database with the gsshapy tables.
Returns the time in seconds taken to create the tables.
|
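A minimal usage sketch, assuming init_db and its sqlalchemy metadata are importable; the connection URL is a placeholder:
# Placeholder URL; point this at a real database before running.
elapsed = init_db("postgresql://user:pass@localhost:5432/gssha_db")
print("Tables created in {:.2f} s".format(elapsed))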
def invoke(self, results):
"""
Handles invocation of the component. The default implementation invokes
it with positional arguments based on order of dependency declaration.
"""
args = [results.get(d) for d in self.deps]
return self.component(*args)
|
Handles invocation of the component. The default implementation invokes
it with positional arguments based on order of dependency declaration.
|
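A self-contained sketch of the dependency-ordered invocation pattern described above; Component here is a hypothetical stand-in for the surrounding class:
class Component:
    def __init__(self, component, deps):
        self.component = component  # the callable to invoke
        self.deps = deps            # dependency names, in declaration order

    def invoke(self, results):
        # Positional arguments follow the order of dependency declaration.
        args = [results.get(d) for d in self.deps]
        return self.component(*args)

results = {"a": 2, "b": 3}
adder = Component(lambda x, y: x + y, deps=["a", "b"])
print(adder.invoke(results))  # 5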
def bootstrap_noise(data, func, n=10000, std=1, symmetric=True):
"""
Bootstrap by adding noise
"""
boot_dist = []
arr = N.zeros(data.shape)
for i in range(n):
if symmetric:
# Noise on all three axes
arr = N.random.randn(*data.shape)*std
else:
# Only z-coordinate noise
arr[:,-1] = N.random.randn(data.shape[0])*std
boot_dist.append(func(data+arr))
return N.array(boot_dist)
|
Bootstrap a statistic by repeatedly adding Gaussian noise to the data.
func is applied to each of n noise-perturbed copies of data; if
symmetric is False, noise is added only to the last (z) coordinate.
|
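A usage sketch for bootstrap_noise above, with synthetic data; it estimates the spread of the mean z-coordinate under added noise:
import numpy as N

data = N.random.rand(50, 3)  # 50 synthetic points in 3D
dist = bootstrap_noise(data, lambda d: d[:, -1].mean(), n=1000, std=0.1)
print(dist.mean(), dist.std())  # bootstrap estimate and its spread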
def _get_total_services_problems_unhandled(self):
"""Get the number of services that are a problem and that are not acknowledged
:return: number of problem services which are not acknowledged
:rtype: int
"""
return sum(1 for s in self.services if s.is_problem and not s.problem_has_been_acknowledged)
|
Get the number of services that are a problem and that are not acknowledged
:return: number of problem services which are not acknowledged
:rtype: int
|
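A minimal sketch of the counting pattern above, using a hypothetical Service stand-in that carries the two flags the method reads:
from collections import namedtuple

Service = namedtuple("Service", "is_problem problem_has_been_acknowledged")
services = [Service(True, False), Service(True, True), Service(False, False)]
unhandled = sum(1 for s in services
                if s.is_problem and not s.problem_has_been_acknowledged)
print(unhandled)  # 1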